rippled
PeerImp.cpp
1 //------------------------------------------------------------------------------
2 /*
3  This file is part of rippled: https://github.com/ripple/rippled
4  Copyright (c) 2012, 2013 Ripple Labs Inc.
5 
6  Permission to use, copy, modify, and/or distribute this software for any
7  purpose with or without fee is hereby granted, provided that the above
8  copyright notice and this permission notice appear in all copies.
9 
10  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 //==============================================================================
19 
20 #include <ripple/app/consensus/RCLValidations.h>
21 #include <ripple/app/ledger/InboundLedgers.h>
22 #include <ripple/app/ledger/InboundTransactions.h>
23 #include <ripple/app/ledger/LedgerMaster.h>
24 #include <ripple/app/ledger/TransactionMaster.h>
25 #include <ripple/app/misc/HashRouter.h>
26 #include <ripple/app/misc/LoadFeeTrack.h>
27 #include <ripple/app/misc/NetworkOPs.h>
28 #include <ripple/app/misc/Transaction.h>
29 #include <ripple/app/misc/ValidatorList.h>
30 #include <ripple/app/tx/apply.h>
31 #include <ripple/basics/UptimeClock.h>
32 #include <ripple/basics/base64.h>
33 #include <ripple/basics/random.h>
34 #include <ripple/basics/safe_cast.h>
35 #include <ripple/beast/core/LexicalCast.h>
36 #include <ripple/beast/core/SemanticVersion.h>
37 #include <ripple/nodestore/DatabaseShard.h>
38 #include <ripple/overlay/Cluster.h>
39 #include <ripple/overlay/impl/PeerImp.h>
40 #include <ripple/overlay/impl/Tuning.h>
41 #include <ripple/overlay/predicates.h>
42 #include <ripple/protocol/digest.h>
43 
44 #include <boost/algorithm/string.hpp>
45 #include <boost/algorithm/string/predicate.hpp>
46 #include <boost/beast/core/ostream.hpp>
47 
48 #include <algorithm>
49 #include <memory>
50 #include <mutex>
51 #include <numeric>
52 #include <sstream>
53 
54 using namespace std::chrono_literals;
55 
56 namespace ripple {
57 
58 namespace {
59 /** The threshold above which we treat a peer connection as high latency */
60 std::chrono::milliseconds constexpr peerHighLatency{300};
61 
62 /** How often we PING the peer to check for latency and a sendq probe */
63 std::chrono::seconds constexpr peerTimerInterval{60};
64 } // namespace
65 
66 PeerImp::PeerImp(
67  Application& app,
68  id_t id,
69  std::shared_ptr<PeerFinder::Slot> const& slot,
70  http_request_type&& request,
71  PublicKey const& publicKey,
72  ProtocolVersion protocol,
73  Resource::Consumer consumer,
74  std::unique_ptr<stream_type>&& stream_ptr,
75  OverlayImpl& overlay)
76  : Child(overlay)
77  , app_(app)
78  , id_(id)
79  , sink_(app_.journal("Peer"), makePrefix(id))
80  , p_sink_(app_.journal("Protocol"), makePrefix(id))
81  , journal_(sink_)
82  , p_journal_(p_sink_)
83  , stream_ptr_(std::move(stream_ptr))
84  , socket_(stream_ptr_->next_layer().socket())
85  , stream_(*stream_ptr_)
86  , strand_(socket_.get_executor())
87  , timer_(waitable_timer{socket_.get_executor()})
88  , remote_address_(slot->remote_endpoint())
89  , overlay_(overlay)
90  , inbound_(true)
91  , protocol_(protocol)
92  , tracking_(Tracking::unknown)
93  , trackingTime_(clock_type::now())
94  , publicKey_(publicKey)
95  , lastPingTime_(clock_type::now())
96  , creationTime_(clock_type::now())
97  , squelch_(app_.journal("Squelch"))
98  , usage_(consumer)
100  , slot_(slot)
101  , request_(std::move(request))
102  , headers_(request_)
103  , compressionEnabled_(
104  peerFeatureEnabled(
105  headers_,
106  FEATURE_COMPR,
107  "lz4",
108  app_.config().COMPRESSION)
109  ? Compressed::On
110  : Compressed::Off)
111  , txReduceRelayEnabled_(peerFeatureEnabled(
112  headers_,
113  FEATURE_TXRR,
114  app_.config().TX_REDUCE_RELAY_ENABLE))
115  , vpReduceRelayEnabled_(peerFeatureEnabled(
116  headers_,
117  FEATURE_VPRR,
118  app_.config().VP_REDUCE_RELAY_ENABLE))
119  , ledgerReplayEnabled_(peerFeatureEnabled(
120  headers_,
121  FEATURE_LEDGER_REPLAY,
122  app_.config().LEDGER_REPLAY))
123  , ledgerReplayMsgHandler_(app, app.getLedgerReplayer())
124 {
125  JLOG(journal_.info()) << "compression enabled "
126  << (compressionEnabled_ == Compressed::On)
127  << " vp reduce-relay enabled "
128  << vpReduceRelayEnabled_
129  << " tx reduce-relay enabled "
130  << txReduceRelayEnabled_ << " on " << remote_address_
131  << " " << id_;
132 }
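// The optional features initialized above (LZ4 compression, transaction and
// validation reduce-relay, ledger replay) are negotiated during the HTTP
// handshake: peerFeatureEnabled() turns a feature on only when the peer
// advertised it in its request headers and the matching local config switch
// is also enabled.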
133 
134 PeerImp::~PeerImp()
135 {
136  const bool inCluster{cluster()};
137 
138  overlay_.deletePeer(id_);
139  overlay_.onPeerDeactivate(id_);
140  overlay_.peerFinder().on_closed(slot_);
141  overlay_.remove(slot_);
142 
143  if (inCluster)
144  {
145  JLOG(journal_.warn()) << name() << " left cluster";
146  }
147 }
148 
149 // Helper function to check for valid uint256 values in protobuf buffers
150 static bool
151 stringIsUint256Sized(std::string const& pBuffStr)
152 {
153  return pBuffStr.size() == uint256::size();
154 }
155 
156 void
157 PeerImp::run()
158 {
159  if (!strand_.running_in_this_thread())
160  return post(strand_, std::bind(&PeerImp::run, shared_from_this()));
161 
162  auto parseLedgerHash =
163  [](std::string const& value) -> std::optional<uint256> {
164  if (uint256 ret; ret.parseHex(value))
165  return ret;
166 
167  if (auto const s = base64_decode(value); s.size() == uint256::size())
168  return uint256{s};
169 
170  return std::nullopt;
171  };
172 
173  std::optional<uint256> closed;
174  std::optional<uint256> previous;
175 
176  if (auto const iter = headers_.find("Closed-Ledger");
177  iter != headers_.end())
178  {
179  closed = parseLedgerHash(iter->value().to_string());
180 
181  if (!closed)
182  fail("Malformed handshake data (1)");
183  }
184 
185  if (auto const iter = headers_.find("Previous-Ledger");
186  iter != headers_.end())
187  {
188  previous = parseLedgerHash(iter->value().to_string());
189 
190  if (!previous)
191  fail("Malformed handshake data (2)");
192  }
193 
194  if (previous && !closed)
195  fail("Malformed handshake data (3)");
196 
197  {
198  std::lock_guard<std::mutex> sl(recentLock_);
199  if (closed)
200  closedLedgerHash_ = *closed;
201  if (previous)
202  previousLedgerHash_ = *previous;
203  }
204 
205  if (inbound_)
206  doAccept();
207  else
208  doProtocolStart();
209 
210  // Anything else that needs to be done with the connection should be
211  // done in doProtocolStart
212 }
213 
214 void
215 PeerImp::stop()
216 {
217  if (!strand_.running_in_this_thread())
218  return post(strand_, std::bind(&PeerImp::stop, shared_from_this()));
219  if (socket_.is_open())
220  {
221  // The rationale for using different severity levels is that
222  // outbound connections are under our control and may be logged
223  // at a higher level, but inbound connections are more numerous and
224  // uncontrolled so to prevent log flooding the severity is reduced.
225  //
226  if (inbound_)
227  {
228  JLOG(journal_.debug()) << "Stop";
229  }
230  else
231  {
232  JLOG(journal_.info()) << "Stop";
233  }
234  }
235  close();
236 }
237 
238 //------------------------------------------------------------------------------
239 
240 void
241 PeerImp::send(std::shared_ptr<Message> const& m)
242 {
243  if (!strand_.running_in_this_thread())
244  return post(strand_, std::bind(&PeerImp::send, shared_from_this(), m));
245  if (gracefulClose_)
246  return;
247  if (detaching_)
248  return;
249 
250  auto validator = m->getValidatorKey();
251  if (validator && !squelch_.expireSquelch(*validator))
252  return;
253 
254  overlay_.reportTraffic(
255  safe_cast<TrafficCount::category>(m->getCategory()),
256  false,
257  static_cast<int>(m->getBuffer(compressionEnabled_).size()));
258 
259  auto sendq_size = send_queue_.size();
260 
261  if (sendq_size < Tuning::targetSendQueue)
262  {
263  // To detect a peer that does not read from their
264  // side of the connection, we expect a peer to have
265  // a small sendq periodically
266  large_sendq_ = 0;
267  }
268  else if (auto sink = journal_.debug();
269  sink && (sendq_size % Tuning::sendQueueLogFreq) == 0)
270  {
271  std::string const n = name();
272  sink << (n.empty() ? remote_address_.to_string() : n)
273  << " sendq: " << sendq_size;
274  }
275 
276  send_queue_.push(m);
277 
278  if (sendq_size != 0)
279  return;
280 
281  boost::asio::async_write(
282  stream_,
283  boost::asio::buffer(
284  send_queue_.front()->getBuffer(compressionEnabled_)),
285  bind_executor(
286  strand_,
287  std::bind(
288  &PeerImp::onWriteMessage,
289  shared_from_this(),
290  std::placeholders::_1,
291  std::placeholders::_2)));
292 }
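// Only the caller that pushes onto an empty queue starts the async_write
// chain; onWriteMessage() pops the completed message and issues the next
// write, so at most one write is outstanding per peer at any time.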
293 
294 void
295 PeerImp::sendTxQueue()
296 {
297  if (!strand_.running_in_this_thread())
298  return post(
299  strand_, std::bind(&PeerImp::sendTxQueue, shared_from_this()));
300 
301  if (!txQueue_.empty())
302  {
303  protocol::TMHaveTransactions ht;
304  std::for_each(txQueue_.begin(), txQueue_.end(), [&](auto const& hash) {
305  ht.add_hashes(hash.data(), hash.size());
306  });
307  JLOG(p_journal_.trace()) << "sendTxQueue " << txQueue_.size();
308  txQueue_.clear();
309  send(std::make_shared<Message>(ht, protocol::mtHAVE_TRANSACTIONS));
310  }
311 }
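// With transaction reduce-relay enabled, transaction hashes queued by
// addTxQueue() are batched into a single TMHaveTransactions message here
// instead of relaying each transaction individually, which is the bandwidth
// saving the feature is designed for.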
312 
313 void
314 PeerImp::addTxQueue(uint256 const& hash)
315 {
316  if (!strand_.running_in_this_thread())
317  return post(
318  strand_, std::bind(&PeerImp::addTxQueue, shared_from_this(), hash));
319 
320  if (txQueue_.size() >= reduce_relay::MAX_TX_QUEUE_SIZE)
321  {
322  JLOG(p_journal_.warn()) << "addTxQueue exceeds the cap";
323  sendTxQueue();
324  }
325 
326  txQueue_.insert(hash);
327  JLOG(p_journal_.trace()) << "addTxQueue " << txQueue_.size();
328 }
329 
330 void
331 PeerImp::removeTxQueue(uint256 const& hash)
332 {
333  if (!strand_.running_in_this_thread())
334  return post(
335  strand_,
336  std::bind(&PeerImp::removeTxQueue, shared_from_this(), hash));
337 
338  auto removed = txQueue_.erase(hash);
339  JLOG(p_journal_.trace()) << "removeTxQueue " << removed;
340 }
341 
342 void
343 PeerImp::charge(Resource::Charge const& fee)
344 {
345  if ((usage_.charge(fee) == Resource::drop) && usage_.disconnect() &&
346  strand_.running_in_this_thread())
347  {
348  // Sever the connection
350  fail("charge: Resources");
351  }
352 }
353 
354 //------------------------------------------------------------------------------
355 
356 bool
357 PeerImp::crawl() const
358 {
359  auto const iter = headers_.find("Crawl");
360  if (iter == headers_.end())
361  return false;
362  return boost::iequals(iter->value(), "public");
363 }
364 
365 bool
366 PeerImp::cluster() const
367 {
368  return static_cast<bool>(app_.cluster().member(publicKey_));
369 }
370 
371 std::string
372 PeerImp::getVersion() const
373 {
374  if (inbound_)
375  return headers_["User-Agent"].to_string();
376  return headers_["Server"].to_string();
377 }
378 
379 Json::Value
380 PeerImp::json()
381 {
382  Json::Value ret(Json::objectValue);
383 
384  ret[jss::public_key] = toBase58(TokenType::NodePublic, publicKey_);
385  ret[jss::address] = remote_address_.to_string();
386 
387  if (inbound_)
388  ret[jss::inbound] = true;
389 
390  if (cluster())
391  {
392  ret[jss::cluster] = true;
393 
394  if (auto const n = name(); !n.empty())
395  // Could move here if Json::Value supported moving from a string
396  ret[jss::name] = n;
397  }
398 
399  if (auto const d = domain(); !d.empty())
400  ret[jss::server_domain] = domain();
401 
402  if (auto const nid = headers_["Network-ID"].to_string(); !nid.empty())
403  ret[jss::network_id] = nid;
404 
405  ret[jss::load] = usage_.balance();
406 
407  if (auto const version = getVersion(); !version.empty())
408  ret[jss::version] = version;
409 
410  ret[jss::protocol] = to_string(protocol_);
411 
412  {
413  std::lock_guard sl(recentLock_);
414  if (latency_)
415  ret[jss::latency] = static_cast<Json::UInt>(latency_->count());
416  }
417 
418  ret[jss::uptime] = static_cast<Json::UInt>(
419  std::chrono::duration_cast<std::chrono::seconds>(uptime()).count());
420 
421  std::uint32_t minSeq, maxSeq;
422  ledgerRange(minSeq, maxSeq);
423 
424  if ((minSeq != 0) || (maxSeq != 0))
425  ret[jss::complete_ledgers] =
426  std::to_string(minSeq) + " - " + std::to_string(maxSeq);
427 
428  switch (tracking_.load())
429  {
430  case Tracking::diverged:
431  ret[jss::track] = "diverged";
432  break;
433 
434  case Tracking::unknown:
435  ret[jss::track] = "unknown";
436  break;
437 
438  case Tracking::converged:
439  // Nothing to do here
440  break;
441  }
442 
443  uint256 closedLedgerHash;
444  protocol::TMStatusChange last_status;
445  {
446  std::lock_guard sl(recentLock_);
447  closedLedgerHash = closedLedgerHash_;
448  last_status = last_status_;
449  }
450 
451  if (closedLedgerHash != beast::zero)
452  ret[jss::ledger] = to_string(closedLedgerHash);
453 
454  if (last_status.has_newstatus())
455  {
456  switch (last_status.newstatus())
457  {
458  case protocol::nsCONNECTING:
459  ret[jss::status] = "connecting";
460  break;
461 
462  case protocol::nsCONNECTED:
463  ret[jss::status] = "connected";
464  break;
465 
466  case protocol::nsMONITORING:
467  ret[jss::status] = "monitoring";
468  break;
469 
470  case protocol::nsVALIDATING:
471  ret[jss::status] = "validating";
472  break;
473 
474  case protocol::nsSHUTTING:
475  ret[jss::status] = "shutting";
476  break;
477 
478  default:
479  JLOG(p_journal_.warn())
480  << "Unknown status: " << last_status.newstatus();
481  }
482  }
483 
484  ret[jss::metrics] = Json::Value(Json::objectValue);
485  ret[jss::metrics][jss::total_bytes_recv] =
486  std::to_string(metrics_.recv.total_bytes());
487  ret[jss::metrics][jss::total_bytes_sent] =
488  std::to_string(metrics_.sent.total_bytes());
489  ret[jss::metrics][jss::avg_bps_recv] =
490  std::to_string(metrics_.recv.average_bytes());
491  ret[jss::metrics][jss::avg_bps_sent] =
492  std::to_string(metrics_.sent.average_bytes());
493 
494  return ret;
495 }
496 
497 bool
498 PeerImp::supportsFeature(ProtocolFeature f) const
499 {
500  switch (f)
501  {
502  case ProtocolFeature::ValidatorListPropagation:
503  return protocol_ >= make_protocol(2, 1);
504  case ProtocolFeature::ValidatorList2Propagation:
505  return protocol_ >= make_protocol(2, 2);
506  case ProtocolFeature::LedgerReplay:
507  return ledgerReplayEnabled_;
508  }
509  return false;
510 }
511 
512 //------------------------------------------------------------------------------
513 
514 bool
515 PeerImp::hasLedger(uint256 const& hash, std::uint32_t seq) const
516 {
517  {
518  std::lock_guard sl(recentLock_);
519  if ((seq != 0) && (seq >= minLedger_) && (seq <= maxLedger_) &&
520  (tracking_.load() == Tracking::converged))
521  return true;
522  if (std::find(recentLedgers_.begin(), recentLedgers_.end(), hash) !=
523  recentLedgers_.end())
524  return true;
525  }
526 
527  if (seq >= app_.getNodeStore().earliestLedgerSeq())
528  {
530  auto const it{shardInfos_.find(publicKey_)};
531  if (it != shardInfos_.end())
532  {
533  auto const shardIndex{app_.getNodeStore().seqToShardIndex(seq)};
534  return boost::icl::contains(it->second.finalized(), shardIndex);
535  }
536  }
537  return false;
538 }
539 
540 void
541 PeerImp::ledgerRange(std::uint32_t& minSeq, std::uint32_t& maxSeq) const
542 {
543  std::lock_guard sl(recentLock_);
544 
545  minSeq = minLedger_;
546  maxSeq = maxLedger_;
547 }
548 
549 bool
550 PeerImp::hasTxSet(uint256 const& hash) const
551 {
552  std::lock_guard sl(recentLock_);
553  return std::find(recentTxSets_.begin(), recentTxSets_.end(), hash) !=
554  recentTxSets_.end();
555 }
556 
557 void
558 PeerImp::cycleStatus()
559 {
560  // Operations on closedLedgerHash_ and previousLedgerHash_ must be
561  // guarded by recentLock_.
562  std::lock_guard sl(recentLock_);
563  previousLedgerHash_ = closedLedgerHash_;
564  closedLedgerHash_.zero();
565 }
566 
567 bool
568 PeerImp::hasRange(std::uint32_t uMin, std::uint32_t uMax)
569 {
570  std::lock_guard sl(recentLock_);
571  return (tracking_ != Tracking::diverged) && (uMin >= minLedger_) &&
572  (uMax <= maxLedger_);
573 }
574 
575 //------------------------------------------------------------------------------
576 
577 void
578 PeerImp::close()
579 {
580  assert(strand_.running_in_this_thread());
581  if (socket_.is_open())
582  {
583  detaching_ = true; // DEPRECATED
584  error_code ec;
585  timer_.cancel(ec);
586  socket_.close(ec);
588  if (inbound_)
589  {
590  JLOG(journal_.debug()) << "Closed";
591  }
592  else
593  {
594  JLOG(journal_.info()) << "Closed";
595  }
596  }
597 }
598 
599 void
600 PeerImp::fail(std::string const& reason)
601 {
602  if (!strand_.running_in_this_thread())
603  return post(
604  strand_,
605  std::bind(
606  (void (Peer::*)(std::string const&)) & PeerImp::fail,
607  shared_from_this(),
608  reason));
609  if (journal_.active(beast::severities::kWarning) && socket_.is_open())
610  {
611  std::string const n = name();
612  JLOG(journal_.warn()) << (n.empty() ? remote_address_.to_string() : n)
613  << " failed: " << reason;
614  }
615  close();
616 }
617 
618 void
619 PeerImp::fail(std::string const& name, error_code ec)
620 {
621  assert(strand_.running_in_this_thread());
622  if (socket_.is_open())
623  {
624  JLOG(journal_.warn())
625  << name << " from " << toBase58(TokenType::NodePublic, publicKey_)
626  << " at " << remote_address_.to_string() << ": " << ec.message();
627  }
628  close();
629 }
630 
633 {
635  return shardInfos_;
636 }
637 
638 void
639 PeerImp::gracefulClose()
640 {
641  assert(strand_.running_in_this_thread());
642  assert(socket_.is_open());
643  assert(!gracefulClose_);
644  gracefulClose_ = true;
645 #if 0
646  // Flush messages
647  while(send_queue_.size() > 1)
648  send_queue_.pop_back();
649 #endif
650  if (send_queue_.size() > 0)
651  return;
652  setTimer();
653  stream_.async_shutdown(bind_executor(
654  strand_,
655  std::bind(
656  &PeerImp::onShutdown, shared_from_this(), std::placeholders::_1)));
657 }
658 
659 void
660 PeerImp::setTimer()
661 {
662  error_code ec;
663  timer_.expires_from_now(peerTimerInterval, ec);
664 
665  if (ec)
666  {
667  JLOG(journal_.error()) << "setTimer: " << ec.message();
668  return;
669  }
670  timer_.async_wait(bind_executor(
671  strand_,
672  std::bind(
673  &PeerImp::onTimer, shared_from_this(), std::placeholders::_1)));
674 }
675 
676 // convenience for ignoring the error code
677 void
678 PeerImp::cancelTimer()
679 {
680  error_code ec;
681  timer_.cancel(ec);
682 }
683 
684 //------------------------------------------------------------------------------
685 
686 std::string
687 PeerImp::makePrefix(id_t id)
688 {
689  std::stringstream ss;
690  ss << "[" << std::setfill('0') << std::setw(3) << id << "] ";
691  return ss.str();
692 }
693 
694 void
695 PeerImp::onTimer(error_code const& ec)
696 {
697  if (!socket_.is_open())
698  return;
699 
700  if (ec == boost::asio::error::operation_aborted)
701  return;
702 
703  if (ec)
704  {
705  // This should never happen
706  JLOG(journal_.error()) << "onTimer: " << ec.message();
707  return close();
708  }
709 
710  if (large_sendq_++ >= Tuning::sendqIntervals)
711  {
712  fail("Large send queue");
713  return;
714  }
715 
716  if (auto const t = tracking_.load(); !inbound_ && t != Tracking::converged)
717  {
718  clock_type::duration duration;
719 
720  {
721  std::lock_guard sl(recentLock_);
722  duration = clock_type::now() - trackingTime_;
723  }
724 
725  if ((t == Tracking::diverged &&
726  (duration > app_.config().MAX_DIVERGED_TIME)) ||
727  (t == Tracking::unknown &&
728  (duration > app_.config().MAX_UNKNOWN_TIME)))
729  {
731  fail("Not useful");
732  return;
733  }
734  }
735 
736  // Already waiting for PONG
737  if (lastPingSeq_)
738  {
739  fail("Ping Timeout");
740  return;
741  }
742 
743  lastPingTime_ = clock_type::now();
744  lastPingSeq_ = rand_int<std::uint32_t>();
745 
746  protocol::TMPing message;
747  message.set_type(protocol::TMPing::ptPING);
748  message.set_seq(*lastPingSeq_);
749 
750  send(std::make_shared<Message>(message, protocol::mtPING));
751 
752  setTimer();
753 }
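// Summary of the periodic checks above: a persistently large send queue, an
// outbound peer stuck out of sync past MAX_DIVERGED_TIME/MAX_UNKNOWN_TIME, or
// an unanswered PING all cause the peer to be dropped; otherwise a new PING
// with a random cookie is sent and the timer is re-armed.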
754 
755 void
756 PeerImp::onShutdown(error_code ec)
757 {
758  cancelTimer();
759  // If we don't get eof then something went wrong
760  if (!ec)
761  {
762  JLOG(journal_.error()) << "onShutdown: expected error condition";
763  return close();
764  }
765  if (ec != boost::asio::error::eof)
766  return fail("onShutdown", ec);
767  close();
768 }
769 
770 //------------------------------------------------------------------------------
771 void
772 PeerImp::doAccept()
773 {
774  assert(read_buffer_.size() == 0);
775 
776  JLOG(journal_.debug()) << "doAccept: " << remote_address_;
777 
778  auto const sharedValue = makeSharedValue(*stream_ptr_, journal_);
779 
780  // This shouldn't fail since we already computed
781  // the shared value successfully in OverlayImpl
782  if (!sharedValue)
783  return fail("makeSharedValue: Unexpected failure");
784 
785  JLOG(journal_.info()) << "Protocol: " << to_string(protocol_);
786  JLOG(journal_.info()) << "Public Key: "
788 
789  if (auto member = app_.cluster().member(publicKey_))
790  {
791  {
792  std::unique_lock write_lock{nameMutex_};
793  name_ = *member;
794  }
795  JLOG(journal_.info()) << "Cluster name: " << *member;
796  }
797 
799 
800  // XXX Set timer: connection is in grace period to be useful.
801  // XXX Set timer: connection idle (idle may vary depending on connection
802  // type.)
803 
804  auto write_buffer = std::make_shared<boost::beast::multi_buffer>();
805 
806  boost::beast::ostream(*write_buffer) << makeResponse(
807  !overlay_.peerFinder().config().peerPrivate,
808  request_,
809  overlay_.setup().public_ip,
810  remote_address_.address(),
811  *sharedValue,
812  overlay_.setup().networkID,
813  protocol_,
814  app_);
815 
816  // Write the whole buffer and only start protocol when that's done.
817  boost::asio::async_write(
818  stream_,
819  write_buffer->data(),
820  boost::asio::transfer_all(),
821  bind_executor(
822  strand_,
823  [this, write_buffer, self = shared_from_this()](
824  error_code ec, std::size_t bytes_transferred) {
825  if (!socket_.is_open())
826  return;
827  if (ec == boost::asio::error::operation_aborted)
828  return;
829  if (ec)
830  return fail("onWriteResponse", ec);
831  if (write_buffer->size() == bytes_transferred)
832  return doProtocolStart();
833  return fail("Failed to write header");
834  }));
835 }
836 
837 std::string
838 PeerImp::name() const
839 {
840  std::shared_lock read_lock{nameMutex_};
841  return name_;
842 }
843 
844 std::string
845 PeerImp::domain() const
846 {
847  return headers_["Server-Domain"].to_string();
848 }
849 
850 //------------------------------------------------------------------------------
851 
852 // Protocol logic
853 
854 void
855 PeerImp::doProtocolStart()
856 {
857  onReadMessage(error_code(), 0);
858 
859  // Send all the validator lists that have been loaded
860  if (inbound_ && supportsFeature(ProtocolFeature::ValidatorList2Propagation))
861  {
862  app_.validators().for_each_available(
863  [&](std::string const& manifest,
864  std::uint32_t version,
865  std::map<std::size_t, ValidatorBlobInfo> const& blobInfos,
866  PublicKey const& pubKey,
867  std::size_t maxSequence,
868  uint256 const& hash) {
869  ValidatorList::sendValidatorList(
870  *this,
871  0,
872  pubKey,
873  maxSequence,
874  version,
875  manifest,
876  blobInfos,
877  app_.getHashRouter(),
878  p_journal_);
879 
880  // Don't send it next time.
881  app_.getHashRouter().addSuppressionPeer(hash, id_);
882  });
883  }
884 
885  if (auto m = overlay_.getManifestsMessage())
886  send(m);
887 
888  // Request shard info from peer
889  protocol::TMGetPeerShardInfoV2 tmGPS;
890  tmGPS.set_relays(0);
891  send(std::make_shared<Message>(tmGPS, protocol::mtGET_PEER_SHARD_INFO_V2));
892 
893  setTimer();
894 }
895 
896 // Called repeatedly with protocol message data
897 void
898 PeerImp::onReadMessage(error_code ec, std::size_t bytes_transferred)
899 {
900  if (!socket_.is_open())
901  return;
902  if (ec == boost::asio::error::operation_aborted)
903  return;
904  if (ec == boost::asio::error::eof)
905  {
906  JLOG(journal_.info()) << "EOF";
907  return gracefulClose();
908  }
909  if (ec)
910  return fail("onReadMessage", ec);
911  if (auto stream = journal_.trace())
912  {
913  if (bytes_transferred > 0)
914  stream << "onReadMessage: " << bytes_transferred << " bytes";
915  else
916  stream << "onReadMessage";
917  }
918 
919  metrics_.recv.add_message(bytes_transferred);
920 
921  read_buffer_.commit(bytes_transferred);
922 
923  auto hint = Tuning::readBufferBytes;
924 
925  while (read_buffer_.size() > 0)
926  {
927  std::size_t bytes_consumed;
928  std::tie(bytes_consumed, ec) =
929  invokeProtocolMessage(read_buffer_.data(), *this, hint);
930  if (ec)
931  return fail("onReadMessage", ec);
932  if (!socket_.is_open())
933  return;
934  if (gracefulClose_)
935  return;
936  if (bytes_consumed == 0)
937  break;
938  read_buffer_.consume(bytes_consumed);
939  }
940 
941  // Timeout on writes only
942  stream_.async_read_some(
943  read_buffer_.prepare(std::max(Tuning::readBufferBytes, hint)),
944  bind_executor(
945  strand_,
946  std::bind(
947  &PeerImp::onReadMessage,
948  shared_from_this(),
949  std::placeholders::_1,
950  std::placeholders::_2)));
951 }
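// Read loop: each completion commits the received bytes, then
// invokeProtocolMessage() is called repeatedly until it can no longer
// consume a complete framed message, after which another async_read_some
// is armed.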
952 
953 void
954 PeerImp::onWriteMessage(error_code ec, std::size_t bytes_transferred)
955 {
956  if (!socket_.is_open())
957  return;
958  if (ec == boost::asio::error::operation_aborted)
959  return;
960  if (ec)
961  return fail("onWriteMessage", ec);
962  if (auto stream = journal_.trace())
963  {
964  if (bytes_transferred > 0)
965  stream << "onWriteMessage: " << bytes_transferred << " bytes";
966  else
967  stream << "onWriteMessage";
968  }
969 
970  metrics_.sent.add_message(bytes_transferred);
971 
972  assert(!send_queue_.empty());
973  send_queue_.pop();
974  if (!send_queue_.empty())
975  {
976  // Timeout on writes only
977  return boost::asio::async_write(
978  stream_,
979  boost::asio::buffer(
980  send_queue_.front()->getBuffer(compressionEnabled_)),
981  bind_executor(
982  strand_,
983  std::bind(
984  &PeerImp::onWriteMessage,
985  shared_from_this(),
986  std::placeholders::_1,
987  std::placeholders::_2)));
988  }
989 
990  if (gracefulClose_)
991  {
992  return stream_.async_shutdown(bind_executor(
993  strand_,
994  std::bind(
995  &PeerImp::onShutdown,
996  shared_from_this(),
997  std::placeholders::_1)));
998  }
999 }
1000 
1001 //------------------------------------------------------------------------------
1002 //
1003 // ProtocolHandler
1004 //
1005 //------------------------------------------------------------------------------
1006 
1007 void
1008 PeerImp::onMessageUnknown(std::uint16_t type)
1009 {
1010  // TODO
1011 }
1012 
1013 void
1014 PeerImp::onMessageBegin(
1015  std::uint16_t type,
1016  std::shared_ptr<::google::protobuf::Message> const& m,
1017  std::size_t size,
1018  std::size_t uncompressed_size,
1019  bool isCompressed)
1020 {
1021  load_event_ =
1022  app_.getJobQueue().makeLoadEvent(jtPEER, protocolMessageName(type));
1023  fee_ = Resource::feeLightPeer;
1024  auto const category = TrafficCount::categorize(*m, type, true);
1025  overlay_.reportTraffic(category, true, static_cast<int>(size));
1026  using namespace protocol;
1027  if ((type == MessageType::mtTRANSACTION ||
1028  type == MessageType::mtHAVE_TRANSACTIONS ||
1029  type == MessageType::mtTRANSACTIONS ||
1030  // GET_OBJECTS
1031  category == TrafficCount::category::get_transactions ||
1032  // GET_LEDGER
1033  category == TrafficCount::category::ld_tsc_get ||
1034  category == TrafficCount::category::ld_tsc_share ||
1035  // LEDGER_DATA
1036  category == TrafficCount::category::gl_tsc_share ||
1037  category == TrafficCount::category::gl_tsc_get) &&
1038  app_.config().TX_REDUCE_RELAY_METRICS_ENABLE)
1039  {
1040  overlay_.addTxMetrics(
1041  static_cast<MessageType>(type), static_cast<std::uint64_t>(size));
1042  }
1043  JLOG(journal_.trace()) << "onMessageBegin: " << type << " " << size << " "
1044  << uncompressed_size << " " << isCompressed;
1045 }
1046 
1047 void
1048 PeerImp::onMessageEnd(
1049  std::uint16_t,
1050  std::shared_ptr<::google::protobuf::Message> const&)
1051 {
1052  load_event_.reset();
1053  charge(fee_);
1054 }
1055 
1056 void
1057 PeerImp::onMessage(std::shared_ptr<protocol::TMManifests> const& m)
1058 {
1059  auto const s = m->list_size();
1060 
1061  if (s == 0)
1062  {
1063  fee_ = Resource::feeUnwantedData;
1064  return;
1065  }
1066 
1067  if (s > 100)
1068  fee_ = Resource::feeMediumBurdenPeer;
1069 
1070  // VFALCO What's the right job type?
1071  auto that = shared_from_this();
1072  app_.getJobQueue().addJob(
1073  jtVALIDATION_ut, "receiveManifests", [this, that, m](Job&) {
1074  overlay_.onManifests(m, that);
1075  });
1076 }
1077 
1078 void
1079 PeerImp::onMessage(std::shared_ptr<protocol::TMPing> const& m)
1080 {
1081  if (m->type() == protocol::TMPing::ptPING)
1082  {
1083  // We have received a ping request, reply with a pong
1085  m->set_type(protocol::TMPing::ptPONG);
1086  send(std::make_shared<Message>(*m, protocol::mtPING));
1087  return;
1088  }
1089 
1090  if (m->type() == protocol::TMPing::ptPONG && m->has_seq())
1091  {
1092  // Only reset the ping sequence if we actually received a
1093  // PONG with the correct cookie. That way, any peers which
1094  // respond with incorrect cookies will eventually time out.
1095  if (m->seq() == lastPingSeq_)
1096  {
1097  lastPingSeq_.reset();
1098 
1099  // Update latency estimate
1100  auto const rtt = std::chrono::round<std::chrono::milliseconds>(
1101  clock_type::now() - lastPingTime_);
1102 
1103  std::lock_guard sl(recentLock_);
1104 
1105  if (latency_)
1106  latency_ = (*latency_ * 7 + rtt) / 8;
1107  else
1108  latency_ = rtt;
1109  }
1110 
1111  return;
1112  }
1113 }
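// The latency estimate above is an exponentially weighted moving average:
// latency_ = (latency_ * 7 + rtt) / 8. For example, with a previous estimate
// of 100ms and a measured rtt of 200ms the new estimate is
// (100 * 7 + 200) / 8 = 112ms, so a single slow round trip only nudges the
// estimate.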
1114 
1115 void
1116 PeerImp::onMessage(std::shared_ptr<protocol::TMCluster> const& m)
1117 {
1118  // VFALCO NOTE I think we should drop the peer immediately
1119  if (!cluster())
1120  {
1122  return;
1123  }
1124 
1125  for (int i = 0; i < m->clusternodes().size(); ++i)
1126  {
1127  protocol::TMClusterNode const& node = m->clusternodes(i);
1128 
1129  std::string name;
1130  if (node.has_nodename())
1131  name = node.nodename();
1132 
1133  auto const publicKey =
1134  parseBase58<PublicKey>(TokenType::NodePublic, node.publickey());
1135 
1136  // NIKB NOTE We should drop the peer immediately if
1137  // they send us a public key we can't parse
1138  if (publicKey)
1139  {
1140  auto const reportTime =
1141  NetClock::time_point{NetClock::duration{node.reporttime()}};
1142 
1143  app_.cluster().update(
1144  *publicKey, name, node.nodeload(), reportTime);
1145  }
1146  }
1147 
1148  int loadSources = m->loadsources().size();
1149  if (loadSources != 0)
1150  {
1151  Resource::Gossip gossip;
1152  gossip.items.reserve(loadSources);
1153  for (int i = 0; i < m->loadsources().size(); ++i)
1154  {
1155  protocol::TMLoadSource const& node = m->loadsources(i);
1156  Resource::Gossip::Item item;
1157  item.address = beast::IP::Endpoint::from_string(node.name());
1158  item.balance = node.cost();
1159  if (item.address != beast::IP::Endpoint())
1160  gossip.items.push_back(item);
1161  }
1162  overlay_.resourceManager().importConsumers(name(), gossip);
1163  }
1164 
1165  // Calculate the cluster fee:
1166  auto const thresh = app_.timeKeeper().now() - 90s;
1167  std::uint32_t clusterFee = 0;
1168 
1169  std::vector<std::uint32_t> fees;
1170  fees.reserve(app_.cluster().size());
1171 
1172  app_.cluster().for_each([&fees, thresh](ClusterNode const& status) {
1173  if (status.getReportTime() >= thresh)
1174  fees.push_back(status.getLoadFee());
1175  });
1176 
1177  if (!fees.empty())
1178  {
1179  auto const index = fees.size() / 2;
1180  std::nth_element(fees.begin(), fees.begin() + index, fees.end());
1181  clusterFee = fees[index];
1182  }
1183 
1184  app_.getFeeTrack().setClusterFee(clusterFee);
1185 }
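// The cluster fee is the median of the recent (last 90s) load fees reported
// by cluster members: nth_element() places the middle value at fees[index].
// For example, fees {10, 256, 1000} give index = 3 / 2 = 1 and a cluster fee
// of 256.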
1186 
1187 void
1188 PeerImp::onMessage(std::shared_ptr<protocol::TMGetShardInfo> const& m)
1189 {
1190  // DEPRECATED
1191 }
1192 
1193 void
1194 PeerImp::onMessage(std::shared_ptr<protocol::TMShardInfo> const& m)
1195 {
1196  // DEPRECATED
1197 }
1198 
1199 void
1200 PeerImp::onMessage(std::shared_ptr<protocol::TMGetPeerShardInfoV2> const& m)
1201 {
1202  auto badData = [&](std::string msg) {
1203  fee_ = Resource::feeBadData;
1204  JLOG(p_journal_.warn()) << msg;
1205  };
1206 
1207  // Verify relays
1208  if (m->relays() > relayLimit)
1209  return badData("Invalid relays");
1210 
1211  // Verify peer chain
1212  // The peer chain should not contain this node's public key
1213  // nor the public key of the sending peer
1214  std::set<PublicKey> pubKeyChain;
1215  pubKeyChain.insert(app_.nodeIdentity().first);
1216  pubKeyChain.insert(publicKey_);
1217 
1218  auto const peerChainSz{m->peerchain_size()};
1219  if (peerChainSz > 0)
1220  {
1221  if (peerChainSz > relayLimit)
1222  return badData("Invalid peer chain size");
1223 
1224  if (peerChainSz + m->relays() > relayLimit)
1225  return badData("Invalid relays and peer chain size");
1226 
1227  for (int i = 0; i < peerChainSz; ++i)
1228  {
1229  auto const slice{makeSlice(m->peerchain(i).publickey())};
1230 
1231  // Verify peer public key
1232  if (!publicKeyType(slice))
1233  return badData("Invalid peer public key");
1234 
1235  // Verify peer public key is unique in the peer chain
1236  if (!pubKeyChain.emplace(slice).second)
1237  return badData("Invalid peer public key");
1238  }
1239  }
1240 
1241  // Reply with shard info this node may have
1242  if (auto shardStore = app_.getShardStore())
1243  {
1244  auto reply{shardStore->getShardInfo()->makeMessage(app_)};
1245  if (peerChainSz > 0)
1246  *(reply.mutable_peerchain()) = m->peerchain();
1247  send(std::make_shared<Message>(reply, protocol::mtPEER_SHARD_INFO_V2));
1248  }
1249 
1250  if (m->relays() == 0)
1251  return;
1252 
1253  // Charge originating peer a fee for requesting relays
1254  if (peerChainSz == 0)
1256 
1257  // Add peer to the peer chain
1258  m->add_peerchain()->set_publickey(publicKey_.data(), publicKey_.size());
1259 
1260  // Relay the request to peers, exclude the peer chain
1261  m->set_relays(m->relays() - 1);
1262  overlay_.foreach(send_if_not(
1263  std::make_shared<Message>(*m, protocol::mtGET_PEER_SHARD_INFO_V2),
1264  [&](std::shared_ptr<Peer> const& peer) {
1265  return pubKeyChain.find(peer->getNodePublic()) != pubKeyChain.end();
1266  }));
1267 }
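// relays() bounds how many additional hops this query may travel, and every
// relaying node appends its own public key to peerchain; the chain is used
// both to route the eventual TMPeerShardInfoV2 reply back hop by hop and to
// exclude peers that have already seen the request.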
1268 
1269 void
1270 PeerImp::onMessage(std::shared_ptr<protocol::TMPeerShardInfoV2> const& m)
1271 {
1272  // Find the earliest and latest shard indexes
1273  auto const& db{app_.getNodeStore()};
1274  auto const earliestShardIndex{db.earliestShardIndex()};
1275  auto const latestShardIndex{[&]() -> std::optional<std::uint32_t> {
1276  auto const curLedgerSeq{app_.getLedgerMaster().getCurrentLedgerIndex()};
1277  if (curLedgerSeq >= db.earliestLedgerSeq())
1278  return db.seqToShardIndex(curLedgerSeq);
1279  return std::nullopt;
1280  }()};
1281 
1282  auto badData = [&](std::string msg) {
1283  fee_ = Resource::feeBadData;
1284  JLOG(p_journal_.warn()) << msg;
1285  };
1286 
1287  // Used to create a digest and verify the message signature
1288  Serializer s;
1289  NodeStore::ShardInfo shardInfo;
1290 
1291  // Verify message creation time
1293  {
1294  auto const timestamp{
1295  NetClock::time_point{std::chrono::seconds{m->timestamp()}}};
1296  auto const now{app_.timeKeeper().now()};
1297  if (timestamp > (now + 5s))
1298  return badData("Invalid timestamp");
1299 
1300  // Check if stale
1301  using namespace std::chrono_literals;
1302  if (timestamp < (now - 5min))
1303  return badData("Stale timestamp");
1304 
1305  s.add32(m->timestamp());
1306  shardInfo.setMsgTimestamp(timestamp);
1307  }
1308 
1309  // Verify incomplete shards
1310  auto const numIncomplete{m->incomplete_size()};
1311  if (numIncomplete > 0)
1312  {
1313  if (latestShardIndex && numIncomplete > *latestShardIndex)
1314  return badData("Invalid number of incomplete shards");
1315 
1316  // Verify each incomplete shard
1317  for (int i = 0; i < numIncomplete; ++i)
1318  {
1319  auto const& incomplete{m->incomplete(i)};
1320  auto const shardIndex{incomplete.shardindex()};
1321 
1322  // Verify shard index
1323  if (shardIndex < earliestShardIndex ||
1324  (latestShardIndex && shardIndex > latestShardIndex))
1325  {
1326  return badData("Invalid incomplete shard index");
1327  }
1328  s.add32(shardIndex);
1329 
1330  // Verify state
1331  auto const state{static_cast<ShardState>(incomplete.state())};
1332  switch (state)
1333  {
1334  // Incomplete states
1335  case ShardState::acquire:
1336  case ShardState::complete:
1338  case ShardState::queued:
1339  break;
1340 
1341  // case ShardState::finalized:
1342  default:
1343  return badData("Invalid incomplete shard state");
1344  };
1345  s.add32(incomplete.state());
1346 
1347  // Verify progress
1348  std::uint32_t progress{0};
1349  if (incomplete.has_progress())
1350  {
1351  progress = incomplete.progress();
1352  if (progress < 1 || progress > 100)
1353  return badData("Invalid incomplete shard progress");
1354  s.add32(progress);
1355  }
1356 
1357  // Verify each incomplete shard is unique
1358  if (!shardInfo.update(shardIndex, state, progress))
1359  return badData("Invalid duplicate incomplete shards");
1360  }
1361  }
1362 
1363  // Verify finalized shards
1364  if (m->has_finalized())
1365  {
1366  auto const& str{m->finalized()};
1367  if (str.empty())
1368  return badData("Invalid finalized shards");
1369 
1370  if (!shardInfo.setFinalizedFromString(str))
1371  return badData("Invalid finalized shard indexes");
1372 
1373  auto const& finalized{shardInfo.finalized()};
1374  auto const numFinalized{boost::icl::length(finalized)};
1375  if (numFinalized == 0 ||
1376  boost::icl::first(finalized) < earliestShardIndex ||
1377  (latestShardIndex &&
1378  boost::icl::last(finalized) > latestShardIndex))
1379  {
1380  return badData("Invalid finalized shard indexes");
1381  }
1382 
1383  if (latestShardIndex &&
1384  (numFinalized + numIncomplete) > *latestShardIndex)
1385  {
1386  return badData("Invalid number of finalized and incomplete shards");
1387  }
1388 
1389  s.addRaw(str.data(), str.size());
1390  }
1391 
1392  // Verify public key
1393  auto slice{makeSlice(m->publickey())};
1394  if (!publicKeyType(slice))
1395  return badData("Invalid public key");
1396 
1397  // Verify peer public key isn't this node's public key
1398  PublicKey const publicKey(slice);
1399  if (publicKey == app_.nodeIdentity().first)
1400  return badData("Invalid public key");
1401 
1402  // Verify signature
1403  if (!verify(publicKey, s.slice(), makeSlice(m->signature()), false))
1404  return badData("Invalid signature");
1405 
1406  // Forward the message if a peer chain exists
1407  auto const peerChainSz{m->peerchain_size()};
1408  if (peerChainSz > 0)
1409  {
1410  // Verify peer chain
1411  if (peerChainSz > relayLimit)
1412  return badData("Invalid peer chain size");
1413 
1414  // The peer chain should not contain this node's public key
1415  // nor the public key of the sending peer
1416  std::set<PublicKey> pubKeyChain;
1417  pubKeyChain.insert(app_.nodeIdentity().first);
1418  pubKeyChain.insert(publicKey_);
1419 
1420  for (int i = 0; i < peerChainSz; ++i)
1421  {
1422  // Verify peer public key
1423  slice = makeSlice(m->peerchain(i).publickey());
1424  if (!publicKeyType(slice))
1425  return badData("Invalid peer public key");
1426 
1427  // Verify peer public key is unique in the peer chain
1428  if (!pubKeyChain.emplace(slice).second)
1429  return badData("Invalid peer public key");
1430  }
1431 
1432  // If last peer in the chain is connected, relay the message
1433  PublicKey const peerPubKey(
1434  makeSlice(m->peerchain(peerChainSz - 1).publickey()));
1435  if (auto peer = overlay_.findPeerByPublicKey(peerPubKey))
1436  {
1437  m->mutable_peerchain()->RemoveLast();
1438  peer->send(
1439  std::make_shared<Message>(*m, protocol::mtPEER_SHARD_INFO_V2));
1440  JLOG(p_journal_.trace())
1441  << "Relayed TMPeerShardInfoV2 from peer IP "
1442  << remote_address_.address().to_string() << " to peer IP "
1443  << peer->getRemoteAddress().to_string();
1444  }
1445  else
1446  {
1447  // Peer is no longer available so the relay ends
1448  JLOG(p_journal_.info()) << "Unable to relay peer shard info";
1449  }
1450  }
1451 
1452  JLOG(p_journal_.trace())
1453  << "Consumed TMPeerShardInfoV2 originating from public key "
1454  << toBase58(TokenType::NodePublic, publicKey) << " finalized shards["
1455  << ripple::to_string(shardInfo.finalized()) << "] incomplete shards["
1456  << (shardInfo.incomplete().empty() ? "empty"
1457  : shardInfo.incompleteToString())
1458  << "]";
1459 
1460  // Consume the message
1461  {
1463  auto const it{shardInfos_.find(publicKey_)};
1464  if (it == shardInfos_.end())
1465  shardInfos_.emplace(publicKey, std::move(shardInfo));
1466  else if (shardInfo.msgTimestamp() > it->second.msgTimestamp())
1467  it->second = std::move(shardInfo);
1468  }
1469 
1470  // Notify overlay a reply was received from the last peer in this chain
1471  if (peerChainSz == 0)
1472  overlay_.lastLink(id_);
1473 }
1474 
1475 void
1476 PeerImp::onMessage(std::shared_ptr<protocol::TMEndpoints> const& m)
1477 {
1478  // Don't allow endpoints from peers that are not known tracking or are
1479  // not using a version of the message that we support:
1480  if (tracking_.load() != Tracking::converged || m->version() != 2)
1481  return;
1482 
1483  std::vector<PeerFinder::Endpoint> endpoints;
1484  endpoints.reserve(m->endpoints_v2().size());
1485 
1486  for (auto const& tm : m->endpoints_v2())
1487  {
1488  auto result = beast::IP::Endpoint::from_string_checked(tm.endpoint());
1489  if (!result)
1490  {
1491  JLOG(p_journal_.error()) << "failed to parse incoming endpoint: {"
1492  << tm.endpoint() << "}";
1493  continue;
1494  }
1495 
1496  // If hops == 0, this Endpoint describes the peer we are connected
1497  // to -- in that case, we take the remote address seen on the
1498  // socket and store that in the IP::Endpoint. If this is the first
1499  // time, then we'll verify that their listener can receive incoming
1500  // by performing a connectivity test. if hops > 0, then we just
1501  // take the address/port we were given
1502 
1503  endpoints.emplace_back(
1504  tm.hops() > 0 ? *result : remote_address_.at_port(result->port()),
1505  tm.hops());
1506  }
1507 
1508  if (!endpoints.empty())
1509  overlay_.peerFinder().on_endpoints(slot_, endpoints);
1510 }
1511 
1512 void
1513 PeerImp::onMessage(std::shared_ptr<protocol::TMTransaction> const& m)
1514 {
1515  handleTransaction(m, true);
1516 }
1517 
1518 void
1519 PeerImp::handleTransaction(
1520  std::shared_ptr<protocol::TMTransaction> const& m,
1521  bool eraseTxQueue)
1522 {
1523  if (tracking_.load() == Tracking::diverged)
1524  return;
1525 
1526  if (app_.getOPs().isNeedNetworkLedger())
1527  {
1528  // If we've never been in synch, there's nothing we can do
1529  // with a transaction
1530  JLOG(p_journal_.debug()) << "Ignoring incoming transaction: "
1531  << "Need network ledger";
1532  return;
1533  }
1534 
1535  SerialIter sit(makeSlice(m->rawtransaction()));
1536 
1537  try
1538  {
1539  auto stx = std::make_shared<STTx const>(sit);
1540  uint256 txID = stx->getTransactionID();
1541 
1542  int flags;
1543  constexpr std::chrono::seconds tx_interval = 10s;
1544 
1545  if (!app_.getHashRouter().shouldProcess(txID, id_, flags, tx_interval))
1546  {
1547  // we have seen this transaction recently
1548  if (flags & SF_BAD)
1549  {
1551  JLOG(p_journal_.debug()) << "Ignoring known bad tx " << txID;
1552  }
1553 
1554  // Erase only if the server has seen this tx. If the server has not
1555  // seen this tx then the tx could not have been queued for this peer.
1556  else if (eraseTxQueue && txReduceRelayEnabled())
1557  removeTxQueue(txID);
1558 
1559  return;
1560  }
1561 
1562  JLOG(p_journal_.debug()) << "Got tx " << txID;
1563 
1564  bool checkSignature = true;
1565  if (cluster())
1566  {
1567  if (!m->has_deferred() || !m->deferred())
1568  {
1569  // Skip local checks if a server we trust
1570  // put the transaction in its open ledger
1571  flags |= SF_TRUSTED;
1572  }
1573 
1575  {
1576  // For now, be paranoid and have each validator
1577  // check each transaction, regardless of source
1578  checkSignature = false;
1579  }
1580  }
1581 
1584  {
1586  JLOG(p_journal_.info()) << "Transaction queue is full";
1587  }
1588  else if (app_.getLedgerMaster().getValidatedLedgerAge() > 4min)
1589  {
1590  JLOG(p_journal_.trace())
1591  << "No new transactions until synchronized";
1592  }
1593  else
1594  {
1595  app_.getJobQueue().addJob(
1596  jtTRANSACTION,
1597  "recvTransaction->checkTransaction",
1598  [weak = std::weak_ptr<PeerImp>(shared_from_this()),
1599  flags,
1600  checkSignature,
1601  stx](Job&) {
1602  if (auto peer = weak.lock())
1603  peer->checkTransaction(flags, checkSignature, stx);
1604  });
1605  }
1606  }
1607  catch (std::exception const&)
1608  {
1609  JLOG(p_journal_.warn())
1610  << "Transaction invalid: " << strHex(m->rawtransaction());
1611  }
1612 }
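// HashRouter::shouldProcess() is the relay de-duplicator: a transaction ID
// already seen from any peer within tx_interval (10s) is not re-processed,
// SF_BAD short-circuits transactions already known to be bad, and with
// tx reduce-relay enabled the hash is also dropped from this peer's queue
// once the peer has demonstrably seen the transaction.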
1613 
1614 void
1615 PeerImp::onMessage(std::shared_ptr<protocol::TMGetLedger> const& m)
1616 {
1617  auto badData = [&](std::string const& msg) {
1618  fee_ = Resource::feeBadData;
1619  JLOG(p_journal_.warn()) << "TMGetLedger: " << msg;
1620  };
1621  auto const itype{m->itype()};
1622 
1623  // Verify ledger info type
1624  if (itype < protocol::liBASE || itype > protocol::liTS_CANDIDATE)
1625  return badData("Invalid ledger info type");
1626 
1627  auto const ltype = [&m]() -> std::optional<::protocol::TMLedgerType> {
1628  if (m->has_ltype())
1629  return m->ltype();
1630  return std::nullopt;
1631  }();
1632 
1633  if (itype == protocol::liTS_CANDIDATE)
1634  {
1635  if (!m->has_ledgerhash())
1636  return badData("Invalid TX candidate set, missing TX set hash");
1637  }
1638  else if (
1639  !m->has_ledgerhash() && !m->has_ledgerseq() &&
1640  !(ltype && *ltype == protocol::ltCLOSED))
1641  {
1642  return badData("Invalid request");
1643  }
1644 
1645  // Verify ledger type
1646  if (ltype && (*ltype < protocol::ltACCEPTED || *ltype > protocol::ltCLOSED))
1647  return badData("Invalid ledger type");
1648 
1649  // Verify ledger hash
1650  if (m->has_ledgerhash() && !stringIsUint256Sized(m->ledgerhash()))
1651  return badData("Invalid ledger hash");
1652 
1653  // Verify ledger sequence
1654  if (m->has_ledgerseq())
1655  {
1656  auto const ledgerSeq{m->ledgerseq()};
1657  // Verifying the network's earliest ledger only pertains to shards.
1658  if (app_.getShardStore() &&
1659  ledgerSeq < app_.getNodeStore().earliestLedgerSeq())
1660  {
1661  return badData(
1662  "Invalid ledger sequence " + std::to_string(ledgerSeq));
1663  }
1664 
1665  // Check if within a reasonable range
1666  using namespace std::chrono_literals;
1667  if (app_.getLedgerMaster().getValidatedLedgerAge() <= 10s &&
1668  ledgerSeq > app_.getLedgerMaster().getValidLedgerIndex() + 10)
1669  {
1670  return badData(
1671  "Invalid ledger sequence " + std::to_string(ledgerSeq));
1672  }
1673  }
1674 
1675  // Verify ledger node IDs
1676  if (itype != protocol::liBASE)
1677  {
1678  if (m->nodeids_size() <= 0)
1679  return badData("Invalid ledger node IDs");
1680 
1681  for (auto const& nodeId : m->nodeids())
1682  {
1683  if (deserializeSHAMapNodeID(nodeId) == std::nullopt)
1684  return badData("Invalid SHAMap node ID");
1685  }
1686  }
1687 
1688  // Verify query type
1689  if (m->has_querytype() && m->querytype() != protocol::qtINDIRECT)
1690  return badData("Invalid query type");
1691 
1692  // Verify query depth
1693  if (m->has_querydepth())
1694  {
1695  if (m->querydepth() > Tuning::maxQueryDepth ||
1696  itype == protocol::liBASE)
1697  {
1698  return badData("Invalid query depth");
1699  }
1700  }
1701 
1702  // Queue a job to process the request
1703  std::weak_ptr<PeerImp> weak = shared_from_this();
1704  app_.getJobQueue().addJob(jtLEDGER_REQ, "recvGetLedger", [weak, m](Job&) {
1705  if (auto peer = weak.lock())
1706  peer->processLedgerRequest(m);
1707  });
1708 }
1709 
1710 void
1711 PeerImp::onMessage(std::shared_ptr<protocol::TMProofPathRequest> const& m)
1712 {
1713  JLOG(p_journal_.trace()) << "onMessage, TMProofPathRequest";
1714  if (!ledgerReplayEnabled_)
1715  {
1717  return;
1718  }
1719 
1720  fee_ = Resource::feeMediumBurdenPeer;
1721  std::weak_ptr<PeerImp> weak = shared_from_this();
1722  app_.getJobQueue().addJob(
1723  jtREPLAY_REQ, "recvProofPathRequest", [weak, m](Job&) {
1724  if (auto peer = weak.lock())
1725  {
1726  auto reply =
1727  peer->ledgerReplayMsgHandler_.processProofPathRequest(m);
1728  if (reply.has_error())
1729  {
1730  if (reply.error() == protocol::TMReplyError::reBAD_REQUEST)
1731  peer->charge(Resource::feeInvalidRequest);
1732  else
1733  peer->charge(Resource::feeRequestNoReply);
1734  }
1735  else
1736  {
1737  peer->send(std::make_shared<Message>(
1738  reply, protocol::mtPROOF_PATH_RESPONSE));
1739  }
1740  }
1741  });
1742 }
1743 
1744 void
1745 PeerImp::onMessage(std::shared_ptr<protocol::TMProofPathResponse> const& m)
1746 {
1747  if (!ledgerReplayEnabled_)
1748  {
1750  return;
1751  }
1752 
1753  if (!ledgerReplayMsgHandler_.processProofPathResponse(m))
1754  {
1755  charge(Resource::feeBadData);
1756  }
1757 }
1758 
1759 void
1760 PeerImp::onMessage(std::shared_ptr<protocol::TMReplayDeltaRequest> const& m)
1761 {
1762  JLOG(p_journal_.trace()) << "onMessage, TMReplayDeltaRequest";
1763  if (!ledgerReplayEnabled_)
1764  {
1766  return;
1767  }
1768 
1769  fee_ = Resource::feeMediumBurdenPeer;
1770  std::weak_ptr<PeerImp> weak = shared_from_this();
1771  app_.getJobQueue().addJob(
1772  jtREPLAY_REQ, "recvReplayDeltaRequest", [weak, m](Job&) {
1773  if (auto peer = weak.lock())
1774  {
1775  auto reply =
1776  peer->ledgerReplayMsgHandler_.processReplayDeltaRequest(m);
1777  if (reply.has_error())
1778  {
1779  if (reply.error() == protocol::TMReplyError::reBAD_REQUEST)
1780  peer->charge(Resource::feeInvalidRequest);
1781  else
1782  peer->charge(Resource::feeRequestNoReply);
1783  }
1784  else
1785  {
1786  peer->send(std::make_shared<Message>(
1787  reply, protocol::mtREPLAY_DELTA_RESPONSE));
1788  }
1789  }
1790  });
1791 }
1792 
1793 void
1794 PeerImp::onMessage(std::shared_ptr<protocol::TMReplayDeltaResponse> const& m)
1795 {
1796  if (!ledgerReplayEnabled_)
1797  {
1799  return;
1800  }
1801 
1802  if (!ledgerReplayMsgHandler_.processReplayDeltaResponse(m))
1803  {
1804  charge(Resource::feeBadData);
1805  }
1806 }
1807 
1808 void
1809 PeerImp::onMessage(std::shared_ptr<protocol::TMLedgerData> const& m)
1810 {
1811  auto badData = [&](std::string const& msg) {
1812  fee_ = Resource::feeBadData;
1813  JLOG(p_journal_.warn()) << "TMLedgerData: " << msg;
1814  };
1815 
1816  // Verify ledger hash
1817  if (!stringIsUint256Sized(m->ledgerhash()))
1818  return badData("Invalid ledger hash");
1819 
1820  // Verify ledger sequence
1821  {
1822  auto const ledgerSeq{m->ledgerseq()};
1823  if (m->type() == protocol::liTS_CANDIDATE)
1824  {
1825  if (ledgerSeq != 0)
1826  {
1827  return badData(
1828  "Invalid ledger sequence " + std::to_string(ledgerSeq));
1829  }
1830  }
1831  else
1832  {
1833  // Verifying the network's earliest ledger only pertains to shards.
1834  if (app_.getShardStore() &&
1835  ledgerSeq < app_.getNodeStore().earliestLedgerSeq())
1836  {
1837  return badData(
1838  "Invalid ledger sequence " + std::to_string(ledgerSeq));
1839  }
1840 
1841  // Check if within a reasonable range
1842  using namespace std::chrono_literals;
1843  if (app_.getLedgerMaster().getValidatedLedgerAge() <= 10s &&
1844  ledgerSeq > app_.getLedgerMaster().getValidLedgerIndex() + 10)
1845  {
1846  return badData(
1847  "Invalid ledger sequence " + std::to_string(ledgerSeq));
1848  }
1849  }
1850  }
1851 
1852  // Verify ledger info type
1853  if (m->type() < protocol::liBASE || m->type() > protocol::liTS_CANDIDATE)
1854  return badData("Invalid ledger info type");
1855 
1856  // Verify reply error
1857  if (m->has_error() &&
1858  (m->error() < protocol::reNO_LEDGER ||
1859  m->error() > protocol::reBAD_REQUEST))
1860  {
1861  return badData("Invalid reply error");
1862  }
1863 
1864  // Verify ledger nodes.
1865  if (m->nodes_size() <= 0 || m->nodes_size() > Tuning::hardMaxReplyNodes)
1866  {
1867  return badData(
1868  "Invalid Ledger/TXset nodes " + std::to_string(m->nodes_size()));
1869  }
1870 
1871  // If there is a request cookie, attempt to relay the message
1872  if (m->has_requestcookie())
1873  {
1874  if (auto peer = overlay_.findPeerByShortID(m->requestcookie()))
1875  {
1876  m->clear_requestcookie();
1877  peer->send(std::make_shared<Message>(*m, protocol::mtLEDGER_DATA));
1878  }
1879  else
1880  {
1881  JLOG(p_journal_.info()) << "Unable to route TX/ledger data reply";
1882  }
1883  return;
1884  }
1885 
1886  uint256 const ledgerHash{m->ledgerhash()};
1887 
1888  // Otherwise check if received data for a candidate transaction set
1889  if (m->type() == protocol::liTS_CANDIDATE)
1890  {
1891  std::weak_ptr<PeerImp> weak = shared_from_this();
1892  app_.getJobQueue().addJob(
1893  jtTXN_DATA, "recvPeerData", [weak, ledgerHash, m](Job&) {
1894  if (auto peer = weak.lock())
1895  {
1896  peer->app_.getInboundTransactions().gotData(
1897  ledgerHash, peer, m);
1898  }
1899  });
1900  return;
1901  }
1902 
1903  // Consume the message
1904  app_.getInboundLedgers().gotLedgerData(ledgerHash, shared_from_this(), m);
1905 }
1906 
1907 void
1908 PeerImp::onMessage(std::shared_ptr<protocol::TMProposeSet> const& m)
1909 {
1910  protocol::TMProposeSet& set = *m;
1911 
1912  auto const sig = makeSlice(set.signature());
1913 
1914  // Preliminary check for the validity of the signature: A DER encoded
1915  // signature can't be longer than 72 bytes.
1916  if ((std::clamp<std::size_t>(sig.size(), 64, 72) != sig.size()) ||
1917  (publicKeyType(makeSlice(set.nodepubkey())) != KeyType::secp256k1))
1918  {
1919  JLOG(p_journal_.warn()) << "Proposal: malformed";
1921  return;
1922  }
1923 
1924  if (!stringIsUint256Sized(set.currenttxhash()) ||
1925  !stringIsUint256Sized(set.previousledger()))
1926  {
1927  JLOG(p_journal_.warn()) << "Proposal: malformed";
1929  return;
1930  }
1931 
1932  uint256 const proposeHash{set.currenttxhash()};
1933  uint256 const prevLedger{set.previousledger()};
1934 
1935  PublicKey const publicKey{makeSlice(set.nodepubkey())};
1936  NetClock::time_point const closeTime{NetClock::duration{set.closetime()}};
1937 
1938  uint256 const suppression = proposalUniqueId(
1939  proposeHash,
1940  prevLedger,
1941  set.proposeseq(),
1942  closeTime,
1943  publicKey.slice(),
1944  sig);
1945 
1946  if (auto [added, relayed] =
1947  app_.getHashRouter().addSuppressionPeerWithStatus(suppression, id_);
1948  !added)
1949  {
1950  // Count unique messages (Slots has its own 'HashRouter'), which a peer
1951  // receives within IDLED seconds since the message has been relayed.
1952  if (reduceRelayReady() && relayed &&
1953  (stopwatch().now() - *relayed) < reduce_relay::IDLED)
1954  overlay_.updateSlotAndSquelch(
1955  suppression, publicKey, id_, protocol::mtPROPOSE_LEDGER);
1956  JLOG(p_journal_.trace()) << "Proposal: duplicate";
1957  return;
1958  }
1959 
1960  auto const isTrusted = app_.validators().trusted(publicKey);
1961 
1962  if (!isTrusted)
1963  {
1964  if (tracking_.load() == Tracking::diverged)
1965  {
1966  JLOG(p_journal_.debug())
1967  << "Proposal: Dropping untrusted (peer divergence)";
1968  return;
1969  }
1970 
1971  if (!cluster() && app_.getFeeTrack().isLoadedLocal())
1972  {
1973  JLOG(p_journal_.debug()) << "Proposal: Dropping untrusted (load)";
1974  return;
1975  }
1976  }
1977 
1978  JLOG(p_journal_.trace())
1979  << "Proposal: " << (isTrusted ? "trusted" : "untrusted");
1980 
1981  auto proposal = RCLCxPeerPos(
1982  publicKey,
1983  sig,
1984  suppression,
1986  prevLedger,
1987  set.proposeseq(),
1988  proposeHash,
1989  closeTime,
1992 
1993  std::weak_ptr<PeerImp> weak = shared_from_this();
1994  app_.getJobQueue().addJob(
1995  isTrusted ? jtPROPOSAL_t : jtPROPOSAL_ut,
1996  "recvPropose->checkPropose",
1997  [weak, m, proposal](Job& job) {
1998  if (auto peer = weak.lock())
1999  peer->checkPropose(job, m, proposal);
2000  });
2001 }
2002 
2003 void
2004 PeerImp::onMessage(std::shared_ptr<protocol::TMStatusChange> const& m)
2005 {
2006  JLOG(p_journal_.trace()) << "Status: Change";
2007 
2008  if (!m->has_networktime())
2009  m->set_networktime(app_.timeKeeper().now().time_since_epoch().count());
2010 
2011  {
2012  std::lock_guard sl(recentLock_);
2013  if (!last_status_.has_newstatus() || m->has_newstatus())
2014  last_status_ = *m;
2015  else
2016  {
2017  // preserve old status
2018  protocol::NodeStatus status = last_status_.newstatus();
2019  last_status_ = *m;
2020  m->set_newstatus(status);
2021  }
2022  }
2023 
2024  if (m->newevent() == protocol::neLOST_SYNC)
2025  {
2026  bool outOfSync{false};
2027  {
2028  // Operations on closedLedgerHash_ and previousLedgerHash_ must be
2029  // guarded by recentLock_.
2030  std::lock_guard sl(recentLock_);
2031  if (!closedLedgerHash_.isZero())
2032  {
2033  outOfSync = true;
2034  closedLedgerHash_.zero();
2035  }
2036  previousLedgerHash_.zero();
2037  }
2038  if (outOfSync)
2039  {
2040  JLOG(p_journal_.debug()) << "Status: Out of sync";
2041  }
2042  return;
2043  }
2044 
2045  {
2046  uint256 closedLedgerHash{};
2047  bool const peerChangedLedgers{
2048  m->has_ledgerhash() && stringIsUint256Sized(m->ledgerhash())};
2049 
2050  {
2051  // Operations on closedLedgerHash_ and previousLedgerHash_ must be
2052  // guarded by recentLock_.
2053  std::lock_guard sl(recentLock_);
2054  if (peerChangedLedgers)
2055  {
2056  closedLedgerHash_ = m->ledgerhash();
2057  closedLedgerHash = closedLedgerHash_;
2058  addLedger(closedLedgerHash, sl);
2059  }
2060  else
2061  {
2062  closedLedgerHash_.zero();
2063  }
2064 
2065  if (m->has_ledgerhashprevious() &&
2066  stringIsUint256Sized(m->ledgerhashprevious()))
2067  {
2068  previousLedgerHash_ = m->ledgerhashprevious();
2069  addLedger(previousLedgerHash_, sl);
2070  }
2071  else
2072  {
2073  previousLedgerHash_.zero();
2074  }
2075  }
2076  if (peerChangedLedgers)
2077  {
2078  JLOG(p_journal_.debug()) << "LCL is " << closedLedgerHash;
2079  }
2080  else
2081  {
2082  JLOG(p_journal_.debug()) << "Status: No ledger";
2083  }
2084  }
2085 
2086  if (m->has_firstseq() && m->has_lastseq())
2087  {
2088  std::lock_guard sl(recentLock_);
2089 
2090  minLedger_ = m->firstseq();
2091  maxLedger_ = m->lastseq();
2092 
2093  if ((maxLedger_ < minLedger_) || (minLedger_ == 0) || (maxLedger_ == 0))
2094  minLedger_ = maxLedger_ = 0;
2095  }
2096 
2097  if (m->has_ledgerseq() &&
2098  app_.getLedgerMaster().getValidatedLedgerAge() < 2min)
2099  {
2100  checkTracking(
2101  m->ledgerseq(), app_.getLedgerMaster().getValidLedgerIndex());
2102  }
2103 
2104  app_.getOPs().pubPeerStatus([=]() -> Json::Value {
2105  Json::Value j = Json::objectValue;
2106 
2107  if (m->has_newstatus())
2108  {
2109  switch (m->newstatus())
2110  {
2111  case protocol::nsCONNECTING:
2112  j[jss::status] = "CONNECTING";
2113  break;
2114  case protocol::nsCONNECTED:
2115  j[jss::status] = "CONNECTED";
2116  break;
2117  case protocol::nsMONITORING:
2118  j[jss::status] = "MONITORING";
2119  break;
2120  case protocol::nsVALIDATING:
2121  j[jss::status] = "VALIDATING";
2122  break;
2123  case protocol::nsSHUTTING:
2124  j[jss::status] = "SHUTTING";
2125  break;
2126  }
2127  }
2128 
2129  if (m->has_newevent())
2130  {
2131  switch (m->newevent())
2132  {
2133  case protocol::neCLOSING_LEDGER:
2134  j[jss::action] = "CLOSING_LEDGER";
2135  break;
2136  case protocol::neACCEPTED_LEDGER:
2137  j[jss::action] = "ACCEPTED_LEDGER";
2138  break;
2139  case protocol::neSWITCHED_LEDGER:
2140  j[jss::action] = "SWITCHED_LEDGER";
2141  break;
2142  case protocol::neLOST_SYNC:
2143  j[jss::action] = "LOST_SYNC";
2144  break;
2145  }
2146  }
2147 
2148  if (m->has_ledgerseq())
2149  {
2150  j[jss::ledger_index] = m->ledgerseq();
2151  }
2152 
2153  if (m->has_ledgerhash())
2154  {
2155  uint256 closedLedgerHash{};
2156  {
2157  std::lock_guard sl(recentLock_);
2158  closedLedgerHash = closedLedgerHash_;
2159  }
2160  j[jss::ledger_hash] = to_string(closedLedgerHash);
2161  }
2162 
2163  if (m->has_networktime())
2164  {
2165  j[jss::date] = Json::UInt(m->networktime());
2166  }
2167 
2168  if (m->has_firstseq() && m->has_lastseq())
2169  {
2170  j[jss::ledger_index_min] = Json::UInt(m->firstseq());
2171  j[jss::ledger_index_max] = Json::UInt(m->lastseq());
2172  }
2173 
2174  return j;
2175  });
2176 }
2177 
2178 void
2179 PeerImp::checkTracking(std::uint32_t validationSeq)
2180 {
2181  std::uint32_t serverSeq;
2182  {
2183  // Extract the sequence number of the highest
2184  // ledger this peer has
2185  std::lock_guard sl(recentLock_);
2186 
2187  serverSeq = maxLedger_;
2188  }
2189  if (serverSeq != 0)
2190  {
2191  // Compare the peer's ledger sequence to the
2192  // sequence of a recently-validated ledger
2193  checkTracking(serverSeq, validationSeq);
2194  }
2195 }
2196 
2197 void
2198 PeerImp::checkTracking(std::uint32_t seq1, std::uint32_t seq2)
2199 {
2200  int diff = std::max(seq1, seq2) - std::min(seq1, seq2);
2201 
2202  if (diff < Tuning::convergedLedgerLimit)
2203  {
2204  // The peer's ledger sequence is close to the validation's
2205  tracking_ = Tracking::converged;
2206  }
2207 
2208  if ((diff > Tuning::divergedLedgerLimit) &&
2209  (tracking_.load() != Tracking::diverged))
2210  {
2211  // The peer's ledger sequence is way off the validation's
2212  std::lock_guard sl(recentLock_);
2213 
2214  tracking_ = Tracking::diverged;
2215  trackingTime_ = clock_type::now();
2216  }
2217 }
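// A peer whose ledger sequence is within convergedLedgerLimit of ours is
// considered in sync; once the gap exceeds divergedLedgerLimit the peer is
// marked diverged and trackingTime_ restarts, which onTimer() later uses to
// drop peers that stay diverged longer than MAX_DIVERGED_TIME.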
2218 
2219 void
2220 PeerImp::onMessage(std::shared_ptr<protocol::TMHaveTransactionSet> const& m)
2221 {
2222  if (!stringIsUint256Sized(m->hash()))
2223  {
2224  fee_ = Resource::feeInvalidRequest;
2225  return;
2226  }
2227 
2228  uint256 const hash{m->hash()};
2229 
2230  if (m->status() == protocol::tsHAVE)
2231  {
2232  std::lock_guard sl(recentLock_);
2233 
2234  if (std::find(recentTxSets_.begin(), recentTxSets_.end(), hash) !=
2235  recentTxSets_.end())
2236  {
2237  fee_ = Resource::feeUnwantedData;
2238  return;
2239  }
2240 
2241  recentTxSets_.push_back(hash);
2242  }
2243 }
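// recentTxSets_ is the same bounded list consulted by hasTxSet(), so a
// tsHAVE announcement makes this peer a candidate source for that candidate
// transaction set; re-announcing a set we already recorded is charged as
// unwanted data.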
2244 
2245 void
2246 PeerImp::onValidatorListMessage(
2247  std::string const& messageType,
2248  std::string const& manifest,
2249  std::uint32_t version,
2250  std::vector<ValidatorBlobInfo> const& blobs)
2251 {
2252  // If there are no blobs, the message is malformed (possibly because of
2253  // ValidatorList class rules), so charge accordingly and skip processing.
2254  if (blobs.empty())
2255  {
2256  JLOG(p_journal_.warn()) << "Ignored malformed " << messageType
2257  << " from peer " << remote_address_;
2258  // This shouldn't ever happen with a well-behaved peer
2259  fee_ = Resource::feeHighBurdenPeer;
2260  return;
2261  }
2262 
2263  auto const hash = sha512Half(manifest, blobs, version);
2264 
2265  JLOG(p_journal_.debug())
2266  << "Received " << messageType << " from " << remote_address_.to_string()
2267  << " (" << id_ << ")";
2268 
2269  if (!app_.getHashRouter().addSuppressionPeer(hash, id_))
2270  {
2271  JLOG(p_journal_.debug())
2272  << messageType << ": received duplicate " << messageType;
2273  // Charging this fee here won't hurt the peer in the normal
2274  // course of operation (ie. refresh every 5 minutes), but
2275  // will add up if the peer is misbehaving.
2276  fee_ = Resource::feeUnwantedData;
2277  return;
2278  }
2279 
2280  auto const applyResult = app_.validators().applyListsAndBroadcast(
2281  manifest,
2282  version,
2283  blobs,
2284  remote_address_.to_string(),
2285  hash,
2286  app_.overlay(),
2287  app_.getHashRouter(),
2288  app_.getOPs());
2289 
2290  JLOG(p_journal_.debug())
2291  << "Processed " << messageType << " version " << version << " from "
2292  << (applyResult.publisherKey ? strHex(*applyResult.publisherKey)
2293  : "unknown or invalid publisher")
2294  << " from " << remote_address_.to_string() << " (" << id_
2295  << ") with best result " << to_string(applyResult.bestDisposition());
2296 
2297  // Act based on the best result
2298  switch (applyResult.bestDisposition())
2299  {
2300  // New list
2301  case ListDisposition::accepted:
2302  // Newest list is expired, and that needs to be broadcast, too
2303  case ListDisposition::expired:
2304  // Future list
2305  case ListDisposition::pending: {
2306  std::lock_guard<std::mutex> sl(recentLock_);
2307 
2308  assert(applyResult.publisherKey);
2309  auto const& pubKey = *applyResult.publisherKey;
2310 #ifndef NDEBUG
2311  if (auto const iter = publisherListSequences_.find(pubKey);
2312  iter != publisherListSequences_.end())
2313  {
2314  assert(iter->second < applyResult.sequence);
2315  }
2316 #endif
2317  publisherListSequences_[pubKey] = applyResult.sequence;
2318  }
2319  break;
2320  case ListDisposition::same_sequence:
2321  case ListDisposition::known_sequence:
2322 #ifndef NDEBUG
2323  {
2324  std::lock_guard<std::mutex> sl(recentLock_);
2325  assert(applyResult.sequence && applyResult.publisherKey);
2326  assert(
2327  publisherListSequences_[*applyResult.publisherKey] <=
2328  applyResult.sequence);
2329  }
2330 #endif // !NDEBUG
2331 
2332  break;
2333  case ListDisposition::stale:
2334  case ListDisposition::untrusted:
2335  case ListDisposition::invalid:
2336  case ListDisposition::unsupported_version:
2337  break;
2338  default:
2339  assert(false);
2340  }
2341 
2342  // Charge based on the worst result
2343  switch (applyResult.worstDisposition())
2344  {
2345  case ListDisposition::accepted:
2346  case ListDisposition::expired:
2347  case ListDisposition::pending:
2348  // No charges for good data
2349  break;
2350  case ListDisposition::same_sequence:
2351  case ListDisposition::known_sequence:
2352  // Charging this fee here won't hurt the peer in the normal
2353  // course of operation (i.e. refresh every 5 minutes), but
2354  // will add up if the peer is misbehaving.
2355  fee_ = Resource::feeUnwantedData;
2356  break;
2357  case ListDisposition::stale:
2358  // There are very few good reasons for a peer to send an
2359  // old list, particularly more than once.
2360  fee_ = Resource::feeBadData;
2361  break;
2362  case ListDisposition::untrusted:
2363  // Charging this fee here won't hurt the peer in the normal
2364  // course of operation (i.e. refresh every 5 minutes), but
2365  // will add up if the peer is misbehaving.
2366  fee_ = Resource::feeUnwantedData;
2367  break;
2368  case ListDisposition::invalid:
2369  // This shouldn't ever happen with a well-behaved peer
2370  fee_ = Resource::feeInvalidSignature;
2371  break;
2372  case ListDisposition::unsupported_version:
2373  // During a version transition, this may be legitimate.
2374  // If it happens frequently, that's probably bad.
2375  fee_ = Resource::feeBadData;
2376  break;
2377  default:
2378  assert(false);
2379  }
2380 
2381  // Log based on all the results.
2382  for (auto const& [disp, count] : applyResult.dispositions)
2383  {
2384  switch (disp)
2385  {
2386  // New list
2387  case ListDisposition::accepted:
2388  JLOG(p_journal_.debug())
2389  << "Applied " << count << " new " << messageType
2390  << "(s) from peer " << remote_address_;
2391  break;
2392  // Newest list is expired, and that needs to be broadcast, too
2393  case ListDisposition::expired:
2394  JLOG(p_journal_.debug())
2395  << "Applied " << count << " expired " << messageType
2396  << "(s) from peer " << remote_address_;
2397  break;
2398  // Future list
2399  case ListDisposition::pending:
2400  JLOG(p_journal_.debug())
2401  << "Processed " << count << " future " << messageType
2402  << "(s) from peer " << remote_address_;
2403  break;
2404  case ListDisposition::same_sequence:
2405  JLOG(p_journal_.warn())
2406  << "Ignored " << count << " " << messageType
2407  << "(s) with current sequence from peer "
2408  << remote_address_;
2409  break;
2410  case ListDisposition::known_sequence:
2411  JLOG(p_journal_.warn())
2412  << "Ignored " << count << " " << messageType
2413  << "(s) with future sequence from peer " << remote_address_;
2414  break;
2415  case ListDisposition::stale:
2416  JLOG(p_journal_.warn())
2417  << "Ignored " << count << " stale " << messageType
2418  << "(s) from peer " << remote_address_;
2419  break;
2420  case ListDisposition::untrusted:
2421  JLOG(p_journal_.warn())
2422  << "Ignored " << count << " untrusted " << messageType
2423  << "(s) from peer " << remote_address_;
2424  break;
2425  case ListDisposition::unsupported_version:
2426  JLOG(p_journal_.warn())
2427  << "Ignored " << count << " unsupported version "
2428  << messageType << "(s) from peer " << remote_address_;
2429  break;
2430  case ListDisposition::invalid:
2431  JLOG(p_journal_.warn())
2432  << "Ignored " << count << " invalid " << messageType
2433  << "(s) from peer " << remote_address_;
2434  break;
2435  default:
2436  assert(false);
2437  }
2438  }
2439 }
2440 
2441 void
2442 PeerImp::onMessage(std::shared_ptr<protocol::TMValidatorList> const& m)
2443 {
2444  try
2445  {
2446  if (!supportsFeature(ProtocolFeature::ValidatorListPropagation))
2447  {
2448  JLOG(p_journal_.debug())
2449  << "ValidatorList: received validator list from peer using "
2450  << "protocol version " << to_string(protocol_)
2451  << " which shouldn't support this feature.";
2452  fee_ = Resource::feeUnwantedData;
2453  return;
2454  }
2455  onValidatorListMessage(
2456  "ValidatorList",
2457  m->manifest(),
2458  m->version(),
2459  ValidatorList::parseBlobs(*m));
2460  }
2461  catch (std::exception const& e)
2462  {
2463  JLOG(p_journal_.warn()) << "ValidatorList: Exception, " << e.what()
2464  << " from peer " << remote_address_;
2465  fee_ = Resource::feeBadData;
2466  }
2467 }
2468 
2469 void
2470 PeerImp::onMessage(
2471  std::shared_ptr<protocol::TMValidatorListCollection> const& m)
2472 {
2473  try
2474  {
2475  if (!supportsFeature(ProtocolFeature::ValidatorList2Propagation))
2476  {
2477  JLOG(p_journal_.debug())
2478  << "ValidatorListCollection: received validator list from peer "
2479  << "using protocol version " << to_string(protocol_)
2480  << " which shouldn't support this feature.";
2481  fee_ = Resource::feeUnwantedData;
2482  return;
2483  }
2484  else if (m->version() < 2)
2485  {
2486  JLOG(p_journal_.debug())
2487  << "ValidatorListCollection: received invalid validator list "
2488  "version "
2489  << m->version() << " from peer using protocol version "
2490  << to_string(protocol_);
2491  fee_ = Resource::feeBadData;
2492  return;
2493  }
2494  onValidatorListMessage(
2495  "ValidatorListCollection",
2496  m->manifest(),
2497  m->version(),
2498  ValidatorList::parseBlobs(*m));
2499  }
2500  catch (std::exception const& e)
2501  {
2502  JLOG(p_journal_.warn()) << "ValidatorListCollection: Exception, "
2503  << e.what() << " from peer " << remote_address_;
2504  fee_ = Resource::feeBadData;
2505  }
2506 }
2507 
2508 void
2509 PeerImp::onMessage(std::shared_ptr<protocol::TMValidation> const& m)
2510 {
2511  if (m->validation().size() < 50)
2512  {
2513  JLOG(p_journal_.warn()) << "Validation: Too small";
2514  fee_ = Resource::feeInvalidRequest;
2515  return;
2516  }
2517 
2518  try
2519  {
2520  auto const closeTime = app_.timeKeeper().closeTime();
2521 
2522  std::shared_ptr<STValidation> val;
2523  {
2524  SerialIter sit(makeSlice(m->validation()));
2525  val = std::make_shared<STValidation>(
2526  std::ref(sit),
2527  [this](PublicKey const& pk) {
2528  return calcNodeID(
2529  app_.validatorManifests().getMasterKey(pk));
2530  },
2531  false);
2532  val->setSeen(closeTime);
2533  }
2534 
2535  if (!isCurrent(
2536  app_.getValidations().parms(),
2537  app_.timeKeeper().closeTime(),
2538  val->getSignTime(),
2539  val->getSeenTime()))
2540  {
2541  JLOG(p_journal_.trace()) << "Validation: Not current";
2542  fee_ = Resource::feeUnwantedData;
2543  return;
2544  }
2545 
2546  auto key = sha512Half(makeSlice(m->validation()));
2547  if (auto [added, relayed] =
2548  app_.getHashRouter().addSuppressionPeerWithStatus(key, id_);
2549  !added)
2550  {
2551  // Count unique messages (Slots has its own 'HashRouter') that a
2552  // peer receives within IDLED seconds of the message having been
2553  // relayed. Wait WAIT_ON_BOOTUP time to let the server establish
2554  // connections to peers.
2555  if (reduceRelayReady() && relayed &&
2556  (stopwatch().now() - *relayed) < reduce_relay::IDLED)
2557  overlay_.updateSlotAndSquelch(
2558  key, val->getSignerPublic(), id_, protocol::mtVALIDATION);
2559  JLOG(p_journal_.trace()) << "Validation: duplicate";
2560  return;
2561  }
2562 
2563  auto const isTrusted =
2564  app_.validators().trusted(val->getSignerPublic());
2565 
2566  if (!isTrusted && (tracking_.load() == Tracking::diverged))
2567  {
2568  JLOG(p_journal_.debug())
2569  << "Validation: dropping untrusted from diverged peer";
2570  }
2571  if (isTrusted || cluster() || !app_.getFeeTrack().isLoadedLocal())
2572  {
2573  std::weak_ptr<PeerImp> weak = shared_from_this();
2574  app_.getJobQueue().addJob(
2575  isTrusted ? jtVALIDATION_t : jtVALIDATION_ut,
2576  "recvValidation->checkValidation",
2577  [weak, val, m](Job&) {
2578  if (auto peer = weak.lock())
2579  peer->checkValidation(val, m);
2580  });
2581  }
2582  else
2583  {
2584  JLOG(p_journal_.debug()) << "Validation: Dropping UNTRUSTED (load)";
2585  }
2586  }
2587  catch (std::exception const& e)
2588  {
2589  JLOG(p_journal_.warn())
2590  << "Exception processing validation: " << e.what();
2591  fee_ = Resource::feeInvalidRequest;
2592  }
2593 }
2594 
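// TMGetObjectByHash is used in both directions. As a query it asks this
// server for node-store objects by hash (otFETCH_PACK and otTRANSACTIONS
// queries are routed to doFetchPack() and doTransactions() instead); as a
// reply it carries objects we requested, which are handed to LedgerMaster
// as fetch-pack data grouped by ledger sequence.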
2595 void
2596 PeerImp::onMessage(std::shared_ptr<protocol::TMGetObjectByHash> const& m)
2597 {
2598  protocol::TMGetObjectByHash& packet = *m;
2599 
2600  JLOG(p_journal_.trace()) << "received TMGetObjectByHash " << packet.type()
2601  << " " << packet.objects_size();
2602 
2603  if (packet.query())
2604  {
2605  // this is a query
2606  if (send_queue_.size() >= Tuning::dropSendQueue)
2607  {
2608  JLOG(p_journal_.debug()) << "GetObject: Large send queue";
2609  return;
2610  }
2611 
2612  if (packet.type() == protocol::TMGetObjectByHash::otFETCH_PACK)
2613  {
2614  doFetchPack(m);
2615  return;
2616  }
2617 
2618  if (packet.type() == protocol::TMGetObjectByHash::otTRANSACTIONS)
2619  {
2620  if (!txReduceRelayEnabled())
2621  {
2622  JLOG(p_journal_.error())
2623  << "TMGetObjectByHash: tx reduce-relay is disabled";
2624  fee_ = Resource::feeInvalidRequest;
2625  return;
2626  }
2627 
2628  std::weak_ptr<PeerImp> weak = shared_from_this();
2629  app_.getJobQueue().addJob(
2630  jtREQUESTED_TXN, "doTransactions", [weak, m](Job&) {
2631  if (auto peer = weak.lock())
2632  peer->doTransactions(m);
2633  });
2634  return;
2635  }
2636 
2637  fee_ = Resource::feeMediumBurdenPeer;
2638 
2639  protocol::TMGetObjectByHash reply;
2640 
2641  reply.set_query(false);
2642 
2643  if (packet.has_seq())
2644  reply.set_seq(packet.seq());
2645 
2646  reply.set_type(packet.type());
2647 
2648  if (packet.has_ledgerhash())
2649  {
2650  if (!stringIsUint256Sized(packet.ledgerhash()))
2651  {
2652  fee_ = Resource::feeInvalidRequest;
2653  return;
2654  }
2655 
2656  reply.set_ledgerhash(packet.ledgerhash());
2657  }
2658 
2659  // This is a very minimal implementation
2660  for (int i = 0; i < packet.objects_size(); ++i)
2661  {
2662  auto const& obj = packet.objects(i);
2663  if (obj.has_hash() && stringIsUint256Sized(obj.hash()))
2664  {
2665  uint256 const hash{obj.hash()};
2666  // VFALCO TODO Move this someplace more sensible so we don't
2667  // need to inject the NodeStore interfaces.
2668  std::uint32_t seq{obj.has_ledgerseq() ? obj.ledgerseq() : 0};
2669  auto nodeObject{app_.getNodeStore().fetchNodeObject(hash, seq)};
2670  if (!nodeObject)
2671  {
2672  if (auto shardStore = app_.getShardStore())
2673  {
2674  if (seq >= shardStore->earliestLedgerSeq())
2675  nodeObject = shardStore->fetchNodeObject(hash, seq);
2676  }
2677  }
2678  if (nodeObject)
2679  {
2680  protocol::TMIndexedObject& newObj = *reply.add_objects();
2681  newObj.set_hash(hash.begin(), hash.size());
2682  newObj.set_data(
2683  &nodeObject->getData().front(),
2684  nodeObject->getData().size());
2685 
2686  if (obj.has_nodeid())
2687  newObj.set_index(obj.nodeid());
2688  if (obj.has_ledgerseq())
2689  newObj.set_ledgerseq(obj.ledgerseq());
2690 
2691  // VFALCO NOTE "seq" in the message is obsolete
2692  }
2693  }
2694  }
2695 
2696  JLOG(p_journal_.trace()) << "GetObj: " << reply.objects_size() << " of "
2697  << packet.objects_size();
2698  send(std::make_shared<Message>(reply, protocol::mtGET_OBJECTS));
2699  }
2700  else
2701  {
2702  // this is a reply
2703  std::uint32_t pLSeq = 0;
2704  bool pLDo = true;
2705  bool progress = false;
2706 
2707  for (int i = 0; i < packet.objects_size(); ++i)
2708  {
2709  const protocol::TMIndexedObject& obj = packet.objects(i);
2710 
2711  if (obj.has_hash() && stringIsUint256Sized(obj.hash()))
2712  {
2713  if (obj.has_ledgerseq())
2714  {
2715  if (obj.ledgerseq() != pLSeq)
2716  {
2717  if (pLDo && (pLSeq != 0))
2718  {
2719  JLOG(p_journal_.debug())
2720  << "GetObj: Full fetch pack for " << pLSeq;
2721  }
2722  pLSeq = obj.ledgerseq();
2723  pLDo = !app_.getLedgerMaster().haveLedger(pLSeq);
2724 
2725  if (!pLDo)
2726  {
2727  JLOG(p_journal_.debug())
2728  << "GetObj: Late fetch pack for " << pLSeq;
2729  }
2730  else
2731  progress = true;
2732  }
2733  }
2734 
2735  if (pLDo)
2736  {
2737  uint256 const hash{obj.hash()};
2738 
2739  app_.getLedgerMaster().addFetchPack(
2740  hash,
2741  std::make_shared<Blob>(
2742  obj.data().begin(), obj.data().end()));
2743  }
2744  }
2745  }
2746 
2747  if (pLDo && (pLSeq != 0))
2748  {
2749  JLOG(p_journal_.debug())
2750  << "GetObj: Partial fetch pack for " << pLSeq;
2751  }
2752  if (packet.type() == protocol::TMGetObjectByHash::otFETCH_PACK)
2753  app_.getLedgerMaster().gotFetchPack(progress, pLSeq);
2754  }
2755 }
2756 
2757 void
2758 PeerImp::onMessage(std::shared_ptr<protocol::TMHaveTransactions> const& m)
2759 {
2760  if (!txReduceRelayEnabled())
2761  {
2762  JLOG(p_journal_.error())
2763  << "TMHaveTransactions: tx reduce-relay is disabled";
2764  fee_ = Resource::feeInvalidRequest;
2765  return;
2766  }
2767 
2768  std::weak_ptr<PeerImp> weak = shared_from_this();
2769  app_.getJobQueue().addJob(
2770  jtMISSING_TXN, "handleHaveTransactions", [weak, m](Job&) {
2771  if (auto peer = weak.lock())
2772  peer->handleHaveTransactions(m);
2773  });
2774 }
2775 
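// Reply side of transaction reduce-relay: for each hash announced in
// TMHaveTransactions we either already have the transaction cached (in
// which case any queued announcement of it to this peer is dropped), or we
// add the hash to a TMGetObjectByHash query of type otTRANSACTIONS and send
// that back, expecting the peer to answer with TMTransactions.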
2776 void
2777 PeerImp::handleHaveTransactions(
2778  std::shared_ptr<protocol::TMHaveTransactions> const& m)
2779 {
2780  protocol::TMGetObjectByHash tmBH;
2781  tmBH.set_type(protocol::TMGetObjectByHash_ObjectType_otTRANSACTIONS);
2782  tmBH.set_query(true);
2783 
2784  JLOG(p_journal_.trace())
2785  << "received TMHaveTransactions " << m->hashes_size();
2786 
2787  for (std::uint32_t i = 0; i < m->hashes_size(); i++)
2788  {
2789  if (!stringIsUint256Sized(m->hashes(i)))
2790  {
2791  JLOG(p_journal_.error())
2792  << "TMHaveTransactions with invalid hash size";
2793  fee_ = Resource::feeInvalidRequest;
2794  return;
2795  }
2796 
2797  uint256 hash(m->hashes(i));
2798 
2799  auto txn = app_.getMasterTransaction().fetch_from_cache(hash);
2800 
2801  JLOG(p_journal_.trace()) << "checking transaction " << (bool)txn;
2802 
2803  if (!txn)
2804  {
2805  JLOG(p_journal_.debug()) << "adding transaction to request";
2806 
2807  auto obj = tmBH.add_objects();
2808  obj->set_hash(hash.data(), hash.size());
2809  }
2810  else
2811  {
2812  // Erase only if the peer has seen this tx. If the peer has not
2813  // seen this tx, then the tx could not have been queued for this
2814  // peer.
2815  removeTxQueue(hash);
2816  }
2817  }
2818 
2819  JLOG(p_journal_.trace())
2820  << "transaction request object is " << tmBH.objects_size();
2821 
2822  if (tmBH.objects_size() > 0)
2823  send(std::make_shared<Message>(tmBH, protocol::mtGET_OBJECTS));
2824 }
2825 
2826 void
2827 PeerImp::onMessage(std::shared_ptr<protocol::TMTransactions> const& m)
2828 {
2829  if (!txReduceRelayEnabled())
2830  {
2831  JLOG(p_journal_.error())
2832  << "TMTransactions: tx reduce-relay is disabled";
2833  fee_ = Resource::feeInvalidRequest;
2834  return;
2835  }
2836 
2837  JLOG(p_journal_.trace())
2838  << "received TMTransactions " << m->transactions_size();
2839 
2840  overlay_.addTxMetrics(m->transactions_size());
2841 
2842  for (std::uint32_t i = 0; i < m->transactions_size(); ++i)
2843  handleTransaction(
2844  std::shared_ptr<protocol::TMTransaction>(
2845  m->mutable_transactions(i), [](protocol::TMTransaction*) {}),
2846  false);
2847 }
2848 
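// TMSquelch handling, in outline: the message names a validator key and asks
// this peer either to stop relaying that validator's messages for
// squelchduration seconds (squelch == true) or to resume relaying them
// (squelch == false). Only listed validator keys are honored, and a squelch
// naming our own validation key is ignored. As a purely illustrative
// example, a message carrying {squelch: true, validatorPubKey: <listed key>,
// squelchDuration: 300} would suppress relaying of that validator's messages
// to this peer for roughly five minutes, subject to the bounds enforced by
// squelch_.addSquelch().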
2849 void
2850 PeerImp::onMessage(std::shared_ptr<protocol::TMSquelch> const& m)
2851 {
2852  using on_message_fn =
2853  void (PeerImp::*)(std::shared_ptr<protocol::TMSquelch> const&);
2854  if (!strand_.running_in_this_thread())
2855  return post(
2856  strand_,
2857  std::bind(
2858  (on_message_fn)&PeerImp::onMessage, shared_from_this(), m));
2859 
2860  if (!m->has_validatorpubkey())
2861  {
2862  charge(Resource::feeBadData);
2863  return;
2864  }
2865  auto validator = m->validatorpubkey();
2866  auto const slice{makeSlice(validator)};
2867  if (!publicKeyType(slice))
2868  {
2869  charge(Resource::feeBadData);
2870  return;
2871  }
2872  PublicKey key(slice);
2873 
2874  // Ignore non-validator squelch
2875  if (!app_.validators().listed(key))
2876  {
2877  charge(Resource::feeBadData);
2878  JLOG(p_journal_.debug())
2879  << "onMessage: TMSquelch discarding non-validator squelch "
2880  << slice;
2881  return;
2882  }
2883 
2884  // Ignore the squelch for validator's own messages.
2885  if (key == app_.getValidationPublicKey())
2886  {
2887  JLOG(p_journal_.debug())
2888  << "onMessage: TMSquelch discarding validator's squelch " << slice;
2889  return;
2890  }
2891 
2892  std::uint32_t duration =
2893  m->has_squelchduration() ? m->squelchduration() : 0;
2894  if (!m->squelch())
2895  squelch_.removeSquelch(key);
2896  else if (!squelch_.addSquelch(key, std::chrono::seconds{duration}))
2897  charge(Resource::feeBadData);
2898 
2899  JLOG(p_journal_.debug())
2900  << "onMessage: TMSquelch " << slice << " " << id() << " " << duration;
2901 }
2902 
2903 //--------------------------------------------------------------------------
2904 
2905 void
2906 PeerImp::addLedger(
2907  uint256 const& hash,
2908  std::lock_guard<std::mutex> const& lockedRecentLock)
2909 {
2910  // lockedRecentLock is passed as a reminder that recentLock_ must be
2911  // locked by the caller.
2912  (void)lockedRecentLock;
2913 
2914  if (std::find(recentLedgers_.begin(), recentLedgers_.end(), hash) !=
2915  recentLedgers_.end())
2916  return;
2917 
2918  recentLedgers_.push_back(hash);
2919 }
2920 
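// Fetch-pack requests are deliberately throttled: the work is skipped when
// the local fee level indicates load, when the validated ledger is more
// than 40 seconds old, or when more than 10 jtPACK jobs are already queued,
// because building a fetch pack is comparatively expensive. Requests that
// are accepted are charged feeHighBurdenPeer and handed to the job queue.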
2921 void
2922 PeerImp::doFetchPack(const std::shared_ptr<protocol::TMGetObjectByHash>& packet)
2923 {
2924  // VFALCO TODO Invert this dependency using an observer and shared state
2925  // object. Don't queue fetch pack jobs if we're under load or we already
2926  // have some queued.
2927  if (app_.getFeeTrack().isLoadedLocal() ||
2928  (app_.getLedgerMaster().getValidatedLedgerAge() > 40s) ||
2929  (app_.getJobQueue().getJobCount(jtPACK) > 10))
2930  {
2931  JLOG(p_journal_.info()) << "Too busy to make fetch pack";
2932  return;
2933  }
2934 
2935  if (!stringIsUint256Sized(packet->ledgerhash()))
2936  {
2937  JLOG(p_journal_.warn()) << "FetchPack hash size malformed";
2938  fee_ = Resource::feeInvalidRequest;
2939  return;
2940  }
2941 
2942  fee_ = Resource::feeHighBurdenPeer;
2943 
2944  uint256 const hash{packet->ledgerhash()};
2945 
2946  std::weak_ptr<PeerImp> weak = shared_from_this();
2947  auto elapsed = UptimeClock::now();
2948  auto const pap = &app_;
2949  app_.getJobQueue().addJob(
2950  jtPACK, "MakeFetchPack", [pap, weak, packet, hash, elapsed](Job&) {
2951  pap->getLedgerMaster().makeFetchPack(weak, packet, hash, elapsed);
2952  });
2953 }
2954 
2955 void
2956 PeerImp::doTransactions(
2957  std::shared_ptr<protocol::TMGetObjectByHash> const& packet)
2958 {
2959  protocol::TMTransactions reply;
2960 
2961  JLOG(p_journal_.trace()) << "received TMGetObjectByHash requesting tx "
2962  << packet->objects_size();
2963 
2964  if (packet->objects_size() > reduce_relay::MAX_TX_QUEUE_SIZE)
2965  {
2966  JLOG(p_journal_.error()) << "doTransactions, invalid number of hashes";
2967  fee_ = Resource::feeInvalidRequest;
2968  return;
2969  }
2970 
2971  for (std::uint32_t i = 0; i < packet->objects_size(); ++i)
2972  {
2973  auto const& obj = packet->objects(i);
2974 
2975  if (!stringIsUint256Sized(obj.hash()))
2976  {
2977  fee_ = Resource::feeInvalidRequest;
2978  return;
2979  }
2980 
2981  uint256 hash(obj.hash());
2982 
2983  auto txn = app_.getMasterTransaction().fetch_from_cache(hash);
2984 
2985  if (!txn)
2986  {
2987  JLOG(p_journal_.error()) << "doTransactions, transaction not found "
2988  << Slice(hash.data(), hash.size());
2989  fee_ = Resource::feeInvalidRequest;
2990  return;
2991  }
2992 
2993  Serializer s;
2994  auto tx = reply.add_transactions();
2995  auto sttx = txn->getSTransaction();
2996  sttx->add(s);
2997  tx->set_rawtransaction(s.data(), s.size());
2998  tx->set_status(
2999  txn->getStatus() == INCLUDED ? protocol::tsCURRENT
3000  : protocol::tsNEW);
3001  tx->set_receivetimestamp(
3002  app_.timeKeeper().now().time_since_epoch().count());
3003  tx->set_deferred(txn->getSubmitResult().queued);
3004  }
3005 
3006  if (reply.transactions_size() > 0)
3007  send(std::make_shared<Message>(reply, protocol::mtTRANSACTIONS));
3008 }
3009 
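// Validates a relayed transaction off the network thread. Transactions whose
// LastLedgerSequence is already behind the validated ledger are flagged
// SF_BAD and charged as unwanted data; otherwise signature and local checks
// run via checkValidity(), unless the caller has already vouched for the
// signature, in which case the cached validity is forced to Valid. Surviving
// transactions are handed to NetworkOPs::processTransaction().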
3010 void
3011 PeerImp::checkTransaction(
3012  int flags,
3013  bool checkSignature,
3014  std::shared_ptr<STTx const> const& stx)
3015 {
3016  // VFALCO TODO Rewrite to not use exceptions
3017  try
3018  {
3019  // Expired?
3020  if (stx->isFieldPresent(sfLastLedgerSequence) &&
3021  (stx->getFieldU32(sfLastLedgerSequence) <
3022  app_.getLedgerMaster().getValidLedgerIndex()))
3023  {
3024  app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
3025  charge(Resource::feeUnwantedData);
3026  return;
3027  }
3028 
3029  if (checkSignature)
3030  {
3031  // Check the signature before handing off to the job queue.
3032  if (auto [valid, validReason] = checkValidity(
3033  app_.getHashRouter(),
3034  *stx,
3035  app_.getLedgerMaster().getValidatedRules(),
3036  app_.config());
3037  valid != Validity::Valid)
3038  {
3039  if (!validReason.empty())
3040  {
3041  JLOG(p_journal_.trace())
3042  << "Exception checking transaction: " << validReason;
3043  }
3044 
3045  // Probably not necessary to set SF_BAD, but doesn't hurt.
3046  app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
3047  charge(Resource::feeInvalidSignature);
3048  return;
3049  }
3050  }
3051  else
3052  {
3053  forceValidity(
3054  app_.getHashRouter(), stx->getTransactionID(), Validity::Valid);
3055  }
3056 
3057  std::string reason;
3058  auto tx = std::make_shared<Transaction>(stx, reason, app_);
3059 
3060  if (tx->getStatus() == INVALID)
3061  {
3062  if (!reason.empty())
3063  {
3064  JLOG(p_journal_.trace())
3065  << "Exception checking transaction: " << reason;
3066  }
3067  app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
3068  charge(Resource::feeInvalidSignature);
3069  return;
3070  }
3071 
3072  bool const trusted(flags & SF_TRUSTED);
3073  app_.getOPs().processTransaction(
3074  tx, trusted, false, NetworkOPs::FailHard::no);
3075  }
3076  catch (std::exception const&)
3077  {
3078  app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
3079  charge(Resource::feeBadData);
3080  }
3081 }
3082 
3083 // Called from our JobQueue
3084 void
3085 PeerImp::checkPropose(
3086  Job& job,
3087  std::shared_ptr<protocol::TMProposeSet> const& packet,
3088  RCLCxPeerPos peerPos)
3089 {
3090  bool isTrusted = (job.getType() == jtPROPOSAL_t);
3091 
3092  JLOG(p_journal_.trace())
3093  << "Checking " << (isTrusted ? "trusted" : "UNTRUSTED") << " proposal";
3094 
3095  assert(packet);
3096 
3097  if (!cluster() && !peerPos.checkSign())
3098  {
3099  JLOG(p_journal_.warn()) << "Proposal fails sig check";
3100  charge(Resource::feeInvalidSignature);
3101  return;
3102  }
3103 
3104  bool relay;
3105 
3106  if (isTrusted)
3107  relay = app_.getOPs().processTrustedProposal(peerPos);
3108  else
3109  relay = app_.config().RELAY_UNTRUSTED_PROPOSALS || cluster();
3110 
3111  if (relay)
3112  {
3113  // haveMessage contains the peers that are suppressed, i.e. the peers
3114  // that are the source of the message; consequently the message should
3115  // not be relayed to these peers, but it must still be counted as part
3116  // of the squelch logic.
3117  auto haveMessage = app_.overlay().relay(
3118  *packet, peerPos.suppressionID(), peerPos.publicKey());
3119  if (reduceRelayReady() && !haveMessage.empty())
3120  overlay_.updateSlotAndSquelch(
3121  peerPos.suppressionID(),
3122  peerPos.publicKey(),
3123  std::move(haveMessage),
3124  protocol::mtPROPOSE_LEDGER);
3125  }
3126 }
3127 
3128 void
3129 PeerImp::checkValidation(
3130  std::shared_ptr<STValidation> const& val,
3131  std::shared_ptr<protocol::TMValidation> const& packet)
3132 {
3133  if (!cluster() && !val->isValid())
3134  {
3135  JLOG(p_journal_.debug()) << "Validation forwarded by peer is invalid";
3136  charge(Resource::feeInvalidRequest);
3137  return;
3138  }
3139 
3140  // FIXME it should be safe to remove this try/catch. Investigate codepaths.
3141  try
3142  {
3143  if (app_.getOPs().recvValidation(val, std::to_string(id())) ||
3144  cluster())
3145  {
3146  auto const suppression =
3147  sha512Half(makeSlice(val->getSerialized()));
3148  // haveMessage contains the peers that are suppressed, i.e. the peers
3149  // that are the source of the message; consequently the message should
3150  // not be relayed to these peers, but it must still be counted as part
3151  // of the squelch logic.
3152  auto haveMessage =
3153  overlay_.relay(*packet, suppression, val->getSignerPublic());
3154  if (reduceRelayReady() && !haveMessage.empty())
3155  {
3156  overlay_.updateSlotAndSquelch(
3157  suppression,
3158  val->getSignerPublic(),
3159  std::move(haveMessage),
3160  protocol::mtVALIDATION);
3161  }
3162  }
3163  }
3164  catch (std::exception const&)
3165  {
3166  JLOG(p_journal_.trace()) << "Exception processing validation";
3167  charge(Resource::feeInvalidRequest);
3168  }
3169 }
3170 
3171 // Returns the peer most likely to help us get
3172 // the TX tree with the specified root hash.
3173 //
3174 static std::shared_ptr<PeerImp>
3175 getPeerWithTree(OverlayImpl& ov, uint256 const& rootHash, PeerImp const* skip)
3176 {
3177  std::shared_ptr<PeerImp> ret;
3178  int retScore = 0;
3179 
3180  ov.for_each([&](std::shared_ptr<PeerImp>&& p) {
3181  if (p->hasTxSet(rootHash) && p.get() != skip)
3182  {
3183  auto score = p->getScore(true);
3184  if (!ret || (score > retScore))
3185  {
3186  ret = std::move(p);
3187  retScore = score;
3188  }
3189  }
3190  });
3191 
3192  return ret;
3193 }
3194 
3195 // Returns a random peer, weighted by how likely it is to
3196 // have the ledger and how responsive it is.
3197 //
3198 static std::shared_ptr<PeerImp>
3199 getPeerWithLedger(
3200  OverlayImpl& ov,
3201  uint256 const& ledgerHash,
3202  LedgerIndex ledger,
3203  PeerImp const* skip)
3204 {
3205  std::shared_ptr<PeerImp> ret;
3206  int retScore = 0;
3207 
3208  ov.for_each([&](std::shared_ptr<PeerImp>&& p) {
3209  if (p->hasLedger(ledgerHash, ledger) && p.get() != skip)
3210  {
3211  auto score = p->getScore(true);
3212  if (!ret || (score > retScore))
3213  {
3214  ret = std::move(p);
3215  retScore = score;
3216  }
3217  }
3218  });
3219 
3220  return ret;
3221 }
3222 
3223 void
3224 PeerImp::sendLedgerBase(
3225  std::shared_ptr<Ledger const> const& ledger,
3226  protocol::TMLedgerData& ledgerData)
3227 {
3228  JLOG(p_journal_.trace()) << "sendLedgerBase: Base data";
3229 
3230  Serializer s(sizeof(LedgerInfo));
3231  addRaw(ledger->info(), s);
3232  ledgerData.add_nodes()->set_nodedata(s.getDataPtr(), s.getLength());
3233 
3234  auto const& stateMap{ledger->stateMap()};
3235  if (stateMap.getHash() != beast::zero)
3236  {
3237  // Return account state root node if possible
3238  Serializer root(768);
3239 
3240  stateMap.serializeRoot(root);
3241  ledgerData.add_nodes()->set_nodedata(
3242  root.getDataPtr(), root.getLength());
3243 
3244  if (ledger->info().txHash != beast::zero)
3245  {
3246  auto const& txMap{ledger->txMap()};
3247  if (txMap.getHash() != beast::zero)
3248  {
3249  // Return TX root node if possible
3250  root.erase();
3251  txMap.serializeRoot(root);
3252  ledgerData.add_nodes()->set_nodedata(
3253  root.getDataPtr(), root.getLength());
3254  }
3255  }
3256  }
3257 
3258  auto message{
3259  std::make_shared<Message>(ledgerData, protocol::mtLEDGER_DATA)};
3260  send(message);
3261 }
3262 
3263 std::shared_ptr<Ledger const>
3264 PeerImp::getLedger(std::shared_ptr<protocol::TMGetLedger> const& m)
3265 {
3266  JLOG(p_journal_.trace()) << "getLedger: Ledger";
3267 
3268  std::shared_ptr<Ledger const> ledger;
3269 
3270  if (m->has_ledgerhash())
3271  {
3272  // Attempt to find ledger by hash
3273  uint256 const ledgerHash{m->ledgerhash()};
3274  ledger = app_.getLedgerMaster().getLedgerByHash(ledgerHash);
3275  if (!ledger)
3276  {
3277  if (m->has_ledgerseq())
3278  {
3279  // Attempt to find ledger by sequence in the shard store
3280  if (auto shards = app_.getShardStore())
3281  {
3282  if (m->ledgerseq() >= shards->earliestLedgerSeq())
3283  {
3284  ledger =
3285  shards->fetchLedger(ledgerHash, m->ledgerseq());
3286  }
3287  }
3288  }
3289 
3290  if (!ledger)
3291  {
3292  JLOG(p_journal_.trace())
3293  << "getLedger: Don't have ledger with hash " << ledgerHash;
3294 
3295  if (m->has_querytype() && !m->has_requestcookie())
3296  {
3297  // Attempt to relay the request to a peer
3298  if (auto const peer = getPeerWithLedger(
3299  overlay_,
3300  ledgerHash,
3301  m->has_ledgerseq() ? m->ledgerseq() : 0,
3302  this))
3303  {
3304  m->set_requestcookie(id());
3305  peer->send(std::make_shared<Message>(
3306  *m, protocol::mtGET_LEDGER));
3307  JLOG(p_journal_.debug())
3308  << "getLedger: Request relayed to peer";
3309  return ledger;
3310  }
3311 
3312  JLOG(p_journal_.trace())
3313  << "getLedger: Failed to find peer to relay request";
3314  }
3315  }
3316  }
3317  }
3318  else if (m->has_ledgerseq())
3319  {
3320  // Attempt to find ledger by sequence
3321  if (m->ledgerseq() < app_.getLedgerMaster().getEarliestFetch())
3322  {
3323  JLOG(p_journal_.debug())
3324  << "getLedger: Early ledger sequence request";
3325  }
3326  else
3327  {
3328  ledger = app_.getLedgerMaster().getLedgerBySeq(m->ledgerseq());
3329  if (!ledger)
3330  {
3331  JLOG(p_journal_.debug())
3332  << "getLedger: Don't have ledger with sequence "
3333  << m->ledgerseq();
3334  }
3335  }
3336  }
3337  else if (m->has_ltype() && m->ltype() == protocol::ltCLOSED)
3338  {
3339  ledger = app_.getLedgerMaster().getClosedLedger();
3340  }
3341 
3342  if (ledger)
3343  {
3344  // Validate retrieved ledger sequence
3345  auto const ledgerSeq{ledger->info().seq};
3346  if (m->has_ledgerseq())
3347  {
3348  if (ledgerSeq != m->ledgerseq())
3349  {
3350  // Do not resource charge a peer responding to a relay
3351  if (!m->has_requestcookie())
3352  charge(Resource::feeInvalidRequest);
3353 
3354  ledger.reset();
3355  JLOG(p_journal_.warn())
3356  << "getLedger: Invalid ledger sequence " << ledgerSeq;
3357  }
3358  }
3359  else if (ledgerSeq < app_.getLedgerMaster().getEarliestFetch())
3360  {
3361  ledger.reset();
3362  JLOG(p_journal_.debug())
3363  << "getLedger: Early ledger sequence request " << ledgerSeq;
3364  }
3365  }
3366  else
3367  {
3368  JLOG(p_journal_.warn()) << "getLedger: Unable to find ledger";
3369  }
3370 
3371  return ledger;
3372 }
3373 
3374 std::shared_ptr<SHAMap>
3375 PeerImp::getTxSet(std::shared_ptr<protocol::TMGetLedger> const& m) const
3376 {
3377  JLOG(p_journal_.trace()) << "getTxSet: TX set";
3378 
3379  uint256 const txSetHash{m->ledgerhash()};
3380  std::shared_ptr<SHAMap> shaMap{
3381  app_.getInboundTransactions().getSet(txSetHash, false)};
3382  if (!shaMap)
3383  {
3384  if (m->has_querytype() && !m->has_requestcookie())
3385  {
3386  // Attempt to relay the request to a peer
3387  if (auto const peer = getPeerWithTree(overlay_, txSetHash, this))
3388  {
3389  m->set_requestcookie(id());
3390  peer->send(
3391  std::make_shared<Message>(*m, protocol::mtGET_LEDGER));
3392  JLOG(p_journal_.debug()) << "getTxSet: Request relayed";
3393  }
3394  else
3395  {
3396  JLOG(p_journal_.debug())
3397  << "getTxSet: Failed to find relay peer";
3398  }
3399  }
3400  else
3401  {
3402  JLOG(p_journal_.debug()) << "getTxSet: Failed to find TX set";
3403  }
3404  }
3405 
3406  return shaMap;
3407 }
3408 
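// Serves a TMGetLedger request. liTS_CANDIDATE requests are answered from
// the in-flight transaction-set SHAMap; all other types resolve a ledger
// (by hash, by sequence, or the closed ledger) and reply with either the
// ledger base (header plus state and transaction root nodes) or the
// requested fat nodes, capped at Tuning::softMaxReplyNodes per reply.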
3409 void
3410 PeerImp::processLedgerRequest(std::shared_ptr<protocol::TMGetLedger> const& m)
3411 {
3412  // Do not resource charge a peer responding to a relay
3413  if (!m->has_requestcookie())
3414  charge(Resource::feeMediumBurdenPeer);
3415 
3416  std::shared_ptr<Ledger const> ledger;
3417  std::shared_ptr<SHAMap> sharedMap;
3418  SHAMap const* map{nullptr};
3419  protocol::TMLedgerData ledgerData;
3420  bool fatLeaves{true};
3421  auto const itype{m->itype()};
3422 
3423  if (itype == protocol::liTS_CANDIDATE)
3424  {
3425  if (sharedMap = getTxSet(m); !sharedMap)
3426  return;
3427  map = sharedMap.get();
3428 
3429  // Fill out the reply
3430  ledgerData.set_ledgerseq(0);
3431  ledgerData.set_ledgerhash(m->ledgerhash());
3432  ledgerData.set_type(protocol::liTS_CANDIDATE);
3433  if (m->has_requestcookie())
3434  ledgerData.set_requestcookie(m->requestcookie());
3435 
3436  // We'll already have most transactions
3437  fatLeaves = false;
3438  }
3439  else
3440  {
3441  if (send_queue_.size() >= Tuning::dropSendQueue)
3442  {
3443  JLOG(p_journal_.debug())
3444  << "processLedgerRequest: Large send queue";
3445  return;
3446  }
3447  if (app_.getFeeTrack().isLoadedLocal() && !cluster())
3448  {
3449  JLOG(p_journal_.debug()) << "processLedgerRequest: Too busy";
3450  return;
3451  }
3452 
3453  if (ledger = getLedger(m); !ledger)
3454  return;
3455 
3456  // Fill out the reply
3457  auto const ledgerHash{ledger->info().hash};
3458  ledgerData.set_ledgerhash(ledgerHash.begin(), ledgerHash.size());
3459  ledgerData.set_ledgerseq(ledger->info().seq);
3460  ledgerData.set_type(itype);
3461  if (m->has_requestcookie())
3462  ledgerData.set_requestcookie(m->requestcookie());
3463 
3464  switch (itype)
3465  {
3466  case protocol::liBASE:
3467  sendLedgerBase(ledger, ledgerData);
3468  return;
3469 
3470  case protocol::liTX_NODE:
3471  map = &ledger->txMap();
3472  JLOG(p_journal_.trace()) << "processLedgerRequest: TX map hash "
3473  << to_string(map->getHash());
3474  break;
3475 
3476  case protocol::liAS_NODE:
3477  map = &ledger->stateMap();
3478  JLOG(p_journal_.trace())
3479  << "processLedgerRequest: Account state map hash "
3480  << to_string(map->getHash());
3481  break;
3482 
3483  default:
3484  // This case should not be possible here
3485  JLOG(p_journal_.error())
3486  << "processLedgerRequest: Invalid ledger info type";
3487  return;
3488  }
3489  }
3490 
3491  if (!map)
3492  {
3493  JLOG(p_journal_.warn()) << "processLedgerRequest: Unable to find map";
3494  return;
3495  }
3496 
3497  // Add requested node data to reply
3498  if (m->nodeids_size() > 0)
3499  {
3500  auto const queryDepth{
3501  m->has_querydepth() ? m->querydepth() : (isHighLatency() ? 2 : 1)};
3502  std::vector<SHAMapNodeID> nodeIds;
3503  std::vector<Blob> rawNodes;
3504 
3505  for (int i = 0; i < m->nodeids_size() &&
3506  ledgerData.nodes_size() < Tuning::softMaxReplyNodes;
3507  ++i)
3508  {
3509  auto const shaMapNodeId{deserializeSHAMapNodeID(m->nodeids(i))};
3510 
3511  nodeIds.clear();
3512  rawNodes.clear();
3513  try
3514  {
3515  if (map->getNodeFat(
3516  *shaMapNodeId,
3517  nodeIds,
3518  rawNodes,
3519  fatLeaves,
3520  queryDepth))
3521  {
3522  assert(nodeIds.size() == rawNodes.size());
3523  JLOG(p_journal_.trace())
3524  << "processLedgerRequest: getNodeFat got "
3525  << rawNodes.size() << " nodes";
3526 
3527  auto rawNodeIter{rawNodes.begin()};
3528  for (auto const& nodeId : nodeIds)
3529  {
3530  protocol::TMLedgerNode* node{ledgerData.add_nodes()};
3531  node->set_nodeid(nodeId.getRawString());
3532  node->set_nodedata(
3533  &rawNodeIter->front(), rawNodeIter->size());
3534  ++rawNodeIter;
3535  }
3536  }
3537  else
3538  {
3539  JLOG(p_journal_.warn())
3540  << "processLedgerRequest: getNodeFat returns false";
3541  }
3542  }
3543  catch (std::exception& e)
3544  {
3545  std::string info;
3546  switch (itype)
3547  {
3548  case protocol::liBASE:
3549  // This case should not be possible here
3550  info = "Ledger base";
3551  break;
3552 
3553  case protocol::liTX_NODE:
3554  info = "TX node";
3555  break;
3556 
3557  case protocol::liAS_NODE:
3558  info = "AS node";
3559  break;
3560 
3561  case protocol::liTS_CANDIDATE:
3562  info = "TS candidate";
3563  break;
3564 
3565  default:
3566  info = "Invalid";
3567  break;
3568  }
3569 
3570  if (!m->has_ledgerhash())
3571  info += ", no hash specified";
3572 
3573  JLOG(p_journal_.error())
3574  << "processLedgerRequest: getNodeFat with nodeId "
3575  << *shaMapNodeId << " and ledger info type " << info
3576  << " throws exception: " << e.what();
3577  }
3578  }
3579 
3580  JLOG(p_journal_.info())
3581  << "processLedgerRequest: Got request for " << m->nodeids_size()
3582  << " nodes at depth " << queryDepth << ", return "
3583  << ledgerData.nodes_size() << " nodes";
3584  }
3585 
3586  auto message{
3587  std::make_shared<Message>(ledgerData, protocol::mtLEDGER_DATA)};
3588  send(message);
3589 }
3590 
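// Peer-selection score used by getPeerWithTree() and getPeerWithLedger()
// above. A worked example using the constants defined below: a peer known
// to have the item, with a measured latency of 100ms, scores roughly
// rand(0..9999) + 10000 - 100 * 30, i.e. somewhere between 7000 and 17000,
// while a peer with unknown latency loses a flat 8000 instead.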
3591 int
3592 PeerImp::getScore(bool haveItem) const
3593 {
3594  // Random component of score, used to break ties and avoid
3595  // overloading the "best" peer
3596  static const int spRandomMax = 9999;
3597 
3598  // Score for being very likely to have the thing we are
3599  // looking for; should be roughly spRandomMax
3600  static const int spHaveItem = 10000;
3601 
3602  // Score reduction for each millisecond of latency; should
3603  // be roughly spRandomMax divided by the maximum reasonable
3604  // latency
3605  static const int spLatency = 30;
3606 
3607  // Penalty for unknown latency; should be roughly spRandomMax
3608  static const int spNoLatency = 8000;
3609 
3610  int score = rand_int(spRandomMax);
3611 
3612  if (haveItem)
3613  score += spHaveItem;
3614 
3615  std::optional<std::chrono::milliseconds> latency;
3616  {
3617  std::lock_guard sl(recentLock_);
3618  latency = latency_;
3619  }
3620 
3621  if (latency)
3622  score -= latency->count() * spLatency;
3623  else
3624  score -= spNoLatency;
3625 
3626  return score;
3627 }
3628 
3629 bool
3630 PeerImp::isHighLatency() const
3631 {
3632  std::lock_guard sl(recentLock_);
3633  return latency_ >= peerHighLatency;
3634 }
3635 
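// Gate for the validation/proposal reduce-relay (squelching) logic: it only
// engages once the feature is enabled for this peer (vpReduceRelayEnabled_)
// and the server has been up longer than reduce_relay::WAIT_ON_BOOTUP, so
// that a freshly started server first builds out its peer connections.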
3636 bool
3637 PeerImp::reduceRelayReady()
3638 {
3639  if (!reduceRelayReady_)
3640  reduceRelayReady_ =
3641  reduce_relay::epoch<std::chrono::minutes>(UptimeClock::now()) >
3642  reduce_relay::WAIT_ON_BOOTUP;
3643  return vpReduceRelayEnabled_ && reduceRelayReady_;
3644 }
3645 
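// Bandwidth accounting: add_message() below accumulates bytes and, once at
// least one second has elapsed, appends the per-second average to a rolling
// window (rollingAvg_) and recomputes rollingAvgBytes_ as the mean of that
// window. For example, if 12,000 bytes arrive over a 2-second interval, a
// single sample of 6,000 bytes per second is appended and the interval
// restarts.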
3646 void
3647 PeerImp::Metrics::add_message(std::uint64_t bytes)
3648 {
3649  using namespace std::chrono_literals;
3650  std::unique_lock lock{mutex_};
3651 
3652  totalBytes_ += bytes;
3653  accumBytes_ += bytes;
3654  auto const timeElapsed = clock_type::now() - intervalStart_;
3655  auto const timeElapsedInSecs =
3656  std::chrono::duration_cast<std::chrono::seconds>(timeElapsed);
3657 
3658  if (timeElapsedInSecs >= 1s)
3659  {
3660  auto const avgBytes = accumBytes_ / timeElapsedInSecs.count();
3661  rollingAvg_.push_back(avgBytes);
3662 
3663  auto const totalBytes =
3664  std::accumulate(rollingAvg_.begin(), rollingAvg_.end(), 0ull);
3665  rollingAvgBytes_ = totalBytes / rollingAvg_.size();
3666 
3667  intervalStart_ = clock_type::now();
3668  accumBytes_ = 0;
3669  }
3670 }
3671 
3672 std::uint64_t
3673 PeerImp::Metrics::average_bytes() const
3674 {
3675  std::shared_lock lock{mutex_};
3676  return rollingAvgBytes_;
3677 }
3678 
3679 std::uint64_t
3680 PeerImp::Metrics::total_bytes() const
3681 {
3682  std::shared_lock lock{mutex_};
3683  return totalBytes_;
3684 }
3685 
3686 } // namespace ripple
ripple::PublicKey::data
std::uint8_t const * data() const noexcept
Definition: PublicKey.h:81
ripple::PeerImp::latency_
std::optional< std::chrono::milliseconds > latency_
Definition: PeerImp.h:114
ripple::PeerImp::ledgerRange
void ledgerRange(std::uint32_t &minSeq, std::uint32_t &maxSeq) const override
Definition: PeerImp.cpp:541
ripple::PeerImp::uptime
clock_type::duration uptime() const
Definition: PeerImp.h:352
ripple::Resource::feeInvalidRequest
const Charge feeInvalidRequest
Schedule of fees charged for imposing load on the server.
ripple::Application
Definition: Application.h:115
ripple::ClusterNode
Definition: ClusterNode.h:30
ripple::jtTRANSACTION
@ jtTRANSACTION
Definition: Job.h:55
ripple::PeerImp::inbound_
const bool inbound_
Definition: PeerImp.h:91
ripple::TrafficCount::categorize
static category categorize(::google::protobuf::Message const &message, int type, bool inbound)
Given a protocol message, determine which traffic category it belongs to.
Definition: TrafficCount.cpp:25
sstream
ripple::PeerImp::recentLock_
std::mutex recentLock_
Definition: PeerImp.h:149
ripple::HashRouter::addSuppressionPeerWithStatus
std::pair< bool, std::optional< Stopwatch::time_point > > addSuppressionPeerWithStatus(uint256 const &key, PeerShortID peer)
Add a suppression peer and get message's relay status.
Definition: HashRouter.cpp:57
ripple::RCLCxPeerPos
A peer's signed, proposed position for use in RCLConsensus.
Definition: RCLCxPeerPos.h:42
std::weak_ptr::lock
T lock(T... args)
std::for_each
T for_each(T... args)
ripple::PeerImp::stream_ptr_
std::unique_ptr< stream_type > stream_ptr_
Definition: PeerImp.h:78
ripple::makeSlice
std::enable_if_t< std::is_same< T, char >::value||std::is_same< T, unsigned char >::value, Slice > makeSlice(std::array< T, N > const &a)
Definition: Slice.h:240
ripple::Application::cluster
virtual Cluster & cluster()=0
ripple::PeerImp::socket_
socket_type & socket_
Definition: PeerImp.h:79
std::bind
T bind(T... args)
ripple::PeerImp::trackingTime_
clock_type::time_point trackingTime_
Definition: PeerImp.h:97
ripple::ShardState::complete
@ complete
ripple::HashRouter::addSuppressionPeer
bool addSuppressionPeer(uint256 const &key, PeerShortID peer)
Definition: HashRouter.cpp:51
std::string
STL class.
ripple::Resource::feeMediumBurdenPeer
const Charge feeMediumBurdenPeer
std::shared_ptr
STL class.
ripple::PeerImp::addTxQueue
void addTxQueue(uint256 const &hash) override
Add transaction's hash to the transactions' hashes queue.
Definition: PeerImp.cpp:314
ripple::PeerImp::onMessage
void onMessage(std::shared_ptr< protocol::TMManifests > const &m)
Definition: PeerImp.cpp:1057
ripple::ManifestCache::getMasterKey
PublicKey getMasterKey(PublicKey const &pk) const
Returns ephemeral signing key's master public key.
Definition: app/misc/impl/Manifest.cpp:296
ripple::Overlay::Setup::networkID
std::optional< std::uint32_t > networkID
Definition: Overlay.h:75
std::exception
STL class.
ripple::PeerImp::hasTxSet
bool hasTxSet(uint256 const &hash) const override
Definition: PeerImp.cpp:550
ripple::calcNodeID
NodeID calcNodeID(PublicKey const &pk)
Calculate the 160-bit node ID from a node public key.
Definition: PublicKey.cpp:299
beast::Journal::trace
Stream trace() const
Severity stream access functions.
Definition: Journal.h:309
ripple::PeerImp::strand_
boost::asio::strand< boost::asio::executor > strand_
Definition: PeerImp.h:81
ripple::PeerImp::recentLedgers_
boost::circular_buffer< uint256 > recentLedgers_
Definition: PeerImp.h:111
ripple::PeerImp::ledgerReplayEnabled_
bool ledgerReplayEnabled_
Definition: PeerImp.h:181
ripple::deserializeSHAMapNodeID
std::optional< SHAMapNodeID > deserializeSHAMapNodeID(void const *data, std::size_t size)
Return an object representing a serialized SHAMap Node ID.
Definition: SHAMapNodeID.cpp:101
ripple::PeerImp::request_
http_request_type request_
Definition: PeerImp.h:155
ripple::Resource::Gossip
Data format for exchanging consumption information across peers.
Definition: Gossip.h:29
ripple::Slice
An immutable linear range of bytes.
Definition: Slice.h:44
ripple::PeerImp::~PeerImp
virtual ~PeerImp()
Definition: PeerImp.cpp:134
ripple::Serializer::erase
void erase()
Definition: Serializer.h:209
ripple::relayLimit
static constexpr std::uint32_t relayLimit
Definition: ripple/overlay/Peer.h:36
beast::IP::Endpoint::to_string
std::string to_string() const
Returns a string representing the endpoint.
Definition: IPEndpoint.cpp:54
std::pair
ripple::jtMISSING_TXN
@ jtMISSING_TXN
Definition: Job.h:56
ripple::PeerImp::doAccept
void doAccept()
Definition: PeerImp.cpp:772
std::vector::reserve
T reserve(T... args)
ripple::OverlayImpl::updateSlotAndSquelch
void updateSlotAndSquelch(uint256 const &key, PublicKey const &validator, std::set< Peer::id_t > &&peers, protocol::MessageType type)
Updates message count for validator/peer.
Definition: OverlayImpl.cpp:1484
ripple::HashRouter::shouldProcess
bool shouldProcess(uint256 const &key, PeerShortID peer, int &flags, std::chrono::seconds tx_interval)
Definition: HashRouter.cpp:78
ripple::PeerImp::handleTransaction
void handleTransaction(std::shared_ptr< protocol::TMTransaction > const &m, bool eraseTxQueue)
Called from onMessage(TMTransaction(s)).
Definition: PeerImp.cpp:1519
ripple::HashPrefix::manifest
@ manifest
Manifest.
ripple::LedgerMaster::getValidLedgerIndex
LedgerIndex getValidLedgerIndex()
Definition: LedgerMaster.cpp:214
ripple::OverlayImpl::endOfPeerChain
void endOfPeerChain(std::uint32_t id)
Called when the reply from the last peer in a peer chain is received.
Definition: OverlayImpl.cpp:774
ripple::addRaw
void addRaw(LedgerInfo const &info, Serializer &s, bool includeHash)
Definition: View.cpp:164
Json::UInt
unsigned int UInt
Definition: json_forwards.h:27
ripple::verify
bool verify(PublicKey const &publicKey, Slice const &m, Slice const &sig, bool mustBeFullyCanonical) noexcept
Verify a signature on a message.
Definition: PublicKey.cpp:268
ripple::PeerImp::doProtocolStart
void doProtocolStart()
Definition: PeerImp.cpp:855
std::vector
STL class.
std::find
T find(T... args)
std::string::size
T size(T... args)
ripple::PeerImp::recentTxSets_
boost::circular_buffer< uint256 > recentTxSets_
Definition: PeerImp.h:112
ripple::PublicKey::empty
bool empty() const noexcept
Definition: PublicKey.h:117
ripple::PeerImp::sendTxQueue
void sendTxQueue() override
Send aggregated transactions' hashes.
Definition: PeerImp.cpp:295
ripple::make_protocol
constexpr ProtocolVersion make_protocol(std::uint16_t major, std::uint16_t minor)
Definition: ProtocolVersion.h:40
ripple::PeerImp::txReduceRelayEnabled
bool txReduceRelayEnabled() const override
Definition: PeerImp.h:429
ripple::FEATURE_LEDGER_REPLAY
static constexpr char FEATURE_LEDGER_REPLAY[]
Definition: Handshake.h:148
std::chrono::milliseconds
ripple::PeerImp::setTimer
void setTimer()
Definition: PeerImp.cpp:660
ripple::ProtocolFeature::LedgerReplay
@ LedgerReplay
ripple::OverlayImpl::incPeerDisconnectCharges
void incPeerDisconnectCharges() override
Definition: OverlayImpl.h:380
ripple::toBase58
std::string toBase58(AccountID const &v)
Convert AccountID to base58 checked string.
Definition: AccountID.cpp:29
beast::IP::Endpoint::address
Address const & address() const
Returns the address portion of this endpoint.
Definition: IPEndpoint.h:76
ripple::PeerImp::getVersion
std::string getVersion() const
Return the version of rippled that the peer is running, if reported.
Definition: PeerImp.cpp:372
std::set::emplace
T emplace(T... args)
std::stringstream
STL class.
beast::Journal::warn
Stream warn() const
Definition: Journal.h:327
std::shared_ptr::get
T get(T... args)
std::lock_guard
STL class.
ripple::Application::getShardStore
virtual NodeStore::DatabaseShard * getShardStore()=0
ripple::PeerImp::close
void close()
Definition: PeerImp.cpp:578
ripple::PeerImp::charge
void charge(Resource::Charge const &fee) override
Adjust this peer's load balance based on the type of load imposed.
Definition: PeerImp.cpp:343
ripple::PeerImp::onMessageUnknown
void onMessageUnknown(std::uint16_t type)
Definition: PeerImp.cpp:1008
ripple::makeSharedValue
std::optional< uint256 > makeSharedValue(stream_type &ssl, beast::Journal journal)
Computes a shared value based on the SSL connection state.
Definition: Handshake.cpp:145
ripple::Cluster::member
std::optional< std::string > member(PublicKey const &node) const
Determines whether a node belongs in the cluster.
Definition: Cluster.cpp:39
ripple::JobQueue::addJob
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition: JobQueue.h:166
ripple::stopwatch
Stopwatch & stopwatch()
Returns an instance of a wall clock.
Definition: chrono.h:88
std::setfill
T setfill(T... args)
ripple::PeerImp::journal_
const beast::Journal journal_
Definition: PeerImp.h:76
ripple::PeerImp::send
void send(std::shared_ptr< Message > const &m) override
Definition: PeerImp.cpp:241
ripple::Application::timeKeeper
virtual TimeKeeper & timeKeeper()=0
ripple::OverlayImpl::setup
Setup const & setup() const
Definition: OverlayImpl.h:178
ripple::ProtocolFeature
ProtocolFeature
Definition: ripple/overlay/Peer.h:38
ripple::PeerImp::onTimer
void onTimer(boost::system::error_code const &ec)
Definition: PeerImp.cpp:695
ripple::ShardState
ShardState
Shard states.
Definition: nodestore/Types.h:60
ripple::Cluster::update
bool update(PublicKey const &identity, std::string name, std::uint32_t loadFee=0, NetClock::time_point reportTime=NetClock::time_point{})
Store information about the state of a cluster node.
Definition: Cluster.cpp:58
ripple::PeerImp::lastPingTime_
clock_type::time_point lastPingTime_
Definition: PeerImp.h:116
ripple::OverlayImpl::incJqTransOverflow
void incJqTransOverflow() override
Increment and retrieve counter for transaction job queue overflows.
Definition: OverlayImpl.h:356
ripple::PeerImp
Definition: PeerImp.h:52
ripple::PeerFinder::Config::peerPrivate
bool peerPrivate
true if we want our IP address kept private.
Definition: PeerfinderManager.h:61
ripple::Config::MAX_TRANSACTIONS
int MAX_TRANSACTIONS
Definition: Config.h:208
ripple::PeerImp::previousLedgerHash_
uint256 previousLedgerHash_
Definition: PeerImp.h:109
ripple::FEATURE_VPRR
static constexpr char FEATURE_VPRR[]
Definition: Handshake.h:144
std::optional::reset
T reset(T... args)
ripple::PeerImp::txQueue_
hash_set< uint256 > txQueue_
Definition: PeerImp.h:175
ripple::base_uint::data
pointer data()
Definition: base_uint.h:115
algorithm
ripple::Application::getOPs
virtual NetworkOPs & getOPs()=0
ripple::PeerImp::name_
std::string name_
Definition: PeerImp.h:101
ripple::PeerFinder::Manager::on_endpoints
virtual void on_endpoints(std::shared_ptr< Slot > const &slot, Endpoints const &endpoints)=0
Called when mtENDPOINTS is received.
ripple::forceValidity
void forceValidity(HashRouter &router, uint256 const &txid, Validity validity)
Sets the validity of a given transaction in the cache.
Definition: apply.cpp:89
std::vector::clear
T clear(T... args)
ripple::Application::getInboundLedgers
virtual InboundLedgers & getInboundLedgers()=0
ripple::Application::getFeeTrack
virtual LoadFeeTrack & getFeeTrack()=0
ripple::base_uint< 256 >::size
constexpr static std::size_t size()
Definition: base_uint.h:498
ripple::ValidatorList::sendValidatorList
static void sendValidatorList(Peer &peer, std::uint64_t peerSequence, PublicKey const &publisherKey, std::size_t maxSequence, std::uint32_t rawVersion, std::string const &rawManifest, std::map< std::size_t, ValidatorBlobInfo > const &blobInfos, HashRouter &hashRouter, beast::Journal j)
Definition: ValidatorList.cpp:749
ripple::getPeerWithLedger
static std::shared_ptr< PeerImp > getPeerWithLedger(OverlayImpl &ov, uint256 const &ledgerHash, LedgerIndex ledger, PeerImp const *skip)
Definition: PeerImp.cpp:3199
ripple::PeerImp::publicKey_
const PublicKey publicKey_
Definition: PeerImp.h:100
ripple::protocolMessageName
std::string protocolMessageName(int type)
Returns the name of a protocol message given its type.
Definition: ProtocolMessage.h:62
ripple::Serializer::data
void const * data() const noexcept
Definition: Serializer.h:75
ripple::PeerImp::read_buffer_
boost::beast::multi_buffer read_buffer_
Definition: PeerImp.h:154
ripple::PeerImp::error_code
boost::system::error_code error_code
Definition: PeerImp.h:62
ripple::JobQueue::getJobCount
int getJobCount(JobType t) const
Jobs waiting at this priority.
Definition: JobQueue.cpp:109
ripple::FEATURE_TXRR
static constexpr char FEATURE_TXRR[]
Definition: Handshake.h:146
std::tie
T tie(T... args)
ripple::PeerImp::remote_address_
const beast::IP::Endpoint remote_address_
Definition: PeerImp.h:86
ripple::publicKeyType
std::optional< KeyType > publicKeyType(Slice const &slice)
Returns the type of public key.
Definition: PublicKey.cpp:203
ripple::jtTXN_DATA
@ jtTXN_DATA
Definition: Job.h:61
ripple::PeerFinder::Manager::on_closed
virtual void on_closed(std::shared_ptr< Slot > const &slot)=0
Called when the slot is closed.
ripple::OverlayImpl::peerFinder
PeerFinder::Manager & peerFinder()
Definition: OverlayImpl.h:166
ripple::getPeerWithTree
static std::shared_ptr< PeerImp > getPeerWithTree(OverlayImpl &ov, uint256 const &rootHash, PeerImp const *skip)
Definition: PeerImp.cpp:3175
ripple::base_uint< 256 >
ripple::INCLUDED
@ INCLUDED
Definition: Transaction.h:48
ripple::LoadFeeTrack::isLoadedLocal
bool isLoadedLocal() const
Definition: LoadFeeTrack.h:123
ripple::PeerImp::addLedger
void addLedger(uint256 const &hash, std::lock_guard< std::mutex > const &lockedRecentLock)
Definition: PeerImp.cpp:2906
ripple::Resource::feeInvalidSignature
const Charge feeInvalidSignature
ripple::OverlayImpl::onManifests
void onManifests(std::shared_ptr< protocol::TMManifests > const &m, std::shared_ptr< PeerImp > const &from)
Definition: OverlayImpl.cpp:625
ripple::Overlay::Setup::public_ip
beast::IP::Address public_ip
Definition: Overlay.h:72
std::enable_shared_from_this< PeerImp >::shared_from_this
T shared_from_this(T... args)
ripple::rand_int
std::enable_if_t< std::is_integral< Integral >::value &&detail::is_engine< Engine >::value, Integral > rand_int(Engine &engine, Integral min, Integral max)
Return a uniformly distributed random integer.
Definition: ripple/basics/random.h:115
ripple::NetworkOPs::isNeedNetworkLedger
virtual bool isNeedNetworkLedger()=0
ripple::Resource::drop
@ drop
Definition: Disposition.h:37
ripple::checkValidity
std::pair< Validity, std::string > checkValidity(HashRouter &router, STTx const &tx, Rules const &rules, Config const &config)
Checks transaction signature and local checks.
Definition: apply.cpp:37
ripple::jtPROPOSAL_t
@ jtPROPOSAL_t
Definition: Job.h:66
ripple::base_uint::isZero
bool isZero() const
Definition: base_uint.h:511
ripple::OverlayImpl::resourceManager
Resource::Manager & resourceManager()
Definition: OverlayImpl.h:172
Json::objectValue
@ objectValue
object value (collection of name/value pairs).
Definition: json_value.h:43
ripple::LedgerReplayMsgHandler::processProofPathResponse
bool processProofPathResponse(std::shared_ptr< protocol::TMProofPathResponse > const &msg)
Process TMProofPathResponse.
Definition: LedgerReplayMsgHandler.cpp:105
ripple::PeerImp::gracefulClose
void gracefulClose()
Definition: PeerImp.cpp:639
ripple::Application::getLedgerMaster
virtual LedgerMaster & getLedgerMaster()=0
ripple::PublicKey
A public key.
Definition: PublicKey.h:59
std::atomic::load
T load(T... args)
ripple::Resource::feeBadData
const Charge feeBadData
ripple::PublicKey::size
std::size_t size() const noexcept
Definition: PublicKey.h:87
ripple::Serializer::getDataPtr
const void * getDataPtr() const
Definition: Serializer.h:189
ripple::Resource::Manager::importConsumers
virtual void importConsumers(std::string const &origin, Gossip const &gossip)=0
Import packaged consumer information.
ripple::PeerImp::closedLedgerHash_
uint256 closedLedgerHash_
Definition: PeerImp.h:108
ripple::PeerImp::detaching_
bool detaching_
Definition: PeerImp.h:98
ripple::PeerImp::onMessageEnd
void onMessageEnd(std::uint16_t type, std::shared_ptr<::google::protobuf::Message > const &m)
Definition: PeerImp.cpp:1048
ripple::Application::config
virtual Config & config()=0
ripple::PeerImp::shardInfos_
hash_map< PublicKey, NodeStore::ShardInfo > shardInfos_
Definition: PeerImp.h:167
ripple::isCurrent
bool isCurrent(ValidationParms const &p, NetClock::time_point now, NetClock::time_point signTime, NetClock::time_point seenTime)
Whether a validation is still current.
Definition: Validations.h:146
beast::Journal::active
bool active(Severity level) const
Returns true if any message would be logged at this severity level.
Definition: Journal.h:301
ripple::PeerImp::stream_
stream_type & stream_
Definition: PeerImp.h:80
ripple::PeerImp::onWriteMessage
void onWriteMessage(error_code ec, std::size_t bytes_transferred)
Definition: PeerImp.cpp:954
std::unique_lock
STL class.
ripple::SHAMap
A SHAMap is both a radix tree with a fan-out of 16 and a Merkle tree.
Definition: SHAMap.h:95
ripple::InfoSub::Source::pubPeerStatus
virtual void pubPeerStatus(std::function< Json::Value(void)> const &)=0
ripple::Application::nodeIdentity
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
ripple::Tuning::hardMaxReplyNodes
@ hardMaxReplyNodes
The hard cap on the number of ledger entries in a single reply.
Definition: overlay/impl/Tuning.h:42
ripple::jtVALIDATION_t
@ jtVALIDATION_t
Definition: Job.h:63
ripple::reduce_relay::IDLED
static constexpr auto IDLED
Definition: ReduceRelayCommon.h:39
ripple::PeerImp::hasRange
bool hasRange(std::uint32_t uMin, std::uint32_t uMax) override
Definition: PeerImp.cpp:568
ripple::Resource::feeUnwantedData
const Charge feeUnwantedData
ripple::Serializer::addRaw
int addRaw(Blob const &vector)
Definition: Serializer.cpp:100
std::to_string
T to_string(T... args)
ripple::Application::getJobQueue
virtual JobQueue & getJobQueue()=0
ripple::Resource::Gossip::items
std::vector< Item > items
Definition: Gossip.h:42
ripple::PeerImp::cycleStatus
void cycleStatus() override
Definition: PeerImp.cpp:558
ripple::set
bool set(T &target, std::string const &name, Section const &section)
Set a value from a configuration Section. If the named value is not found or doesn't parse as a T,...
Definition: BasicConfig.h:313
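A minimal sketch of reading an optional value through this helper; the section name and key are purely illustrative.

// `limit` keeps its default if the key is absent or fails to parse.
std::size_t limit = 10;
set(limit, "peer_limit", app_.config().section("overlay"));
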
ripple::PeerImp::app_
Application & app_
Definition: PeerImp.h:72
ripple::PeerImp::crawl
bool crawl() const
Returns true if this connection will publicly share its IP address.
Definition: PeerImp.cpp:357
ripple::PeerImp::minLedger_
LedgerIndex minLedger_
Definition: PeerImp.h:106
ripple::FEATURE_COMPR
static constexpr char FEATURE_COMPR[]
Definition: Handshake.h:142
ripple::Serializer::slice
Slice slice() const noexcept
Definition: Serializer.h:63
ripple::PeerImp::ledgerReplayMsgHandler_
LedgerReplayMsgHandler ledgerReplayMsgHandler_
Definition: PeerImp.h:182
ripple::base64_decode
std::string base64_decode(std::string const &data)
Definition: base64.cpp:245
beast::Journal::error
Stream error() const
Definition: Journal.h:333
beast::Journal::info
Stream info() const
Definition: Journal.h:321
std::chrono::time_point
ripple::ShardState::finalized
@ finalized
ripple::PeerImp::hasLedger
bool hasLedger(uint256 const &hash, std::uint32_t seq) const override
Definition: PeerImp.cpp:515
ripple::PeerImp::Tracking::unknown
@ unknown
ripple::Resource::Consumer::balance
int balance()
Returns the credit balance representing consumption.
Definition: Consumer.cpp:124
ripple::HashPrefix::proposal
@ proposal
proposal for signing
ripple::TimeKeeper::closeTime
virtual time_point closeTime() const =0
Returns the close time, in network time.
ripple::Job
Definition: Job.h:87
ripple::PeerImp::headers_
boost::beast::http::fields const & headers_
Definition: PeerImp.h:157
std::accumulate
T accumulate(T... args)
ripple::SerialIter
Definition: Serializer.h:310
ripple::PeerImp::metrics_
struct ripple::PeerImp::@13 metrics_
ripple::peerFeatureEnabled
bool peerFeatureEnabled(headers const &request, std::string const &feature, std::string value, bool config)
Check if a feature should be enabled for a peer.
Definition: Handshake.h:199
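A sketch of how compression negotiation might be wired up from this check; FEATURE_COMPR and headers_ appear elsewhere in this listing, while the app_.config().COMPRESSION flag is an assumption.

// Enable compression only if both sides advertised it during the
// handshake and it is switched on locally.
bool const compressed = peerFeatureEnabled(
    headers_, FEATURE_COMPR, "lz4", app_.config().COMPRESSION);
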
ripple::PeerImp::reduceRelayReady
bool reduceRelayReady()
Definition: PeerImp.cpp:3637
std::uint32_t
ripple::PeerImp::send_queue_
std::queue< std::shared_ptr< Message > > send_queue_
Definition: PeerImp.h:158
ripple::PeerImp::slot_
const std::shared_ptr< PeerFinder::Slot > slot_
Definition: PeerImp.h:153
ripple::Overlay::foreach
void foreach(Function f) const
Visit every active peer.
Definition: Overlay.h:198
ripple::PeerImp::load_event_
std::unique_ptr< LoadEvent > load_event_
Definition: PeerImp.h:161
ripple::ShardState::finalizing
@ finalizing
std::map
STL class.
ripple::PeerImp::protocol_
ProtocolVersion protocol_
Definition: PeerImp.h:94
ripple::Application::getValidationPublicKey
virtual PublicKey const & getValidationPublicKey() const =0
ripple::Cluster::size
std::size_t size() const
The number of nodes in the cluster list.
Definition: Cluster.cpp:50
std::nth_element
T nth_element(T... args)
memory
ripple::PeerImp::waitable_timer
boost::asio::basic_waitable_timer< std::chrono::steady_clock > waitable_timer
Definition: PeerImp.h:69
ripple::jtPEER
@ jtPEER
Definition: Job.h:72
ripple::NodeStore::ShardInfo
Definition: ShardInfo.h:32
ripple::PeerImp::onShutdown
void onShutdown(error_code ec)
Definition: PeerImp.cpp:756
ripple::proposalUniqueId
uint256 proposalUniqueId(uint256 const &proposeHash, uint256 const &previousLedger, std::uint32_t proposeSeq, NetClock::time_point closeTime, Slice const &publicKey, Slice const &signature)
Calculate a unique identifier for a signed proposal.
Definition: RCLCxPeerPos.cpp:72
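A sketch of computing the suppression id for an incoming protocol::TMProposeSet named set; the protobuf accessors and the surrounding variables (proposeHash, prevLedgerHash, publicKey) are assumptions.

uint256 const suppression = proposalUniqueId(
    proposeHash,
    prevLedgerHash,
    set.proposeseq(),
    NetClock::time_point{NetClock::duration{set.closetime()}},
    publicKey.slice(),
    makeSlice(set.signature()));
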
ripple::PeerImp::name
std::string name() const
Definition: PeerImp.cpp:838
ripple::Application::validators
virtual ValidatorList & validators()=0
ripple::KeyType::secp256k1
@ secp256k1
ripple::RCLCxPeerPos::publicKey
PublicKey const & publicKey() const
Public key of peer that sent the proposal.
Definition: RCLCxPeerPos.h:81
std::weak_ptr
STL class.
ripple::PeerImp::timer_
waitable_timer timer_
Definition: PeerImp.h:82
std::min
T min(T... args)
ripple::Serializer
Definition: Serializer.h:39
ripple::LedgerMaster::getValidatedLedgerAge
std::chrono::seconds getValidatedLedgerAge()
Definition: LedgerMaster.cpp:270
ripple::Resource::Gossip::Item
Describes a single consumer.
Definition: Gossip.h:34
ripple::OverlayImpl::deletePeer
void deletePeer(Peer::id_t id)
Called when the peer is deleted.
Definition: OverlayImpl.cpp:1517
ripple::jtREQUESTED_TXN
@ jtREQUESTED_TXN
Definition: Job.h:57
ripple::PeerImp::Tracking::diverged
@ diverged
ripple::jtPACK
@ jtPACK
Definition: Job.h:42
ripple::PeerImp::gracefulClose_
bool gracefulClose_
Definition: PeerImp.h:159
std::vector::emplace_back
T emplace_back(T... args)
ripple
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: RCLCensorshipDetector.h:29
ripple::InboundLedgers::gotLedgerData
virtual bool gotLedgerData(LedgerHash const &ledgerHash, std::shared_ptr< Peer >, std::shared_ptr< protocol::TMLedgerData >)=0
ripple::Application::getNodeStore
virtual NodeStore::Database & getNodeStore()=0
ripple::Application::validatorManifests
virtual ManifestCache & validatorManifests()=0
ripple::OverlayImpl::getManifestsMessage
std::shared_ptr< Message > getManifestsMessage()
Definition: OverlayImpl.cpp:1276
ripple::Serializer::size
std::size_t size() const noexcept
Definition: Serializer.h:69
ripple::send_if_not
send_if_not_pred< Predicate > send_if_not(std::shared_ptr< Message > const &m, Predicate const &f)
Helper function to aid in type deduction.
Definition: predicates.h:107
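A sketch of the usual relay idiom built from this predicate and Overlay::foreach (also listed here), assuming PeerImp member context and a prepared std::shared_ptr<Message> named message.

// Relay to every active peer except this one (the peer it arrived from).
overlay_.foreach(send_if_not(
    message,
    [this](std::shared_ptr<Peer> const& p) { return p->id() == id_; }));
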
ripple::ShardState::acquire
@ acquire
protocol
Definition: ValidatorList.h:38
ripple::jtVALIDATION_ut
@ jtVALIDATION_ut
Definition: Job.h:46
ripple::INVALID
@ INVALID
Definition: Transaction.h:47
ripple::reduce_relay::MAX_TX_QUEUE_SIZE
static constexpr std::size_t MAX_TX_QUEUE_SIZE
Definition: ReduceRelayCommon.h:55
ripple::ProtocolFeature::ValidatorList2Propagation
@ ValidatorList2Propagation
ripple::OverlayImpl::remove
void remove(std::shared_ptr< PeerFinder::Slot > const &slot)
Definition: OverlayImpl.cpp:462
ripple::PeerImp::squelch_
reduce_relay::Squelch< UptimeClock > squelch_
Definition: PeerImp.h:119
ripple::Config::TX_REDUCE_RELAY_METRICS
bool TX_REDUCE_RELAY_METRICS
Definition: Config.h:245
ripple::PeerImp::lastPingSeq_
std::optional< std::uint32_t > lastPingSeq_
Definition: PeerImp.h:115
ripple::base_uint::zero
void zero()
Definition: base_uint.h:521
std::vector::begin
T begin(T... args)
ripple::NodeStore::Database::seqToShardIndex
std::uint32_t seqToShardIndex(std::uint32_t ledgerSeq) const noexcept
Calculates the shard index for a given ledger sequence.
Definition: Database.h:282
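A sketch of a bounds check a shard-related handler might perform; ledgerSeq is assumed to be the sequence taken from the incoming message.

NodeStore::Database const& db = app_.getNodeStore();
if (ledgerSeq >= db.earliestLedgerSeq())
{
    // Map the ledger sequence onto the shard that would contain it.
    std::uint32_t const shardIndex = db.seqToShardIndex(ledgerSeq);
    // ... look the shard up, request it, etc.
}
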
ripple::PeerFinder::Manager::config
virtual Config config()=0
Returns the configuration for the manager.
std
STL namespace.
ripple::Resource::Consumer::disconnect
bool disconnect()
Returns true if the consumer should be disconnected.
Definition: Consumer.cpp:117
beast::severities::kWarning
@ kWarning
Definition: Journal.h:37
ripple::NodeStore::Database::earliestShardIndex
std::uint32_t earliestShardIndex() const noexcept
Definition: Database.h:245
std::set::insert
T insert(T... args)
ripple::sha512Half
sha512_half_hasher::result_type sha512Half(Args const &... args)
Returns the SHA512-Half of a series of objects.
Definition: digest.h:216
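A minimal sketch: each argument is fed to the hasher via hash_append and the first 256 bits of the SHA-512 digest are returned; using HashPrefix::shardInfo (listed above) as the domain separator is an illustrative choice.

#include <ripple/basics/Slice.h>
#include <ripple/protocol/HashPrefix.h>
#include <ripple/protocol/digest.h>

namespace ripple {

// Domain-separate with a hash prefix, then keep the first 256 bits of SHA-512.
uint256
shardInfoDigest(Slice const& payload)
{
    return sha512Half(HashPrefix::shardInfo, payload);
}

} // namespace ripple
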
beast::IP::Endpoint::from_string
static Endpoint from_string(std::string const &s)
Definition: IPEndpoint.cpp:46
ripple::OverlayImpl::activate
void activate(std::shared_ptr< PeerImp > const &peer)
Called when a peer has connected successfully. This is called after the peer handshake has been completed...
Definition: OverlayImpl.cpp:594
ripple::OverlayImpl::onPeerDeactivate
void onPeerDeactivate(Peer::id_t id)
Definition: OverlayImpl.cpp:618
ripple::Tuning::readBufferBytes
constexpr std::size_t readBufferBytes
Size of buffer used to read from the socket.
Definition: overlay/impl/Tuning.h:65
ripple::Resource::Gossip::Item::address
beast::IP::Endpoint address
Definition: Gossip.h:39
ripple::LedgerMaster::getCurrentLedgerIndex
LedgerIndex getCurrentLedgerIndex()
Definition: LedgerMaster.cpp:208
ripple::Resource::Consumer
An endpoint that consumes resources.
Definition: Consumer.h:33
ripple::Resource::Charge
A consumption charge.
Definition: Charge.h:30
ripple::Resource::Gossip::Item::balance
int balance
Definition: Gossip.h:38
ripple::TimeKeeper::now
virtual time_point now() const override=0
Returns the estimate of wall time, in network time.
ripple::PeerImp::maxLedger_
LedgerIndex maxLedger_
Definition: PeerImp.h:107
ripple::PeerImp::run
virtual void run()
Definition: PeerImp.cpp:157
ripple::Tuning::targetSendQueue
@ targetSendQueue
How many messages we consider reasonable sustained on a send queue.
Definition: overlay/impl/Tuning.h:52
ripple::LoadFeeTrack::setClusterFee
void setClusterFee(std::uint32_t fee)
Definition: LoadFeeTrack.h:111
ripple::PeerImp::checkTracking
void checkTracking(std::uint32_t validationSeq)
Check if the peer is tracking.
Definition: PeerImp.cpp:2179
ripple::PeerImp::large_sendq_
int large_sendq_
Definition: PeerImp.h:160
ripple::PeerImp::domain
std::string domain() const
Definition: PeerImp.cpp:845
std::string::empty
T empty(T... args)
ripple::Resource::feeLightPeer
const Charge feeLightPeer
ripple::jtREPLAY_REQ
@ jtREPLAY_REQ
Definition: Job.h:49
ripple::jtPROPOSAL_ut
@ jtPROPOSAL_ut
Definition: Job.h:51
ripple::TokenType::NodePublic
@ NodePublic
ripple::PeerImp::last_status_
protocol::TMStatusChange last_status_
Definition: PeerImp.h:150
ripple::RCLCxPeerPos::suppressionID
uint256 const & suppressionID() const
Unique id used by hash router to suppress duplicates.
Definition: RCLCxPeerPos.h:88
ripple::PeerImp::supportsFeature
bool supportsFeature(ProtocolFeature f) const override
Definition: PeerImp.cpp:498
ripple::OverlayImpl::findPeerByPublicKey
std::shared_ptr< Peer > findPeerByPublicKey(PublicKey const &pubKey) override
Returns the peer with the matching public key, or null.
Definition: OverlayImpl.cpp:1209
std::optional
mutex
ripple::PeerImp::getPeerShardInfos
const hash_map< PublicKey, NodeStore::ShardInfo > getPeerShardInfos() const
Definition: PeerImp.cpp:632
ripple::PeerImp::onMessageBegin
void onMessageBegin(std::uint16_t type, std::shared_ptr<::google::protobuf::Message > const &m, std::size_t size, std::size_t uncompressed_size, bool isCompressed)
Definition: PeerImp.cpp:1014
std::stringstream::str
T str(T... args)
beast::Journal::debug
Stream debug() const
Definition: Journal.h:315
std::size_t
ripple::to_string
std::string to_string(Manifest const &m)
Format the specified manifest to a string for debugging purposes.
Definition: app/misc/impl/Manifest.cpp:38
ripple::PeerImp::json
Json::Value json() override
Definition: PeerImp.cpp:380
ripple::Cluster::for_each
void for_each(std::function< void(ClusterNode const &)> func) const
Invokes the callback once for every cluster node.
Definition: Cluster.cpp:84
ripple::PeerImp::compressionEnabled_
Compressed compressionEnabled_
Definition: PeerImp.h:170
ripple::Tuning::sendqIntervals
@ sendqIntervals
How many timer intervals a sendq has to stay large before we disconnect.
Definition: overlay/impl/Tuning.h:46
ripple::ProtocolFeature::ValidatorListPropagation
@ ValidatorListPropagation
beast::IP::Endpoint
A version-independent IP address and port combination.
Definition: IPEndpoint.h:38
ripple::OverlayImpl::incPeerDisconnect
void incPeerDisconnect() override
Increment and retrieve counters for total peer disconnects, and disconnects we initiate for excessive...
Definition: OverlayImpl.h:368
ripple::OverlayImpl::addTxMetrics
void addTxMetrics(Args... args)
Add tx reduce-relay metrics.
Definition: OverlayImpl.h:449
ripple::Serializer::add32
int add32(std::uint32_t i)
Definition: Serializer.cpp:38
ripple::LedgerInfo
Information about the notional ledger backing the view.
Definition: ReadView.h:84
ripple::strHex
std::string strHex(FwdIt begin, FwdIt end)
Definition: strHex.h:45
std::set::end
T end(T... args)
ripple::PeerFinder::Manager::on_failure
virtual void on_failure(std::shared_ptr< Slot > const &slot)=0
Called when an outbound connection is deemed to have failed.
ripple::Job::getType
JobType getType() const
Definition: Job.cpp:52
ripple::PeerImp::makePrefix
static std::string makePrefix(id_t id)
Definition: PeerImp.cpp:687
ripple::PeerImp::usage_
Resource::Consumer usage_
Definition: PeerImp.h:151
std::setw
T setw(T... args)
ripple::NodeStore::Database::earliestLedgerSeq
std::uint32_t earliestLedgerSeq() const noexcept
Definition: Database.h:237
numeric
ripple::OverlayImpl
Definition: OverlayImpl.h:58
std::max
T max(T... args)
ripple::base_uint::parseHex
constexpr bool parseHex(std::string_view sv)
Parse a hex string into a base_uint.
Definition: base_uint.h:475
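An illustrative sketch, assuming hexText holds a hex-encoded hash received as text and that we are inside a PeerImp handler with charge available.

uint256 ledgerHash;
if (!ledgerHash.parseHex(hexText))
{
    // Malformed hex: treat it as bad data from the peer.
    charge(Resource::feeBadData);
}
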
beast::IP::Endpoint::at_port
Endpoint at_port(Port port) const
Returns a new Endpoint with a different port.
Definition: IPEndpoint.h:69
ripple::ValidatorList::trusted
bool trusted(PublicKey const &identity) const
Returns true if public key is trusted.
Definition: ValidatorList.cpp:1367
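A sketch of the common prioritization pattern around this check; publicKey is assumed to be the signer of an incoming validation or proposal.

bool const isTrusted = app_.validators().trusted(publicKey);

// Trusted sources get the higher-priority job type.
JobType const jobType = isTrusted ? jtVALIDATION_t : jtVALIDATION_ut;
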
ripple::OverlayImpl::findPeerByShortID
std::shared_ptr< Peer > findPeerByShortID(Peer::id_t const &id) const override
Returns the peer with the matching short id, or null.
Definition: OverlayImpl.cpp:1197
ripple::Serializer::getLength
int getLength() const
Definition: Serializer.h:199
ripple::OverlayImpl::reportTraffic
void reportTraffic(TrafficCount::category cat, bool isInbound, int bytes)
Definition: OverlayImpl.cpp:678
ripple::sfLastLedgerSequence
const SF_UINT32 sfLastLedgerSequence
ripple::JobQueue::makeLoadEvent
std::unique_ptr< LoadEvent > makeLoadEvent(JobType t, std::string const &name)
Return a scoped LoadEvent.
Definition: JobQueue.cpp:146
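A sketch of how per-message load tracking might bracket message processing, using the load_event_ member listed here; the event name string is illustrative.

// Begin tracking work attributed to this peer...
load_event_ =
    app_.getJobQueue().makeLoadEvent(jtPEER, "PeerImp::processMessage");

// ...and release it once the message has been handled.
load_event_.reset();
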
ripple::PeerImp::shardInfoMutex_
std::mutex shardInfoMutex_
Definition: PeerImp.h:168
ripple::Resource::Consumer::charge
Disposition charge(Charge const &fee)
Apply a load charge to the consumer.
Definition: Consumer.cpp:99
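A sketch of applying a charge and reacting to the returned Disposition, assuming PeerImp member context (usage_, fail); the reason string is illustrative.

if (usage_.charge(Resource::feeBadData) == Resource::drop)
{
    // The resource manager wants this endpoint gone.
    fail("insufficient resources");
}
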
ripple::PeerImp::overlay_
OverlayImpl & overlay_
Definition: PeerImp.h:90
ripple::makeResponse
http_response_type makeResponse(bool crawlPublic, http_request_type const &req, beast::IP::Address public_ip, beast::IP::Address remote_ip, uint256 const &sharedValue, std::optional< std::uint32_t > networkID, ProtocolVersion protocol, Application &app)
Make an HTTP response.
Definition: Handshake.cpp:396
ripple::http_request_type
boost::beast::http::request< boost::beast::http::dynamic_body > http_request_type
Definition: Handshake.h:47
std::unique_ptr< stream_type >
ripple::Tuning::sendQueueLogFreq
@ sendQueueLogFreq
How often to log send queue size.
Definition: overlay/impl/Tuning.h:55
ripple::PeerImp::tracking_
std::atomic< Tracking > tracking_
Definition: PeerImp.h:96
ripple::PeerImp::nameMutex_
boost::shared_mutex nameMutex_
Definition: PeerImp.h:102
ripple::PeerImp::cancelTimer
void cancelTimer()
Definition: PeerImp.cpp:678
ripple::invokeProtocolMessage
std::pair< std::size_t, boost::system::error_code > invokeProtocolMessage(Buffers const &buffers, Handler &handler, std::size_t &hint)
Calls the handler for up to one protocol message in the passed buffers.
Definition: ProtocolMessage.h:343
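A sketch of the dispatch step inside a read handler; read_buffer_ (assumed to be a beast dynamic buffer member) and the hint variable are assumptions about the surrounding state.

std::size_t hint = Tuning::readBufferBytes;
auto const [bytesConsumed, ec] =
    invokeProtocolMessage(read_buffer_.data(), *this, hint);
if (ec)
    return fail("invokeProtocolMessage");
read_buffer_.consume(bytesConsumed);
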
std::unordered_map
STL class.
ripple::PeerImp::fee_
Resource::Charge fee_
Definition: PeerImp.h:152
ripple::stringIsUint256Sized
static bool stringIsUint256Sized(std::string const &pBuffStr)
Definition: PeerImp.cpp:151
beast::IP::Endpoint::from_string_checked
static std::optional< Endpoint > from_string_checked(std::string const &s)
Create an Endpoint from a string.
Definition: IPEndpoint.cpp:35
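A sketch of parsing an address string reported by a remote peer without risking an exception on malformed input; text and the port number are illustrative.

if (auto const parsed = beast::IP::Endpoint::from_string_checked(text))
{
    // Re-anchor to the advertised listening port before recording it.
    beast::IP::Endpoint const remote = parsed->at_port(51235);
    // ... record `remote` for crawl / gossip bookkeeping
}
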
ripple::ValidatorList::for_each_available
void for_each_available(std::function< void(std::string const &manifest, std::uint32_t version, std::map< std::size_t, ValidatorBlobInfo > const &blobInfos, PublicKey const &pubKey, std::size_t maxSequence, uint256 const &hash)> func) const
Invokes the callback once for every available publisher list's raw data members.
Definition: ValidatorList.cpp:1651
std::set
STL class.
ripple::PeerImp::stop
void stop() override
Definition: PeerImp.cpp:215
ripple::Tuning::maxQueryDepth
@ maxQueryDepth
The maximum number of levels to search.
Definition: overlay/impl/Tuning.h:61
ripple::Application::getHashRouter
virtual HashRouter & getHashRouter()=0
ripple::PeerImp::removeTxQueue
void removeTxQueue(uint256 const &hash) override
Remove transaction's hash from the transactions' hashes queue.
Definition: PeerImp.cpp:331
ripple::PeerImp::Tracking::converged
@ converged
ripple::PeerImp::id_
const id_t id_
Definition: PeerImp.h:73
ripple::OverlayImpl::for_each
void for_each(UnaryFunc &&f) const
Definition: OverlayImpl.h:283
std::ref
T ref(T... args)
ripple::RCLCxPeerPos::checkSign
bool checkSign() const
Verify the signing hash of the proposal.
Definition: RCLCxPeerPos.cpp:55
std::exception::what
T what(T... args)
ripple::LedgerReplayMsgHandler::processReplayDeltaResponse
bool processReplayDeltaResponse(std::shared_ptr< protocol::TMReplayDeltaResponse > const &msg)
Process TMReplayDeltaResponse.
Definition: LedgerReplayMsgHandler.cpp:219
std::shared_lock
STL class.
ripple::PeerImp::fail
void fail(std::string const &reason)
Definition: PeerImp.cpp:600
ripple::PeerImp::cluster
bool cluster() const override
Returns true if this connection is a member of the cluster.
Definition: PeerImp.cpp:366
ripple::ShardState::queued
@ queued
ripple::HashPrefix::shardInfo
@ shardInfo
shard info for signing
Json::Value
Represents a JSON value.
Definition: json_value.h:145
ripple::PeerImp::p_journal_
const beast::Journal p_journal_
Definition: PeerImp.h:77
ripple::Config::MAX_UNKNOWN_TIME
std::chrono::seconds MAX_UNKNOWN_TIME
Definition: Config.h:261
ripple::Peer
Represents a peer connection in the overlay.
Definition: ripple/overlay/Peer.h:45
ripple::Config::MAX_DIVERGED_TIME
std::chrono::seconds MAX_DIVERGED_TIME
Definition: Config.h:264
ripple::jtLEDGER_REQ
@ jtLEDGER_REQ
Definition: Job.h:50
ripple::PeerImp::onReadMessage
void onReadMessage(error_code ec, std::size_t bytes_transferred)
Definition: PeerImp.cpp:898
ripple::ConsensusProposal< NodeID, uint256, uint256 >
std::chrono::steady_clock::now
T now(T... args)