rippled
PeerImp.cpp
1 //------------------------------------------------------------------------------
2 /*
3  This file is part of rippled: https://github.com/ripple/rippled
4  Copyright (c) 2012, 2013 Ripple Labs Inc.
5 
6  Permission to use, copy, modify, and/or distribute this software for any
7  purpose with or without fee is hereby granted, provided that the above
8  copyright notice and this permission notice appear in all copies.
9 
10  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 //==============================================================================
19 
20 #include <ripple/app/consensus/RCLValidations.h>
21 #include <ripple/app/ledger/InboundLedgers.h>
22 #include <ripple/app/ledger/InboundTransactions.h>
23 #include <ripple/app/ledger/LedgerMaster.h>
24 #include <ripple/app/ledger/TransactionMaster.h>
25 #include <ripple/app/misc/HashRouter.h>
26 #include <ripple/app/misc/LoadFeeTrack.h>
27 #include <ripple/app/misc/NetworkOPs.h>
28 #include <ripple/app/misc/Transaction.h>
29 #include <ripple/app/misc/ValidatorList.h>
30 #include <ripple/app/tx/apply.h>
31 #include <ripple/basics/UptimeClock.h>
32 #include <ripple/basics/base64.h>
33 #include <ripple/basics/random.h>
34 #include <ripple/basics/safe_cast.h>
35 #include <ripple/beast/core/LexicalCast.h>
36 #include <ripple/beast/core/SemanticVersion.h>
37 #include <ripple/nodestore/DatabaseShard.h>
38 #include <ripple/overlay/Cluster.h>
39 #include <ripple/overlay/impl/PeerImp.h>
40 #include <ripple/overlay/impl/Tuning.h>
41 #include <ripple/overlay/predicates.h>
42 #include <ripple/protocol/digest.h>
43 
44 #include <boost/algorithm/string.hpp>
45 #include <boost/algorithm/string/predicate.hpp>
46 #include <boost/beast/core/ostream.hpp>
47 
48 #include <algorithm>
49 #include <memory>
50 #include <mutex>
51 #include <numeric>
52 #include <sstream>
53 
54 using namespace std::chrono_literals;
55 
56 namespace ripple {
57 
58 namespace {
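59  // Latency threshold above which a peer connection is treated as high latency.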
60 std::chrono::milliseconds constexpr peerHighLatency{300};
61 
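62  // How often the peer timer fires (pings, send queue and tracking checks).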
63 std::chrono::seconds constexpr peerTimerInterval{60};
64 } // namespace
65 
66 PeerImp::PeerImp(
67  Application& app,
68  id_t id,
69  std::shared_ptr<PeerFinder::Slot> const& slot,
70  http_request_type&& request,
71  PublicKey const& publicKey,
72  ProtocolVersion protocol,
73  Resource::Consumer consumer,
74  std::unique_ptr<stream_type>&& stream_ptr,
75  OverlayImpl& overlay)
76  : Child(overlay)
77  , app_(app)
78  , id_(id)
79  , sink_(app_.journal("Peer"), makePrefix(id))
80  , p_sink_(app_.journal("Protocol"), makePrefix(id))
81  , journal_(sink_)
82  , p_journal_(p_sink_)
83  , stream_ptr_(std::move(stream_ptr))
84  , socket_(stream_ptr_->next_layer().socket())
85  , stream_(*stream_ptr_)
86  , strand_(socket_.get_executor())
87  , timer_(waitable_timer{socket_.get_executor()})
88  , remote_address_(slot->remote_endpoint())
89  , overlay_(overlay)
90  , inbound_(true)
91  , protocol_(protocol)
92  , tracking_(Tracking::unknown)
93  , trackingTime_(clock_type::now())
94  , publicKey_(publicKey)
95  , lastPingTime_(clock_type::now())
96  , creationTime_(clock_type::now())
97  , squelch_(app_.journal("Squelch"))
98  , usage_(consumer)
100  , slot_(slot)
101  , request_(std::move(request))
102  , headers_(request_)
103  , compressionEnabled_(
104  peerFeatureEnabled(
105  headers_,
106  FEATURE_COMPR,
107  "lz4",
108  app_.config().COMPRESSION)
109  ? Compressed::On
110  : Compressed::Off)
111  , txReduceRelayEnabled_(peerFeatureEnabled(
112  headers_,
113  FEATURE_TXRR,
114  app_.config().TX_REDUCE_RELAY_ENABLE))
115  , vpReduceRelayEnabled_(peerFeatureEnabled(
116  headers_,
117  FEATURE_VPRR,
118  app_.config().VP_REDUCE_RELAY_ENABLE))
119  , ledgerReplayEnabled_(peerFeatureEnabled(
120  headers_,
121  FEATURE_LEDGER_REPLAY,
122  app_.config().LEDGER_REPLAY))
123  , ledgerReplayMsgHandler_(app, app.getLedgerReplayer())
124 {
125  JLOG(journal_.info()) << "compression enabled "
126  << (compressionEnabled_ == Compressed::On)
127  << " vp reduce-relay enabled "
128  << vpReduceRelayEnabled_
129  << " tx reduce-relay enabled "
130  << txReduceRelayEnabled_ << " on " << remote_address_
131  << " " << id_;
132 }
133 
134 PeerImp::~PeerImp()
135 {
136  const bool inCluster{cluster()};
137 
142 
143  if (inCluster)
144  {
145  JLOG(journal_.warn()) << name() << " left cluster";
146  }
147 }
148 
149 // Helper function to check for valid uint256 values in protobuf buffers
150 static bool
151 stringIsUint256Sized(std::string const& pBuffStr)
152 {
153  return pBuffStr.size() == uint256::size();
154 }
155 
156 void
157 PeerImp::run()
158 {
159  if (!strand_.running_in_this_thread())
160  return post(strand_, std::bind(&PeerImp::run, shared_from_this()));
161 
162  auto parseLedgerHash =
163  [](std::string const& value) -> std::optional<uint256> {
164  if (uint256 ret; ret.parseHex(value))
165  return ret;
166 
167  if (auto const s = base64_decode(value); s.size() == uint256::size())
168  return uint256{s};
169 
170  return std::nullopt;
171  };
172 
173  std::optional<uint256> closed;
174  std::optional<uint256> previous;
175 
176  if (auto const iter = headers_.find("Closed-Ledger");
177  iter != headers_.end())
178  {
179  closed = parseLedgerHash(iter->value().to_string());
180 
181  if (!closed)
182  fail("Malformed handshake data (1)");
183  }
184 
185  if (auto const iter = headers_.find("Previous-Ledger");
186  iter != headers_.end())
187  {
188  previous = parseLedgerHash(iter->value().to_string());
189 
190  if (!previous)
191  fail("Malformed handshake data (2)");
192  }
193 
194  if (previous && !closed)
195  fail("Malformed handshake data (3)");
196 
197  {
198  std::lock_guard sl(recentLock_);
199  if (closed)
200  closedLedgerHash_ = *closed;
201  if (previous)
202  previousLedgerHash_ = *previous;
203  }
204 
205  if (inbound_)
206  doAccept();
207  else
208  doProtocolStart();
209 
210  // Anything else that needs to be done with the connection should be
211  // done in doProtocolStart
212 }
213 
214 void
215 PeerImp::stop()
216 {
217  if (!strand_.running_in_this_thread())
218  return post(strand_, std::bind(&PeerImp::stop, shared_from_this()));
219  if (socket_.is_open())
220  {
221  // The rationale for using different severity levels is that
222  // outbound connections are under our control and may be logged
223  // at a higher level, but inbound connections are more numerous and
224  // uncontrolled so to prevent log flooding the severity is reduced.
225  //
226  if (inbound_)
227  {
228  JLOG(journal_.debug()) << "Stop";
229  }
230  else
231  {
232  JLOG(journal_.info()) << "Stop";
233  }
234  }
235  close();
236 }
237 
238 //------------------------------------------------------------------------------
239 
240 void
241 PeerImp::send(std::shared_ptr<Message> const& m)
242 {
243  if (!strand_.running_in_this_thread())
244  return post(strand_, std::bind(&PeerImp::send, shared_from_this(), m));
245  if (gracefulClose_)
246  return;
247  if (detaching_)
248  return;
249 
250  auto validator = m->getValidatorKey();
251  if (validator && !squelch_.expireSquelch(*validator))
252  return;
253 
254  overlay_.reportTraffic(
255  safe_cast<TrafficCount::category>(m->getCategory()),
256  false,
257  static_cast<int>(m->getBuffer(compressionEnabled_).size()));
258 
259  auto sendq_size = send_queue_.size();
260 
261  if (sendq_size < Tuning::targetSendQueue)
262  {
263  // To detect a peer that does not read from its
264  // side of the connection, we expect a peer to have
265  // a small sendq periodically
266  large_sendq_ = 0;
267  }
268  else if (auto sink = journal_.debug();
269  sink && (sendq_size % Tuning::sendQueueLogFreq) == 0)
270  {
271  std::string const n = name();
272  sink << (n.empty() ? remote_address_.to_string() : n)
273  << " sendq: " << sendq_size;
274  }
275 
276  send_queue_.push(m);
277 
278  if (sendq_size != 0)
279  return;
280 
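// Only the first message queued on an empty queue starts an async_write;
// onWriteMessage writes the remaining queued messages as each write completes.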
281  boost::asio::async_write(
282  stream_,
283  boost::asio::buffer(
284  send_queue_.front()->getBuffer(compressionEnabled_)),
285  bind_executor(
286  strand_,
287  std::bind(
288  &PeerImp::onWriteMessage,
289  shared_from_this(),
290  std::placeholders::_1,
291  std::placeholders::_2)));
292 }
293 
294 void
295 PeerImp::sendTxQueue()
296 {
297  if (!strand_.running_in_this_thread())
298  return post(
299  strand_, std::bind(&PeerImp::sendTxQueue, shared_from_this()));
300 
301  if (!txQueue_.empty())
302  {
303  protocol::TMHaveTransactions ht;
304  std::for_each(txQueue_.begin(), txQueue_.end(), [&](auto const& hash) {
305  ht.add_hashes(hash.data(), hash.size());
306  });
307  JLOG(p_journal_.trace()) << "sendTxQueue " << txQueue_.size();
308  txQueue_.clear();
309  send(std::make_shared<Message>(ht, protocol::mtHAVE_TRANSACTIONS));
310  }
311 }
312 
313 void
314 PeerImp::addTxQueue(uint256 const& hash)
315 {
316  if (!strand_.running_in_this_thread())
317  return post(
318  strand_, std::bind(&PeerImp::addTxQueue, shared_from_this(), hash));
319 
320  if (txQueue_.size() >= reduce_relay::MAX_TX_QUEUE_SIZE)
321  {
322  JLOG(p_journal_.warn()) << "addTxQueue exceeds the cap";
323  sendTxQueue();
324  }
325 
326  txQueue_.insert(hash);
327  JLOG(p_journal_.trace()) << "addTxQueue " << txQueue_.size();
328 }
329 
330 void
331 PeerImp::removeTxQueue(uint256 const& hash)
332 {
333  if (!strand_.running_in_this_thread())
334  return post(
335  strand_,
336  std::bind(&PeerImp::removeTxQueue, shared_from_this(), hash));
337 
338  auto removed = txQueue_.erase(hash);
339  JLOG(p_journal_.trace()) << "removeTxQueue " << removed;
340 }
341 
342 void
343 PeerImp::charge(Resource::Charge const& fee)
344 {
345  if ((usage_.charge(fee) == Resource::drop) &&
346  usage_.disconnect(p_journal_) && strand_.running_in_this_thread())
347  {
348  // Sever the connection
350  fail("charge: Resources");
351  }
352 }
353 
354 //------------------------------------------------------------------------------
355 
356 bool
357 PeerImp::crawl() const
358 {
359  auto const iter = headers_.find("Crawl");
360  if (iter == headers_.end())
361  return false;
362  return boost::iequals(iter->value(), "public");
363 }
364 
365 bool
366 PeerImp::cluster() const
367 {
368  return static_cast<bool>(app_.cluster().member(publicKey_));
369 }
370 
371 std::string
372 PeerImp::getVersion() const
373 {
374  if (inbound_)
375  return headers_["User-Agent"].to_string();
376  return headers_["Server"].to_string();
377 }
378 
379 Json::Value
380 PeerImp::json()
381 {
382  Json::Value ret(Json::objectValue);
383 
384  ret[jss::public_key] = toBase58(TokenType::NodePublic, publicKey_);
385  ret[jss::address] = remote_address_.to_string();
386 
387  if (inbound_)
388  ret[jss::inbound] = true;
389 
390  if (cluster())
391  {
392  ret[jss::cluster] = true;
393 
394  if (auto const n = name(); !n.empty())
395  // Could move here if Json::Value supported moving from a string
396  ret[jss::name] = n;
397  }
398 
399  if (auto const d = domain(); !d.empty())
400  ret[jss::server_domain] = domain();
401 
402  if (auto const nid = headers_["Network-ID"].to_string(); !nid.empty())
403  ret[jss::network_id] = nid;
404 
405  ret[jss::load] = usage_.balance();
406 
407  if (auto const version = getVersion(); !version.empty())
408  ret[jss::version] = version;
409 
410  ret[jss::protocol] = to_string(protocol_);
411 
412  {
413  std::lock_guard sl(recentLock_);
414  if (latency_)
415  ret[jss::latency] = static_cast<Json::UInt>(latency_->count());
416  }
417 
418  ret[jss::uptime] = static_cast<Json::UInt>(
419  std::chrono::duration_cast<std::chrono::seconds>(uptime()).count());
420 
421  std::uint32_t minSeq, maxSeq;
422  ledgerRange(minSeq, maxSeq);
423 
424  if ((minSeq != 0) || (maxSeq != 0))
425  ret[jss::complete_ledgers] =
426  std::to_string(minSeq) + " - " + std::to_string(maxSeq);
427 
428  switch (tracking_.load())
429  {
430  case Tracking::diverged:
431  ret[jss::track] = "diverged";
432  break;
433 
434  case Tracking::unknown:
435  ret[jss::track] = "unknown";
436  break;
437 
438  case Tracking::converged:
439  // Nothing to do here
440  break;
441  }
442 
443  uint256 closedLedgerHash;
444  protocol::TMStatusChange last_status;
445  {
446  std::lock_guard sl(recentLock_);
447  closedLedgerHash = closedLedgerHash_;
448  last_status = last_status_;
449  }
450 
451  if (closedLedgerHash != beast::zero)
452  ret[jss::ledger] = to_string(closedLedgerHash);
453 
454  if (last_status.has_newstatus())
455  {
456  switch (last_status.newstatus())
457  {
458  case protocol::nsCONNECTING:
459  ret[jss::status] = "connecting";
460  break;
461 
462  case protocol::nsCONNECTED:
463  ret[jss::status] = "connected";
464  break;
465 
466  case protocol::nsMONITORING:
467  ret[jss::status] = "monitoring";
468  break;
469 
470  case protocol::nsVALIDATING:
471  ret[jss::status] = "validating";
472  break;
473 
474  case protocol::nsSHUTTING:
475  ret[jss::status] = "shutting";
476  break;
477 
478  default:
479  JLOG(p_journal_.warn())
480  << "Unknown status: " << last_status.newstatus();
481  }
482  }
483 
484  ret[jss::metrics] = Json::Value(Json::objectValue);
485  ret[jss::metrics][jss::total_bytes_recv] =
486  std::to_string(metrics_.recv.total_bytes());
487  ret[jss::metrics][jss::total_bytes_sent] =
488  std::to_string(metrics_.sent.total_bytes());
489  ret[jss::metrics][jss::avg_bps_recv] =
490  std::to_string(metrics_.recv.average_bytes());
491  ret[jss::metrics][jss::avg_bps_sent] =
492  std::to_string(metrics_.sent.average_bytes());
493 
494  return ret;
495 }
496 
497 bool
498 PeerImp::supportsFeature(ProtocolFeature f) const
499 {
500  switch (f)
501  {
502  case ProtocolFeature::ValidatorListPropagation:
503  return protocol_ >= make_protocol(2, 1);
504  case ProtocolFeature::ValidatorList2Propagation:
505  return protocol_ >= make_protocol(2, 2);
506  case ProtocolFeature::LedgerReplay:
507  return ledgerReplayEnabled_;
508  }
509  return false;
510 }
511 
512 //------------------------------------------------------------------------------
513 
514 bool
515 PeerImp::hasLedger(uint256 const& hash, std::uint32_t seq) const
516 {
517  {
518  std::lock_guard sl(recentLock_);
519  if ((seq != 0) && (seq >= minLedger_) && (seq <= maxLedger_) &&
520  (tracking_.load() == Tracking::converged))
521  return true;
522  if (std::find(recentLedgers_.begin(), recentLedgers_.end(), hash) !=
523  recentLedgers_.end())
524  return true;
525  }
526 
527  if (seq >= app_.getNodeStore().earliestLedgerSeq())
528  {
530  auto const it{shardInfos_.find(publicKey_)};
531  if (it != shardInfos_.end())
532  {
533  auto const shardIndex{app_.getNodeStore().seqToShardIndex(seq)};
534  return boost::icl::contains(it->second.finalized(), shardIndex);
535  }
536  }
537  return false;
538 }
539 
540 void
541 PeerImp::ledgerRange(std::uint32_t& minSeq, std::uint32_t& maxSeq) const
542 {
543  std::lock_guard sl(recentLock_);
544 
545  minSeq = minLedger_;
546  maxSeq = maxLedger_;
547 }
548 
549 bool
550 PeerImp::hasTxSet(uint256 const& hash) const
551 {
552  std::lock_guard sl(recentLock_);
553  return std::find(recentTxSets_.begin(), recentTxSets_.end(), hash) !=
554  recentTxSets_.end();
555 }
556 
557 void
558 PeerImp::cycleStatus()
559 {
560  // Operations on closedLedgerHash_ and previousLedgerHash_ must be
561  // guarded by recentLock_.
562  std::lock_guard sl(recentLock_);
563  previousLedgerHash_ = closedLedgerHash_;
564  closedLedgerHash_.zero();
565 }
566 
567 bool
568 PeerImp::hasRange(std::uint32_t uMin, std::uint32_t uMax)
569 {
570  std::lock_guard sl(recentLock_);
571  return (tracking_ != Tracking::diverged) && (uMin >= minLedger_) &&
572  (uMax <= maxLedger_);
573 }
574 
575 //------------------------------------------------------------------------------
576 
577 void
578 PeerImp::close()
579 {
580  assert(strand_.running_in_this_thread());
581  if (socket_.is_open())
582  {
583  detaching_ = true; // DEPRECATED
584  error_code ec;
585  timer_.cancel(ec);
586  socket_.close(ec);
588  if (inbound_)
589  {
590  JLOG(journal_.debug()) << "Closed";
591  }
592  else
593  {
594  JLOG(journal_.info()) << "Closed";
595  }
596  }
597 }
598 
599 void
600 PeerImp::fail(std::string const& reason)
601 {
602  if (!strand_.running_in_this_thread())
603  return post(
604  strand_,
605  std::bind(
606  (void (Peer::*)(std::string const&)) & PeerImp::fail,
607  shared_from_this(),
608  reason));
610  {
611  std::string const n = name();
612  JLOG(journal_.warn()) << (n.empty() ? remote_address_.to_string() : n)
613  << " failed: " << reason;
614  }
615  close();
616 }
617 
618 void
619 PeerImp::fail(std::string const& name, error_code ec)
620 {
621  assert(strand_.running_in_this_thread());
622  if (socket_.is_open())
623  {
624  JLOG(journal_.warn())
625  << name << " from " << toBase58(TokenType::NodePublic, publicKey_)
626  << " at " << remote_address_.to_string() << ": " << ec.message();
627  }
628  close();
629 }
630 
633 {
635  return shardInfos_;
636 }
637 
638 void
639 PeerImp::gracefulClose()
640 {
641  assert(strand_.running_in_this_thread());
642  assert(socket_.is_open());
643  assert(!gracefulClose_);
644  gracefulClose_ = true;
645 #if 0
646  // Flush messages
647  while(send_queue_.size() > 1)
648  send_queue_.pop_back();
649 #endif
650  if (send_queue_.size() > 0)
651  return;
652  setTimer();
653  stream_.async_shutdown(bind_executor(
654  strand_,
655  std::bind(
656  &PeerImp::onShutdown, shared_from_this(), std::placeholders::_1)));
657 }
658 
659 void
660 PeerImp::setTimer()
661 {
662  error_code ec;
663  timer_.expires_from_now(peerTimerInterval, ec);
664 
665  if (ec)
666  {
667  JLOG(journal_.error()) << "setTimer: " << ec.message();
668  return;
669  }
670  timer_.async_wait(bind_executor(
671  strand_,
672  std::bind(
673  &PeerImp::onTimer, shared_from_this(), std::placeholders::_1)));
674 }
675 
676 // Convenience function to cancel the timer while ignoring the error code
677 void
678 PeerImp::cancelTimer()
679 {
680  error_code ec;
681  timer_.cancel(ec);
682 }
683 
684 //------------------------------------------------------------------------------
685 
686 std::string
687 PeerImp::makePrefix(id_t id)
688 {
689  std::stringstream ss;
690  ss << "[" << std::setfill('0') << std::setw(3) << id << "] ";
691  return ss.str();
692 }
693 
694 void
695 PeerImp::onTimer(error_code const& ec)
696 {
697  if (!socket_.is_open())
698  return;
699 
700  if (ec == boost::asio::error::operation_aborted)
701  return;
702 
703  if (ec)
704  {
705  // This should never happen
706  JLOG(journal_.error()) << "onTimer: " << ec.message();
707  return close();
708  }
709 
710  if (large_sendq_++ >= Tuning::sendqIntervals)
711  {
712  fail("Large send queue");
713  return;
714  }
715 
716  if (auto const t = tracking_.load(); !inbound_ && t != Tracking::converged)
717  {
718  clock_type::duration duration;
719 
720  {
722  duration = clock_type::now() - trackingTime_;
723  }
724 
725  if ((t == Tracking::diverged &&
726  (duration > app_.config().MAX_DIVERGED_TIME)) ||
727  (t == Tracking::unknown &&
728  (duration > app_.config().MAX_UNKNOWN_TIME)))
729  {
731  fail("Not useful");
732  return;
733  }
734  }
735 
736  // Already waiting for PONG
737  if (lastPingSeq_)
738  {
739  fail("Ping Timeout");
740  return;
741  }
742 
744  lastPingSeq_ = rand_int<std::uint32_t>();
745 
746  protocol::TMPing message;
747  message.set_type(protocol::TMPing::ptPING);
748  message.set_seq(*lastPingSeq_);
749 
750  send(std::make_shared<Message>(message, protocol::mtPING));
751 
752  setTimer();
753 }
754 
755 void
756 PeerImp::onShutdown(error_code ec)
757 {
758  cancelTimer();
759  // If we don't get eof then something went wrong
760  if (!ec)
761  {
762  JLOG(journal_.error()) << "onShutdown: expected error condition";
763  return close();
764  }
765  if (ec != boost::asio::error::eof)
766  return fail("onShutdown", ec);
767  close();
768 }
769 
770 //------------------------------------------------------------------------------
771 void
773 {
774  assert(read_buffer_.size() == 0);
775 
776  JLOG(journal_.debug()) << "doAccept: " << remote_address_;
777 
778  auto const sharedValue = makeSharedValue(*stream_ptr_, journal_);
779 
780  // This shouldn't fail since we already computed
781  // the shared value successfully in OverlayImpl
782  if (!sharedValue)
783  return fail("makeSharedValue: Unexpected failure");
784 
785  JLOG(journal_.info()) << "Protocol: " << to_string(protocol_);
786  JLOG(journal_.info()) << "Public Key: "
787  << toBase58(TokenType::NodePublic, publicKey_);
788 
789  if (auto member = app_.cluster().member(publicKey_))
790  {
791  {
792  std::unique_lock write_lock{nameMutex_};
793  name_ = *member;
794  }
795  JLOG(journal_.info()) << "Cluster name: " << *member;
796  }
797 
799 
800  // XXX Set timer: connection is in grace period to be useful.
801  // XXX Set timer: connection idle (idle may vary depending on connection
802  // type.)
803 
804  auto write_buffer = std::make_shared<boost::beast::multi_buffer>();
805 
806  boost::beast::ostream(*write_buffer) << makeResponse(
808  request_,
811  *sharedValue,
813  protocol_,
814  app_);
815 
816  // Write the whole buffer and only start protocol when that's done.
817  boost::asio::async_write(
818  stream_,
819  write_buffer->data(),
820  boost::asio::transfer_all(),
821  bind_executor(
822  strand_,
823  [this, write_buffer, self = shared_from_this()](
824  error_code ec, std::size_t bytes_transferred) {
825  if (!socket_.is_open())
826  return;
827  if (ec == boost::asio::error::operation_aborted)
828  return;
829  if (ec)
830  return fail("onWriteResponse", ec);
831  if (write_buffer->size() == bytes_transferred)
832  return doProtocolStart();
833  return fail("Failed to write header");
834  }));
835 }
836 
837 std::string
838 PeerImp::name() const
839 {
840  std::shared_lock read_lock{nameMutex_};
841  return name_;
842 }
843 
844 std::string
845 PeerImp::domain() const
846 {
847  return headers_["Server-Domain"].to_string();
848 }
849 
850 //------------------------------------------------------------------------------
851 
852 // Protocol logic
853 
854 void
855 PeerImp::doProtocolStart()
856 {
858 
859  // Send all the validator lists that have been loaded
861  {
863  [&](std::string const& manifest,
864  std::uint32_t version,
866  PublicKey const& pubKey,
867  std::size_t maxSequence,
868  uint256 const& hash) {
870  *this,
871  0,
872  pubKey,
873  maxSequence,
874  version,
875  manifest,
876  blobInfos,
878  p_journal_);
879 
880  // Don't send it next time.
882  });
883  }
884 
885  if (auto m = overlay_.getManifestsMessage())
886  send(m);
887 
888  // Request shard info from peer
889  protocol::TMGetPeerShardInfoV2 tmGPS;
890  tmGPS.set_relays(0);
891  send(std::make_shared<Message>(tmGPS, protocol::mtGET_PEER_SHARD_INFO_V2));
892 
893  setTimer();
894 }
895 
896 // Called repeatedly with protocol message data
897 void
898 PeerImp::onReadMessage(error_code ec, std::size_t bytes_transferred)
899 {
900  if (!socket_.is_open())
901  return;
902  if (ec == boost::asio::error::operation_aborted)
903  return;
904  if (ec == boost::asio::error::eof)
905  {
906  JLOG(journal_.info()) << "EOF";
907  return gracefulClose();
908  }
909  if (ec)
910  return fail("onReadMessage", ec);
911  if (auto stream = journal_.trace())
912  {
913  if (bytes_transferred > 0)
914  stream << "onReadMessage: " << bytes_transferred << " bytes";
915  else
916  stream << "onReadMessage";
917  }
918 
919  metrics_.recv.add_message(bytes_transferred);
920 
921  read_buffer_.commit(bytes_transferred);
922 
923  auto hint = Tuning::readBufferBytes;
924 
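// Parse as many complete protocol messages as the buffer holds;
// invokeProtocolMessage consumes zero bytes when only a partial message
// remains, in which case we wait for more data.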
925  while (read_buffer_.size() > 0)
926  {
927  std::size_t bytes_consumed;
928  std::tie(bytes_consumed, ec) =
929  invokeProtocolMessage(read_buffer_.data(), *this, hint);
930  if (ec)
931  return fail("onReadMessage", ec);
932  if (!socket_.is_open())
933  return;
934  if (gracefulClose_)
935  return;
936  if (bytes_consumed == 0)
937  break;
938  read_buffer_.consume(bytes_consumed);
939  }
940 
941  // Timeout on writes only
942  stream_.async_read_some(
944  bind_executor(
945  strand_,
946  std::bind(
947  &PeerImp::onReadMessage,
948  shared_from_this(),
949  std::placeholders::_1,
950  std::placeholders::_2)));
951 }
952 
953 void
954 PeerImp::onWriteMessage(error_code ec, std::size_t bytes_transferred)
955 {
956  if (!socket_.is_open())
957  return;
958  if (ec == boost::asio::error::operation_aborted)
959  return;
960  if (ec)
961  return fail("onWriteMessage", ec);
962  if (auto stream = journal_.trace())
963  {
964  if (bytes_transferred > 0)
965  stream << "onWriteMessage: " << bytes_transferred << " bytes";
966  else
967  stream << "onWriteMessage";
968  }
969 
970  metrics_.sent.add_message(bytes_transferred);
971 
972  assert(!send_queue_.empty());
973  send_queue_.pop();
974  if (!send_queue_.empty())
975  {
976  // Timeout on writes only
977  return boost::asio::async_write(
978  stream_,
979  boost::asio::buffer(
980  send_queue_.front()->getBuffer(compressionEnabled_)),
981  bind_executor(
982  strand_,
983  std::bind(
984  &PeerImp::onWriteMessage,
985  shared_from_this(),
986  std::placeholders::_1,
987  std::placeholders::_2)));
988  }
989 
990  if (gracefulClose_)
991  {
992  return stream_.async_shutdown(bind_executor(
993  strand_,
994  std::bind(
995  &PeerImp::onShutdown,
996  shared_from_this(),
997  std::placeholders::_1)));
998  }
999 }
1000 
1001 //------------------------------------------------------------------------------
1002 //
1003 // ProtocolHandler
1004 //
1005 //------------------------------------------------------------------------------
1006 
1007 void
1008 PeerImp::onMessageUnknown(std::uint16_t type)
1009 {
1010  // TODO
1011 }
1012 
1013 void
1014 PeerImp::onMessageBegin(
1015  std::uint16_t type,
1016  std::shared_ptr<::google::protobuf::Message> const& m,
1017  std::size_t size,
1018  std::size_t uncompressed_size,
1019  bool isCompressed)
1020 {
1021  load_event_ =
1024  auto const category = TrafficCount::categorize(*m, type, true);
1025  overlay_.reportTraffic(category, true, static_cast<int>(size));
1026  using namespace protocol;
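// The per-message-type metrics below are collected only for
// transaction-related traffic (transaction relay and the transaction-set
// portions of GetLedger / LedgerData / GetObjects).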
1027  if ((type == MessageType::mtTRANSACTION ||
1028  type == MessageType::mtHAVE_TRANSACTIONS ||
1029  type == MessageType::mtTRANSACTIONS ||
1030  // GET_OBJECTS
1031  category == TrafficCount::category::get_transactions ||
1032  // GET_LEDGER
1033  category == TrafficCount::category::ld_tsc_get ||
1034  category == TrafficCount::category::ld_tsc_share ||
1035  // LEDGER_DATA
1036  category == TrafficCount::category::gl_tsc_share ||
1037  category == TrafficCount::category::gl_tsc_get) &&
1039  {
1041  static_cast<MessageType>(type), static_cast<std::uint64_t>(size));
1042  }
1043  JLOG(journal_.trace()) << "onMessageBegin: " << type << " " << size << " "
1044  << uncompressed_size << " " << isCompressed;
1045 }
1046 
1047 void
1049  std::uint16_t,
1051 {
1052  load_event_.reset();
1053  charge(fee_);
1054 }
1055 
1056 void
1057 PeerImp::onMessage(std::shared_ptr<protocol::TMManifests> const& m)
1058 {
1059  auto const s = m->list_size();
1060 
1061  if (s == 0)
1062  {
1064  return;
1065  }
1066 
1067  if (s > 100)
1069 
1070  // VFALCO What's the right job type?
1071  auto that = shared_from_this();
1073  jtVALIDATION_ut, "receiveManifests", [this, that, m](Job&) {
1074  overlay_.onManifests(m, that);
1075  });
1076 }
1077 
1078 void
1079 PeerImp::onMessage(std::shared_ptr<protocol::TMPing> const& m)
1080 {
1081  if (m->type() == protocol::TMPing::ptPING)
1082  {
1083  // We have received a ping request, reply with a pong
1085  m->set_type(protocol::TMPing::ptPONG);
1086  send(std::make_shared<Message>(*m, protocol::mtPING));
1087  return;
1088  }
1089 
1090  if (m->type() == protocol::TMPing::ptPONG && m->has_seq())
1091  {
1092  // Only reset the ping sequence if we actually received a
1093  // PONG with the correct cookie. That way, any peers which
1094  // respond with incorrect cookies will eventually time out.
1095  if (m->seq() == lastPingSeq_)
1096  {
1097  lastPingSeq_.reset();
1098 
1099  // Update latency estimate
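// as an exponential moving average: the new round-trip sample gets a
// weight of 1/8 and the previous estimate a weight of 7/8.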
1100  auto const rtt = std::chrono::round<std::chrono::milliseconds>(
1102 
1104 
1105  if (latency_)
1106  latency_ = (*latency_ * 7 + rtt) / 8;
1107  else
1108  latency_ = rtt;
1109  }
1110 
1111  return;
1112  }
1113 }
1114 
1115 void
1116 PeerImp::onMessage(std::shared_ptr<protocol::TMCluster> const& m)
1117 {
1118  // VFALCO NOTE I think we should drop the peer immediately
1119  if (!cluster())
1120  {
1122  return;
1123  }
1124 
1125  for (int i = 0; i < m->clusternodes().size(); ++i)
1126  {
1127  protocol::TMClusterNode const& node = m->clusternodes(i);
1128 
1129  std::string name;
1130  if (node.has_nodename())
1131  name = node.nodename();
1132 
1133  auto const publicKey =
1134  parseBase58<PublicKey>(TokenType::NodePublic, node.publickey());
1135 
1136  // NIKB NOTE We should drop the peer immediately if
1137  // they send us a public key we can't parse
1138  if (publicKey)
1139  {
1140  auto const reportTime =
1141  NetClock::time_point{NetClock::duration{node.reporttime()}};
1142 
1143  app_.cluster().update(
1144  *publicKey, name, node.nodeload(), reportTime);
1145  }
1146  }
1147 
1148  int loadSources = m->loadsources().size();
1149  if (loadSources != 0)
1150  {
1151  Resource::Gossip gossip;
1152  gossip.items.reserve(loadSources);
1153  for (int i = 0; i < m->loadsources().size(); ++i)
1154  {
1155  protocol::TMLoadSource const& node = m->loadsources(i);
1156  Resource::Gossip::Item item;
1157  item.address = beast::IP::Endpoint::from_string(node.name());
1158  item.balance = node.cost();
1159  if (item.address != beast::IP::Endpoint())
1160  gossip.items.push_back(item);
1161  }
1163  }
1164 
1165  // Calculate the cluster fee:
1166  auto const thresh = app_.timeKeeper().now() - 90s;
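// Only fees reported by cluster nodes within the last 90 seconds count.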
1167  std::uint32_t clusterFee = 0;
1168 
1169  std::vector<std::uint32_t> fees;
1170  fees.reserve(app_.cluster().size());
1171 
1172  app_.cluster().for_each([&fees, thresh](ClusterNode const& status) {
1173  if (status.getReportTime() >= thresh)
1174  fees.push_back(status.getLoadFee());
1175  });
1176 
1177  if (!fees.empty())
1178  {
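// Take the median of the reported fees; nth_element partially sorts,
// which is enough to place the middle element at fees[index].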
1179  auto const index = fees.size() / 2;
1180  std::nth_element(fees.begin(), fees.begin() + index, fees.end());
1181  clusterFee = fees[index];
1182  }
1183 
1184  app_.getFeeTrack().setClusterFee(clusterFee);
1185 }
1186 
1187 void
1189 {
1190  // DEPRECATED
1191 }
1192 
1193 void
1195 {
1196  // DEPRECATED
1197 }
1198 
1199 void
1200 PeerImp::onMessage(std::shared_ptr<protocol::TMGetPeerShardInfoV2> const& m)
1201 {
1202  auto badData = [&](std::string msg) {
1204  JLOG(p_journal_.warn()) << msg;
1205  };
1206 
1207  // Verify relays
1208  if (m->relays() > relayLimit)
1209  return badData("Invalid relays");
1210 
1211  // Verify peer chain
1212  // The peer chain should not contain this node's public key
1213  // nor the public key of the sending peer
1214  std::set<PublicKey> pubKeyChain;
1215  pubKeyChain.insert(app_.nodeIdentity().first);
1216  pubKeyChain.insert(publicKey_);
1217 
1218  auto const peerChainSz{m->peerchain_size()};
1219  if (peerChainSz > 0)
1220  {
1221  if (peerChainSz > relayLimit)
1222  return badData("Invalid peer chain size");
1223 
1224  if (peerChainSz + m->relays() > relayLimit)
1225  return badData("Invalid relays and peer chain size");
1226 
1227  for (int i = 0; i < peerChainSz; ++i)
1228  {
1229  auto const slice{makeSlice(m->peerchain(i).publickey())};
1230 
1231  // Verify peer public key
1232  if (!publicKeyType(slice))
1233  return badData("Invalid peer public key");
1234 
1235  // Verify peer public key is unique in the peer chain
1236  if (!pubKeyChain.emplace(slice).second)
1237  return badData("Invalid peer public key");
1238  }
1239  }
1240 
1241  // Reply with shard info this node may have
1242  if (auto shardStore = app_.getShardStore())
1243  {
1244  auto reply{shardStore->getShardInfo()->makeMessage(app_)};
1245  if (peerChainSz > 0)
1246  *(reply.mutable_peerchain()) = m->peerchain();
1247  send(std::make_shared<Message>(reply, protocol::mtPEER_SHARD_INFO_V2));
1248  }
1249 
1250  if (m->relays() == 0)
1251  return;
1252 
1253  // Charge originating peer a fee for requesting relays
1254  if (peerChainSz == 0)
1256 
1257  // Add peer to the peer chain
1258  m->add_peerchain()->set_publickey(publicKey_.data(), publicKey_.size());
1259 
1260  // Relay the request to peers, exclude the peer chain
1261  m->set_relays(m->relays() - 1);
1263  std::make_shared<Message>(*m, protocol::mtGET_PEER_SHARD_INFO_V2),
1264  [&](std::shared_ptr<Peer> const& peer) {
1265  return pubKeyChain.find(peer->getNodePublic()) != pubKeyChain.end();
1266  }));
1267 }
1268 
1269 void
1270 PeerImp::onMessage(std::shared_ptr<protocol::TMPeerShardInfoV2> const& m)
1271 {
1272  // Find the earliest and latest shard indexes
1273  auto const& db{app_.getNodeStore()};
1274  auto const earliestShardIndex{db.earliestShardIndex()};
1275  auto const latestShardIndex{[&]() -> std::optional<std::uint32_t> {
1276  auto const curLedgerSeq{app_.getLedgerMaster().getCurrentLedgerIndex()};
1277  if (curLedgerSeq >= db.earliestLedgerSeq())
1278  return db.seqToShardIndex(curLedgerSeq);
1279  return std::nullopt;
1280  }()};
1281 
1282  auto badData = [&](std::string msg) {
1284  JLOG(p_journal_.warn()) << msg;
1285  };
1286 
1287  // Used to create a digest and verify the message signature
1288  Serializer s;
1290 
1291  // Verify message creation time
1293  {
1294  auto const timestamp{
1295  NetClock::time_point{std::chrono::seconds{m->timestamp()}}};
1296  auto const now{app_.timeKeeper().now()};
1297  if (timestamp > (now + 5s))
1298  return badData("Invalid timestamp");
1299 
1300  // Check if stale
1301  using namespace std::chrono_literals;
1302  if (timestamp < (now - 5min))
1303  return badData("Stale timestamp");
1304 
1305  s.add32(m->timestamp());
1306  shardInfo.setMsgTimestamp(timestamp);
1307  }
1308 
1309  // Verify incomplete shards
1310  auto const numIncomplete{m->incomplete_size()};
1311  if (numIncomplete > 0)
1312  {
1313  if (latestShardIndex && numIncomplete > *latestShardIndex)
1314  return badData("Invalid number of incomplete shards");
1315 
1316  // Verify each incomplete shard
1317  for (int i = 0; i < numIncomplete; ++i)
1318  {
1319  auto const& incomplete{m->incomplete(i)};
1320  auto const shardIndex{incomplete.shardindex()};
1321 
1322  // Verify shard index
1323  if (shardIndex < earliestShardIndex ||
1324  (latestShardIndex && shardIndex > latestShardIndex))
1325  {
1326  return badData("Invalid incomplete shard index");
1327  }
1328  s.add32(shardIndex);
1329 
1330  // Verify state
1331  auto const state{static_cast<ShardState>(incomplete.state())};
1332  switch (state)
1333  {
1334  // Incomplete states
1335  case ShardState::acquire:
1336  case ShardState::complete:
1338  case ShardState::queued:
1339  break;
1340 
1341  // case ShardState::finalized:
1342  default:
1343  return badData("Invalid incomplete shard state");
1344  };
1345  s.add32(incomplete.state());
1346 
1347  // Verify progress
1348  std::uint32_t progress{0};
1349  if (incomplete.has_progress())
1350  {
1351  progress = incomplete.progress();
1352  if (progress < 1 || progress > 100)
1353  return badData("Invalid incomplete shard progress");
1354  s.add32(progress);
1355  }
1356 
1357  // Verify each incomplete shard is unique
1358  if (!shardInfo.update(shardIndex, state, progress))
1359  return badData("Invalid duplicate incomplete shards");
1360  }
1361  }
1362 
1363  // Verify finalized shards
1364  if (m->has_finalized())
1365  {
1366  auto const& str{m->finalized()};
1367  if (str.empty())
1368  return badData("Invalid finalized shards");
1369 
1370  if (!shardInfo.setFinalizedFromString(str))
1371  return badData("Invalid finalized shard indexes");
1372 
1373  auto const& finalized{shardInfo.finalized()};
1374  auto const numFinalized{boost::icl::length(finalized)};
1375  if (numFinalized == 0 ||
1376  boost::icl::first(finalized) < earliestShardIndex ||
1377  (latestShardIndex &&
1378  boost::icl::last(finalized) > latestShardIndex))
1379  {
1380  return badData("Invalid finalized shard indexes");
1381  }
1382 
1383  if (latestShardIndex &&
1384  (numFinalized + numIncomplete) > *latestShardIndex)
1385  {
1386  return badData("Invalid number of finalized and incomplete shards");
1387  }
1388 
1389  s.addRaw(str.data(), str.size());
1390  }
1391 
1392  // Verify public key
1393  auto slice{makeSlice(m->publickey())};
1394  if (!publicKeyType(slice))
1395  return badData("Invalid public key");
1396 
1397  // Verify the peer public key isn't this node's public key
1398  PublicKey const publicKey(slice);
1399  if (publicKey == app_.nodeIdentity().first)
1400  return badData("Invalid public key");
1401 
1402  // Verify signature
1403  if (!verify(publicKey, s.slice(), makeSlice(m->signature()), false))
1404  return badData("Invalid signature");
1405 
1406  // Forward the message if a peer chain exists
1407  auto const peerChainSz{m->peerchain_size()};
1408  if (peerChainSz > 0)
1409  {
1410  // Verify peer chain
1411  if (peerChainSz > relayLimit)
1412  return badData("Invalid peer chain size");
1413 
1414  // The peer chain should not contain this node's public key
1415  // nor the public key of the sending peer
1416  std::set<PublicKey> pubKeyChain;
1417  pubKeyChain.insert(app_.nodeIdentity().first);
1418  pubKeyChain.insert(publicKey_);
1419 
1420  for (int i = 0; i < peerChainSz; ++i)
1421  {
1422  // Verify peer public key
1423  slice = makeSlice(m->peerchain(i).publickey());
1424  if (!publicKeyType(slice))
1425  return badData("Invalid peer public key");
1426 
1427  // Verify peer public key is unique in the peer chain
1428  if (!pubKeyChain.emplace(slice).second)
1429  return badData("Invalid peer public key");
1430  }
1431 
1432  // If last peer in the chain is connected, relay the message
1433  PublicKey const peerPubKey(
1434  makeSlice(m->peerchain(peerChainSz - 1).publickey()));
1435  if (auto peer = overlay_.findPeerByPublicKey(peerPubKey))
1436  {
1437  m->mutable_peerchain()->RemoveLast();
1438  peer->send(
1439  std::make_shared<Message>(*m, protocol::mtPEER_SHARD_INFO_V2));
1440  JLOG(p_journal_.trace())
1441  << "Relayed TMPeerShardInfoV2 from peer IP "
1442  << remote_address_.address().to_string() << " to peer IP "
1443  << peer->getRemoteAddress().to_string();
1444  }
1445  else
1446  {
1447  // Peer is no longer available so the relay ends
1448  JLOG(p_journal_.info()) << "Unable to relay peer shard info";
1449  }
1450  }
1451 
1452  JLOG(p_journal_.trace())
1453  << "Consumed TMPeerShardInfoV2 originating from public key "
1454  << toBase58(TokenType::NodePublic, publicKey) << " finalized shards["
1455  << ripple::to_string(shardInfo.finalized()) << "] incomplete shards["
1456  << (shardInfo.incomplete().empty() ? "empty"
1457  : shardInfo.incompleteToString())
1458  << "]";
1459 
1460  // Consume the message
1461  {
1463  auto const it{shardInfos_.find(publicKey_)};
1464  if (it == shardInfos_.end())
1465  shardInfos_.emplace(publicKey, std::move(shardInfo));
1466  else if (shardInfo.msgTimestamp() > it->second.msgTimestamp())
1467  it->second = std::move(shardInfo);
1468  }
1469 
1470  // Notify overlay a reply was received from the last peer in this chain
1471  if (peerChainSz == 0)
1473 }
1474 
1475 void
1476 PeerImp::onMessage(std::shared_ptr<protocol::TMEndpoints> const& m)
1477 {
1478  // Don't allow endpoints from peers that are not known tracking or are
1479  // not using a version of the message that we support:
1480  if (tracking_.load() != Tracking::converged || m->version() != 2)
1481  return;
1482 
1483  std::vector<PeerFinder::Endpoint> endpoints;
1484  endpoints.reserve(m->endpoints_v2().size());
1485 
1486  for (auto const& tm : m->endpoints_v2())
1487  {
1488  auto result = beast::IP::Endpoint::from_string_checked(tm.endpoint());
1489  if (!result)
1490  {
1491  JLOG(p_journal_.error()) << "failed to parse incoming endpoint: {"
1492  << tm.endpoint() << "}";
1493  continue;
1494  }
1495 
1496  // If hops == 0, this Endpoint describes the peer we are connected
1497  // to -- in that case, we take the remote address seen on the
1498  // socket and store that in the IP::Endpoint. If this is the first
1499  // time, then we'll verify that their listener can receive incoming
1500  // connections by performing a connectivity test. If hops > 0, we
1501  // just take the address/port we were given
1502 
1503  endpoints.emplace_back(
1504  tm.hops() > 0 ? *result : remote_address_.at_port(result->port()),
1505  tm.hops());
1506  }
1507 
1508  if (!endpoints.empty())
1509  overlay_.peerFinder().on_endpoints(slot_, endpoints);
1510 }
1511 
1512 void
1513 PeerImp::onMessage(std::shared_ptr<protocol::TMTransaction> const& m)
1514 {
1515  handleTransaction(m, true);
1516 }
1517 
1518 void
1519 PeerImp::handleTransaction(
1520  std::shared_ptr<protocol::TMTransaction> const& m,
1521  bool eraseTxQueue)
1522 {
1524  return;
1525 
1527  {
1528  // If we've never been in synch, there's nothing we can do
1529  // with a transaction
1530  JLOG(p_journal_.debug()) << "Ignoring incoming transaction: "
1531  << "Need network ledger";
1532  return;
1533  }
1534 
1535  SerialIter sit(makeSlice(m->rawtransaction()));
1536 
1537  try
1538  {
1539  auto stx = std::make_shared<STTx const>(sit);
1540  uint256 txID = stx->getTransactionID();
1541 
1542  int flags;
1543  constexpr std::chrono::seconds tx_interval = 10s;
1544 
1545  if (!app_.getHashRouter().shouldProcess(txID, id_, flags, tx_interval))
1546  {
1547  // we have seen this transaction recently
1548  if (flags & SF_BAD)
1549  {
1551  JLOG(p_journal_.debug()) << "Ignoring known bad tx " << txID;
1552  }
1553 
1554  // Erase only if the server has seen this tx. If the server has not
1555  // seen this tx then the tx could not have been queued for this peer.
1556  else if (eraseTxQueue && txReduceRelayEnabled())
1557  removeTxQueue(txID);
1558 
1559  return;
1560  }
1561 
1562  JLOG(p_journal_.debug()) << "Got tx " << txID;
1563 
1564  bool checkSignature = true;
1565  if (cluster())
1566  {
1567  if (!m->has_deferred() || !m->deferred())
1568  {
1569  // Skip local checks if a server we trust
1570  // put the transaction in its open ledger
1571  flags |= SF_TRUSTED;
1572  }
1573 
1575  {
1576  // For now, be paranoid and have each validator
1577  // check each transaction, regardless of source
1578  checkSignature = false;
1579  }
1580  }
1581 
1584  {
1586  JLOG(p_journal_.info()) << "Transaction queue is full";
1587  }
1588  else if (app_.getLedgerMaster().getValidatedLedgerAge() > 4min)
1589  {
1590  JLOG(p_journal_.trace())
1591  << "No new transactions until synchronized";
1592  }
1593  else
1594  {
1596  jtTRANSACTION,
1597  "recvTransaction->checkTransaction",
1599  flags,
1600  checkSignature,
1601  stx](Job&) {
1602  if (auto peer = weak.lock())
1603  peer->checkTransaction(flags, checkSignature, stx);
1604  });
1605  }
1606  }
1607  catch (std::exception const&)
1608  {
1609  JLOG(p_journal_.warn())
1610  << "Transaction invalid: " << strHex(m->rawtransaction());
1611  }
1612 }
1613 
1614 void
1615 PeerImp::onMessage(std::shared_ptr<protocol::TMGetLedger> const& m)
1616 {
1617  auto badData = [&](std::string const& msg) {
1619  JLOG(p_journal_.warn()) << "TMGetLedger: " << msg;
1620  };
1621  auto const itype{m->itype()};
1622 
1623  // Verify ledger info type
1624  if (itype < protocol::liBASE || itype > protocol::liTS_CANDIDATE)
1625  return badData("Invalid ledger info type");
1626 
1627  auto const ltype = [&m]() -> std::optional<::protocol::TMLedgerType> {
1628  if (m->has_ltype())
1629  return m->ltype();
1630  return std::nullopt;
1631  }();
1632 
1633  if (itype == protocol::liTS_CANDIDATE)
1634  {
1635  if (!m->has_ledgerhash())
1636  return badData("Invalid TX candidate set, missing TX set hash");
1637  }
1638  else if (
1639  !m->has_ledgerhash() && !m->has_ledgerseq() &&
1640  !(ltype && *ltype == protocol::ltCLOSED))
1641  {
1642  return badData("Invalid request");
1643  }
1644 
1645  // Verify ledger type
1646  if (ltype && (*ltype < protocol::ltACCEPTED || *ltype > protocol::ltCLOSED))
1647  return badData("Invalid ledger type");
1648 
1649  // Verify ledger hash
1650  if (m->has_ledgerhash() && !stringIsUint256Sized(m->ledgerhash()))
1651  return badData("Invalid ledger hash");
1652 
1653  // Verify ledger sequence
1654  if (m->has_ledgerseq())
1655  {
1656  auto const ledgerSeq{m->ledgerseq()};
1657  // Verifying the network's earliest ledger only pertains to shards.
1658  if (app_.getShardStore() &&
1659  ledgerSeq < app_.getNodeStore().earliestLedgerSeq())
1660  {
1661  return badData(
1662  "Invalid ledger sequence " + std::to_string(ledgerSeq));
1663  }
1664 
1665  // Check if within a reasonable range
1666  using namespace std::chrono_literals;
1667  if (app_.getLedgerMaster().getValidatedLedgerAge() <= 10s &&
1668  ledgerSeq > app_.getLedgerMaster().getValidLedgerIndex() + 10)
1669  {
1670  return badData(
1671  "Invalid ledger sequence " + std::to_string(ledgerSeq));
1672  }
1673  }
1674 
1675  // Verify ledger node IDs
1676  if (itype != protocol::liBASE)
1677  {
1678  if (m->nodeids_size() <= 0)
1679  return badData("Invalid ledger node IDs");
1680 
1681  for (auto const& nodeId : m->nodeids())
1682  {
1683  if (deserializeSHAMapNodeID(nodeId) == std::nullopt)
1684  return badData("Invalid SHAMap node ID");
1685  }
1686  }
1687 
1688  // Verify query type
1689  if (m->has_querytype() && m->querytype() != protocol::qtINDIRECT)
1690  return badData("Invalid query type");
1691 
1692  // Verify query depth
1693  if (m->has_querydepth())
1694  {
1695  if (m->querydepth() > Tuning::maxQueryDepth ||
1696  itype == protocol::liBASE)
1697  {
1698  return badData("Invalid query depth");
1699  }
1700  }
1701 
1702  // Queue a job to process the request
1704  app_.getJobQueue().addJob(jtLEDGER_REQ, "recvGetLedger", [weak, m](Job&) {
1705  if (auto peer = weak.lock())
1706  peer->processLedgerRequest(m);
1707  });
1708 }
1709 
1710 void
1711 PeerImp::onMessage(std::shared_ptr<protocol::TMProofPathRequest> const& m)
1712 {
1713  JLOG(p_journal_.trace()) << "onMessage, TMProofPathRequest";
1714  if (!ledgerReplayEnabled_)
1715  {
1717  return;
1718  }
1719 
1723  jtREPLAY_REQ, "recvProofPathRequest", [weak, m](Job&) {
1724  if (auto peer = weak.lock())
1725  {
1726  auto reply =
1727  peer->ledgerReplayMsgHandler_.processProofPathRequest(m);
1728  if (reply.has_error())
1729  {
1730  if (reply.error() == protocol::TMReplyError::reBAD_REQUEST)
1731  peer->charge(Resource::feeInvalidRequest);
1732  else
1733  peer->charge(Resource::feeRequestNoReply);
1734  }
1735  else
1736  {
1737  peer->send(std::make_shared<Message>(
1738  reply, protocol::mtPROOF_PATH_RESPONSE));
1739  }
1740  }
1741  });
1742 }
1743 
1744 void
1745 PeerImp::onMessage(std::shared_ptr<protocol::TMProofPathResponse> const& m)
1746 {
1747  if (!ledgerReplayEnabled_)
1748  {
1750  return;
1751  }
1752 
1754  {
1756  }
1757 }
1758 
1759 void
1760 PeerImp::onMessage(std::shared_ptr<protocol::TMReplayDeltaRequest> const& m)
1761 {
1762  JLOG(p_journal_.trace()) << "onMessage, TMReplayDeltaRequest";
1763  if (!ledgerReplayEnabled_)
1764  {
1766  return;
1767  }
1768 
1772  jtREPLAY_REQ, "recvReplayDeltaRequest", [weak, m](Job&) {
1773  if (auto peer = weak.lock())
1774  {
1775  auto reply =
1776  peer->ledgerReplayMsgHandler_.processReplayDeltaRequest(m);
1777  if (reply.has_error())
1778  {
1779  if (reply.error() == protocol::TMReplyError::reBAD_REQUEST)
1780  peer->charge(Resource::feeInvalidRequest);
1781  else
1782  peer->charge(Resource::feeRequestNoReply);
1783  }
1784  else
1785  {
1786  peer->send(std::make_shared<Message>(
1787  reply, protocol::mtREPLAY_DELTA_RESPONSE));
1788  }
1789  }
1790  });
1791 }
1792 
1793 void
1794 PeerImp::onMessage(std::shared_ptr<protocol::TMReplayDeltaResponse> const& m)
1795 {
1796  if (!ledgerReplayEnabled_)
1797  {
1799  return;
1800  }
1801 
1803  {
1805  }
1806 }
1807 
1808 void
1809 PeerImp::onMessage(std::shared_ptr<protocol::TMLedgerData> const& m)
1810 {
1811  auto badData = [&](std::string const& msg) {
1813  JLOG(p_journal_.warn()) << "TMLedgerData: " << msg;
1814  };
1815 
1816  // Verify ledger hash
1817  if (!stringIsUint256Sized(m->ledgerhash()))
1818  return badData("Invalid ledger hash");
1819 
1820  // Verify ledger sequence
1821  {
1822  auto const ledgerSeq{m->ledgerseq()};
1823  if (m->type() == protocol::liTS_CANDIDATE)
1824  {
1825  if (ledgerSeq != 0)
1826  {
1827  return badData(
1828  "Invalid ledger sequence " + std::to_string(ledgerSeq));
1829  }
1830  }
1831  else
1832  {
1833  // Verifying the network's earliest ledger only pertains to shards.
1834  if (app_.getShardStore() &&
1835  ledgerSeq < app_.getNodeStore().earliestLedgerSeq())
1836  {
1837  return badData(
1838  "Invalid ledger sequence " + std::to_string(ledgerSeq));
1839  }
1840 
1841  // Check if within a reasonable range
1842  using namespace std::chrono_literals;
1843  if (app_.getLedgerMaster().getValidatedLedgerAge() <= 10s &&
1844  ledgerSeq > app_.getLedgerMaster().getValidLedgerIndex() + 10)
1845  {
1846  return badData(
1847  "Invalid ledger sequence " + std::to_string(ledgerSeq));
1848  }
1849  }
1850  }
1851 
1852  // Verify ledger info type
1853  if (m->type() < protocol::liBASE || m->type() > protocol::liTS_CANDIDATE)
1854  return badData("Invalid ledger info type");
1855 
1856  // Verify reply error
1857  if (m->has_error() &&
1858  (m->error() < protocol::reNO_LEDGER ||
1859  m->error() > protocol::reBAD_REQUEST))
1860  {
1861  return badData("Invalid reply error");
1862  }
1863 
1864  // Verify ledger nodes.
1865  if (m->nodes_size() <= 0 || m->nodes_size() > Tuning::hardMaxReplyNodes)
1866  {
1867  return badData(
1868  "Invalid Ledger/TXset nodes " + std::to_string(m->nodes_size()));
1869  }
1870 
1871  // If there is a request cookie, attempt to relay the message
1872  if (m->has_requestcookie())
1873  {
1874  if (auto peer = overlay_.findPeerByShortID(m->requestcookie()))
1875  {
1876  m->clear_requestcookie();
1877  peer->send(std::make_shared<Message>(*m, protocol::mtLEDGER_DATA));
1878  }
1879  else
1880  {
1881  JLOG(p_journal_.info()) << "Unable to route TX/ledger data reply";
1882  }
1883  return;
1884  }
1885 
1886  uint256 const ledgerHash{m->ledgerhash()};
1887 
1888  // Otherwise check if received data for a candidate transaction set
1889  if (m->type() == protocol::liTS_CANDIDATE)
1890  {
1893  jtTXN_DATA, "recvPeerData", [weak, ledgerHash, m](Job&) {
1894  if (auto peer = weak.lock())
1895  {
1896  peer->app_.getInboundTransactions().gotData(
1897  ledgerHash, peer, m);
1898  }
1899  });
1900  return;
1901  }
1902 
1903  // Consume the message
1905 }
1906 
1907 void
1908 PeerImp::onMessage(std::shared_ptr<protocol::TMProposeSet> const& m)
1909 {
1910  protocol::TMProposeSet& set = *m;
1911 
1912  auto const sig = makeSlice(set.signature());
1913 
1914  // Preliminary check for the validity of the signature: A DER encoded
1915  // signature can't be longer than 72 bytes.
1916  if ((std::clamp<std::size_t>(sig.size(), 64, 72) != sig.size()) ||
1917  (publicKeyType(makeSlice(set.nodepubkey())) != KeyType::secp256k1))
1918  {
1919  JLOG(p_journal_.warn()) << "Proposal: malformed";
1921  return;
1922  }
1923 
1924  if (!stringIsUint256Sized(set.currenttxhash()) ||
1925  !stringIsUint256Sized(set.previousledger()))
1926  {
1927  JLOG(p_journal_.warn()) << "Proposal: malformed";
1929  return;
1930  }
1931 
1932  // RH TODO: when isTrusted = false we should probably also cache a key
1933  // suppression for 30 seconds to avoid doing a relatively expensive lookup
1934  // every time a spam packet is received
1935  PublicKey const publicKey{makeSlice(set.nodepubkey())};
1936  auto const isTrusted = app_.validators().trusted(publicKey);
1937 
1938  // If the operator has specified that untrusted proposals should be
1939  // dropped, then that happens here, i.e. before wasting further CPU
1940  // verifying the signature of an untrusted key
1941  if (!isTrusted && app_.config().RELAY_UNTRUSTED_PROPOSALS == -1)
1942  return;
1943 
1944  uint256 const proposeHash{set.currenttxhash()};
1945  uint256 const prevLedger{set.previousledger()};
1946 
1947  NetClock::time_point const closeTime{NetClock::duration{set.closetime()}};
1948 
1949  uint256 const suppression = proposalUniqueId(
1950  proposeHash,
1951  prevLedger,
1952  set.proposeseq(),
1953  closeTime,
1954  publicKey.slice(),
1955  sig);
1956 
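// The suppression hash uniquely identifies this proposal, so duplicates
// relayed by multiple peers can be detected and dropped.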
1957  if (auto [added, relayed] =
1959  !added)
1960  {
1961  // Count unique messages (Slots has its own 'HashRouter') that a peer
1962  // receives within IDLED seconds since the message has been relayed.
1963  if (reduceRelayReady() && relayed &&
1964  (stopwatch().now() - *relayed) < reduce_relay::IDLED)
1966  suppression, publicKey, id_, protocol::mtPROPOSE_LEDGER);
1967  JLOG(p_journal_.trace()) << "Proposal: duplicate";
1968  return;
1969  }
1970 
1971  if (!isTrusted)
1972  {
1974  {
1975  JLOG(p_journal_.debug())
1976  << "Proposal: Dropping untrusted (peer divergence)";
1977  return;
1978  }
1979 
1980  if (!cluster() && app_.getFeeTrack().isLoadedLocal())
1981  {
1982  JLOG(p_journal_.debug()) << "Proposal: Dropping untrusted (load)";
1983  return;
1984  }
1985  }
1986 
1987  JLOG(p_journal_.trace())
1988  << "Proposal: " << (isTrusted ? "trusted" : "untrusted");
1989 
1990  auto proposal = RCLCxPeerPos(
1991  publicKey,
1992  sig,
1993  suppression,
1995  prevLedger,
1996  set.proposeseq(),
1997  proposeHash,
1998  closeTime,
2001 
2004  isTrusted ? jtPROPOSAL_t : jtPROPOSAL_ut,
2005  "recvPropose->checkPropose",
2006  [weak, m, proposal](Job& job) {
2007  if (auto peer = weak.lock())
2008  peer->checkPropose(job, m, proposal);
2009  });
2010 }
2011 
2012 void
2013 PeerImp::onMessage(std::shared_ptr<protocol::TMStatusChange> const& m)
2014 {
2015  JLOG(p_journal_.trace()) << "Status: Change";
2016 
2017  if (!m->has_networktime())
2018  m->set_networktime(app_.timeKeeper().now().time_since_epoch().count());
2019 
2020  {
2022  if (!last_status_.has_newstatus() || m->has_newstatus())
2023  last_status_ = *m;
2024  else
2025  {
2026  // preserve old status
2027  protocol::NodeStatus status = last_status_.newstatus();
2028  last_status_ = *m;
2029  m->set_newstatus(status);
2030  }
2031  }
2032 
2033  if (m->newevent() == protocol::neLOST_SYNC)
2034  {
2035  bool outOfSync{false};
2036  {
2037  // Operations on closedLedgerHash_ and previousLedgerHash_ must be
2038  // guarded by recentLock_.
2040  if (!closedLedgerHash_.isZero())
2041  {
2042  outOfSync = true;
2044  }
2046  }
2047  if (outOfSync)
2048  {
2049  JLOG(p_journal_.debug()) << "Status: Out of sync";
2050  }
2051  return;
2052  }
2053 
2054  {
2055  uint256 closedLedgerHash{};
2056  bool const peerChangedLedgers{
2057  m->has_ledgerhash() && stringIsUint256Sized(m->ledgerhash())};
2058 
2059  {
2060  // Operations on closedLedgerHash_ and previousLedgerHash_ must be
2061  // guarded by recentLock_.
2063  if (peerChangedLedgers)
2064  {
2065  closedLedgerHash_ = m->ledgerhash();
2066  closedLedgerHash = closedLedgerHash_;
2067  addLedger(closedLedgerHash, sl);
2068  }
2069  else
2070  {
2072  }
2073 
2074  if (m->has_ledgerhashprevious() &&
2075  stringIsUint256Sized(m->ledgerhashprevious()))
2076  {
2077  previousLedgerHash_ = m->ledgerhashprevious();
2079  }
2080  else
2081  {
2083  }
2084  }
2085  if (peerChangedLedgers)
2086  {
2087  JLOG(p_journal_.debug()) << "LCL is " << closedLedgerHash;
2088  }
2089  else
2090  {
2091  JLOG(p_journal_.debug()) << "Status: No ledger";
2092  }
2093  }
2094 
2095  if (m->has_firstseq() && m->has_lastseq())
2096  {
2097  std::lock_guard sl(recentLock_);
2098 
2099  minLedger_ = m->firstseq();
2100  maxLedger_ = m->lastseq();
2101 
2102  if ((maxLedger_ < minLedger_) || (minLedger_ == 0) || (maxLedger_ == 0))
2103  minLedger_ = maxLedger_ = 0;
2104  }
2105 
2106  if (m->has_ledgerseq() &&
2108  {
2109  checkTracking(
2110  m->ledgerseq(), app_.getLedgerMaster().getValidLedgerIndex());
2111  }
2112 
2113  app_.getOPs().pubPeerStatus([=]() -> Json::Value {
2114  Json::Value j = Json::objectValue;
2115 
2116  if (m->has_newstatus())
2117  {
2118  switch (m->newstatus())
2119  {
2120  case protocol::nsCONNECTING:
2121  j[jss::status] = "CONNECTING";
2122  break;
2123  case protocol::nsCONNECTED:
2124  j[jss::status] = "CONNECTED";
2125  break;
2126  case protocol::nsMONITORING:
2127  j[jss::status] = "MONITORING";
2128  break;
2129  case protocol::nsVALIDATING:
2130  j[jss::status] = "VALIDATING";
2131  break;
2132  case protocol::nsSHUTTING:
2133  j[jss::status] = "SHUTTING";
2134  break;
2135  }
2136  }
2137 
2138  if (m->has_newevent())
2139  {
2140  switch (m->newevent())
2141  {
2142  case protocol::neCLOSING_LEDGER:
2143  j[jss::action] = "CLOSING_LEDGER";
2144  break;
2145  case protocol::neACCEPTED_LEDGER:
2146  j[jss::action] = "ACCEPTED_LEDGER";
2147  break;
2148  case protocol::neSWITCHED_LEDGER:
2149  j[jss::action] = "SWITCHED_LEDGER";
2150  break;
2151  case protocol::neLOST_SYNC:
2152  j[jss::action] = "LOST_SYNC";
2153  break;
2154  }
2155  }
2156 
2157  if (m->has_ledgerseq())
2158  {
2159  j[jss::ledger_index] = m->ledgerseq();
2160  }
2161 
2162  if (m->has_ledgerhash())
2163  {
2164  uint256 closedLedgerHash{};
2165  {
2166  std::lock_guard sl(recentLock_);
2167  closedLedgerHash = closedLedgerHash_;
2168  }
2169  j[jss::ledger_hash] = to_string(closedLedgerHash);
2170  }
2171 
2172  if (m->has_networktime())
2173  {
2174  j[jss::date] = Json::UInt(m->networktime());
2175  }
2176 
2177  if (m->has_firstseq() && m->has_lastseq())
2178  {
2179  j[jss::ledger_index_min] = Json::UInt(m->firstseq());
2180  j[jss::ledger_index_max] = Json::UInt(m->lastseq());
2181  }
2182 
2183  return j;
2184  });
2185 }
2186 
2187 void
2188 PeerImp::checkTracking(std::uint32_t validationSeq)
2189 {
2190  std::uint32_t serverSeq;
2191  {
2192  // Extract the sequence number of the highest
2193  // ledger this peer has
2194  std::lock_guard sl(recentLock_);
2195 
2196  serverSeq = maxLedger_;
2197  }
2198  if (serverSeq != 0)
2199  {
2200  // Compare the peer's ledger sequence to the
2201  // sequence of a recently-validated ledger
2202  checkTracking(serverSeq, validationSeq);
2203  }
2204 }
2205 
2206 void
2207 PeerImp::checkTracking(std::uint32_t seq1, std::uint32_t seq2)
2208 {
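// Compute |seq1 - seq2| without unsigned wrap-around.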
2209  int diff = std::max(seq1, seq2) - std::min(seq1, seq2);
2210 
2211  if (diff < Tuning::convergedLedgerLimit)
2212  {
2213  // The peer's ledger sequence is close to the validation's
2214  tracking_ = Tracking::converged;
2215  }
2216 
2217  if ((diff > Tuning::divergedLedgerLimit) &&
2218  (tracking_.load() != Tracking::diverged))
2219  {
2220  // The peer's ledger sequence is way off the validation's
2221  std::lock_guard sl(recentLock_);
2222 
2223  tracking_ = Tracking::diverged;
2224  trackingTime_ = clock_type::now();
2225  }
2226 }
2227 
2228 void
2229 PeerImp::onMessage(std::shared_ptr<protocol::TMHaveTransactionSet> const& m)
2230 {
2231  if (!stringIsUint256Sized(m->hash()))
2232  {
2233  fee_ = Resource::feeInvalidRequest;
2234  return;
2235  }
2236 
2237  uint256 const hash{m->hash()};
2238 
2239  if (m->status() == protocol::tsHAVE)
2240  {
2241  std::lock_guard sl(recentLock_);
2242 
2243  if (std::find(recentTxSets_.begin(), recentTxSets_.end(), hash) !=
2244  recentTxSets_.end())
2245  {
2246  fee_ = Resource::feeUnwantedData;
2247  return;
2248  }
2249 
2250  recentTxSets_.push_back(hash);
2251  }
2252 }
2253 
2254 void
2255 PeerImp::onValidatorListMessage(
2256  std::string const& messageType,
2257  std::string const& manifest,
2258  std::uint32_t version,
2259  std::vector<ValidatorBlobInfo> const& blobs)
2260 {
2261  // If there are no blobs, the message is malformed (possibly because of
2262  // ValidatorList class rules), so charge accordingly and skip processing.
2263  if (blobs.empty())
2264  {
2265  JLOG(p_journal_.warn()) << "Ignored malformed " << messageType
2266  << " from peer " << remote_address_;
2267  // This shouldn't ever happen with a well-behaved peer
2268  fee_ = Resource::feeHighBurdenPeer;
2269  return;
2270  }
2271 
2272  auto const hash = sha512Half(manifest, blobs, version);
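// This hash identifies the (manifest, version, blobs) payload and is used
// below to suppress duplicate validator list messages.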
2273 
2274  JLOG(p_journal_.debug())
2275  << "Received " << messageType << " from " << remote_address_.to_string()
2276  << " (" << id_ << ")";
2277 
2278  if (!app_.getHashRouter().addSuppressionPeer(hash, id_))
2279  {
2280  JLOG(p_journal_.debug())
2281  << messageType << ": received duplicate " << messageType;
2282  // Charging this fee here won't hurt the peer in the normal
2283  // course of operation (ie. refresh every 5 minutes), but
2284  // will add up if the peer is misbehaving.
2285  fee_ = Resource::feeUnwantedData;
2286  return;
2287  }
2288 
2289  auto const applyResult = app_.validators().applyListsAndBroadcast(
2290  manifest,
2291  version,
2292  blobs,
2293  remote_address_.to_string(),
2294  hash,
2295  app_.overlay(),
2296  app_.getHashRouter(),
2297  app_.getOPs());
2298 
2299  JLOG(p_journal_.debug())
2300  << "Processed " << messageType << " version " << version << " from "
2301  << (applyResult.publisherKey ? strHex(*applyResult.publisherKey)
2302  : "unknown or invalid publisher")
2303  << " from " << remote_address_.to_string() << " (" << id_
2304  << ") with best result " << to_string(applyResult.bestDisposition());
2305 
2306  // Act based on the best result
2307  switch (applyResult.bestDisposition())
2308  {
2309  // New list
2310  case ListDisposition::accepted:
2311  // Newest list is expired, and that needs to be broadcast, too
2312  case ListDisposition::expired:
2313  // Future list
2314  case ListDisposition::pending: {
2315  std::lock_guard<std::mutex> sl(recentLock_);
2316 
2317  assert(applyResult.publisherKey);
2318  auto const& pubKey = *applyResult.publisherKey;
2319 #ifndef NDEBUG
2320  if (auto const iter = publisherListSequences_.find(pubKey);
2321  iter != publisherListSequences_.end())
2322  {
2323  assert(iter->second < applyResult.sequence);
2324  }
2325 #endif
2326  publisherListSequences_[pubKey] = applyResult.sequence;
2327  }
2328  break;
2329  case ListDisposition::same_sequence:
2330  case ListDisposition::known_sequence:
2331 #ifndef NDEBUG
2332  {
2333  std::lock_guard<std::mutex> sl(recentLock_);
2334  assert(applyResult.sequence && applyResult.publisherKey);
2335  assert(
2336  publisherListSequences_[*applyResult.publisherKey] <=
2337  applyResult.sequence);
2338  }
2339 #endif // !NDEBUG
2340 
2341  break;
2342  case ListDisposition::stale:
2343  case ListDisposition::untrusted:
2344  case ListDisposition::invalid:
2345  case ListDisposition::unsupported_version:
2346  break;
2347  default:
2348  assert(false);
2349  }
2350 
2351  // Charge based on the worst result
2352  switch (applyResult.worstDisposition())
2353  {
2354  case ListDisposition::accepted:
2355  case ListDisposition::expired:
2356  case ListDisposition::pending:
2357  // No charges for good data
2358  break;
2359  case ListDisposition::same_sequence:
2360  case ListDisposition::known_sequence:
2361  // Charging this fee here won't hurt the peer in the normal
2362  // course of operation (i.e. refresh every 5 minutes), but
2363  // will add up if the peer is misbehaving.
2364  fee_ = Resource::feeUnwantedData;
2365  break;
2366  case ListDisposition::stale:
2367  // There are very few good reasons for a peer to send an
2368  // old list, particularly more than once.
2369  fee_ = Resource::feeBadData;
2370  break;
2371  case ListDisposition::untrusted:
2372  // Charging this fee here won't hurt the peer in the normal
2373  // course of operation (i.e. refresh every 5 minutes), but
2374  // will add up if the peer is misbehaving.
2375  fee_ = Resource::feeUnwantedData;
2376  break;
2377  case ListDisposition::invalid:
2378  // This shouldn't ever happen with a well-behaved peer
2379  fee_ = Resource::feeInvalidSignature;
2380  break;
2381  case ListDisposition::unsupported_version:
2382  // During a version transition, this may be legitimate.
2383  // If it happens frequently, that's probably bad.
2384  fee_ = Resource::feeBadData;
2385  break;
2386  default:
2387  assert(false);
2388  }
2389 
2390  // Log based on all the results.
2391  for (auto const& [disp, count] : applyResult.dispositions)
2392  {
2393  switch (disp)
2394  {
2395  // New list
2396  case ListDisposition::accepted:
2397  JLOG(p_journal_.debug())
2398  << "Applied " << count << " new " << messageType
2399  << "(s) from peer " << remote_address_;
2400  break;
2401  // Newest list is expired, and that needs to be broadcast, too
2402  case ListDisposition::expired:
2403  JLOG(p_journal_.debug())
2404  << "Applied " << count << " expired " << messageType
2405  << "(s) from peer " << remote_address_;
2406  break;
2407  // Future list
2408  case ListDisposition::pending:
2409  JLOG(p_journal_.debug())
2410  << "Processed " << count << " future " << messageType
2411  << "(s) from peer " << remote_address_;
2412  break;
2413  case ListDisposition::same_sequence:
2414  JLOG(p_journal_.warn())
2415  << "Ignored " << count << " " << messageType
2416  << "(s) with current sequence from peer "
2417  << remote_address_;
2418  break;
2419  case ListDisposition::known_sequence:
2420  JLOG(p_journal_.warn())
2421  << "Ignored " << count << " " << messageType
2422  << "(s) with future sequence from peer " << remote_address_;
2423  break;
2424  case ListDisposition::stale:
2425  JLOG(p_journal_.warn())
2426  << "Ignored " << count << " stale " << messageType
2427  << "(s) from peer " << remote_address_;
2428  break;
2429  case ListDisposition::untrusted:
2430  JLOG(p_journal_.warn())
2431  << "Ignored " << count << " untrusted " << messageType
2432  << "(s) from peer " << remote_address_;
2433  break;
2434  case ListDisposition::unsupported_version:
2435  JLOG(p_journal_.warn())
2436  << "Ignored " << count << " unsupported version "
2437  << messageType << "(s) from peer " << remote_address_;
2438  break;
2439  case ListDisposition::invalid:
2440  JLOG(p_journal_.warn())
2441  << "Ignored " << count << " invalid " << messageType
2442  << "(s) from peer " << remote_address_;
2443  break;
2444  default:
2445  assert(false);
2446  }
2447  }
2448 }
2449 
2450 void
2451 PeerImp::onMessage(std::shared_ptr<protocol::TMValidatorList> const& m)
2452 {
2453  try
2454  {
2455  if (!supportsFeature(ProtocolFeature::ValidatorListPropagation))
2456  {
2457  JLOG(p_journal_.debug())
2458  << "ValidatorList: received validator list from peer using "
2459  << "protocol version " << to_string(protocol_)
2460  << " which shouldn't support this feature.";
2461  fee_ = Resource::feeUnwantedData;
2462  return;
2463  }
2464  onValidatorListMessage(
2465  "ValidatorList",
2466  m->manifest(),
2467  m->version(),
2468  ValidatorList::parseBlobs(*m));
2469  }
2470  catch (std::exception const& e)
2471  {
2472  JLOG(p_journal_.warn()) << "ValidatorList: Exception, " << e.what()
2473  << " from peer " << remote_address_;
2474  fee_ = Resource::feeBadData;
2475  }
2476 }
2477 
2478 void
2479 PeerImp::onMessage(
2480  std::shared_ptr<protocol::TMValidatorListCollection> const& m)
2481 {
2482  try
2483  {
2484  if (!supportsFeature(ProtocolFeature::ValidatorList2Propagation))
2485  {
2486  JLOG(p_journal_.debug())
2487  << "ValidatorListCollection: received validator list from peer "
2488  << "using protocol version " << to_string(protocol_)
2489  << " which shouldn't support this feature.";
2490  fee_ = Resource::feeUnwantedData;
2491  return;
2492  }
2493  else if (m->version() < 2)
2494  {
2495  JLOG(p_journal_.debug())
2496  << "ValidatorListCollection: received invalid validator list "
2497  "version "
2498  << m->version() << " from peer using protocol version "
2499  << to_string(protocol_);
2500  fee_ = Resource::feeBadData;
2501  return;
2502  }
2503  onValidatorListMessage(
2504  "ValidatorListCollection",
2505  m->manifest(),
2506  m->version(),
2507  ValidatorList::parseBlobs(*m));
2508  }
2509  catch (std::exception const& e)
2510  {
2511  JLOG(p_journal_.warn()) << "ValidatorListCollection: Exception, "
2512  << e.what() << " from peer " << remote_address_;
2513  fee_ = Resource::feeBadData;
2514  }
2515 }
2516 
2517 void
2518 PeerImp::onMessage(std::shared_ptr<protocol::TMValidation> const& m)
2519 {
2520  if (m->validation().size() < 50)
2521  {
2522  JLOG(p_journal_.warn()) << "Validation: Too small";
2523  fee_ = Resource::feeInvalidRequest;
2524  return;
2525  }
2526 
2527  try
2528  {
2529  auto const closeTime = app_.timeKeeper().closeTime();
2530 
2531  std::shared_ptr<STValidation> val;
2532  {
2533  SerialIter sit(makeSlice(m->validation()));
2534  val = std::make_shared<STValidation>(
2535  std::ref(sit),
2536  [this](PublicKey const& pk) {
2537  return calcNodeID(
2538  app_.validatorManifests().getMasterKey(pk));
2539  },
2540  false);
2541  val->setSeen(closeTime);
2542  }
2543 
2544  if (!isCurrent(
2545  app_.getValidations().parms(),
2546  app_.timeKeeper().closeTime(),
2547  val->getSignTime(),
2548  val->getSeenTime()))
2549  {
2550  JLOG(p_journal_.trace()) << "Validation: Not current";
2551  fee_ = Resource::feeUnwantedData;
2552  return;
2553  }
2554 
2555  // RH TODO: when isTrusted = false we should probably also cache a key
2556  // suppression for 30 seconds to avoid doing a relatively expensive
2557  // lookup every time a spam packet is received
2558  auto const isTrusted =
2559  app_.validators().trusted(val->getSignerPublic());
2560 
2561  // If the operator has specified that untrusted validations be dropped,
2562  // then this happens here, i.e. before wasting further CPU verifying the
2563  // signature of an untrusted key.
2564  if (!isTrusted && app_.config().RELAY_UNTRUSTED_VALIDATIONS == -1)
2565  return;
2566 
2567  auto key = sha512Half(makeSlice(m->validation()));
2568  if (auto [added, relayed] =
2569  app_.getHashRouter().addSuppressionPeerWithStatus(key, id_);
2570  !added)
2571  {
2572  // Count unique messages (Slots has its own 'HashRouter') that a
2573  // peer receives within IDLED seconds of the message having been
2574  // relayed. Wait WAIT_ON_BOOTUP time to let the server establish
2575  // connections to peers.
2576  if (reduceRelayReady() && relayed &&
2577  (stopwatch().now() - *relayed) < reduce_relay::IDLED)
2578  overlay_.updateSlotAndSquelch(
2579  key, val->getSignerPublic(), id_, protocol::mtVALIDATION);
2580  JLOG(p_journal_.trace()) << "Validation: duplicate";
2581  return;
2582  }
2583 
2584  if (!isTrusted && (tracking_.load() == Tracking::diverged))
2585  {
2586  JLOG(p_journal_.debug())
2587  << "Validation: dropping untrusted from diverged peer";
2588  }
2589  if (isTrusted || cluster() || !app_.getFeeTrack().isLoadedLocal())
2590  {
2591  std::weak_ptr<PeerImp> weak = shared_from_this();
2592  app_.getJobQueue().addJob(
2593  isTrusted ? jtVALIDATION_t : jtVALIDATION_ut,
2594  "recvValidation->checkValidation",
2595  [weak, val, m](Job&) {
2596  if (auto peer = weak.lock())
2597  peer->checkValidation(val, m);
2598  });
2599  }
2600  else
2601  {
2602  JLOG(p_journal_.debug()) << "Validation: Dropping UNTRUSTED (load)";
2603  }
2604  }
2605  catch (std::exception const& e)
2606  {
2607  JLOG(p_journal_.warn())
2608  << "Exception processing validation: " << e.what();
2609  fee_ = Resource::feeInvalidRequest;
2610  }
2611 }
2612 
2613 void
2614 PeerImp::onMessage(std::shared_ptr<protocol::TMGetObjectByHash> const& m)
2615 {
2616  protocol::TMGetObjectByHash& packet = *m;
2617 
2618  JLOG(p_journal_.trace()) << "received TMGetObjectByHash " << packet.type()
2619  << " " << packet.objects_size();
2620 
2621  if (packet.query())
2622  {
2623  // this is a query
2624  if (send_queue_.size() >= Tuning::dropSendQueue)
2625  {
2626  JLOG(p_journal_.debug()) << "GetObject: Large send queue";
2627  return;
2628  }
2629 
2630  if (packet.type() == protocol::TMGetObjectByHash::otFETCH_PACK)
2631  {
2632  doFetchPack(m);
2633  return;
2634  }
2635 
2636  if (packet.type() == protocol::TMGetObjectByHash::otTRANSACTIONS)
2637  {
2638  if (!txReduceRelayEnabled())
2639  {
2640  JLOG(p_journal_.error())
2641  << "TMGetObjectByHash: tx reduce-relay is disabled";
2642  fee_ = Resource::feeInvalidRequest;
2643  return;
2644  }
2645 
2646  std::weak_ptr<PeerImp> weak = shared_from_this();
2647  app_.getJobQueue().addJob(
2648  jtREQUESTED_TXN, "doTransactions", [weak, m](Job&) {
2649  if (auto peer = weak.lock())
2650  peer->doTransactions(m);
2651  });
2652  return;
2653  }
2654 
2655  fee_ = Resource::feeMediumBurdenPeer;
2656 
2657  protocol::TMGetObjectByHash reply;
2658 
2659  reply.set_query(false);
2660 
2661  if (packet.has_seq())
2662  reply.set_seq(packet.seq());
2663 
2664  reply.set_type(packet.type());
2665 
2666  if (packet.has_ledgerhash())
2667  {
2668  if (!stringIsUint256Sized(packet.ledgerhash()))
2669  {
2670  fee_ = Resource::feeInvalidRequest;
2671  return;
2672  }
2673 
2674  reply.set_ledgerhash(packet.ledgerhash());
2675  }
2676 
2677  // This is a very minimal implementation
2678  for (int i = 0; i < packet.objects_size(); ++i)
2679  {
2680  auto const& obj = packet.objects(i);
2681  if (obj.has_hash() && stringIsUint256Sized(obj.hash()))
2682  {
2683  uint256 const hash{obj.hash()};
2684  // VFALCO TODO Move this someplace more sensible so we don't
2685  // need to inject the NodeStore interfaces.
2686  std::uint32_t seq{obj.has_ledgerseq() ? obj.ledgerseq() : 0};
2687  auto nodeObject{app_.getNodeStore().fetchNodeObject(hash, seq)};
2688  if (!nodeObject)
2689  {
2690  if (auto shardStore = app_.getShardStore())
2691  {
2692  if (seq >= shardStore->earliestLedgerSeq())
2693  nodeObject = shardStore->fetchNodeObject(hash, seq);
2694  }
2695  }
2696  if (nodeObject)
2697  {
2698  protocol::TMIndexedObject& newObj = *reply.add_objects();
2699  newObj.set_hash(hash.begin(), hash.size());
2700  newObj.set_data(
2701  &nodeObject->getData().front(),
2702  nodeObject->getData().size());
2703 
2704  if (obj.has_nodeid())
2705  newObj.set_index(obj.nodeid());
2706  if (obj.has_ledgerseq())
2707  newObj.set_ledgerseq(obj.ledgerseq());
2708 
2709  // VFALCO NOTE "seq" in the message is obsolete
2710  }
2711  }
2712  }
2713 
2714  JLOG(p_journal_.trace()) << "GetObj: " << reply.objects_size() << " of "
2715  << packet.objects_size();
2716  send(std::make_shared<Message>(reply, protocol::mtGET_OBJECTS));
2717  }
2718  else
2719  {
2720  // this is a reply
2721  std::uint32_t pLSeq = 0;
2722  bool pLDo = true;
2723  bool progress = false;
2724 
2725  for (int i = 0; i < packet.objects_size(); ++i)
2726  {
2727  const protocol::TMIndexedObject& obj = packet.objects(i);
2728 
2729  if (obj.has_hash() && stringIsUint256Sized(obj.hash()))
2730  {
2731  if (obj.has_ledgerseq())
2732  {
2733  if (obj.ledgerseq() != pLSeq)
2734  {
2735  if (pLDo && (pLSeq != 0))
2736  {
2737  JLOG(p_journal_.debug())
2738  << "GetObj: Full fetch pack for " << pLSeq;
2739  }
2740  pLSeq = obj.ledgerseq();
2741  pLDo = !app_.getLedgerMaster().haveLedger(pLSeq);
2742 
2743  if (!pLDo)
2744  {
2745  JLOG(p_journal_.debug())
2746  << "GetObj: Late fetch pack for " << pLSeq;
2747  }
2748  else
2749  progress = true;
2750  }
2751  }
2752 
2753  if (pLDo)
2754  {
2755  uint256 const hash{obj.hash()};
2756 
2757  app_.getLedgerMaster().addFetchPack(
2758  hash,
2759  std::make_shared<Blob>(
2760  obj.data().begin(), obj.data().end()));
2761  }
2762  }
2763  }
2764 
2765  if (pLDo && (pLSeq != 0))
2766  {
2767  JLOG(p_journal_.debug())
2768  << "GetObj: Partial fetch pack for " << pLSeq;
2769  }
2770  if (packet.type() == protocol::TMGetObjectByHash::otFETCH_PACK)
2771  app_.getLedgerMaster().gotFetchPack(progress, pLSeq);
2772  }
2773 }
2774 
2775 void
2776 PeerImp::onMessage(std::shared_ptr<protocol::TMHaveTransactions> const& m)
2777 {
2778  if (!txReduceRelayEnabled())
2779  {
2780  JLOG(p_journal_.error())
2781  << "TMHaveTransactions: tx reduce-relay is disabled";
2782  fee_ = Resource::feeInvalidRequest;
2783  return;
2784  }
2785 
2786  std::weak_ptr<PeerImp> weak = shared_from_this();
2787  app_.getJobQueue().addJob(
2788  jtMISSING_TXN, "handleHaveTransactions", [weak, m](Job&) {
2789  if (auto peer = weak.lock())
2790  peer->handleHaveTransactions(m);
2791  });
2792 }
2793 
2794 void
2795 PeerImp::handleHaveTransactions(
2796  std::shared_ptr<protocol::TMHaveTransactions> const& m)
2797 {
2798  protocol::TMGetObjectByHash tmBH;
2799  tmBH.set_type(protocol::TMGetObjectByHash_ObjectType_otTRANSACTIONS);
2800  tmBH.set_query(true);
2801 
2802  JLOG(p_journal_.trace())
2803  << "received TMHaveTransactions " << m->hashes_size();
2804 
2805  for (std::uint32_t i = 0; i < m->hashes_size(); i++)
2806  {
2807  if (!stringIsUint256Sized(m->hashes(i)))
2808  {
2809  JLOG(p_journal_.error())
2810  << "TMHaveTransactions with invalid hash size";
2811  fee_ = Resource::feeInvalidRequest;
2812  return;
2813  }
2814 
2815  uint256 hash(m->hashes(i));
2816 
2817  auto txn = app_.getMasterTransaction().fetch_from_cache(hash);
2818 
2819  JLOG(p_journal_.trace()) << "checking transaction " << (bool)txn;
2820 
2821  if (!txn)
2822  {
2823  JLOG(p_journal_.debug()) << "adding transaction to request";
2824 
2825  auto obj = tmBH.add_objects();
2826  obj->set_hash(hash.data(), hash.size());
2827  }
2828  else
2829  {
2830  // Erase only if a peer has seen this tx. If the peer has not
2831  // seen this tx then the tx could not have been queued for this
2832  // peer.
2833  removeTxQueue(hash);
2834  }
2835  }
2836 
2837  JLOG(p_journal_.trace())
2838  << "transaction request object is " << tmBH.objects_size();
2839 
2840  if (tmBH.objects_size() > 0)
2841  send(std::make_shared<Message>(tmBH, protocol::mtGET_OBJECTS));
2842 }
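// As an illustration: if a peer announces five transaction hashes and two of
// them are missing from the local transaction master cache, tmBH ends up with
// two objects and a single mtGET_OBJECTS query is sent back; the three hashes
// already known are simply removed from this peer's transaction queue.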
2843 
2844 void
2845 PeerImp::onMessage(std::shared_ptr<protocol::TMTransactions> const& m)
2846 {
2847  if (!txReduceRelayEnabled())
2848  {
2849  JLOG(p_journal_.error())
2850  << "TMTransactions: tx reduce-relay is disabled";
2851  fee_ = Resource::feeInvalidRequest;
2852  return;
2853  }
2854 
2855  JLOG(p_journal_.trace())
2856  << "received TMTransactions " << m->transactions_size();
2857 
2858  overlay_.addTxMetrics(m->transactions_size());
2859 
2860  for (std::uint32_t i = 0; i < m->transactions_size(); ++i)
2861  handleTransaction(
2862  std::shared_ptr<protocol::TMTransaction>(
2863  m->mutable_transactions(i), [](protocol::TMTransaction*) {}),
2864  false);
2865 }
2866 
2867 void
2868 PeerImp::onMessage(std::shared_ptr<protocol::TMSquelch> const& m)
2869 {
2870  using on_message_fn =
2871  void (PeerImp::*)(std::shared_ptr<protocol::TMSquelch> const&);
2872  if (!strand_.running_in_this_thread())
2873  return post(
2874  strand_,
2875  std::bind(
2876  (on_message_fn)&PeerImp::onMessage, shared_from_this(), m));
2877 
2878  if (!m->has_validatorpubkey())
2879  {
2880  charge(Resource::feeBadData);
2881  return;
2882  }
2883  auto validator = m->validatorpubkey();
2884  auto const slice{makeSlice(validator)};
2885  if (!publicKeyType(slice))
2886  {
2887  charge(Resource::feeBadData);
2888  return;
2889  }
2890  PublicKey key(slice);
2891 
2892  // Ignore non-validator squelch
2893  if (!app_.validators().listed(key))
2894  {
2895  charge(Resource::feeBadData);
2896  JLOG(p_journal_.debug())
2897  << "onMessage: TMSquelch discarding non-validator squelch "
2898  << slice;
2899  return;
2900  }
2901 
2902  // Ignore the squelch for validator's own messages.
2903  if (key == app_.getValidationPublicKey())
2904  {
2905  JLOG(p_journal_.debug())
2906  << "onMessage: TMSquelch discarding validator's squelch " << slice;
2907  return;
2908  }
2909 
2910  std::uint32_t duration =
2911  m->has_squelchduration() ? m->squelchduration() : 0;
2912  if (!m->squelch())
2913  squelch_.removeSquelch(key);
2914  else if (!squelch_.addSquelch(key, std::chrono::seconds{duration}))
2915  charge(Resource::feeBadData);
2916 
2917  JLOG(p_journal_.debug())
2918  << "onMessage: TMSquelch " << slice << " " << id() << " " << duration;
2919 }
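// In short: a squelch request asks this peer to stop relaying messages that
// originate from the named validator key. squelch() == true adds a
// suppression lasting squelchduration() seconds (a rejected addSquelch is
// charged as bad data), while squelch() == false removes any existing
// suppression for that key. Squelches naming validators we do not have
// listed are charged and ignored, and squelches for our own validation key
// are ignored.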
2920 
2921 //--------------------------------------------------------------------------
2922 
2923 void
2924 PeerImp::addLedger(
2925  uint256 const& hash,
2926  std::lock_guard<std::mutex> const& lockedRecentLock)
2927 {
2928  // lockedRecentLock is passed as a reminder that recentLock_ must be
2929  // locked by the caller.
2930  (void)lockedRecentLock;
2931 
2932  if (std::find(recentLedgers_.begin(), recentLedgers_.end(), hash) !=
2933  recentLedgers_.end())
2934  return;
2935 
2936  recentLedgers_.push_back(hash);
2937 }
2938 
2939 void
2940 PeerImp::doFetchPack(const std::shared_ptr<protocol::TMGetObjectByHash>& packet)
2941 {
2942  // VFALCO TODO Invert this dependency using an observer and shared state
2943  // object. Don't queue fetch pack jobs if we're under load or we already
2944  // have some queued.
2945  if (app_.getFeeTrack().isLoadedLocal() ||
2946  (app_.getLedgerMaster().getValidatedLedgerAge() > 40s) ||
2947  (app_.getJobQueue().getJobCount(jtPACK) > 10))
2948  {
2949  JLOG(p_journal_.info()) << "Too busy to make fetch pack";
2950  return;
2951  }
2952 
2953  if (!stringIsUint256Sized(packet->ledgerhash()))
2954  {
2955  JLOG(p_journal_.warn()) << "FetchPack hash size malformed";
2956  fee_ = Resource::feeInvalidRequest;
2957  return;
2958  }
2959 
2960  fee_ = Resource::feeHighBurdenPeer;
2961 
2962  uint256 const hash{packet->ledgerhash()};
2963 
2964  std::weak_ptr<PeerImp> weak = shared_from_this();
2965  auto elapsed = UptimeClock::now();
2966  auto const pap = &app_;
2967  app_.getJobQueue().addJob(
2968  jtPACK, "MakeFetchPack", [pap, weak, packet, hash, elapsed](Job&) {
2969  pap->getLedgerMaster().makeFetchPack(weak, packet, hash, elapsed);
2970  });
2971 }
2972 
2973 void
2974 PeerImp::doTransactions(
2975  std::shared_ptr<protocol::TMGetObjectByHash> const& packet)
2976 {
2977  protocol::TMTransactions reply;
2978 
2979  JLOG(p_journal_.trace()) << "received TMGetObjectByHash requesting tx "
2980  << packet->objects_size();
2981 
2982  if (packet->objects_size() > reduce_relay::MAX_TX_QUEUE_SIZE)
2983  {
2984  JLOG(p_journal_.error()) << "doTransactions, invalid number of hashes";
2985  fee_ = Resource::feeInvalidRequest;
2986  return;
2987  }
2988 
2989  for (std::uint32_t i = 0; i < packet->objects_size(); ++i)
2990  {
2991  auto const& obj = packet->objects(i);
2992 
2993  if (!stringIsUint256Sized(obj.hash()))
2994  {
2995  fee_ = Resource::feeInvalidRequest;
2996  return;
2997  }
2998 
2999  uint256 hash(obj.hash());
3000 
3001  auto txn = app_.getMasterTransaction().fetch_from_cache(hash);
3002 
3003  if (!txn)
3004  {
3005  JLOG(p_journal_.error()) << "doTransactions, transaction not found "
3006  << Slice(hash.data(), hash.size());
3007  fee_ = Resource::feeInvalidRequest;
3008  return;
3009  }
3010 
3011  Serializer s;
3012  auto tx = reply.add_transactions();
3013  auto sttx = txn->getSTransaction();
3014  sttx->add(s);
3015  tx->set_rawtransaction(s.data(), s.size());
3016  tx->set_status(
3017  txn->getStatus() == INCLUDED ? protocol::tsCURRENT
3018  : protocol::tsNEW);
3019  tx->set_receivetimestamp(
3020  app_.timeKeeper().now().time_since_epoch().count());
3021  tx->set_deferred(txn->getSubmitResult().queued);
3022  }
3023 
3024  if (reply.transactions_size() > 0)
3025  send(std::make_shared<Message>(reply, protocol::mtTRANSACTIONS));
3026 }
3027 
3028 void
3029 PeerImp::checkTransaction(
3030  int flags,
3031  bool checkSignature,
3032  std::shared_ptr<STTx const> const& stx)
3033 {
3034  // VFALCO TODO Rewrite to not use exceptions
3035  try
3036  {
3037  // Expired?
3038  if (stx->isFieldPresent(sfLastLedgerSequence) &&
3039  (stx->getFieldU32(sfLastLedgerSequence) <
3040  app_.getLedgerMaster().getValidLedgerIndex()))
3041  {
3042  app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
3043  charge(Resource::feeUnwantedData);
3044  return;
3045  }
3046 
3047  if (checkSignature)
3048  {
3049  // Check the signature before handing off to the job queue.
3050  if (auto [valid, validReason] = checkValidity(
3051  app_.getHashRouter(),
3052  *stx,
3053  app_.getLedgerMaster().getValidatedRules(),
3054  app_.config());
3055  valid != Validity::Valid)
3056  {
3057  if (!validReason.empty())
3058  {
3059  JLOG(p_journal_.trace())
3060  << "Exception checking transaction: " << validReason;
3061  }
3062 
3063  // Probably not necessary to set SF_BAD, but doesn't hurt.
3064  app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
3065  charge(Resource::feeInvalidSignature);
3066  return;
3067  }
3068  }
3069  else
3070  {
3071  forceValidity(
3072  app_.getHashRouter(), stx->getTransactionID(), Validity::Valid);
3073  }
3074 
3075  std::string reason;
3076  auto tx = std::make_shared<Transaction>(stx, reason, app_);
3077 
3078  if (tx->getStatus() == INVALID)
3079  {
3080  if (!reason.empty())
3081  {
3082  JLOG(p_journal_.trace())
3083  << "Exception checking transaction: " << reason;
3084  }
3085  app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
3086  charge(Resource::feeInvalidSignature);
3087  return;
3088  }
3089 
3090  bool const trusted(flags & SF_TRUSTED);
3091  app_.getOPs().processTransaction(
3092  tx, trusted, false, NetworkOPs::FailHard::no);
3093  }
3094  catch (std::exception const&)
3095  {
3096  app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
3097  charge(Resource::feeBadData);
3098  }
3099 }
3100 
3101 // Called from our JobQueue
3102 void
3103 PeerImp::checkPropose(
3104  Job& job,
3105  std::shared_ptr<protocol::TMProposeSet> const& packet,
3106  RCLCxPeerPos peerPos)
3107 {
3108  bool isTrusted = (job.getType() == jtPROPOSAL_t);
3109 
3110  JLOG(p_journal_.trace())
3111  << "Checking " << (isTrusted ? "trusted" : "UNTRUSTED") << " proposal";
3112 
3113  assert(packet);
3114 
3115  if (!cluster() && !peerPos.checkSign())
3116  {
3117  JLOG(p_journal_.warn()) << "Proposal fails sig check";
3118  charge(Resource::feeInvalidSignature);
3119  return;
3120  }
3121 
3122  bool relay;
3123 
3124  if (isTrusted)
3125  relay = app_.getOPs().processTrustedProposal(peerPos);
3126  else
3127  relay = app_.config().RELAY_UNTRUSTED_PROPOSALS == 1 || cluster();
3128 
3129  if (relay)
3130  {
3131  // haveMessage contains the peers that are suppressed; i.e. the peers
3132  // that are the source of the message and to which, consequently, the
3133  // message should not be relayed. But the message must still be counted
3134  // as part of the squelch logic.
3135  auto haveMessage = app_.overlay().relay(
3136  *packet, peerPos.suppressionID(), peerPos.publicKey());
3137  if (reduceRelayReady() && !haveMessage.empty())
3138  overlay_.updateSlotAndSquelch(
3139  peerPos.suppressionID(),
3140  peerPos.publicKey(),
3141  std::move(haveMessage),
3142  protocol::mtPROPOSE_LEDGER);
3143  }
3144 }
3145 
3146 void
3147 PeerImp::checkValidation(
3148  std::shared_ptr<STValidation> const& val,
3149  std::shared_ptr<protocol::TMValidation> const& packet)
3150 {
3151  if (!cluster() && !val->isValid())
3152  {
3153  JLOG(p_journal_.debug()) << "Validation forwarded by peer is invalid";
3154  charge(Resource::feeInvalidRequest);
3155  return;
3156  }
3157 
3158  // FIXME it should be safe to remove this try/catch. Investigate codepaths.
3159  try
3160  {
3161  if (app_.getOPs().recvValidation(val, std::to_string(id())) ||
3162  cluster())
3163  {
3164  auto const suppression =
3165  sha512Half(makeSlice(val->getSerialized()));
3166  // haveMessage contains the peers that are suppressed; i.e. the peers
3167  // that are the source of the message and to which, consequently, the
3168  // message should not be relayed. But the message must still be counted
3169  // as part of the squelch logic.
3170  auto haveMessage =
3171  overlay_.relay(*packet, suppression, val->getSignerPublic());
3172  if (reduceRelayReady() && !haveMessage.empty())
3173  {
3174  overlay_.updateSlotAndSquelch(
3175  suppression,
3176  val->getSignerPublic(),
3177  std::move(haveMessage),
3178  protocol::mtVALIDATION);
3179  }
3180  }
3181  }
3182  catch (std::exception const&)
3183  {
3184  JLOG(p_journal_.trace()) << "Exception processing validation";
3185  charge(Resource::feeInvalidRequest);
3186  }
3187 }
3188 
3189 // Returns the peer best able to help us get
3190 // the TX tree with the specified root hash.
3191 //
3192 static std::shared_ptr<PeerImp>
3193 getPeerWithTree(OverlayImpl& ov, uint256 const& rootHash, PeerImp const* skip)
3194 {
3195  std::shared_ptr<PeerImp> ret;
3196  int retScore = 0;
3197 
3198  ov.for_each([&](std::shared_ptr<PeerImp>&& p) {
3199  if (p->hasTxSet(rootHash) && p.get() != skip)
3200  {
3201  auto score = p->getScore(true);
3202  if (!ret || (score > retScore))
3203  {
3204  ret = std::move(p);
3205  retScore = score;
3206  }
3207  }
3208  });
3209 
3210  return ret;
3211 }
3212 
3213 // Returns the highest-scoring peer likely to have the ledger,
3214 // favoring responsiveness with a random tie-breaker.
3215 //
3216 static std::shared_ptr<PeerImp>
3217 getPeerWithLedger(
3218  OverlayImpl& ov,
3219  uint256 const& ledgerHash,
3220  LedgerIndex ledger,
3221  PeerImp const* skip)
3222 {
3223  std::shared_ptr<PeerImp> ret;
3224  int retScore = 0;
3225 
3226  ov.for_each([&](std::shared_ptr<PeerImp>&& p) {
3227  if (p->hasLedger(ledgerHash, ledger) && p.get() != skip)
3228  {
3229  auto score = p->getScore(true);
3230  if (!ret || (score > retScore))
3231  {
3232  ret = std::move(p);
3233  retScore = score;
3234  }
3235  }
3236  });
3237 
3238  return ret;
3239 }
3240 
3241 void
3242 PeerImp::sendLedgerBase(
3243  std::shared_ptr<Ledger const> const& ledger,
3244  protocol::TMLedgerData& ledgerData)
3245 {
3246  JLOG(p_journal_.trace()) << "sendLedgerBase: Base data";
3247 
3248  Serializer s(sizeof(LedgerInfo));
3249  addRaw(ledger->info(), s);
3250  ledgerData.add_nodes()->set_nodedata(s.getDataPtr(), s.getLength());
3251 
3252  auto const& stateMap{ledger->stateMap()};
3253  if (stateMap.getHash() != beast::zero)
3254  {
3255  // Return account state root node if possible
3256  Serializer root(768);
3257 
3258  stateMap.serializeRoot(root);
3259  ledgerData.add_nodes()->set_nodedata(
3260  root.getDataPtr(), root.getLength());
3261 
3262  if (ledger->info().txHash != beast::zero)
3263  {
3264  auto const& txMap{ledger->txMap()};
3265  if (txMap.getHash() != beast::zero)
3266  {
3267  // Return TX root node if possible
3268  root.erase();
3269  txMap.serializeRoot(root);
3270  ledgerData.add_nodes()->set_nodedata(
3271  root.getDataPtr(), root.getLength());
3272  }
3273  }
3274  }
3275 
3276  auto message{
3277  std::make_shared<Message>(ledgerData, protocol::mtLEDGER_DATA)};
3278  send(message);
3279 }
3280 
3281 std::shared_ptr<Ledger const>
3282 PeerImp::getLedger(std::shared_ptr<protocol::TMGetLedger> const& m)
3283 {
3284  JLOG(p_journal_.trace()) << "getLedger: Ledger";
3285 
3286  std::shared_ptr<Ledger const> ledger;
3287 
3288  if (m->has_ledgerhash())
3289  {
3290  // Attempt to find ledger by hash
3291  uint256 const ledgerHash{m->ledgerhash()};
3292  ledger = app_.getLedgerMaster().getLedgerByHash(ledgerHash);
3293  if (!ledger)
3294  {
3295  if (m->has_ledgerseq())
3296  {
3297  // Attempt to find ledger by sequence in the shard store
3298  if (auto shards = app_.getShardStore())
3299  {
3300  if (m->ledgerseq() >= shards->earliestLedgerSeq())
3301  {
3302  ledger =
3303  shards->fetchLedger(ledgerHash, m->ledgerseq());
3304  }
3305  }
3306  }
3307 
3308  if (!ledger)
3309  {
3310  JLOG(p_journal_.trace())
3311  << "getLedger: Don't have ledger with hash " << ledgerHash;
3312 
3313  if (m->has_querytype() && !m->has_requestcookie())
3314  {
3315  // Attempt to relay the request to a peer
3316  if (auto const peer = getPeerWithLedger(
3317  overlay_,
3318  ledgerHash,
3319  m->has_ledgerseq() ? m->ledgerseq() : 0,
3320  this))
3321  {
3322  m->set_requestcookie(id());
3323  peer->send(std::make_shared<Message>(
3324  *m, protocol::mtGET_LEDGER));
3325  JLOG(p_journal_.debug())
3326  << "getLedger: Request relayed to peer";
3327  return ledger;
3328  }
3329 
3330  JLOG(p_journal_.trace())
3331  << "getLedger: Failed to find peer to relay request";
3332  }
3333  }
3334  }
3335  }
3336  else if (m->has_ledgerseq())
3337  {
3338  // Attempt to find ledger by sequence
3339  if (m->ledgerseq() < app_.getLedgerMaster().getEarliestFetch())
3340  {
3341  JLOG(p_journal_.debug())
3342  << "getLedger: Early ledger sequence request";
3343  }
3344  else
3345  {
3346  ledger = app_.getLedgerMaster().getLedgerBySeq(m->ledgerseq());
3347  if (!ledger)
3348  {
3349  JLOG(p_journal_.debug())
3350  << "getLedger: Don't have ledger with sequence "
3351  << m->ledgerseq();
3352  }
3353  }
3354  }
3355  else if (m->has_ltype() && m->ltype() == protocol::ltCLOSED)
3356  {
3357  ledger = app_.getLedgerMaster().getClosedLedger();
3358  }
3359 
3360  if (ledger)
3361  {
3362  // Validate retrieved ledger sequence
3363  auto const ledgerSeq{ledger->info().seq};
3364  if (m->has_ledgerseq())
3365  {
3366  if (ledgerSeq != m->ledgerseq())
3367  {
3368  // Do not resource charge a peer responding to a relay
3369  if (!m->has_requestcookie())
3370  charge(Resource::feeInvalidRequest);
3371 
3372  ledger.reset();
3373  JLOG(p_journal_.warn())
3374  << "getLedger: Invalid ledger sequence " << ledgerSeq;
3375  }
3376  }
3377  else if (ledgerSeq < app_.getLedgerMaster().getEarliestFetch())
3378  {
3379  ledger.reset();
3380  JLOG(p_journal_.debug())
3381  << "getLedger: Early ledger sequence request " << ledgerSeq;
3382  }
3383  }
3384  else
3385  {
3386  JLOG(p_journal_.warn()) << "getLedger: Unable to find ledger";
3387  }
3388 
3389  return ledger;
3390 }
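// Lookup order used above: by hash in the ledger master, then (if a sequence
// was supplied) in the shard store, then by relaying the request to another
// peer that has the ledger; failing a hash, by sequence alone; failing that,
// the closed ledger when ltCLOSED is requested. A ledger whose sequence does
// not match the request, or that predates the earliest fetchable sequence,
// is discarded before being returned.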
3391 
3392 std::shared_ptr<SHAMap>
3393 PeerImp::getTxSet(std::shared_ptr<protocol::TMGetLedger> const& m) const
3394 {
3395  JLOG(p_journal_.trace()) << "getTxSet: TX set";
3396 
3397  uint256 const txSetHash{m->ledgerhash()};
3398  std::shared_ptr<SHAMap> shaMap{
3399  app_.getInboundTransactions().getSet(txSetHash, false)};
3400  if (!shaMap)
3401  {
3402  if (m->has_querytype() && !m->has_requestcookie())
3403  {
3404  // Attempt to relay the request to a peer
3405  if (auto const peer = getPeerWithTree(overlay_, txSetHash, this))
3406  {
3407  m->set_requestcookie(id());
3408  peer->send(
3409  std::make_shared<Message>(*m, protocol::mtGET_LEDGER));
3410  JLOG(p_journal_.debug()) << "getTxSet: Request relayed";
3411  }
3412  else
3413  {
3414  JLOG(p_journal_.debug())
3415  << "getTxSet: Failed to find relay peer";
3416  }
3417  }
3418  else
3419  {
3420  JLOG(p_journal_.debug()) << "getTxSet: Failed to find TX set";
3421  }
3422  }
3423 
3424  return shaMap;
3425 }
3426 
3427 void
3428 PeerImp::processLedgerRequest(std::shared_ptr<protocol::TMGetLedger> const& m)
3429 {
3430  // Do not resource charge a peer responding to a relay
3431  if (!m->has_requestcookie())
3432  charge(Resource::feeMediumBurdenPeer);
3433 
3434  std::shared_ptr<Ledger const> ledger;
3435  std::shared_ptr<SHAMap> sharedMap;
3436  SHAMap const* map{nullptr};
3437  protocol::TMLedgerData ledgerData;
3438  bool fatLeaves{true};
3439  auto const itype{m->itype()};
3440 
3441  if (itype == protocol::liTS_CANDIDATE)
3442  {
3443  if (sharedMap = getTxSet(m); !sharedMap)
3444  return;
3445  map = sharedMap.get();
3446 
3447  // Fill out the reply
3448  ledgerData.set_ledgerseq(0);
3449  ledgerData.set_ledgerhash(m->ledgerhash());
3450  ledgerData.set_type(protocol::liTS_CANDIDATE);
3451  if (m->has_requestcookie())
3452  ledgerData.set_requestcookie(m->requestcookie());
3453 
3454  // We'll already have most transactions
3455  fatLeaves = false;
3456  }
3457  else
3458  {
3459  if (send_queue_.size() >= Tuning::dropSendQueue)
3460  {
3461  JLOG(p_journal_.debug())
3462  << "processLedgerRequest: Large send queue";
3463  return;
3464  }
3465  if (app_.getFeeTrack().isLoadedLocal() && !cluster())
3466  {
3467  JLOG(p_journal_.debug()) << "processLedgerRequest: Too busy";
3468  return;
3469  }
3470 
3471  if (ledger = getLedger(m); !ledger)
3472  return;
3473 
3474  // Fill out the reply
3475  auto const ledgerHash{ledger->info().hash};
3476  ledgerData.set_ledgerhash(ledgerHash.begin(), ledgerHash.size());
3477  ledgerData.set_ledgerseq(ledger->info().seq);
3478  ledgerData.set_type(itype);
3479  if (m->has_requestcookie())
3480  ledgerData.set_requestcookie(m->requestcookie());
3481 
3482  switch (itype)
3483  {
3484  case protocol::liBASE:
3485  sendLedgerBase(ledger, ledgerData);
3486  return;
3487 
3488  case protocol::liTX_NODE:
3489  map = &ledger->txMap();
3490  JLOG(p_journal_.trace()) << "processLedgerRequest: TX map hash "
3491  << to_string(map->getHash());
3492  break;
3493 
3494  case protocol::liAS_NODE:
3495  map = &ledger->stateMap();
3496  JLOG(p_journal_.trace())
3497  << "processLedgerRequest: Account state map hash "
3498  << to_string(map->getHash());
3499  break;
3500 
3501  default:
3502  // This case should not be possible here
3503  JLOG(p_journal_.error())
3504  << "processLedgerRequest: Invalid ledger info type";
3505  return;
3506  }
3507  }
3508 
3509  if (!map)
3510  {
3511  JLOG(p_journal_.warn()) << "processLedgerRequest: Unable to find map";
3512  return;
3513  }
3514 
3515  // Add requested node data to reply
3516  if (m->nodeids_size() > 0)
3517  {
3518  auto const queryDepth{
3519  m->has_querydepth() ? m->querydepth() : (isHighLatency() ? 2 : 1)};
3520  std::vector<SHAMapNodeID> nodeIds;
3521  std::vector<Blob> rawNodes;
3522 
3523  for (int i = 0; i < m->nodeids_size() &&
3524  ledgerData.nodes_size() < Tuning::softMaxReplyNodes;
3525  ++i)
3526  {
3527  auto const shaMapNodeId{deserializeSHAMapNodeID(m->nodeids(i))};
3528 
3529  nodeIds.clear();
3530  rawNodes.clear();
3531  try
3532  {
3533  if (map->getNodeFat(
3534  *shaMapNodeId,
3535  nodeIds,
3536  rawNodes,
3537  fatLeaves,
3538  queryDepth))
3539  {
3540  assert(nodeIds.size() == rawNodes.size());
3541  JLOG(p_journal_.trace())
3542  << "processLedgerRequest: getNodeFat got "
3543  << rawNodes.size() << " nodes";
3544 
3545  auto rawNodeIter{rawNodes.begin()};
3546  for (auto const& nodeId : nodeIds)
3547  {
3548  protocol::TMLedgerNode* node{ledgerData.add_nodes()};
3549  node->set_nodeid(nodeId.getRawString());
3550  node->set_nodedata(
3551  &rawNodeIter->front(), rawNodeIter->size());
3552  ++rawNodeIter;
3553  }
3554  }
3555  else
3556  {
3557  JLOG(p_journal_.warn())
3558  << "processLedgerRequest: getNodeFat returns false";
3559  }
3560  }
3561  catch (std::exception& e)
3562  {
3563  std::string info;
3564  switch (itype)
3565  {
3566  case protocol::liBASE:
3567  // This case should not be possible here
3568  info = "Ledger base";
3569  break;
3570 
3571  case protocol::liTX_NODE:
3572  info = "TX node";
3573  break;
3574 
3575  case protocol::liAS_NODE:
3576  info = "AS node";
3577  break;
3578 
3579  case protocol::liTS_CANDIDATE:
3580  info = "TS candidate";
3581  break;
3582 
3583  default:
3584  info = "Invalid";
3585  break;
3586  }
3587 
3588  if (!m->has_ledgerhash())
3589  info += ", no hash specified";
3590 
3591  JLOG(p_journal_.error())
3592  << "processLedgerRequest: getNodeFat with nodeId "
3593  << *shaMapNodeId << " and ledger info type " << info
3594  << " throws exception: " << e.what();
3595  }
3596  }
3597 
3598  JLOG(p_journal_.info())
3599  << "processLedgerRequest: Got request for " << m->nodeids_size()
3600  << " nodes at depth " << queryDepth << ", return "
3601  << ledgerData.nodes_size() << " nodes";
3602  }
3603 
3604  auto message{
3605  std::make_shared<Message>(ledgerData, protocol::mtLEDGER_DATA)};
3606  send(message);
3607 }
3608 
3609 int
3610 PeerImp::getScore(bool haveItem) const
3611 {
3612  // Random component of score, used to break ties and avoid
3613  // overloading the "best" peer
3614  static const int spRandomMax = 9999;
3615 
3616  // Score for being very likely to have the thing we are
3617  // looking for; should be roughly spRandomMax
3618  static const int spHaveItem = 10000;
3619 
3620  // Score reduction for each millisecond of latency; should
3621  // be roughly spRandomMax divided by the maximum reasonable
3622  // latency
3623  static const int spLatency = 30;
3624 
3625  // Penalty for unknown latency; should be roughly spRandomMax
3626  static const int spNoLatency = 8000;
3627 
3628  int score = rand_int(spRandomMax);
3629 
3630  if (haveItem)
3631  score += spHaveItem;
3632 
3633  std::optional<std::chrono::milliseconds> latency;
3634  {
3635  std::lock_guard sl(recentLock_);
3636  latency = latency_;
3637  }
3638 
3639  if (latency)
3640  score -= latency->count() * spLatency;
3641  else
3642  score -= spNoLatency;
3643 
3644  return score;
3645 }
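// Worked example using the constants above: a peer known to have the item
// with a measured latency of 100ms scores rand(0..9999) + 10000 - 100 * 30,
// i.e. roughly 7000..17000, while a peer with unknown latency and without the
// item scores rand(0..9999) - 8000, i.e. roughly -8000..2000. The random
// component mainly breaks ties between otherwise comparable peers.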
3646 
3647 bool
3648 PeerImp::isHighLatency() const
3649 {
3650  std::lock_guard sl(recentLock_);
3651  return latency_ >= peerHighLatency;
3652 }
3653 
3654 bool
3655 PeerImp::reduceRelayReady()
3656 {
3657  if (!reduceRelayReady_)
3658  reduceRelayReady_ =
3659  reduce_relay::epoch<std::chrono::minutes>(UptimeClock::now()) >
3660  reduce_relay::WAIT_ON_BOOTUP;
3661  return vpReduceRelayEnabled_ && reduceRelayReady_;
3662 }
3663 
3664 void
3665 PeerImp::Metrics::add_message(std::uint64_t bytes)
3666 {
3667  using namespace std::chrono_literals;
3668  std::unique_lock lock{mutex_};
3669 
3670  totalBytes_ += bytes;
3671  accumBytes_ += bytes;
3672  auto const timeElapsed = clock_type::now() - intervalStart_;
3673  auto const timeElapsedInSecs =
3674  std::chrono::duration_cast<std::chrono::seconds>(timeElapsed);
3675 
3676  if (timeElapsedInSecs >= 1s)
3677  {
3678  auto const avgBytes = accumBytes_ / timeElapsedInSecs.count();
3679  rollingAvg_.push_back(avgBytes);
3680 
3681  auto const totalBytes =
3682  std::accumulate(rollingAvg_.begin(), rollingAvg_.end(), 0ull);
3683  rollingAvgBytes_ = totalBytes / rollingAvg_.size();
3684 
3685  intervalStart_ = clock_type::now();
3686  accumBytes_ = 0;
3687  }
3688 }
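// Example: if 5000 bytes arrive over a 2-second interval, an average of 2500
// bytes/second is pushed into rollingAvg_, and rollingAvgBytes_ becomes the
// mean of all samples currently held in rollingAvg_; the accumulator and
// interval start are then reset for the next window.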
3689 
3690 std::uint64_t
3691 PeerImp::Metrics::average_bytes() const
3692 {
3693  std::shared_lock lock{mutex_};
3694  return rollingAvgBytes_;
3695 }
3696 
3697 std::uint64_t
3698 PeerImp::Metrics::total_bytes() const
3699 {
3700  std::shared_lock lock{mutex_};
3701  return totalBytes_;
3702 }
3703 
3704 } // namespace ripple
Check if a feature should be enabled for a peer.
Definition: Handshake.h:199
ripple::PeerImp::reduceRelayReady
bool reduceRelayReady()
Definition: PeerImp.cpp:3655
std::uint32_t
ripple::PeerImp::send_queue_
std::queue< std::shared_ptr< Message > > send_queue_
Definition: PeerImp.h:158
ripple::PeerImp::slot_
const std::shared_ptr< PeerFinder::Slot > slot_
Definition: PeerImp.h:153
ripple::Overlay::foreach
void foreach(Function f) const
Visit every active peer.
Definition: Overlay.h:198
ripple::PeerImp::load_event_
std::unique_ptr< LoadEvent > load_event_
Definition: PeerImp.h:161
ripple::ShardState::finalizing
@ finalizing
std::map
STL class.
ripple::PeerImp::protocol_
ProtocolVersion protocol_
Definition: PeerImp.h:94
ripple::Application::getValidationPublicKey
virtual PublicKey const & getValidationPublicKey() const =0
ripple::Cluster::size
std::size_t size() const
The number of nodes in the cluster list.
Definition: Cluster.cpp:50
std::nth_element
T nth_element(T... args)
memory
ripple::PeerImp::waitable_timer
boost::asio::basic_waitable_timer< std::chrono::steady_clock > waitable_timer
Definition: PeerImp.h:69
ripple::jtPEER
@ jtPEER
Definition: Job.h:79
ripple::NodeStore::ShardInfo
Definition: ShardInfo.h:32
ripple::PeerImp::onShutdown
void onShutdown(error_code ec)
Definition: PeerImp.cpp:756
ripple::proposalUniqueId
uint256 proposalUniqueId(uint256 const &proposeHash, uint256 const &previousLedger, std::uint32_t proposeSeq, NetClock::time_point closeTime, Slice const &publicKey, Slice const &signature)
Calculate a unique identifier for a signed proposal.
Definition: RCLCxPeerPos.cpp:72
ripple::PeerImp::name
std::string name() const
Definition: PeerImp.cpp:838
ripple::Application::validators
virtual ValidatorList & validators()=0
ripple::KeyType::secp256k1
@ secp256k1
ripple::RCLCxPeerPos::publicKey
PublicKey const & publicKey() const
Public key of peer that sent the proposal.
Definition: RCLCxPeerPos.h:81
std::weak_ptr
STL class.
ripple::PeerImp::timer_
waitable_timer timer_
Definition: PeerImp.h:82
std::min
T min(T... args)
ripple::Serializer
Definition: Serializer.h:39
ripple::LedgerMaster::getValidatedLedgerAge
std::chrono::seconds getValidatedLedgerAge()
Definition: LedgerMaster.cpp:270
ripple::Resource::Gossip::Item
Describes a single consumer.
Definition: Gossip.h:34
ripple::OverlayImpl::deletePeer
void deletePeer(Peer::id_t id)
Called when the peer is deleted.
Definition: OverlayImpl.cpp:1517
ripple::jtREQUESTED_TXN
@ jtREQUESTED_TXN
Definition: Job.h:64
ripple::PeerImp::Tracking::diverged
@ diverged
ripple::jtPACK
@ jtPACK
Definition: Job.h:42
ripple::PeerImp::gracefulClose_
bool gracefulClose_
Definition: PeerImp.h:159
std::vector::emplace_back
T emplace_back(T... args)
ripple
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: RCLCensorshipDetector.h:29
ripple::InboundLedgers::gotLedgerData
virtual bool gotLedgerData(LedgerHash const &ledgerHash, std::shared_ptr< Peer >, std::shared_ptr< protocol::TMLedgerData >)=0
ripple::Application::getNodeStore
virtual NodeStore::Database & getNodeStore()=0
ripple::Application::validatorManifests
virtual ManifestCache & validatorManifests()=0
ripple::OverlayImpl::getManifestsMessage
std::shared_ptr< Message > getManifestsMessage()
Definition: OverlayImpl.cpp:1276
ripple::Serializer::size
std::size_t size() const noexcept
Definition: Serializer.h:69
ripple::send_if_not
send_if_not_pred< Predicate > send_if_not(std::shared_ptr< Message > const &m, Predicate const &f)
Helper function to aid in type deduction.
Definition: predicates.h:107
ripple::ShardState::acquire
@ acquire
protocol
Definition: ValidatorList.h:38
ripple::jtVALIDATION_ut
@ jtVALIDATION_ut
Definition: Job.h:54
ripple::INVALID
@ INVALID
Definition: Transaction.h:47
ripple::reduce_relay::MAX_TX_QUEUE_SIZE
static constexpr std::size_t MAX_TX_QUEUE_SIZE
Definition: ReduceRelayCommon.h:55
ripple::ProtocolFeature::ValidatorList2Propagation
@ ValidatorList2Propagation
ripple::OverlayImpl::remove
void remove(std::shared_ptr< PeerFinder::Slot > const &slot)
Definition: OverlayImpl.cpp:462
ripple::PeerImp::squelch_
reduce_relay::Squelch< UptimeClock > squelch_
Definition: PeerImp.h:119
ripple::Config::TX_REDUCE_RELAY_METRICS
bool TX_REDUCE_RELAY_METRICS
Definition: Config.h:250
ripple::PeerImp::lastPingSeq_
std::optional< std::uint32_t > lastPingSeq_
Definition: PeerImp.h:115
ripple::base_uint::zero
void zero()
Definition: base_uint.h:521
std::vector::begin
T begin(T... args)
ripple::NodeStore::Database::seqToShardIndex
std::uint32_t seqToShardIndex(std::uint32_t ledgerSeq) const noexcept
Calculates the shard index for a given ledger sequence.
Definition: Database.h:283
ripple::PeerFinder::Manager::config
virtual Config config()=0
Returns the configuration for the manager.
std
STL namespace.
beast::severities::kWarning
@ kWarning
Definition: Journal.h:37
ripple::NodeStore::Database::earliestShardIndex
std::uint32_t earliestShardIndex() const noexcept
Definition: Database.h:246
std::set::insert
T insert(T... args)
ripple::sha512Half
sha512_half_hasher::result_type sha512Half(Args const &... args)
Returns the SHA512-Half of a series of objects.
Definition: digest.h:216
beast::IP::Endpoint::from_string
static Endpoint from_string(std::string const &s)
Definition: IPEndpoint.cpp:46
ripple::OverlayImpl::activate
void activate(std::shared_ptr< PeerImp > const &peer)
Called when a peer has connected successfully This is called after the peer handshake has been comple...
Definition: OverlayImpl.cpp:594
ripple::OverlayImpl::onPeerDeactivate
void onPeerDeactivate(Peer::id_t id)
Definition: OverlayImpl.cpp:618
ripple::Tuning::readBufferBytes
constexpr std::size_t readBufferBytes
Size of buffer used to read from the socket.
Definition: overlay/impl/Tuning.h:65
ripple::Resource::Gossip::Item::address
beast::IP::Endpoint address
Definition: Gossip.h:39
ripple::LedgerMaster::getCurrentLedgerIndex
LedgerIndex getCurrentLedgerIndex()
Definition: LedgerMaster.cpp:208
ripple::Resource::Consumer
An endpoint that consumes resources.
Definition: Consumer.h:34
ripple::Resource::Charge
A consumption charge.
Definition: Charge.h:30
ripple::Resource::Gossip::Item::balance
int balance
Definition: Gossip.h:38
ripple::TimeKeeper::now
virtual time_point now() const override=0
Returns the estimate of wall time, in network time.
ripple::PeerImp::maxLedger_
LedgerIndex maxLedger_
Definition: PeerImp.h:107
ripple::PeerImp::run
virtual void run()
Definition: PeerImp.cpp:157
ripple::Tuning::targetSendQueue
@ targetSendQueue
How many messages we consider reasonable sustained on a send queue.
Definition: overlay/impl/Tuning.h:52
ripple::LoadFeeTrack::setClusterFee
void setClusterFee(std::uint32_t fee)
Definition: LoadFeeTrack.h:113
ripple::PeerImp::checkTracking
void checkTracking(std::uint32_t validationSeq)
Check if the peer is tracking.
Definition: PeerImp.cpp:2188
ripple::PeerImp::large_sendq_
int large_sendq_
Definition: PeerImp.h:160
ripple::PeerImp::domain
std::string domain() const
Definition: PeerImp.cpp:845
std::string::empty
T empty(T... args)
ripple::Resource::feeLightPeer
const Charge feeLightPeer
ripple::jtREPLAY_REQ
@ jtREPLAY_REQ
Definition: Job.h:57
ripple::jtPROPOSAL_ut
@ jtPROPOSAL_ut
Definition: Job.h:59
ripple::TokenType::NodePublic
@ NodePublic
ripple::PeerImp::last_status_
protocol::TMStatusChange last_status_
Definition: PeerImp.h:150
ripple::RCLCxPeerPos::suppressionID
uint256 const & suppressionID() const
Unique id used by hash router to suppress duplicates.
Definition: RCLCxPeerPos.h:88
ripple::PeerImp::supportsFeature
bool supportsFeature(ProtocolFeature f) const override
Definition: PeerImp.cpp:498
ripple::OverlayImpl::findPeerByPublicKey
std::shared_ptr< Peer > findPeerByPublicKey(PublicKey const &pubKey) override
Returns the peer with the matching public key, or null.
Definition: OverlayImpl.cpp:1209
std::optional
mutex
ripple::PeerImp::getPeerShardInfos
const hash_map< PublicKey, NodeStore::ShardInfo > getPeerShardInfos() const
Definition: PeerImp.cpp:632
ripple::PeerImp::onMessageBegin
void onMessageBegin(std::uint16_t type, std::shared_ptr<::google::protobuf::Message > const &m, std::size_t size, std::size_t uncompressed_size, bool isCompressed)
Definition: PeerImp.cpp:1014
std::stringstream::str
T str(T... args)
beast::Journal::debug
Stream debug() const
Definition: Journal.h:315
std::size_t
ripple::to_string
std::string to_string(Manifest const &m)
Format the specified manifest to a string for debugging purposes.
Definition: app/misc/impl/Manifest.cpp:38
ripple::PeerImp::json
Json::Value json() override
Definition: PeerImp.cpp:380
ripple::Cluster::for_each
void for_each(std::function< void(ClusterNode const &)> func) const
Invokes the callback once for every cluster node.
Definition: Cluster.cpp:84
ripple::PeerImp::compressionEnabled_
Compressed compressionEnabled_
Definition: PeerImp.h:170
ripple::Tuning::sendqIntervals
@ sendqIntervals
How many timer intervals a sendq has to stay large before we disconnect.
Definition: overlay/impl/Tuning.h:46
ripple::ProtocolFeature::ValidatorListPropagation
@ ValidatorListPropagation
beast::IP::Endpoint
A version-independent IP address and port combination.
Definition: IPEndpoint.h:38
ripple::OverlayImpl::incPeerDisconnect
void incPeerDisconnect() override
Increment and retrieve counters for total peer disconnects, and disconnects we initiate for excessive...
Definition: OverlayImpl.h:368
ripple::OverlayImpl::addTxMetrics
void addTxMetrics(Args... args)
Add tx reduce-relay metrics.
Definition: OverlayImpl.h:449
ripple::Serializer::add32
int add32(std::uint32_t i)
Definition: Serializer.cpp:38
ripple::LedgerInfo
Information about the notional ledger backing the view.
Definition: ReadView.h:84
ripple::strHex
std::string strHex(FwdIt begin, FwdIt end)
Definition: strHex.h:45
std::set::end
T end(T... args)
ripple::PeerFinder::Manager::on_failure
virtual void on_failure(std::shared_ptr< Slot > const &slot)=0
Called when an outbound connection is deemed to have failed.
ripple::Job::getType
JobType getType() const
Definition: Job.cpp:52
ripple::PeerImp::makePrefix
static std::string makePrefix(id_t id)
Definition: PeerImp.cpp:687
ripple::PeerImp::usage_
Resource::Consumer usage_
Definition: PeerImp.h:151
std::setw
T setw(T... args)
ripple::NodeStore::Database::earliestLedgerSeq
std::uint32_t earliestLedgerSeq() const noexcept
Definition: Database.h:238
numeric
ripple::OverlayImpl
Definition: OverlayImpl.h:58
std::max
T max(T... args)
ripple::base_uint::parseHex
constexpr bool parseHex(std::string_view sv)
Parse a hex string into a base_uint.
Definition: base_uint.h:475
beast::IP::Endpoint::at_port
Endpoint at_port(Port port) const
Returns a new Endpoint with a different port.
Definition: IPEndpoint.h:69
ripple::ValidatorList::trusted
bool trusted(PublicKey const &identity) const
Returns true if public key is trusted.
Definition: ValidatorList.cpp:1367
ripple::OverlayImpl::findPeerByShortID
std::shared_ptr< Peer > findPeerByShortID(Peer::id_t const &id) const override
Returns the peer with the matching short id, or null.
Definition: OverlayImpl.cpp:1197
ripple::Serializer::getLength
int getLength() const
Definition: Serializer.h:199
ripple::OverlayImpl::reportTraffic
void reportTraffic(TrafficCount::category cat, bool isInbound, int bytes)
Definition: OverlayImpl.cpp:678
ripple::sfLastLedgerSequence
const SF_UINT32 sfLastLedgerSequence
ripple::JobQueue::makeLoadEvent
std::unique_ptr< LoadEvent > makeLoadEvent(JobType t, std::string const &name)
Return a scoped LoadEvent.
Definition: JobQueue.cpp:148
ripple::PeerImp::shardInfoMutex_
std::mutex shardInfoMutex_
Definition: PeerImp.h:168
ripple::Resource::Consumer::charge
Disposition charge(Charge const &fee)
Apply a load charge to the consumer.
Definition: Consumer.cpp:99
ripple::PeerImp::overlay_
OverlayImpl & overlay_
Definition: PeerImp.h:90
ripple::makeResponse
http_response_type makeResponse(bool crawlPublic, http_request_type const &req, beast::IP::Address public_ip, beast::IP::Address remote_ip, uint256 const &sharedValue, std::optional< std::uint32_t > networkID, ProtocolVersion protocol, Application &app)
Make http response.
Definition: Handshake.cpp:396
ripple::http_request_type
boost::beast::http::request< boost::beast::http::dynamic_body > http_request_type
Definition: Handshake.h:47
std::unique_ptr< stream_type >
ripple::Tuning::sendQueueLogFreq
@ sendQueueLogFreq
How often to log send queue size.
Definition: overlay/impl/Tuning.h:55
ripple::PeerImp::tracking_
std::atomic< Tracking > tracking_
Definition: PeerImp.h:96
ripple::PeerImp::nameMutex_
boost::shared_mutex nameMutex_
Definition: PeerImp.h:102
ripple::PeerImp::cancelTimer
void cancelTimer()
Definition: PeerImp.cpp:678
ripple::invokeProtocolMessage
std::pair< std::size_t, boost::system::error_code > invokeProtocolMessage(Buffers const &buffers, Handler &handler, std::size_t &hint)
Calls the handler for up to one protocol message in the passed buffers.
Definition: ProtocolMessage.h:343
std::unordered_map
STL class.
ripple::PeerImp::fee_
Resource::Charge fee_
Definition: PeerImp.h:152
ripple::stringIsUint256Sized
static bool stringIsUint256Sized(std::string const &pBuffStr)
Definition: PeerImp.cpp:151
beast::IP::Endpoint::from_string_checked
static std::optional< Endpoint > from_string_checked(std::string const &s)
Create an Endpoint from a string.
Definition: IPEndpoint.cpp:35
ripple::ValidatorList::for_each_available
void for_each_available(std::function< void(std::string const &manifest, std::uint32_t version, std::map< std::size_t, ValidatorBlobInfo > const &blobInfos, PublicKey const &pubKey, std::size_t maxSequence, uint256 const &hash)> func) const
Invokes the callback once for every available publisher list's raw data members.
Definition: ValidatorList.cpp:1651
std::set
STL class.
ripple::PeerImp::stop
void stop() override
Definition: PeerImp.cpp:215
ripple::Tuning::maxQueryDepth
@ maxQueryDepth
The maximum number of levels to search.
Definition: overlay/impl/Tuning.h:61
ripple::Application::getHashRouter
virtual HashRouter & getHashRouter()=0
ripple::PeerImp::removeTxQueue
void removeTxQueue(uint256 const &hash) override
Remove transaction's hash from the transactions' hashes queue.
Definition: PeerImp.cpp:331
ripple::PeerImp::Tracking::converged
@ converged
ripple::PeerImp::id_
const id_t id_
Definition: PeerImp.h:73
ripple::OverlayImpl::for_each
void for_each(UnaryFunc &&f) const
Definition: OverlayImpl.h:283
std::ref
T ref(T... args)
ripple::RCLCxPeerPos::checkSign
bool checkSign() const
Verify the signing hash of the proposal.
Definition: RCLCxPeerPos.cpp:55
std::exception::what
T what(T... args)
ripple::LedgerReplayMsgHandler::processReplayDeltaResponse
bool processReplayDeltaResponse(std::shared_ptr< protocol::TMReplayDeltaResponse > const &msg)
Process TMReplayDeltaResponse.
Definition: LedgerReplayMsgHandler.cpp:219
std::shared_lock
STL class.
ripple::PeerImp::fail
void fail(std::string const &reason)
Definition: PeerImp.cpp:600
ripple::PeerImp::cluster
bool cluster() const override
Returns true if this connection is a member of the cluster.
Definition: PeerImp.cpp:366
ripple::ShardState::queued
@ queued
ripple::HashPrefix::shardInfo
@ shardInfo
shard info for signing
Json::Value
Represents a JSON value.
Definition: json_value.h:145
ripple::PeerImp::p_journal_
const beast::Journal p_journal_
Definition: PeerImp.h:77
ripple::Config::MAX_UNKNOWN_TIME
std::chrono::seconds MAX_UNKNOWN_TIME
Definition: Config.h:266
ripple::Peer
Represents a peer connection in the overlay.
Definition: ripple/overlay/Peer.h:45
ripple::Config::MAX_DIVERGED_TIME
std::chrono::seconds MAX_DIVERGED_TIME
Definition: Config.h:269
ripple::jtLEDGER_REQ
@ jtLEDGER_REQ
Definition: Job.h:58
ripple::PeerImp::onReadMessage
void onReadMessage(error_code ec, std::size_t bytes_transferred)
Definition: PeerImp.cpp:898
ripple::ConsensusProposal< NodeID, uint256, uint256 >
std::chrono::steady_clock::now
T now(T... args)