rippled
PeerImp.cpp
1 //------------------------------------------------------------------------------
2 /*
3  This file is part of rippled: https://github.com/ripple/rippled
4  Copyright (c) 2012, 2013 Ripple Labs Inc.
5 
6  Permission to use, copy, modify, and/or distribute this software for any
7  purpose with or without fee is hereby granted, provided that the above
8  copyright notice and this permission notice appear in all copies.
9 
10  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 //==============================================================================
19 
20 #include <ripple/app/consensus/RCLValidations.h>
21 #include <ripple/app/ledger/InboundLedgers.h>
22 #include <ripple/app/ledger/InboundTransactions.h>
23 #include <ripple/app/ledger/LedgerMaster.h>
24 #include <ripple/app/misc/HashRouter.h>
25 #include <ripple/app/misc/LoadFeeTrack.h>
26 #include <ripple/app/misc/NetworkOPs.h>
27 #include <ripple/app/misc/Transaction.h>
28 #include <ripple/app/misc/ValidatorList.h>
29 #include <ripple/app/tx/apply.h>
30 #include <ripple/basics/UptimeClock.h>
31 #include <ripple/basics/base64.h>
32 #include <ripple/basics/random.h>
33 #include <ripple/basics/safe_cast.h>
34 #include <ripple/beast/core/LexicalCast.h>
35 #include <ripple/beast/core/SemanticVersion.h>
36 #include <ripple/nodestore/DatabaseShard.h>
37 #include <ripple/overlay/Cluster.h>
38 #include <ripple/overlay/impl/PeerImp.h>
39 #include <ripple/overlay/impl/Tuning.h>
40 #include <ripple/overlay/predicates.h>
41 #include <ripple/protocol/digest.h>
42 
43 #include <boost/algorithm/clamp.hpp>
44 #include <boost/algorithm/string.hpp>
45 #include <boost/algorithm/string/predicate.hpp>
46 #include <boost/beast/core/ostream.hpp>
47 
48 #include <algorithm>
49 #include <memory>
50 #include <mutex>
51 #include <numeric>
52 #include <sstream>
53 
54 using namespace std::chrono_literals;
55 
56 namespace ripple {
57 
58 namespace {
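59 /** The threshold above which we treat a peer connection as high latency */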
60 std::chrono::milliseconds constexpr peerHighLatency{300};
61 
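62 /** How often we PING the peer to check for latency and to probe the send queue */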
63 std::chrono::seconds constexpr peerTimerInterval{60};
64 } // namespace
65 
66 PeerImp::PeerImp(
67  Application& app,
68  id_t id,
69  std::shared_ptr<PeerFinder::Slot> const& slot,
70  http_request_type&& request,
71  PublicKey const& publicKey,
72  ProtocolVersion protocol,
73  Resource::Consumer consumer,
74  std::unique_ptr<stream_type>&& stream_ptr,
75  OverlayImpl& overlay)
76  : Child(overlay)
77  , app_(app)
78  , id_(id)
79  , sink_(app_.journal("Peer"), makePrefix(id))
80  , p_sink_(app_.journal("Protocol"), makePrefix(id))
81  , journal_(sink_)
82  , p_journal_(p_sink_)
83  , stream_ptr_(std::move(stream_ptr))
84  , socket_(stream_ptr_->next_layer().socket())
85  , stream_(*stream_ptr_)
86  , strand_(socket_.get_executor())
87  , timer_(waitable_timer{socket_.get_executor()})
88  , remote_address_(slot->remote_endpoint())
89  , overlay_(overlay)
90  , inbound_(true)
91  , protocol_(protocol)
92  , tracking_(Tracking::unknown)
93  , trackingTime_(clock_type::now())
94  , publicKey_(publicKey)
95  , lastPingTime_(clock_type::now())
96  , creationTime_(clock_type::now())
97  , squelch_(app_.journal("Squelch"))
98  , usage_(consumer)
100  , slot_(slot)
101  , request_(std::move(request))
102  , headers_(request_)
103  , compressionEnabled_(
104  peerFeatureEnabled(
105  headers_,
106  FEATURE_COMPR,
107  "lz4",
108  app_.config().COMPRESSION)
109  ? Compressed::On
110  : Compressed::Off)
111  , vpReduceRelayEnabled_(peerFeatureEnabled(
112  headers_,
113  FEATURE_VPRR,
114  app_.config().VP_REDUCE_RELAY_ENABLE))
115 {
116  JLOG(journal_.debug()) << " compression enabled "
117  << (compressionEnabled_ == Compressed::On)
118  << " vp reduce-relay enabled "
119  << vpReduceRelayEnabled_ << " on " << remote_address_
120  << " " << id_;
121 }
122 
123 PeerImp::~PeerImp()
124 {
125  const bool inCluster{cluster()};
126 
131 
132  if (inCluster)
133  {
134  JLOG(journal_.warn()) << name() << " left cluster";
135  }
136 }
137 
138 // Helper function to check for valid uint256 values in protobuf buffers
139 static bool
140 stringIsUint256Sized(std::string const& pBuffStr)
141 {
142  return pBuffStr.size() == uint256::size();
143 }
144 
145 void
146 PeerImp::run()
147 {
148  if (!strand_.running_in_this_thread())
149  return post(strand_, std::bind(&PeerImp::run, shared_from_this()));
150 
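    // The handshake may advertise the peer's closed and previous ledger hashes,
    // encoded either as hex or as base64; both forms must decode to a 32-byte uint256.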
151  auto parseLedgerHash =
152  [](std::string const& value) -> boost::optional<uint256> {
153  if (uint256 ret; ret.parseHex(value))
154  return ret;
155 
156  if (auto const s = base64_decode(value); s.size() == uint256::size())
157  return uint256{s};
158 
159  return boost::none;
160  };
161 
162  boost::optional<uint256> closed;
163  boost::optional<uint256> previous;
164 
165  if (auto const iter = headers_.find("Closed-Ledger");
166  iter != headers_.end())
167  {
168  closed = parseLedgerHash(iter->value().to_string());
169 
170  if (!closed)
171  fail("Malformed handshake data (1)");
172  }
173 
174  if (auto const iter = headers_.find("Previous-Ledger");
175  iter != headers_.end())
176  {
177  previous = parseLedgerHash(iter->value().to_string());
178 
179  if (!previous)
180  fail("Malformed handshake data (2)");
181  }
182 
183  if (previous && !closed)
184  fail("Malformed handshake data (3)");
185 
186  {
187  std::lock_guard<std::mutex> sl(recentLock_);
188  if (closed)
189  closedLedgerHash_ = *closed;
190  if (previous)
191  previousLedgerHash_ = *previous;
192  }
193 
194  if (inbound_)
195  doAccept();
196  else
197  doProtocolStart();
198 
199  // Anything else that needs to be done with the connection should be
200  // done in doProtocolStart
201 }
202 
203 void
204 PeerImp::stop()
205 {
206  if (!strand_.running_in_this_thread())
207  return post(strand_, std::bind(&PeerImp::stop, shared_from_this()));
208  if (socket_.is_open())
209  {
210  // The rationale for using different severity levels is that
211  // outbound connections are under our control and may be logged
212  // at a higher level, but inbound connections are more numerous and
213  // uncontrolled, so to prevent log flooding the severity is reduced.
214  //
215  if (inbound_)
216  {
217  JLOG(journal_.debug()) << "Stop";
218  }
219  else
220  {
221  JLOG(journal_.info()) << "Stop";
222  }
223  }
224  close();
225 }
226 
227 //------------------------------------------------------------------------------
228 
229 void
230 PeerImp::send(std::shared_ptr<Message> const& m)
231 {
232  if (!strand_.running_in_this_thread())
233  return post(strand_, std::bind(&PeerImp::send, shared_from_this(), m));
234  if (gracefulClose_)
235  return;
236  if (detaching_)
237  return;
238 
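    // Drop messages tied to a validator key that this peer has asked us to
    // squelch, unless the squelch has already expired.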
239  auto validator = m->getValidatorKey();
240  if (validator && !squelch_.expireSquelch(*validator))
241  return;
242 
243  overlay_.reportTraffic(
244  safe_cast<TrafficCount::category>(m->getCategory()),
245  false,
246  static_cast<int>(m->getBuffer(compressionEnabled_).size()));
247 
248  auto sendq_size = send_queue_.size();
249 
250  if (sendq_size < Tuning::targetSendQueue)
251  {
252  // To detect a peer that does not read from its
253  // side of the connection, we expect its send queue
254  // to drain to a small size periodically
255  large_sendq_ = 0;
256  }
257  else if (auto sink = journal_.debug();
258  sink && (sendq_size % Tuning::sendQueueLogFreq) == 0)
259  {
260  std::string const n = name();
261  sink << (n.empty() ? remote_address_.to_string() : n)
262  << " sendq: " << sendq_size;
263  }
264 
265  send_queue_.push(m);
266 
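    // If the queue already had entries, a write is in flight and onWriteMessage
    // will pick this message up; only start a new async_write when the queue was empty.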
267  if (sendq_size != 0)
268  return;
269 
270  boost::asio::async_write(
271  stream_,
272  boost::asio::buffer(
273  send_queue_.front()->getBuffer(compressionEnabled_)),
274  bind_executor(
275  strand_,
276  std::bind(
277  &PeerImp::onWriteMessage,
278  shared_from_this(),
279  std::placeholders::_1,
280  std::placeholders::_2)));
281 }
282 
283 void
284 PeerImp::charge(Resource::Charge const& fee)
285 {
286  if ((usage_.charge(fee) == Resource::drop) && usage_.disconnect() &&
287  strand_.running_in_this_thread())
288  {
289  // Sever the connection
291  fail("charge: Resources");
292  }
293 }
294 
295 //------------------------------------------------------------------------------
296 
297 bool
298 PeerImp::crawl() const
299 {
300  auto const iter = headers_.find("Crawl");
301  if (iter == headers_.end())
302  return false;
303  return boost::iequals(iter->value(), "public");
304 }
305 
306 bool
307 PeerImp::cluster() const
308 {
309  return static_cast<bool>(app_.cluster().member(publicKey_));
310 }
311 
312 std::string
313 PeerImp::getVersion() const
314 {
315  if (inbound_)
316  return headers_["User-Agent"].to_string();
317  return headers_["Server"].to_string();
318 }
319 
320 Json::Value
321 PeerImp::json()
322 {
323  Json::Value ret(Json::objectValue);
324 
325  ret[jss::public_key] = toBase58(TokenType::NodePublic, publicKey_);
326  ret[jss::address] = remote_address_.to_string();
327 
328  if (inbound_)
329  ret[jss::inbound] = true;
330 
331  if (cluster())
332  {
333  ret[jss::cluster] = true;
334 
335  if (auto const n = name(); !n.empty())
336  // Could move here if Json::Value supported moving from a string
337  ret[jss::name] = n;
338  }
339 
340  if (auto const d = domain(); !d.empty())
341  ret[jss::server_domain] = domain();
342 
343  if (auto const nid = headers_["Network-ID"].to_string(); !nid.empty())
344  ret[jss::network_id] = nid;
345 
346  ret[jss::load] = usage_.balance();
347 
348  if (auto const version = getVersion(); !version.empty())
349  ret[jss::version] = version;
350 
351  ret[jss::protocol] = to_string(protocol_);
352 
353  {
354  std::lock_guard sl(recentLock_);
355  if (latency_)
356  ret[jss::latency] = static_cast<Json::UInt>(latency_->count());
357  }
358 
359  ret[jss::uptime] = static_cast<Json::UInt>(
360  std::chrono::duration_cast<std::chrono::seconds>(uptime()).count());
361 
362  std::uint32_t minSeq, maxSeq;
363  ledgerRange(minSeq, maxSeq);
364 
365  if ((minSeq != 0) || (maxSeq != 0))
366  ret[jss::complete_ledgers] =
367  std::to_string(minSeq) + " - " + std::to_string(maxSeq);
368 
369  switch (tracking_.load())
370  {
371  case Tracking::diverged:
372  ret[jss::track] = "diverged";
373  break;
374 
375  case Tracking::unknown:
376  ret[jss::track] = "unknown";
377  break;
378 
379  case Tracking::converged:
380  // Nothing to do here
381  break;
382  }
383 
384  uint256 closedLedgerHash;
385  protocol::TMStatusChange last_status;
386  {
387  std::lock_guard sl(recentLock_);
388  closedLedgerHash = closedLedgerHash_;
389  last_status = last_status_;
390  }
391 
392  if (closedLedgerHash != beast::zero)
393  ret[jss::ledger] = to_string(closedLedgerHash);
394 
395  if (last_status.has_newstatus())
396  {
397  switch (last_status.newstatus())
398  {
399  case protocol::nsCONNECTING:
400  ret[jss::status] = "connecting";
401  break;
402 
403  case protocol::nsCONNECTED:
404  ret[jss::status] = "connected";
405  break;
406 
407  case protocol::nsMONITORING:
408  ret[jss::status] = "monitoring";
409  break;
410 
411  case protocol::nsVALIDATING:
412  ret[jss::status] = "validating";
413  break;
414 
415  case protocol::nsSHUTTING:
416  ret[jss::status] = "shutting";
417  break;
418 
419  default:
420  JLOG(p_journal_.warn())
421  << "Unknown status: " << last_status.newstatus();
422  }
423  }
424 
425  ret[jss::metrics] = Json::Value(Json::objectValue);
426  ret[jss::metrics][jss::total_bytes_recv] =
427  std::to_string(metrics_.recv.total_bytes());
428  ret[jss::metrics][jss::total_bytes_sent] =
429  std::to_string(metrics_.sent.total_bytes());
430  ret[jss::metrics][jss::avg_bps_recv] =
431  std::to_string(metrics_.recv.average_bytes());
432  ret[jss::metrics][jss::avg_bps_sent] =
433  std::to_string(metrics_.sent.average_bytes());
434 
435  return ret;
436 }
437 
438 bool
439 PeerImp::supportsFeature(ProtocolFeature f) const
440 {
441  switch (f)
442  {
443  case ProtocolFeature::ValidatorListPropagation:
444  return protocol_ >= make_protocol(2, 1);
445  case ProtocolFeature::ValidatorList2Propagation:
446  return protocol_ >= make_protocol(2, 2);
447  }
447  }
448  return false;
449 }
450 
451 //------------------------------------------------------------------------------
452 
453 bool
454 PeerImp::hasLedger(uint256 const& hash, std::uint32_t seq) const
455 {
456  {
457  std::lock_guard sl(recentLock_);
458  if ((seq != 0) && (seq >= minLedger_) && (seq <= maxLedger_) &&
459  (hash == closedLedgerHash_))
460  return true;
461  if (std::find(recentLedgers_.begin(), recentLedgers_.end(), hash) !=
462  recentLedgers_.end())
463  return true;
464  }
465 
466  return seq >= app_.getNodeStore().earliestLedgerSeq() &&
467  hasShard(NodeStore::seqToShardIndex(seq));
468 }
469 
470 void
471 PeerImp::ledgerRange(std::uint32_t& minSeq, std::uint32_t& maxSeq) const
472 {
473  std::lock_guard sl(recentLock_);
474 
475  minSeq = minLedger_;
476  maxSeq = maxLedger_;
477 }
478 
479 bool
480 PeerImp::hasShard(std::uint32_t shardIndex) const
481 {
483  auto const it{shardInfo_.find(publicKey_)};
484  if (it != shardInfo_.end())
485  return boost::icl::contains(it->second.shardIndexes, shardIndex);
486  return false;
487 }
488 
489 bool
490 PeerImp::hasTxSet(uint256 const& hash) const
491 {
492  std::lock_guard sl(recentLock_);
493  return std::find(recentTxSets_.begin(), recentTxSets_.end(), hash) !=
494  recentTxSets_.end();
495 }
496 
497 void
498 PeerImp::cycleStatus()
499 {
500  // Operations on closedLedgerHash_ and previousLedgerHash_ must be
501  // guarded by recentLock_.
502  std::lock_guard sl(recentLock_);
503  previousLedgerHash_ = closedLedgerHash_;
504  closedLedgerHash_.zero();
505 }
506 
507 bool
508 PeerImp::hasRange(std::uint32_t uMin, std::uint32_t uMax)
509 {
510  std::lock_guard sl(recentLock_);
511  return (tracking_ != Tracking::diverged) && (uMin >= minLedger_) &&
512  (uMax <= maxLedger_);
513 }
514 
515 //------------------------------------------------------------------------------
516 
517 void
518 PeerImp::close()
519 {
520  assert(strand_.running_in_this_thread());
521  if (socket_.is_open())
522  {
523  detaching_ = true; // DEPRECATED
524  error_code ec;
525  timer_.cancel(ec);
526  socket_.close(ec);
528  if (inbound_)
529  {
530  JLOG(journal_.debug()) << "Closed";
531  }
532  else
533  {
534  JLOG(journal_.info()) << "Closed";
535  }
536  }
537 }
538 
539 void
540 PeerImp::fail(std::string const& reason)
541 {
542  if (!strand_.running_in_this_thread())
543  return post(
544  strand_,
545  std::bind(
546  (void (Peer::*)(std::string const&)) & PeerImp::fail,
547  shared_from_this(),
548  reason));
549  if (journal_.active(beast::severities::kWarning) && socket_.is_open())
550  {
551  std::string const n = name();
552  JLOG(journal_.warn()) << (n.empty() ? remote_address_.to_string() : n)
553  << " failed: " << reason;
554  }
555  close();
556 }
557 
558 void
559 PeerImp::fail(std::string const& name, error_code ec)
560 {
561  assert(strand_.running_in_this_thread());
562  if (socket_.is_open())
563  {
564  JLOG(journal_.warn())
565  << name << " from " << toBase58(TokenType::NodePublic, publicKey_)
566  << " at " << remote_address_.to_string() << ": " << ec.message();
567  }
568  close();
569 }
570 
571 boost::optional<RangeSet<std::uint32_t>>
572 PeerImp::getShardIndexes() const
573 {
575  auto it{shardInfo_.find(publicKey_)};
576  if (it != shardInfo_.end())
577  return it->second.shardIndexes;
578  return boost::none;
579 }
580 
581 boost::optional<hash_map<PublicKey, PeerImp::ShardInfo>>
582 PeerImp::getPeerShardInfo() const
583 {
585  if (!shardInfo_.empty())
586  return shardInfo_;
587  return boost::none;
588 }
589 
590 void
591 PeerImp::gracefulClose()
592 {
593  assert(strand_.running_in_this_thread());
594  assert(socket_.is_open());
595  assert(!gracefulClose_);
596  gracefulClose_ = true;
597 #if 0
598  // Flush messages
599  while(send_queue_.size() > 1)
600  send_queue_.pop_back();
601 #endif
602  if (send_queue_.size() > 0)
603  return;
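    // Writes are still queued: onWriteMessage will notice gracefulClose_ and
    // start the TLS shutdown once the queue drains.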
604  setTimer();
605  stream_.async_shutdown(bind_executor(
606  strand_,
607  std::bind(
608  &PeerImp::onShutdown, shared_from_this(), std::placeholders::_1)));
609 }
610 
611 void
612 PeerImp::setTimer()
613 {
614  error_code ec;
615  timer_.expires_from_now(peerTimerInterval, ec);
616 
617  if (ec)
618  {
619  JLOG(journal_.error()) << "setTimer: " << ec.message();
620  return;
621  }
622  timer_.async_wait(bind_executor(
623  strand_,
624  std::bind(
625  &PeerImp::onTimer, shared_from_this(), std::placeholders::_1)));
626 }
627 
628 // convenience for ignoring the error code
629 void
630 PeerImp::cancelTimer()
631 {
632  error_code ec;
633  timer_.cancel(ec);
634 }
635 
636 //------------------------------------------------------------------------------
637 
638 std::string
639 PeerImp::makePrefix(id_t id)
640 {
641  std::stringstream ss;
642  ss << "[" << std::setfill('0') << std::setw(3) << id << "] ";
643  return ss.str();
644 }
645 
646 void
647 PeerImp::onTimer(error_code const& ec)
648 {
649  if (!socket_.is_open())
650  return;
651 
652  if (ec == boost::asio::error::operation_aborted)
653  return;
654 
655  if (ec)
656  {
657  // This should never happen
658  JLOG(journal_.error()) << "onTimer: " << ec.message();
659  return close();
660  }
661 
662  if (large_sendq_++ >= Tuning::sendqIntervals)
663  {
664  fail("Large send queue");
665  return;
666  }
667 
668  if (auto const t = tracking_.load(); !inbound_ && t != Tracking::converged)
669  {
670  clock_type::duration duration;
671 
672  {
673  std::lock_guard sl(recentLock_);
674  duration = clock_type::now() - trackingTime_;
675  }
676 
677  if ((t == Tracking::diverged &&
678  (duration > app_.config().MAX_DIVERGED_TIME)) ||
679  (t == Tracking::unknown &&
680  (duration > app_.config().MAX_UNKNOWN_TIME)))
681  {
683  fail("Not useful");
684  return;
685  }
686  }
687 
688  // Already waiting for PONG
689  if (lastPingSeq_)
690  {
691  fail("Ping Timeout");
692  return;
693  }
694 
695  lastPingTime_ = clock_type::now();
696  lastPingSeq_ = rand_int<std::uint32_t>();
697 
698  protocol::TMPing message;
699  message.set_type(protocol::TMPing::ptPING);
700  message.set_seq(*lastPingSeq_);
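    // The random sequence number doubles as a cookie: the PONG must echo it
    // back before lastPingSeq_ is cleared (see the TMPing handler).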
701 
702  send(std::make_shared<Message>(message, protocol::mtPING));
703 
704  setTimer();
705 }
706 
707 void
708 PeerImp::onShutdown(error_code ec)
709 {
710  cancelTimer();
711  // If we don't get eof then something went wrong
712  if (!ec)
713  {
714  JLOG(journal_.error()) << "onShutdown: expected error condition";
715  return close();
716  }
717  if (ec != boost::asio::error::eof)
718  return fail("onShutdown", ec);
719  close();
720 }
721 
722 //------------------------------------------------------------------------------
723 void
724 PeerImp::doAccept()
725 {
726  assert(read_buffer_.size() == 0);
727 
728  JLOG(journal_.debug()) << "doAccept: " << remote_address_;
729 
730  auto const sharedValue = makeSharedValue(*stream_ptr_, journal_);
731 
732  // This shouldn't fail since we already computed
733  // the shared value successfully in OverlayImpl
734  if (!sharedValue)
735  return fail("makeSharedValue: Unexpected failure");
736 
737  JLOG(journal_.info()) << "Protocol: " << to_string(protocol_);
738  JLOG(journal_.info()) << "Public Key: "
739  << toBase58(TokenType::NodePublic, publicKey_);
740 
741  if (auto member = app_.cluster().member(publicKey_))
742  {
743  {
744  std::unique_lock lock{nameMutex_};
745  name_ = *member;
746  }
747  JLOG(journal_.info()) << "Cluster name: " << *member;
748  }
749 
751 
752  // XXX Set timer: connection is in grace period to be useful.
753  // XXX Set timer: connection idle (idle may vary depending on connection
754  // type.)
755 
756  auto write_buffer = std::make_shared<boost::beast::multi_buffer>();
757 
758  boost::beast::ostream(*write_buffer) << makeResponse(
759  !overlay_.peerFinder().config().peerPrivate,
760  request_,
761  overlay_.setup().public_ip,
762  remote_address_.address(),
763  *sharedValue,
764  overlay_.setup().networkID,
765  protocol_,
766  app_);
767 
768  // Write the whole buffer and only start protocol when that's done.
769  boost::asio::async_write(
770  stream_,
771  write_buffer->data(),
772  boost::asio::transfer_all(),
773  bind_executor(
774  strand_,
775  [this, write_buffer, self = shared_from_this()](
776  error_code ec, std::size_t bytes_transferred) {
777  if (!socket_.is_open())
778  return;
779  if (ec == boost::asio::error::operation_aborted)
780  return;
781  if (ec)
782  return fail("onWriteResponse", ec);
783  if (write_buffer->size() == bytes_transferred)
784  return doProtocolStart();
785  return fail("Failed to write header");
786  }));
787 }
788 
789 std::string
790 PeerImp::name() const
791 {
792  std::shared_lock read_lock{nameMutex_};
793  return name_;
794 }
795 
796 std::string
797 PeerImp::domain() const
798 {
799  return headers_["Server-Domain"].to_string();
800 }
801 
802 //------------------------------------------------------------------------------
803 
804 // Protocol logic
805 
806 void
807 PeerImp::doProtocolStart()
808 {
809  onReadMessage(error_code(), 0);
810 
811  // Send all the validator lists that have been loaded
813  {
815  [&](std::string const& manifest,
816  std::uint32_t version,
818  PublicKey const& pubKey,
819  std::size_t maxSequence,
820  uint256 const& hash) {
822  *this,
823  0,
824  pubKey,
825  maxSequence,
826  version,
827  manifest,
828  blobInfos,
830  p_journal_);
831 
832  // Don't send it next time.
833  app_.getHashRouter().addSuppressionPeer(hash, id_);
834  });
835  }
836 
837  if (auto m = overlay_.getManifestsMessage())
838  send(m);
839 
840  // Request shard info from peer
841  protocol::TMGetPeerShardInfo tmGPS;
842  tmGPS.set_hops(0);
843  send(std::make_shared<Message>(tmGPS, protocol::mtGET_PEER_SHARD_INFO));
844 
845  setTimer();
846 }
847 
848 // Called repeatedly with protocol message data
849 void
850 PeerImp::onReadMessage(error_code ec, std::size_t bytes_transferred)
851 {
852  if (!socket_.is_open())
853  return;
854  if (ec == boost::asio::error::operation_aborted)
855  return;
856  if (ec == boost::asio::error::eof)
857  {
858  JLOG(journal_.info()) << "EOF";
859  return gracefulClose();
860  }
861  if (ec)
862  return fail("onReadMessage", ec);
863  if (auto stream = journal_.trace())
864  {
865  if (bytes_transferred > 0)
866  stream << "onReadMessage: " << bytes_transferred << " bytes";
867  else
868  stream << "onReadMessage";
869  }
870 
871  metrics_.recv.add_message(bytes_transferred);
872 
873  read_buffer_.commit(bytes_transferred);
874 
875  auto hint = Tuning::readBufferBytes;
876 
877  while (read_buffer_.size() > 0)
878  {
879  std::size_t bytes_consumed;
880  std::tie(bytes_consumed, ec) =
881  invokeProtocolMessage(read_buffer_.data(), *this, hint);
882  if (ec)
883  return fail("onReadMessage", ec);
884  if (!socket_.is_open())
885  return;
886  if (gracefulClose_)
887  return;
888  if (bytes_consumed == 0)
889  break;
890  read_buffer_.consume(bytes_consumed);
891  }
892 
893  // Timeout on writes only
894  stream_.async_read_some(
896  bind_executor(
897  strand_,
898  std::bind(
899  &PeerImp::onReadMessage,
900  shared_from_this(),
901  std::placeholders::_1,
902  std::placeholders::_2)));
903 }
904 
905 void
906 PeerImp::onWriteMessage(error_code ec, std::size_t bytes_transferred)
907 {
908  if (!socket_.is_open())
909  return;
910  if (ec == boost::asio::error::operation_aborted)
911  return;
912  if (ec)
913  return fail("onWriteMessage", ec);
914  if (auto stream = journal_.trace())
915  {
916  if (bytes_transferred > 0)
917  stream << "onWriteMessage: " << bytes_transferred << " bytes";
918  else
919  stream << "onWriteMessage";
920  }
921 
922  metrics_.sent.add_message(bytes_transferred);
923 
924  assert(!send_queue_.empty());
925  send_queue_.pop();
926  if (!send_queue_.empty())
927  {
928  // Timeout on writes only
929  return boost::asio::async_write(
930  stream_,
931  boost::asio::buffer(
932  send_queue_.front()->getBuffer(compressionEnabled_)),
933  bind_executor(
934  strand_,
935  std::bind(
936  &PeerImp::onWriteMessage,
937  shared_from_this(),
938  std::placeholders::_1,
939  std::placeholders::_2)));
940  }
941 
942  if (gracefulClose_)
943  {
944  return stream_.async_shutdown(bind_executor(
945  strand_,
946  std::bind(
947  &PeerImp::onShutdown,
948  shared_from_this(),
949  std::placeholders::_1)));
950  }
951 }
952 
953 //------------------------------------------------------------------------------
954 //
955 // ProtocolHandler
956 //
957 //------------------------------------------------------------------------------
958 
959 void
960 PeerImp::onMessageUnknown(std::uint16_t type)
961 {
962  // TODO
963 }
964 
965 void
966 PeerImp::onMessageBegin(
967  std::uint16_t type,
968  std::shared_ptr<::google::protobuf::Message> const& m,
969  std::size_t size,
970  std::size_t uncompressed_size,
971  bool isCompressed)
972 {
973  load_event_ =
974  app_.getJobQueue().makeLoadEvent(jtPEER, protocolMessageName(type));
976  overlay_.reportTraffic(
977  TrafficCount::categorize(*m, type, true), true, static_cast<int>(size));
978  JLOG(journal_.trace()) << "onMessageBegin: " << type << " " << size << " "
979  << uncompressed_size << " " << isCompressed;
980 }
981 
982 void
983 PeerImp::onMessageEnd(
984  std::uint16_t,
985  std::shared_ptr<::google::protobuf::Message> const&)
986 {
987  load_event_.reset();
988  charge(fee_);
989 }
990 
991 void
992 PeerImp::onMessage(std::shared_ptr<protocol::TMManifests> const& m)
993 {
994  auto const s = m->list_size();
995 
996  if (s == 0)
997  {
998  fee_ = Resource::feeUnwantedData;
999  return;
1000  }
1001 
1002  if (s > 100)
1003  fee_ = Resource::feeMediumBurdenPeer;
1004 
1005  // VFALCO What's the right job type?
1006  auto that = shared_from_this();
1007  app_.getJobQueue().addJob(
1008  jtVALIDATION_ut, "receiveManifests", [this, that, m](Job&) {
1009  overlay_.onManifests(m, that);
1010  });
1011 }
1012 
1013 void
1014 PeerImp::onMessage(std::shared_ptr<protocol::TMPing> const& m)
1015 {
1016  if (m->type() == protocol::TMPing::ptPING)
1017  {
1018  // We have received a ping request, reply with a pong
1020  m->set_type(protocol::TMPing::ptPONG);
1021  send(std::make_shared<Message>(*m, protocol::mtPING));
1022  return;
1023  }
1024 
1025  if (m->type() == protocol::TMPing::ptPONG && m->has_seq())
1026  {
1027  // Only reset the ping sequence if we actually received a
1028  // PONG with the correct cookie. That way, any peers which
1029  // respond with incorrect cookies will eventually time out.
1030  if (m->seq() == lastPingSeq_)
1031  {
1032  lastPingSeq_.reset();
1033 
1034  // Update latency estimate
1035  auto const rtt = std::chrono::round<std::chrono::milliseconds>(
1036  clock_type::now() - lastPingTime_);
1037 
1038  std::lock_guard sl(recentLock_);
1039 
1040  if (latency_)
1041  latency_ = (*latency_ * 7 + rtt) / 8;
1042  else
1043  latency_ = rtt;
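    // Exponential moving average: keep 7/8 of the previous latency estimate
    // and blend in 1/8 of the newly measured round-trip time.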
1044  }
1045 
1046  return;
1047  }
1048 }
1049 
1050 void
1051 PeerImp::onMessage(std::shared_ptr<protocol::TMCluster> const& m)
1052 {
1053  // VFALCO NOTE I think we should drop the peer immediately
1054  if (!cluster())
1055  {
1056  fee_ = Resource::feeUnwantedData;
1057  return;
1058  }
1059 
1060  for (int i = 0; i < m->clusternodes().size(); ++i)
1061  {
1062  protocol::TMClusterNode const& node = m->clusternodes(i);
1063 
1064  std::string name;
1065  if (node.has_nodename())
1066  name = node.nodename();
1067 
1068  auto const publicKey =
1069  parseBase58<PublicKey>(TokenType::NodePublic, node.publickey());
1070 
1071  // NIKB NOTE We should drop the peer immediately if
1072  // they send us a public key we can't parse
1073  if (publicKey)
1074  {
1075  auto const reportTime =
1076  NetClock::time_point{NetClock::duration{node.reporttime()}};
1077 
1078  app_.cluster().update(
1079  *publicKey, name, node.nodeload(), reportTime);
1080  }
1081  }
1082 
1083  int loadSources = m->loadsources().size();
1084  if (loadSources != 0)
1085  {
1086  Resource::Gossip gossip;
1087  gossip.items.reserve(loadSources);
1088  for (int i = 0; i < m->loadsources().size(); ++i)
1089  {
1090  protocol::TMLoadSource const& node = m->loadsources(i);
1091  Resource::Gossip::Item item;
1092  item.address = beast::IP::Endpoint::from_string(node.name());
1093  item.balance = node.cost();
1094  if (item.address != beast::IP::Endpoint())
1095  gossip.items.push_back(item);
1096  }
1097  overlay_.resourceManager().importConsumers(name(), gossip);
1098  }
1099 
1100  // Calculate the cluster fee:
1101  auto const thresh = app_.timeKeeper().now() - 90s;
1102  std::uint32_t clusterFee = 0;
1103 
1104  std::vector<std::uint32_t> fees;
1105  fees.reserve(app_.cluster().size());
1106 
1107  app_.cluster().for_each([&fees, thresh](ClusterNode const& status) {
1108  if (status.getReportTime() >= thresh)
1109  fees.push_back(status.getLoadFee());
1110  });
1111 
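    // Take the element at the midpoint (an O(n) selection via nth_element) as
    // the cluster fee, i.e. the median of the load fees recently reported by
    // cluster members.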
1112  if (!fees.empty())
1113  {
1114  auto const index = fees.size() / 2;
1115  std::nth_element(fees.begin(), fees.begin() + index, fees.end());
1116  clusterFee = fees[index];
1117  }
1118 
1119  app_.getFeeTrack().setClusterFee(clusterFee);
1120 }
1121 
1122 void
1123 PeerImp::onMessage(std::shared_ptr<protocol::TMGetShardInfo> const& m)
1124 {
1125  // DEPRECATED
1126 }
1127 
1128 void
1129 PeerImp::onMessage(std::shared_ptr<protocol::TMShardInfo> const& m)
1130 {
1131  // DEPRECATED
1132 }
1133 
1134 void
1135 PeerImp::onMessage(std::shared_ptr<protocol::TMGetPeerShardInfo> const& m)
1136 {
1137  auto badData = [&](std::string msg) {
1138  fee_ = Resource::feeBadData;
1139  JLOG(p_journal_.warn()) << msg;
1140  };
1141 
1142  if (m->hops() > csHopLimit)
1143  return badData("Invalid hops: " + std::to_string(m->hops()));
1144  if (m->peerchain_size() > csHopLimit)
1145  return badData("Invalid peer chain");
1146 
1147  // Reply with shard info we may have
1148  if (auto shardStore = app_.getShardStore())
1149  {
1151  auto shards{shardStore->getCompleteShards()};
1152  if (!shards.empty())
1153  {
1154  protocol::TMPeerShardInfo reply;
1155  reply.set_shardindexes(shards);
1156 
1157  if (m->has_lastlink())
1158  reply.set_lastlink(true);
1159 
1160  if (m->peerchain_size() > 0)
1161  {
1162  for (int i = 0; i < m->peerchain_size(); ++i)
1163  {
1164  if (!publicKeyType(makeSlice(m->peerchain(i).nodepubkey())))
1165  return badData("Invalid peer chain public key");
1166  }
1167 
1168  *reply.mutable_peerchain() = m->peerchain();
1169  }
1170 
1171  send(std::make_shared<Message>(reply, protocol::mtPEER_SHARD_INFO));
1172 
1173  JLOG(p_journal_.trace()) << "Sent shard indexes " << shards;
1174  }
1175  }
1176 
1177  // Relay request to peers
1178  if (m->hops() > 0)
1179  {
1181 
1182  m->set_hops(m->hops() - 1);
1183  if (m->hops() == 0)
1184  m->set_lastlink(true);
1185 
1186  m->add_peerchain()->set_nodepubkey(
1187  publicKey_.data(), publicKey_.size());
1188 
1189  overlay_.foreach(send_if_not(
1190  std::make_shared<Message>(*m, protocol::mtGET_PEER_SHARD_INFO),
1191  match_peer(this)));
1192  }
1193 }
1194 
1195 void
1196 PeerImp::onMessage(std::shared_ptr<protocol::TMPeerShardInfo> const& m)
1197 {
1198  auto badData = [&](std::string msg) {
1199  fee_ = Resource::feeBadData;
1200  JLOG(p_journal_.warn()) << msg;
1201  };
1202 
1203  if (m->shardindexes().empty())
1204  return badData("Missing shard indexes");
1205  if (m->peerchain_size() > csHopLimit)
1206  return badData("Invalid peer chain");
1207  if (m->has_nodepubkey() && !publicKeyType(makeSlice(m->nodepubkey())))
1208  return badData("Invalid public key");
1209 
1210  // Check if the message should be forwarded to another peer
1211  if (m->peerchain_size() > 0)
1212  {
1213  // Get the Public key of the last link in the peer chain
1214  auto const s{
1215  makeSlice(m->peerchain(m->peerchain_size() - 1).nodepubkey())};
1216  if (!publicKeyType(s))
1217  return badData("Invalid pubKey");
1218  PublicKey peerPubKey(s);
1219 
1220  if (auto peer = overlay_.findPeerByPublicKey(peerPubKey))
1221  {
1222  if (!m->has_nodepubkey())
1223  m->set_nodepubkey(publicKey_.data(), publicKey_.size());
1224 
1225  if (!m->has_endpoint())
1226  {
1227  // Check if peer will share IP publicly
1228  if (crawl())
1229  m->set_endpoint(remote_address_.address().to_string());
1230  else
1231  m->set_endpoint("0");
1232  }
1233 
1234  m->mutable_peerchain()->RemoveLast();
1235  peer->send(
1236  std::make_shared<Message>(*m, protocol::mtPEER_SHARD_INFO));
1237 
1238  JLOG(p_journal_.trace())
1239  << "Relayed TMPeerShardInfo to peer with IP "
1240  << remote_address_.address().to_string();
1241  }
1242  else
1243  {
1244  // Peer is no longer available so the relay ends
1246  JLOG(p_journal_.info()) << "Unable to route shard info";
1247  }
1248  return;
1249  }
1250 
1251  // Parse the shard indexes received in the shard info
1252  RangeSet<std::uint32_t> shardIndexes;
1253  {
1254  if (!from_string(shardIndexes, m->shardindexes()))
1255  return badData("Invalid shard indexes");
1256 
1257  std::uint32_t earliestShard;
1258  boost::optional<std::uint32_t> latestShard;
1259  {
1260  auto const curLedgerSeq{
1261  app_.getLedgerMaster().getCurrentLedgerIndex()};
1262  if (auto shardStore = app_.getShardStore())
1263  {
1264  earliestShard = shardStore->earliestShardIndex();
1265  if (curLedgerSeq >= shardStore->earliestLedgerSeq())
1266  latestShard = shardStore->seqToShardIndex(curLedgerSeq);
1267  }
1268  else
1269  {
1270  auto const earliestLedgerSeq{
1271  app_.getNodeStore().earliestLedgerSeq()};
1272  earliestShard = NodeStore::seqToShardIndex(earliestLedgerSeq);
1273  if (curLedgerSeq >= earliestLedgerSeq)
1274  latestShard = NodeStore::seqToShardIndex(curLedgerSeq);
1275  }
1276  }
1277 
1278  if (boost::icl::first(shardIndexes) < earliestShard ||
1279  (latestShard && boost::icl::last(shardIndexes) > latestShard))
1280  {
1281  return badData("Invalid shard indexes");
1282  }
1283  }
1284 
1285  // Get the IP of the node reporting the shard info
1286  beast::IP::Endpoint endpoint;
1287  if (m->has_endpoint())
1288  {
1289  if (m->endpoint() != "0")
1290  {
1291  auto result =
1292  beast::IP::Endpoint::from_string_checked(m->endpoint());
1293  if (!result)
1294  return badData("Invalid incoming endpoint: " + m->endpoint());
1295  endpoint = std::move(*result);
1296  }
1297  }
1298  else if (crawl()) // Check if peer will share IP publicly
1299  {
1300  endpoint = remote_address_;
1301  }
1302 
1303  // Get the Public key of the node reporting the shard info
1304  PublicKey publicKey;
1305  if (m->has_nodepubkey())
1306  publicKey = PublicKey(makeSlice(m->nodepubkey()));
1307  else
1308  publicKey = publicKey_;
1309 
1310  {
1312  auto it{shardInfo_.find(publicKey)};
1313  if (it != shardInfo_.end())
1314  {
1315  // Update the IP address for the node
1316  it->second.endpoint = std::move(endpoint);
1317 
1318  // Join the shard index range set
1319  it->second.shardIndexes += shardIndexes;
1320  }
1321  else
1322  {
1323  // Add a new node
1324  ShardInfo shardInfo;
1325  shardInfo.endpoint = std::move(endpoint);
1326  shardInfo.shardIndexes = std::move(shardIndexes);
1327  shardInfo_.emplace(publicKey, std::move(shardInfo));
1328  }
1329  }
1330 
1331  JLOG(p_journal_.trace())
1332  << "Consumed TMPeerShardInfo originating from public key "
1333  << toBase58(TokenType::NodePublic, publicKey) << " shard indexes "
1334  << m->shardindexes();
1335 
1336  if (m->has_lastlink())
1337  overlay_.lastLink(id_);
1338 }
1339 
1340 void
1341 PeerImp::onMessage(std::shared_ptr<protocol::TMEndpoints> const& m)
1342 {
1343  // Don't allow endpoints from peers that are not known to be tracking
1344  // (converged) or are not using a version of the message that we support:
1345  if (tracking_.load() != Tracking::converged || m->version() != 2)
1346  return;
1347 
1348  std::vector<PeerFinder::Endpoint> endpoints;
1349  endpoints.reserve(m->endpoints_v2().size());
1350 
1351  for (auto const& tm : m->endpoints_v2())
1352  {
1353  auto result = beast::IP::Endpoint::from_string_checked(tm.endpoint());
1354  if (!result)
1355  {
1356  JLOG(p_journal_.error()) << "failed to parse incoming endpoint: {"
1357  << tm.endpoint() << "}";
1358  continue;
1359  }
1360 
1361  // If hops == 0, this Endpoint describes the peer we are connected
1362  // to -- in that case, we take the remote address seen on the
1363  // socket and store that in the IP::Endpoint. If this is the first
1364  // time, then we'll verify that their listener can receive incoming
1365  // by performing a connectivity test. if hops > 0, then we just
1366  // take the address/port we were given
1367 
1368  endpoints.emplace_back(
1369  tm.hops() > 0 ? *result : remote_address_.at_port(result->port()),
1370  tm.hops());
1371  }
1372 
1373  if (!endpoints.empty())
1374  overlay_.peerFinder().on_endpoints(slot_, endpoints);
1375 }
1376 
1377 void
1378 PeerImp::onMessage(std::shared_ptr<protocol::TMTransaction> const& m)
1379 {
1380  if (tracking_.load() == Tracking::diverged)
1381  return;
1382 
1383  if (app_.getOPs().isNeedNetworkLedger())
1384  {
1385  // If we've never been in synch, there's nothing we can do
1386  // with a transaction
1387  JLOG(p_journal_.debug()) << "Ignoring incoming transaction: "
1388  << "Need network ledger";
1389  return;
1390  }
1391 
1392  SerialIter sit(makeSlice(m->rawtransaction()));
1393 
1394  try
1395  {
1396  auto stx = std::make_shared<STTx const>(sit);
1397  uint256 txID = stx->getTransactionID();
1398 
1399  int flags;
1400  constexpr std::chrono::seconds tx_interval = 10s;
1401 
1402  if (!app_.getHashRouter().shouldProcess(txID, id_, flags, tx_interval))
1403  {
1404  // we have seen this transaction recently
1405  if (flags & SF_BAD)
1406  {
1408  JLOG(p_journal_.debug()) << "Ignoring known bad tx " << txID;
1409  }
1410 
1411  return;
1412  }
1413 
1414  JLOG(p_journal_.debug()) << "Got tx " << txID;
1415 
1416  bool checkSignature = true;
1417  if (cluster())
1418  {
1419  if (!m->has_deferred() || !m->deferred())
1420  {
1421  // Skip local checks if a server we trust
1422  // put the transaction in its open ledger
1423  flags |= SF_TRUSTED;
1424  }
1425 
1427  {
1428  // For now, be paranoid and have each validator
1429  // check each transaction, regardless of source
1430  checkSignature = false;
1431  }
1432  }
1433 
1434  if (app_.getJobQueue().getJobCount(jtTRANSACTION) >
1435  app_.config().MAX_TRANSACTIONS)
1436  {
1437  overlay_.incJqTransOverflow();
1438  JLOG(p_journal_.info()) << "Transaction queue is full";
1439  }
1440  else if (app_.getLedgerMaster().getValidatedLedgerAge() > 4min)
1441  {
1442  JLOG(p_journal_.trace())
1443  << "No new transactions until synchronized";
1444  }
1445  else
1446  {
1447  app_.getJobQueue().addJob(
1448  jtTRANSACTION,
1449  "recvTransaction->checkTransaction",
1450  [weak = std::weak_ptr<PeerImp>(shared_from_this()),
1451  flags,
1452  checkSignature,
1453  stx](Job&) {
1454  if (auto peer = weak.lock())
1455  peer->checkTransaction(flags, checkSignature, stx);
1456  });
1457  }
1458  }
1459  catch (std::exception const&)
1460  {
1461  JLOG(p_journal_.warn())
1462  << "Transaction invalid: " << strHex(m->rawtransaction());
1463  }
1464 }
1465 
1466 void
1467 PeerImp::onMessage(std::shared_ptr<protocol::TMGetLedger> const& m)
1468 {
1469  fee_ = Resource::feeMediumBurdenPeer;
1470  std::weak_ptr<PeerImp> weak = shared_from_this();
1471  app_.getJobQueue().addJob(jtLEDGER_REQ, "recvGetLedger", [weak, m](Job&) {
1472  if (auto peer = weak.lock())
1473  peer->getLedger(m);
1474  });
1475 }
1476 
1477 void
1478 PeerImp::onMessage(std::shared_ptr<protocol::TMLedgerData> const& m)
1479 {
1480  protocol::TMLedgerData& packet = *m;
1481 
1482  if (m->nodes().size() <= 0)
1483  {
1484  JLOG(p_journal_.warn()) << "Ledger/TXset data with no nodes";
1485  return;
1486  }
1487 
1488  if (m->has_requestcookie())
1489  {
1490  std::shared_ptr<Peer> target =
1491  overlay_.findPeerByShortID(m->requestcookie());
1492  if (target)
1493  {
1494  m->clear_requestcookie();
1495  target->send(
1496  std::make_shared<Message>(packet, protocol::mtLEDGER_DATA));
1497  }
1498  else
1499  {
1500  JLOG(p_journal_.info()) << "Unable to route TX/ledger data reply";
1501  fee_ = Resource::feeUnwantedData;
1502  }
1503  return;
1504  }
1505 
1506  if (!stringIsUint256Sized(m->ledgerhash()))
1507  {
1508  JLOG(p_journal_.warn()) << "TX candidate reply with invalid hash size";
1509  fee_ = Resource::feeInvalidRequest;
1510  return;
1511  }
1512 
1513  uint256 const hash{m->ledgerhash()};
1514 
1515  if (m->type() == protocol::liTS_CANDIDATE)
1516  {
1517  // got data for a candidate transaction set
1518  std::weak_ptr<PeerImp> weak = shared_from_this();
1519  app_.getJobQueue().addJob(
1520  jtTXN_DATA, "recvPeerData", [weak, hash, m](Job&) {
1521  if (auto peer = weak.lock())
1522  peer->app_.getInboundTransactions().gotData(hash, peer, m);
1523  });
1524  return;
1525  }
1526 
1527  if (!app_.getInboundLedgers().gotLedgerData(hash, shared_from_this(), m))
1528  {
1529  JLOG(p_journal_.trace()) << "Got data for unwanted ledger";
1530  fee_ = Resource::feeUnwantedData;
1531  }
1532 }
1533 
1534 void
1535 PeerImp::onMessage(std::shared_ptr<protocol::TMProposeSet> const& m)
1536 {
1537  protocol::TMProposeSet& set = *m;
1538 
1539  auto const sig = makeSlice(set.signature());
1540 
1541  // Preliminary check for the validity of the signature: A DER encoded
1542  // signature can't be longer than 72 bytes.
1543  if ((boost::algorithm::clamp(sig.size(), 64, 72) != sig.size()) ||
1544  (publicKeyType(makeSlice(set.nodepubkey())) != KeyType::secp256k1))
1545  {
1546  JLOG(p_journal_.warn()) << "Proposal: malformed";
1548  return;
1549  }
1550 
1551  if (!stringIsUint256Sized(set.currenttxhash()) ||
1552  !stringIsUint256Sized(set.previousledger()))
1553  {
1554  JLOG(p_journal_.warn()) << "Proposal: malformed";
1556  return;
1557  }
1558 
1559  uint256 const proposeHash{set.currenttxhash()};
1560  uint256 const prevLedger{set.previousledger()};
1561 
1562  PublicKey const publicKey{makeSlice(set.nodepubkey())};
1563  NetClock::time_point const closeTime{NetClock::duration{set.closetime()}};
1564 
1565  uint256 const suppression = proposalUniqueId(
1566  proposeHash,
1567  prevLedger,
1568  set.proposeseq(),
1569  closeTime,
1570  publicKey.slice(),
1571  sig);
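    // The suppression hash uniquely identifies this proposal and drives the
    // duplicate detection in the HashRouter check below.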
1572 
1573  if (auto [added, relayed] =
1574  app_.getHashRouter().addSuppressionPeerWithStatus(suppression, id_);
1575  !added)
1576  {
1577  // Count unique messages (Slots has its own 'HashRouter'), which a peer
1578  // receives within IDLED seconds since the message has been relayed.
1579  if (reduceRelayReady() && relayed &&
1580  (stopwatch().now() - *relayed) < reduce_relay::IDLED)
1581  overlay_.updateSlotAndSquelch(
1582  suppression, publicKey, id_, protocol::mtPROPOSE_LEDGER);
1583  JLOG(p_journal_.trace()) << "Proposal: duplicate";
1584  return;
1585  }
1586 
1587  auto const isTrusted = app_.validators().trusted(publicKey);
1588 
1589  if (!isTrusted)
1590  {
1592  {
1593  JLOG(p_journal_.debug())
1594  << "Proposal: Dropping untrusted (peer divergence)";
1595  return;
1596  }
1597 
1598  if (!cluster() && app_.getFeeTrack().isLoadedLocal())
1599  {
1600  JLOG(p_journal_.debug()) << "Proposal: Dropping untrusted (load)";
1601  return;
1602  }
1603  }
1604 
1605  JLOG(p_journal_.trace())
1606  << "Proposal: " << (isTrusted ? "trusted" : "untrusted");
1607 
1608  auto proposal = RCLCxPeerPos(
1609  publicKey,
1610  sig,
1611  suppression,
1613  prevLedger,
1614  set.proposeseq(),
1615  proposeHash,
1616  closeTime,
1619 
1620  std::weak_ptr<PeerImp> weak = shared_from_this();
1621  app_.getJobQueue().addJob(
1622  isTrusted ? jtPROPOSAL_t : jtPROPOSAL_ut,
1623  "recvPropose->checkPropose",
1624  [weak, m, proposal](Job& job) {
1625  if (auto peer = weak.lock())
1626  peer->checkPropose(job, m, proposal);
1627  });
1628 }
1629 
1630 void
1631 PeerImp::onMessage(std::shared_ptr<protocol::TMStatusChange> const& m)
1632 {
1633  JLOG(p_journal_.trace()) << "Status: Change";
1634 
1635  if (!m->has_networktime())
1636  m->set_networktime(app_.timeKeeper().now().time_since_epoch().count());
1637 
1638  {
1639  std::lock_guard sl(recentLock_);
1640  if (!last_status_.has_newstatus() || m->has_newstatus())
1641  last_status_ = *m;
1642  else
1643  {
1644  // preserve old status
1645  protocol::NodeStatus status = last_status_.newstatus();
1646  last_status_ = *m;
1647  m->set_newstatus(status);
1648  }
1649  }
1650 
1651  if (m->newevent() == protocol::neLOST_SYNC)
1652  {
1653  bool outOfSync{false};
1654  {
1655  // Operations on closedLedgerHash_ and previousLedgerHash_ must be
1656  // guarded by recentLock_.
1657  std::lock_guard sl(recentLock_);
1658  if (!closedLedgerHash_.isZero())
1659  {
1660  outOfSync = true;
1661  closedLedgerHash_.zero();
1662  }
1663  previousLedgerHash_.zero();
1664  }
1665  if (outOfSync)
1666  {
1667  JLOG(p_journal_.debug()) << "Status: Out of sync";
1668  }
1669  return;
1670  }
1671 
1672  {
1673  uint256 closedLedgerHash{};
1674  bool const peerChangedLedgers{
1675  m->has_ledgerhash() && stringIsUint256Sized(m->ledgerhash())};
1676 
1677  {
1678  // Operations on closedLedgerHash_ and previousLedgerHash_ must be
1679  // guarded by recentLock_.
1680  std::lock_guard<std::mutex> sl(recentLock_);
1681  if (peerChangedLedgers)
1682  {
1683  closedLedgerHash_ = m->ledgerhash();
1684  closedLedgerHash = closedLedgerHash_;
1685  addLedger(closedLedgerHash, sl);
1686  }
1687  else
1688  {
1689  closedLedgerHash_.zero();
1690  }
1691 
1692  if (m->has_ledgerhashprevious() &&
1693  stringIsUint256Sized(m->ledgerhashprevious()))
1694  {
1695  previousLedgerHash_ = m->ledgerhashprevious();
1696  addLedger(previousLedgerHash_, sl);
1697  }
1698  else
1699  {
1700  previousLedgerHash_.zero();
1701  }
1702  }
1703  if (peerChangedLedgers)
1704  {
1705  JLOG(p_journal_.debug()) << "LCL is " << closedLedgerHash;
1706  }
1707  else
1708  {
1709  JLOG(p_journal_.debug()) << "Status: No ledger";
1710  }
1711  }
1712 
1713  if (m->has_firstseq() && m->has_lastseq())
1714  {
1715  std::lock_guard sl(recentLock_);
1716 
1717  minLedger_ = m->firstseq();
1718  maxLedger_ = m->lastseq();
1719 
1720  if ((maxLedger_ < minLedger_) || (minLedger_ == 0) || (maxLedger_ == 0))
1721  minLedger_ = maxLedger_ = 0;
1722  }
1723 
1724  if (m->has_ledgerseq() &&
1725  app_.getLedgerMaster().getValidatedLedgerAge() < 2min)
1726  {
1727  checkTracking(
1728  m->ledgerseq(), app_.getLedgerMaster().getValidLedgerIndex());
1729  }
1730 
1731  app_.getOPs().pubPeerStatus([=]() -> Json::Value {
1732  Json::Value j = Json::objectValue;
1733 
1734  if (m->has_newstatus())
1735  {
1736  switch (m->newstatus())
1737  {
1738  case protocol::nsCONNECTING:
1739  j[jss::status] = "CONNECTING";
1740  break;
1741  case protocol::nsCONNECTED:
1742  j[jss::status] = "CONNECTED";
1743  break;
1744  case protocol::nsMONITORING:
1745  j[jss::status] = "MONITORING";
1746  break;
1747  case protocol::nsVALIDATING:
1748  j[jss::status] = "VALIDATING";
1749  break;
1750  case protocol::nsSHUTTING:
1751  j[jss::status] = "SHUTTING";
1752  break;
1753  }
1754  }
1755 
1756  if (m->has_newevent())
1757  {
1758  switch (m->newevent())
1759  {
1760  case protocol::neCLOSING_LEDGER:
1761  j[jss::action] = "CLOSING_LEDGER";
1762  break;
1763  case protocol::neACCEPTED_LEDGER:
1764  j[jss::action] = "ACCEPTED_LEDGER";
1765  break;
1766  case protocol::neSWITCHED_LEDGER:
1767  j[jss::action] = "SWITCHED_LEDGER";
1768  break;
1769  case protocol::neLOST_SYNC:
1770  j[jss::action] = "LOST_SYNC";
1771  break;
1772  }
1773  }
1774 
1775  if (m->has_ledgerseq())
1776  {
1777  j[jss::ledger_index] = m->ledgerseq();
1778  }
1779 
1780  if (m->has_ledgerhash())
1781  {
1782  uint256 closedLedgerHash{};
1783  {
1784  std::lock_guard sl(recentLock_);
1785  closedLedgerHash = closedLedgerHash_;
1786  }
1787  j[jss::ledger_hash] = to_string(closedLedgerHash);
1788  }
1789 
1790  if (m->has_networktime())
1791  {
1792  j[jss::date] = Json::UInt(m->networktime());
1793  }
1794 
1795  if (m->has_firstseq() && m->has_lastseq())
1796  {
1797  j[jss::ledger_index_min] = Json::UInt(m->firstseq());
1798  j[jss::ledger_index_max] = Json::UInt(m->lastseq());
1799  }
1800 
1801  return j;
1802  });
1803 }
1804 
1805 void
1806 PeerImp::checkTracking(std::uint32_t validationSeq)
1807 {
1808  std::uint32_t serverSeq;
1809  {
1810  // Extract the sequence number of the highest
1811  // ledger this peer has
1812  std::lock_guard sl(recentLock_);
1813 
1814  serverSeq = maxLedger_;
1815  }
1816  if (serverSeq != 0)
1817  {
1818  // Compare the peer's ledger sequence to the
1819  // sequence of a recently-validated ledger
1820  checkTracking(serverSeq, validationSeq);
1821  }
1822 }
1823 
1824 void
1825 PeerImp::checkTracking(std::uint32_t seq1, std::uint32_t seq2)
1826 {
1827  int diff = std::max(seq1, seq2) - std::min(seq1, seq2);
1828 
1829  if (diff < Tuning::convergedLedgerLimit)
1830  {
1831  // The peer's ledger sequence is close to the validation's
1832  tracking_ = Tracking::converged;
1833  }
1834 
1835  if ((diff > Tuning::divergedLedgerLimit) &&
1836  (tracking_.load() != Tracking::diverged))
1837  {
1838  // The peer's ledger sequence is way off the validation's
1839  std::lock_guard sl(recentLock_);
1840 
1841  tracking_ = Tracking::diverged;
1842  trackingTime_ = clock_type::now();
1843  }
1844 }
1845 
1846 void
1847 PeerImp::onMessage(std::shared_ptr<protocol::TMHaveTransactionSet> const& m)
1848 {
1849  if (!stringIsUint256Sized(m->hash()))
1850  {
1851  fee_ = Resource::feeInvalidRequest;
1852  return;
1853  }
1854 
1855  uint256 const hash{m->hash()};
1856 
1857  if (m->status() == protocol::tsHAVE)
1858  {
1859  std::lock_guard sl(recentLock_);
1860 
1861  if (std::find(recentTxSets_.begin(), recentTxSets_.end(), hash) !=
1862  recentTxSets_.end())
1863  {
1864  fee_ = Resource::feeUnwantedData;
1865  return;
1866  }
1867 
1868  recentTxSets_.push_back(hash);
1869  }
1870 }
1871 
1872 void
1873 PeerImp::onValidatorListMessage(
1874  std::string const& messageType,
1875  std::string const& manifest,
1876  std::uint32_t version,
1877  std::vector<ValidatorBlobInfo> const& blobs)
1878 {
1879  // If there are no blobs, the message is malformed (possibly because of
1880  // ValidatorList class rules), so charge accordingly and skip processing.
1881  if (blobs.empty())
1882  {
1883  JLOG(p_journal_.warn()) << "Ignored malformed " << messageType
1884  << " from peer " << remote_address_;
1885  // This shouldn't ever happen with a well-behaved peer
1886  fee_ = Resource::feeHighBurdenPeer;
1887  return;
1888  }
1889 
1890  auto const hash = sha512Half(manifest, blobs, version);
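    // This hash of (manifest, blobs, version) identifies the list bundle for
    // the duplicate suppression performed via the HashRouter below.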
1891 
1892  JLOG(p_journal_.debug())
1893  << "Received " << messageType << " from " << remote_address_.to_string()
1894  << " (" << id_ << ")";
1895 
1896  if (!app_.getHashRouter().addSuppressionPeer(hash, id_))
1897  {
1898  JLOG(p_journal_.debug())
1899  << messageType << ": received duplicate " << messageType;
1900  // Charging this fee here won't hurt the peer in the normal
1901  // course of operation (ie. refresh every 5 minutes), but
1902  // will add up if the peer is misbehaving.
1903  fee_ = Resource::feeUnwantedData;
1904  return;
1905  }
1906 
1907  auto const applyResult = app_.validators().applyListsAndBroadcast(
1908  manifest,
1909  version,
1910  blobs,
1911  remote_address_.to_string(),
1912  hash,
1913  app_.overlay(),
1914  app_.getHashRouter(),
1915  app_.getOPs());
1916 
1917  JLOG(p_journal_.debug())
1918  << "Processed " << messageType << " version " << version << " from "
1919  << (applyResult.publisherKey ? strHex(*applyResult.publisherKey)
1920  : "unknown or invalid publisher")
1921  << " from " << remote_address_.to_string() << " (" << id_
1922  << ") with best result " << to_string(applyResult.bestDisposition());
1923 
1924  // Act based on the best result
1925  switch (applyResult.bestDisposition())
1926  {
1927  // New list
1928  case ListDisposition::accepted:
1929  // Newest list is expired, and that needs to be broadcast, too
1930  case ListDisposition::expired:
1931  // Future list
1932  case ListDisposition::pending: {
1933  std::lock_guard<std::mutex> sl(recentLock_);
1934 
1935  assert(applyResult.publisherKey);
1936  auto const& pubKey = *applyResult.publisherKey;
1937 #ifndef NDEBUG
1938  if (auto const iter = publisherListSequences_.find(pubKey);
1939  iter != publisherListSequences_.end())
1940  {
1941  assert(iter->second < applyResult.sequence);
1942  }
1943 #endif
1944  publisherListSequences_[pubKey] = applyResult.sequence;
1945  }
1946  break;
1947  case ListDisposition::same_sequence:
1948  case ListDisposition::known_sequence:
1949 #ifndef NDEBUG
1950  {
1951  std::lock_guard<std::mutex> sl(recentLock_);
1952  assert(applyResult.sequence && applyResult.publisherKey);
1953  assert(
1954  publisherListSequences_[*applyResult.publisherKey] <=
1955  applyResult.sequence);
1956  }
1957 #endif // !NDEBUG
1958 
1959  break;
1960  case ListDisposition::stale:
1961  case ListDisposition::untrusted:
1962  case ListDisposition::invalid:
1963  case ListDisposition::unsupported_version:
1964  break;
1965  default:
1966  assert(false);
1967  }
1968 
1969  // Charge based on the worst result
1970  switch (applyResult.worstDisposition())
1971  {
1972  case ListDisposition::accepted:
1973  case ListDisposition::expired:
1974  case ListDisposition::pending:
1975  // No charges for good data
1976  break;
1977  case ListDisposition::same_sequence:
1978  case ListDisposition::known_sequence:
1979  // Charging this fee here won't hurt the peer in the normal
1980  // course of operation (ie. refresh every 5 minutes), but
1981  // will add up if the peer is misbehaving.
1982  fee_ = Resource::feeUnwantedData;
1983  break;
1984  case ListDisposition::stale:
1985  // There are very few good reasons for a peer to send an
1986  // old list, particularly more than once.
1987  fee_ = Resource::feeBadData;
1988  break;
1989  case ListDisposition::untrusted:
1990  // Charging this fee here won't hurt the peer in the normal
1991  // course of operation (ie. refresh every 5 minutes), but
1992  // will add up if the peer is misbehaving.
1993  fee_ = Resource::feeUnwantedData;
1994  break;
1995  case ListDisposition::invalid:
1996  // This shouldn't ever happen with a well-behaved peer
1997  fee_ = Resource::feeInvalidSignature;
1998  break;
1999  case ListDisposition::unsupported_version:
2000  // During a version transition, this may be legitimate.
2001  // If it happens frequently, that's probably bad.
2002  fee_ = Resource::feeBadData;
2003  break;
2004  default:
2005  assert(false);
2006  }
2007 
2008  // Log based on all the results.
2009  for (auto const [disp, count] : applyResult.dispositions)
2010  {
2011  switch (disp)
2012  {
2013  // New list
2014  case ListDisposition::accepted:
2015  JLOG(p_journal_.debug())
2016  << "Applied " << count << " new " << messageType
2017  << "(s) from peer " << remote_address_;
2018  break;
2019  // Newest list is expired, and that needs to be broadcast, too
2020  case ListDisposition::expired:
2021  JLOG(p_journal_.debug())
2022  << "Applied " << count << " expired " << messageType
2023  << "(s) from peer " << remote_address_;
2024  break;
2025  // Future list
2026  case ListDisposition::pending:
2027  JLOG(p_journal_.debug())
2028  << "Processed " << count << " future " << messageType
2029  << "(s) from peer " << remote_address_;
2030  break;
2031  case ListDisposition::same_sequence:
2032  JLOG(p_journal_.warn())
2033  << "Ignored " << count << " " << messageType
2034  << "(s) with current sequence from peer "
2035  << remote_address_;
2036  break;
2037  case ListDisposition::known_sequence:
2038  JLOG(p_journal_.warn())
2039  << "Ignored " << count << " " << messageType
2040  << "(s) with future sequence from peer " << remote_address_;
2041  break;
2042  case ListDisposition::stale:
2043  JLOG(p_journal_.warn())
2044  << "Ignored " << count << "stale " << messageType
2045  << "(s) from peer " << remote_address_;
2046  break;
2047  case ListDisposition::untrusted:
2048  JLOG(p_journal_.warn())
2049  << "Ignored " << count << " untrusted " << messageType
2050  << "(s) from peer " << remote_address_;
2051  break;
2052  case ListDisposition::unsupported_version:
2053  JLOG(p_journal_.warn())
2054  << "Ignored " << count << "unsupported version "
2055  << messageType << "(s) from peer " << remote_address_;
2056  break;
2057  case ListDisposition::invalid:
2058  JLOG(p_journal_.warn())
2059  << "Ignored " << count << "invalid " << messageType
2060  << "(s) from peer " << remote_address_;
2061  break;
2062  default:
2063  assert(false);
2064  }
2065  }
2066 }
2067 
2068 void
2069 PeerImp::onMessage(std::shared_ptr<protocol::TMValidatorList> const& m)
2070 {
2071  try
2072  {
2073  if (!supportsFeature(ProtocolFeature::ValidatorListPropagation))
2074  {
2075  JLOG(p_journal_.debug())
2076  << "ValidatorList: received validator list from peer using "
2077  << "protocol version " << to_string(protocol_)
2078  << " which shouldn't support this feature.";
2079  fee_ = Resource::feeUnwantedData;
2080  return;
2081  }
2082  onValidatorListMessage(
2083  "ValidatorList",
2084  m->manifest(),
2085  m->version(),
2086  ValidatorList::parseBlobs(*m));
2087  }
2088  catch (std::exception const& e)
2089  {
2090  JLOG(p_journal_.warn()) << "ValidatorList: Exception, " << e.what()
2091  << " from peer " << remote_address_;
2092  fee_ = Resource::feeBadData;
2093  }
2094 }
2095 
2096 void
2097 PeerImp::onMessage(
2098  std::shared_ptr<protocol::TMValidatorListCollection> const& m)
2099 {
2100  try
2101  {
2102  if (!supportsFeature(ProtocolFeature::ValidatorList2Propagation))
2103  {
2104  JLOG(p_journal_.debug())
2105  << "ValidatorListCollection: received validator list from peer "
2106  << "using protocol version " << to_string(protocol_)
2107  << " which shouldn't support this feature.";
2108  fee_ = Resource::feeUnwantedData;
2109  return;
2110  }
2111  else if (m->version() < 2)
2112  {
2113  JLOG(p_journal_.debug())
2114  << "ValidatorListCollection: received invalid validator list "
2115  "version "
2116  << m->version() << " from peer using protocol version "
2117  << to_string(protocol_);
2118  fee_ = Resource::feeBadData;
2119  return;
2120  }
2121  onValidatorListMessage(
2122  "ValidatorListCollection",
2123  m->manifest(),
2124  m->version(),
2125  ValidatorList::parseBlobs(*m));
2126  }
2127  catch (std::exception const& e)
2128  {
2129  JLOG(p_journal_.warn()) << "ValidatorListCollection: Exception, "
2130  << e.what() << " from peer " << remote_address_;
2131  fee_ = Resource::feeBadData;
2132  }
2133 }
2134 
2135 void
2136 PeerImp::onMessage(std::shared_ptr<protocol::TMValidation> const& m)
2137 {
2138  auto const closeTime = app_.timeKeeper().closeTime();
2139 
2140  if (m->validation().size() < 50)
2141  {
2142  JLOG(p_journal_.warn()) << "Validation: Too small";
2143  fee_ = Resource::feeInvalidRequest;
2144  return;
2145  }
2146 
2147  try
2148  {
2149  std::shared_ptr<STValidation> val;
2150  {
2151  SerialIter sit(makeSlice(m->validation()));
2152  val = std::make_shared<STValidation>(
2153  std::ref(sit),
2154  [this](PublicKey const& pk) {
2155  return calcNodeID(
2156  app_.validatorManifests().getMasterKey(pk));
2157  },
2158  false);
2159  val->setSeen(closeTime);
2160  }
2161 
2162  if (!isCurrent(
2163  app_.getValidations().parms(),
2164  app_.timeKeeper().closeTime(),
2165  val->getSignTime(),
2166  val->getSeenTime()))
2167  {
2168  JLOG(p_journal_.trace()) << "Validation: Not current";
2169  fee_ = Resource::feeUnwantedData;
2170  return;
2171  }
2172 
2173  auto key = sha512Half(makeSlice(m->validation()));
2174  if (auto [added, relayed] =
2175  app_.getHashRouter().addSuppressionPeerWithStatus(key, id_);
2176  !added)
2177  {
2178  // Count unique messages (Slots has its own 'HashRouter'), which a
2179  // peer receives within IDLED seconds since the message has been
2180  // relayed. Wait WAIT_ON_BOOTUP time to let the server establish
2181  // connections to peers.
2182  if (reduceRelayReady() && relayed &&
2183  (stopwatch().now() - *relayed) < reduce_relay::IDLED)
2184  overlay_.updateSlotAndSquelch(
2185  key, val->getSignerPublic(), id_, protocol::mtVALIDATION);
2186  JLOG(p_journal_.trace()) << "Validation: duplicate";
2187  return;
2188  }
2189 
2190  auto const isTrusted =
2191  app_.validators().trusted(val->getSignerPublic());
2192 
2193  if (!isTrusted && (tracking_.load() == Tracking::diverged))
2194  {
2195  JLOG(p_journal_.debug())
2196  << "Validation: dropping untrusted from diverged peer";
2197  }
2198  if (isTrusted || cluster() || !app_.getFeeTrack().isLoadedLocal())
2199  {
2200  std::weak_ptr<PeerImp> weak = shared_from_this();
2201  app_.getJobQueue().addJob(
2202  isTrusted ? jtVALIDATION_t : jtVALIDATION_ut,
2203  "recvValidation->checkValidation",
2204  [weak, val, m](Job&) {
2205  if (auto peer = weak.lock())
2206  peer->checkValidation(val, m);
2207  });
2208  }
2209  else
2210  {
2211  JLOG(p_journal_.debug()) << "Validation: Dropping UNTRUSTED (load)";
2212  }
2213  }
2214  catch (std::exception const& e)
2215  {
2216  JLOG(p_journal_.warn())
2217  << "Exception processing validation: " << e.what();
2218  fee_ = Resource::feeInvalidRequest;
2219  }
2220 }
2221 
2222 void
2223 PeerImp::onMessage(std::shared_ptr<protocol::TMGetObjectByHash> const& m)
2224 {
2225  protocol::TMGetObjectByHash& packet = *m;
2226 
2227  if (packet.query())
2228  {
2229  // this is a query
2230  if (send_queue_.size() >= Tuning::dropSendQueue)
2231  {
2232  JLOG(p_journal_.debug()) << "GetObject: Large send queue";
2233  return;
2234  }
2235 
2236  if (packet.type() == protocol::TMGetObjectByHash::otFETCH_PACK)
2237  {
2238  doFetchPack(m);
2239  return;
2240  }
2241 
2242  fee_ = Resource::feeMediumBurdenPeer;
2243 
2244  protocol::TMGetObjectByHash reply;
2245 
2246  reply.set_query(false);
2247 
2248  if (packet.has_seq())
2249  reply.set_seq(packet.seq());
2250 
2251  reply.set_type(packet.type());
2252 
2253  if (packet.has_ledgerhash())
2254  {
2255  if (!stringIsUint256Sized(packet.ledgerhash()))
2256  {
2257  fee_ = Resource::feeInvalidRequest;
2258  return;
2259  }
2260 
2261  reply.set_ledgerhash(packet.ledgerhash());
2262  }
2263 
2264  // This is a very minimal implementation
2265  for (int i = 0; i < packet.objects_size(); ++i)
2266  {
2267  auto const& obj = packet.objects(i);
2268  if (obj.has_hash() && stringIsUint256Sized(obj.hash()))
2269  {
2270  uint256 const hash{obj.hash()};
2271  // VFALCO TODO Move this someplace more sensible so we dont
2272  // need to inject the NodeStore interfaces.
2273  std::uint32_t seq{obj.has_ledgerseq() ? obj.ledgerseq() : 0};
2274  auto nodeObject{app_.getNodeStore().fetchNodeObject(hash, seq)};
2275  if (!nodeObject)
2276  {
2277  if (auto shardStore = app_.getShardStore())
2278  {
2279  if (seq >= shardStore->earliestLedgerSeq())
2280  nodeObject = shardStore->fetchNodeObject(hash, seq);
2281  }
2282  }
2283  if (nodeObject)
2284  {
2285  protocol::TMIndexedObject& newObj = *reply.add_objects();
2286  newObj.set_hash(hash.begin(), hash.size());
2287  newObj.set_data(
2288  &nodeObject->getData().front(),
2289  nodeObject->getData().size());
2290 
2291  if (obj.has_nodeid())
2292  newObj.set_index(obj.nodeid());
2293  if (obj.has_ledgerseq())
2294  newObj.set_ledgerseq(obj.ledgerseq());
2295 
2296  // VFALCO NOTE "seq" in the message is obsolete
2297  }
2298  }
2299  }
2300 
2301  JLOG(p_journal_.trace()) << "GetObj: " << reply.objects_size() << " of "
2302  << packet.objects_size();
2303  send(std::make_shared<Message>(reply, protocol::mtGET_OBJECTS));
2304  }
2305  else
2306  {
2307  // this is a reply
2308  std::uint32_t pLSeq = 0;
2309  bool pLDo = true;
2310  bool progress = false;
2311 
2312  for (int i = 0; i < packet.objects_size(); ++i)
2313  {
2314  const protocol::TMIndexedObject& obj = packet.objects(i);
2315 
2316  if (obj.has_hash() && stringIsUint256Sized(obj.hash()))
2317  {
2318  if (obj.has_ledgerseq())
2319  {
2320  if (obj.ledgerseq() != pLSeq)
2321  {
2322  if (pLDo && (pLSeq != 0))
2323  {
2324  JLOG(p_journal_.debug())
2325  << "GetObj: Full fetch pack for " << pLSeq;
2326  }
2327  pLSeq = obj.ledgerseq();
2328  pLDo = !app_.getLedgerMaster().haveLedger(pLSeq);
2329 
2330  if (!pLDo)
2331  {
2332  JLOG(p_journal_.debug())
2333  << "GetObj: Late fetch pack for " << pLSeq;
2334  }
2335  else
2336  progress = true;
2337  }
2338  }
2339 
2340  if (pLDo)
2341  {
2342  uint256 const hash{obj.hash()};
2343 
2344  app_.getLedgerMaster().addFetchPack(
2345  hash,
2346  std::make_shared<Blob>(
2347  obj.data().begin(), obj.data().end()));
2348  }
2349  }
2350  }
2351 
2352  if (pLDo && (pLSeq != 0))
2353  {
2354  JLOG(p_journal_.debug())
2355  << "GetObj: Partial fetch pack for " << pLSeq;
2356  }
2357  if (packet.type() == protocol::TMGetObjectByHash::otFETCH_PACK)
2358  app_.getLedgerMaster().gotFetchPack(progress, pLSeq);
2359  }
2360 }
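// The TMGetObjectByHash message doubles as a query and a reply: when query()
// is set the peer wants objects (fetch-pack queries are diverted to
// doFetchPack(), everything else is answered from the node store and, if
// configured, the shard store); otherwise the message is a reply whose
// objects are fed to LedgerMaster as fetch-pack data. A minimal fetch-pack
// query might be built roughly like this (illustrative sketch only; 'hash'
// is assumed to be the uint256 of the desired ledger):
//
//   protocol::TMGetObjectByHash query;
//   query.set_query(true);
//   query.set_type(protocol::TMGetObjectByHash::otFETCH_PACK);
//   query.set_ledgerhash(hash.begin(), hash.size());
//   send(std::make_shared<Message>(query, protocol::mtGET_OBJECTS));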
2361 
2362 void
2363 PeerImp::onMessage(std::shared_ptr<protocol::TMSquelch> const& m)
2364 {
 2365  using on_message_fn =
 2366  void (PeerImp::*)(std::shared_ptr<protocol::TMSquelch> const&);
2367  if (!strand_.running_in_this_thread())
2368  return post(
2369  strand_,
2370  std::bind(
2371  (on_message_fn)&PeerImp::onMessage, shared_from_this(), m));
2372 
2373  if (!m->has_validatorpubkey())
2374  {
2375  charge(Resource::feeBadData);
2376  return;
2377  }
2378  auto validator = m->validatorpubkey();
2379  auto const slice{makeSlice(validator)};
2380  if (!publicKeyType(slice))
2381  {
2382  charge(Resource::feeBadData);
2383  return;
2384  }
2385  PublicKey key(slice);
2386 
2387  // Ignore the squelch for validator's own messages.
2388  if (key == app_.getValidationPublicKey())
2389  {
2390  JLOG(p_journal_.debug())
2391  << "onMessage: TMSquelch discarding validator's squelch " << slice;
2392  return;
2393  }
2394 
2395  std::uint32_t duration =
2396  m->has_squelchduration() ? m->squelchduration() : 0;
2397  if (!m->squelch())
2398  squelch_.removeSquelch(key);
2399  else if (!squelch_.addSquelch(key, std::chrono::seconds{duration}))
2400  charge(Resource::feeBadData);
2401 
2402  JLOG(p_journal_.debug())
2403  << "onMessage: TMSquelch " << slice << " " << id() << " " << duration;
2404 }
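// A TMSquelch message either clears a squelch for the given validator key
// (squelch() == false) or installs one for squelchduration() seconds;
// squelches naming our own validator key are ignored. A relaying peer could
// build such a message roughly as follows (illustrative sketch only; 'key'
// is an assumed PublicKey and 'duration' an assumed std::chrono::seconds):
//
//   protocol::TMSquelch squelch;
//   squelch.set_squelch(true);
//   squelch.set_validatorpubkey(key.data(), key.size());
//   squelch.set_squelchduration(duration.count());
//   send(std::make_shared<Message>(squelch, protocol::mtSQUELCH));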
2405 
2406 //--------------------------------------------------------------------------
2407 
2408 void
2409 PeerImp::addLedger(
2410  uint256 const& hash,
2411  std::lock_guard<std::mutex> const& lockedRecentLock)
2412 {
2413  // lockedRecentLock is passed as a reminder that recentLock_ must be
2414  // locked by the caller.
2415  (void)lockedRecentLock;
2416 
2417  if (std::find(recentLedgers_.begin(), recentLedgers_.end(), hash) !=
2418  recentLedgers_.end())
2419  return;
2420 
2421  recentLedgers_.push_back(hash);
2422 }
2423 
2424 void
2425 PeerImp::doFetchPack(const std::shared_ptr<protocol::TMGetObjectByHash>& packet)
2426 {
2427  // VFALCO TODO Invert this dependency using an observer and shared state
2428  // object. Don't queue fetch pack jobs if we're under load or we already
2429  // have some queued.
2430  if (app_.getFeeTrack().isLoadedLocal() ||
2431  (app_.getLedgerMaster().getValidatedLedgerAge() > 40s) ||
2432  (app_.getJobQueue().getJobCount(jtPACK) > 10))
2433  {
2434  JLOG(p_journal_.info()) << "Too busy to make fetch pack";
2435  return;
2436  }
2437 
2438  if (!stringIsUint256Sized(packet->ledgerhash()))
2439  {
2440  JLOG(p_journal_.warn()) << "FetchPack hash size malformed";
2441  fee_ = Resource::feeInvalidRequest;
2442  return;
2443  }
2444 
2445  fee_ = Resource::feeHighBurdenPeer;
2446 
2447  uint256 const hash{packet->ledgerhash()};
2448 
2449  std::weak_ptr<PeerImp> weak = shared_from_this();
2450  auto elapsed = UptimeClock::now();
2451  auto const pap = &app_;
2452  app_.getJobQueue().addJob(
2453  jtPACK, "MakeFetchPack", [pap, weak, packet, hash, elapsed](Job&) {
2454  pap->getLedgerMaster().makeFetchPack(weak, packet, hash, elapsed);
2455  });
2456 }
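// Fetch-pack construction is deliberately throttled: the request is refused
// when the local fee track reports load, when the validated ledger is more
// than 40 seconds old, or when more than ten jtPACK jobs are already queued.
// Otherwise the expensive work of building the pack is pushed onto the job
// queue and billed to the requester as feeHighBurdenPeer.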
2457 
2458 void
2459 PeerImp::checkTransaction(
2460  int flags,
2461  bool checkSignature,
2462  std::shared_ptr<STTx const> const& stx)
2463 {
2464  // VFALCO TODO Rewrite to not use exceptions
2465  try
2466  {
2467  // Expired?
2468  if (stx->isFieldPresent(sfLastLedgerSequence) &&
2469  (stx->getFieldU32(sfLastLedgerSequence) <
2470  app_.getLedgerMaster().getValidLedgerIndex()))
2471  {
2472  app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2473  charge(Resource::feeUnwantedData);
2474  return;
2475  }
2476 
2477  if (checkSignature)
2478  {
2479  // Check the signature before handing off to the job queue.
2480  if (auto [valid, validReason] = checkValidity(
2481  app_.getHashRouter(),
2482  *stx,
2483  app_.getLedgerMaster().getValidatedRules(),
2484  app_.config());
2485  valid != Validity::Valid)
2486  {
2487  if (!validReason.empty())
2488  {
2489  JLOG(p_journal_.trace())
2490  << "Exception checking transaction: " << validReason;
2491  }
2492 
2493  // Probably not necessary to set SF_BAD, but doesn't hurt.
2494  app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2495  charge(Resource::feeInvalidSignature);
2496  return;
2497  }
2498  }
2499  else
2500  {
2501  forceValidity(
2502  app_.getHashRouter(), stx->getTransactionID(), Validity::Valid);
2503  }
2504 
2505  std::string reason;
2506  auto tx = std::make_shared<Transaction>(stx, reason, app_);
2507 
2508  if (tx->getStatus() == INVALID)
2509  {
2510  if (!reason.empty())
2511  {
2512  JLOG(p_journal_.trace())
2513  << "Exception checking transaction: " << reason;
2514  }
2515  app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2516  charge(Resource::feeInvalidSignature);
2517  return;
2518  }
2519 
2520  bool const trusted(flags & SF_TRUSTED);
2521  app_.getOPs().processTransaction(
2522  tx, trusted, false, NetworkOPs::FailHard::no);
2523  }
2524  catch (std::exception const&)
2525  {
2526  app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2527  charge(Resource::feeBadData);
2528  }
2529 }
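// checkTransaction() runs on the job queue. An expired transaction (its
// LastLedgerSequence is behind the validated ledger) is marked SF_BAD and
// charged as unwanted data. Otherwise the signature and local checks run
// through checkValidity() (or are skipped via forceValidity() when the
// caller already vouched for the signature), and only a transaction that
// constructs cleanly is forwarded to NetworkOPs::processTransaction().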
2530 
2531 // Called from our JobQueue
2532 void
2533 PeerImp::checkPropose(
 2534  Job& job,
 2535  std::shared_ptr<protocol::TMProposeSet> const& packet,
2536  RCLCxPeerPos peerPos)
2537 {
2538  bool isTrusted = (job.getType() == jtPROPOSAL_t);
2539 
2540  JLOG(p_journal_.trace())
2541  << "Checking " << (isTrusted ? "trusted" : "UNTRUSTED") << " proposal";
2542 
2543  assert(packet);
2544 
2545  if (!cluster() && !peerPos.checkSign())
2546  {
2547  JLOG(p_journal_.warn()) << "Proposal fails sig check";
2548  charge(Resource::feeInvalidSignature);
2549  return;
2550  }
2551 
2552  bool relay;
2553 
2554  if (isTrusted)
2555  relay = app_.getOPs().processTrustedProposal(peerPos);
2556  else
2557  relay = app_.config().RELAY_UNTRUSTED_PROPOSALS || cluster();
2558 
2559  if (relay)
2560  {
 2561  // haveMessage contains the peers that are suppressed, i.e. the peers
 2562  // that are the source of the message, so the message should
 2563  // not be relayed to them. But the message must still be counted
 2564  // as part of the squelch logic.
2565  auto haveMessage = app_.overlay().relay(
2566  *packet, peerPos.suppressionID(), peerPos.publicKey());
2567  if (reduceRelayReady() && !haveMessage.empty())
2568  overlay_.updateSlotAndSquelch(
2569  peerPos.suppressionID(),
2570  peerPos.publicKey(),
2571  std::move(haveMessage),
2572  protocol::mtPROPOSE_LEDGER);
2573  }
2574 }
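// checkPropose() relays a proposal only after its signature verifies (cluster
// peers are exempt). Trusted proposals are relayed only when
// processTrustedProposal() accepts them; untrusted ones only when
// RELAY_UNTRUSTED_PROPOSALS is configured or the peer is in our cluster.
// overlay().relay() returns the peers that already had the message, which
// feeds the squelch accounting above.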
2575 
2576 void
2577 PeerImp::checkValidation(
 2578  std::shared_ptr<STValidation> const& val,
 2579  std::shared_ptr<protocol::TMValidation> const& packet)
2580 {
2581  try
2582  {
2583  // VFALCO Which functions throw?
2584  if (!cluster() && !val->isValid())
2585  {
2586  JLOG(p_journal_.warn()) << "Validation is invalid";
2587  charge(Resource::feeInvalidRequest);
2588  return;
2589  }
2590 
2591  if (app_.getOPs().recvValidation(val, std::to_string(id())) ||
2592  cluster())
2593  {
2594  auto const suppression =
2595  sha512Half(makeSlice(val->getSerialized()));
 2596  // haveMessage contains the peers that are suppressed, i.e. the peers
 2597  // that are the source of the message, so the message should
 2598  // not be relayed to them. But the message must still be counted
 2599  // as part of the squelch logic.
2600  auto haveMessage =
2601  overlay_.relay(*packet, suppression, val->getSignerPublic());
2602  if (reduceRelayReady() && !haveMessage.empty())
2603  {
2604  overlay_.updateSlotAndSquelch(
2605  suppression,
2606  val->getSignerPublic(),
2607  std::move(haveMessage),
2608  protocol::mtVALIDATION);
2609  }
2610  }
2611  }
2612  catch (std::exception const&)
2613  {
2614  JLOG(p_journal_.trace()) << "Exception processing validation";
2615  charge(Resource::feeInvalidRequest);
2616  }
2617 }
2618 
2619 // Returns the set of peers that can help us get
2620 // the TX tree with the specified root hash.
2621 //
2622 static std::shared_ptr<PeerImp>
2623 getPeerWithTree(OverlayImpl& ov, uint256 const& rootHash, PeerImp const* skip)
2624 {
 2625  std::shared_ptr<PeerImp> ret;
 2626  int retScore = 0;
2627 
2628  ov.for_each([&](std::shared_ptr<PeerImp>&& p) {
2629  if (p->hasTxSet(rootHash) && p.get() != skip)
2630  {
2631  auto score = p->getScore(true);
2632  if (!ret || (score > retScore))
2633  {
2634  ret = std::move(p);
2635  retScore = score;
2636  }
2637  }
2638  });
2639 
2640  return ret;
2641 }
2642 
2643 // Returns a random peer weighted by how likely to
2644 // have the ledger and how responsive it is.
2645 //
2646 static std::shared_ptr<PeerImp>
2647 getPeerWithLedger(
2648  OverlayImpl& ov,
2649  uint256 const& ledgerHash,
2650  LedgerIndex ledger,
2651  PeerImp const* skip)
2652 {
 2653  std::shared_ptr<PeerImp> ret;
 2654  int retScore = 0;
2655 
2656  ov.for_each([&](std::shared_ptr<PeerImp>&& p) {
2657  if (p->hasLedger(ledgerHash, ledger) && p.get() != skip)
2658  {
2659  auto score = p->getScore(true);
2660  if (!ret || (score > retScore))
2661  {
2662  ret = std::move(p);
2663  retScore = score;
2664  }
2665  }
2666  });
2667 
2668  return ret;
2669 }
2670 
2671 // VFALCO NOTE This function is way too big and cumbersome.
2672 void
2673 PeerImp::getLedger(std::shared_ptr<protocol::TMGetLedger> const& m)
2674 {
2675  protocol::TMGetLedger& packet = *m;
2676  std::shared_ptr<SHAMap> shared;
2677  SHAMap const* map = nullptr;
2678  protocol::TMLedgerData reply;
 2679  bool fatLeaves = true;
 2680  std::shared_ptr<Ledger const> ledger;
2681 
2682  if (packet.has_requestcookie())
2683  reply.set_requestcookie(packet.requestcookie());
2684 
2685  std::string logMe;
2686 
2687  if (packet.itype() == protocol::liTS_CANDIDATE)
2688  {
2689  // Request is for a transaction candidate set
2690  JLOG(p_journal_.trace()) << "GetLedger: Tx candidate set";
2691 
2692  if (!packet.has_ledgerhash() ||
2693  !stringIsUint256Sized(packet.ledgerhash()))
2694  {
2695  charge(Resource::feeInvalidRequest);
2696  JLOG(p_journal_.warn()) << "GetLedger: Tx candidate set invalid";
2697  return;
2698  }
2699 
2700  uint256 const txHash{packet.ledgerhash()};
2701 
2702  shared = app_.getInboundTransactions().getSet(txHash, false);
2703  map = shared.get();
2704 
2705  if (!map)
2706  {
2707  if (packet.has_querytype() && !packet.has_requestcookie())
2708  {
2709  JLOG(p_journal_.debug()) << "GetLedger: Routing Tx set request";
2710 
2711  if (auto const v = getPeerWithTree(overlay_, txHash, this))
2712  {
2713  packet.set_requestcookie(id());
2714  v->send(std::make_shared<Message>(
2715  packet, protocol::mtGET_LEDGER));
2716  return;
2717  }
2718 
2719  JLOG(p_journal_.info()) << "GetLedger: Route TX set failed";
2720  return;
2721  }
2722 
2723  JLOG(p_journal_.debug()) << "GetLedger: Can't provide map ";
2724  charge(Resource::feeInvalidRequest);
2725  return;
2726  }
2727 
2728  reply.set_ledgerseq(0);
2729  reply.set_ledgerhash(txHash.begin(), txHash.size());
2730  reply.set_type(protocol::liTS_CANDIDATE);
2731  fatLeaves = false; // We'll already have most transactions
2732  }
2733  else
2734  {
2735  if (send_queue_.size() >= Tuning::dropSendQueue)
2736  {
2737  JLOG(p_journal_.debug()) << "GetLedger: Large send queue";
2738  return;
2739  }
2740 
2741  if (app_.getFeeTrack().isLoadedLocal() && !cluster())
2742  {
2743  JLOG(p_journal_.debug()) << "GetLedger: Too busy";
2744  return;
2745  }
2746 
2747  // Figure out what ledger they want
2748  JLOG(p_journal_.trace()) << "GetLedger: Received";
2749 
2750  if (packet.has_ledgerhash())
2751  {
2752  if (!stringIsUint256Sized(packet.ledgerhash()))
2753  {
2754  charge(Resource::feeInvalidRequest);
2755  JLOG(p_journal_.warn()) << "GetLedger: Invalid request";
2756  return;
2757  }
2758 
2759  uint256 const ledgerhash{packet.ledgerhash()};
2760  logMe += "LedgerHash:";
2761  logMe += to_string(ledgerhash);
2762  ledger = app_.getLedgerMaster().getLedgerByHash(ledgerhash);
2763 
2764  if (!ledger && packet.has_ledgerseq())
2765  {
2766  if (auto shardStore = app_.getShardStore())
2767  {
2768  auto seq = packet.ledgerseq();
2769  if (seq >= shardStore->earliestLedgerSeq())
2770  ledger = shardStore->fetchLedger(ledgerhash, seq);
2771  }
2772  }
2773 
2774  if (!ledger)
2775  {
2776  JLOG(p_journal_.trace())
2777  << "GetLedger: Don't have " << ledgerhash;
2778  }
2779 
2780  if (!ledger &&
2781  (packet.has_querytype() && !packet.has_requestcookie()))
2782  {
2783  // We don't have the requested ledger
2784  // Search for a peer who might
2785  auto const v = getPeerWithLedger(
2786  overlay_,
2787  ledgerhash,
2788  packet.has_ledgerseq() ? packet.ledgerseq() : 0,
2789  this);
2790  if (!v)
2791  {
2792  JLOG(p_journal_.trace()) << "GetLedger: Cannot route";
2793  return;
2794  }
2795 
2796  packet.set_requestcookie(id());
2797  v->send(
2798  std::make_shared<Message>(packet, protocol::mtGET_LEDGER));
2799  JLOG(p_journal_.debug()) << "GetLedger: Request routed";
2800  return;
2801  }
2802  }
2803  else if (packet.has_ledgerseq())
2804  {
2805  if (packet.ledgerseq() < app_.getLedgerMaster().getEarliestFetch())
2806  {
2807  JLOG(p_journal_.debug()) << "GetLedger: Early ledger request";
2808  return;
2809  }
2810  ledger = app_.getLedgerMaster().getLedgerBySeq(packet.ledgerseq());
2811  if (!ledger)
2812  {
2813  JLOG(p_journal_.debug())
2814  << "GetLedger: Don't have " << packet.ledgerseq();
2815  }
2816  }
2817  else if (packet.has_ltype() && (packet.ltype() == protocol::ltCLOSED))
2818  {
2819  ledger = app_.getLedgerMaster().getClosedLedger();
2820  assert(!ledger->open());
2821  // VFALCO ledger should never be null!
2822  // VFALCO How can the closed ledger be open?
2823 #if 0
2824  if (ledger && ledger->info().open)
2825  ledger = app_.getLedgerMaster ().getLedgerBySeq (
2826  ledger->info().seq - 1);
2827 #endif
2828  }
2829  else
2830  {
2831  charge(Resource::feeInvalidRequest);
2832  JLOG(p_journal_.warn()) << "GetLedger: Unknown request";
2833  return;
2834  }
2835 
2836  if ((!ledger) ||
2837  (packet.has_ledgerseq() &&
2838  (packet.ledgerseq() != ledger->info().seq)))
2839  {
2840  charge(Resource::feeInvalidRequest);
2841 
2842  if (ledger)
2843  {
2844  JLOG(p_journal_.warn()) << "GetLedger: Invalid sequence";
2845  }
2846  return;
2847  }
2848 
2849  if (!packet.has_ledgerseq() &&
2850  (ledger->info().seq < app_.getLedgerMaster().getEarliestFetch()))
2851  {
2852  JLOG(p_journal_.debug()) << "GetLedger: Early ledger request";
2853  return;
2854  }
2855 
2856  // Fill out the reply
2857  auto const lHash = ledger->info().hash;
2858  reply.set_ledgerhash(lHash.begin(), lHash.size());
2859  reply.set_ledgerseq(ledger->info().seq);
2860  reply.set_type(packet.itype());
2861 
2862  if (packet.itype() == protocol::liBASE)
2863  {
2864  // they want the ledger base data
2865  JLOG(p_journal_.trace()) << "GetLedger: Base data";
2866  Serializer nData(128);
2867  addRaw(ledger->info(), nData);
2868  reply.add_nodes()->set_nodedata(
2869  nData.getDataPtr(), nData.getLength());
2870 
2871  auto const& stateMap = ledger->stateMap();
2872  if (stateMap.getHash() != beast::zero)
2873  {
2874  // return account state root node if possible
2875  Serializer rootNode(768);
2876 
2877  stateMap.serializeRoot(rootNode);
2878  reply.add_nodes()->set_nodedata(
2879  rootNode.getDataPtr(), rootNode.getLength());
2880 
2881  if (ledger->info().txHash != beast::zero)
2882  {
2883  auto const& txMap = ledger->txMap();
2884  if (txMap.getHash() != beast::zero)
2885  {
2886  rootNode.erase();
2887 
2888  txMap.serializeRoot(rootNode);
2889  reply.add_nodes()->set_nodedata(
2890  rootNode.getDataPtr(), rootNode.getLength());
2891  }
2892  }
2893  }
2894 
2895  auto oPacket =
2896  std::make_shared<Message>(reply, protocol::mtLEDGER_DATA);
2897  send(oPacket);
2898  return;
2899  }
2900 
2901  if (packet.itype() == protocol::liTX_NODE)
2902  {
2903  map = &ledger->txMap();
2904  logMe += " TX:";
2905  logMe += to_string(map->getHash());
2906  }
2907  else if (packet.itype() == protocol::liAS_NODE)
2908  {
2909  map = &ledger->stateMap();
2910  logMe += " AS:";
2911  logMe += to_string(map->getHash());
2912  }
2913  }
2914 
2915  if (!map || (packet.nodeids_size() == 0))
2916  {
2917  JLOG(p_journal_.warn()) << "GetLedger: Can't find map or empty request";
2918  charge(Resource::feeInvalidRequest);
2919  return;
2920  }
2921 
2922  JLOG(p_journal_.trace()) << "GetLedger: " << logMe;
2923 
2924  auto const depth = packet.has_querydepth()
2925  ? (std::min(packet.querydepth(), 3u))
2926  : (isHighLatency() ? 2 : 1);
2927 
2928  for (int i = 0;
2929  (i < packet.nodeids().size() &&
2930  (reply.nodes().size() < Tuning::maxReplyNodes));
2931  ++i)
2932  {
2933  auto const mn = deserializeSHAMapNodeID(packet.nodeids(i));
2934 
2935  if (!mn)
2936  {
2937  JLOG(p_journal_.warn()) << "GetLedger: Invalid node " << logMe;
2938  charge(Resource::feeBadData);
2939  return;
2940  }
2941 
2942  std::vector<SHAMapNodeID> nodeIDs;
2943  std::vector<Blob> rawNodes;
2944 
2945  try
2946  {
2947  if (map->getNodeFat(*mn, nodeIDs, rawNodes, fatLeaves, depth))
2948  {
2949  assert(nodeIDs.size() == rawNodes.size());
2950  JLOG(p_journal_.trace()) << "GetLedger: getNodeFat got "
2951  << rawNodes.size() << " nodes";
2952  std::vector<SHAMapNodeID>::iterator nodeIDIterator;
2953  std::vector<Blob>::iterator rawNodeIterator;
2954 
2955  for (nodeIDIterator = nodeIDs.begin(),
2956  rawNodeIterator = rawNodes.begin();
2957  nodeIDIterator != nodeIDs.end();
2958  ++nodeIDIterator, ++rawNodeIterator)
2959  {
2960  protocol::TMLedgerNode* node = reply.add_nodes();
2961  node->set_nodeid(nodeIDIterator->getRawString());
2962  node->set_nodedata(
2963  &rawNodeIterator->front(), rawNodeIterator->size());
2964  }
2965  }
2966  else
2967  {
2968  JLOG(p_journal_.warn())
2969  << "GetLedger: getNodeFat returns false";
2970  }
2971  }
2972  catch (std::exception&)
2973  {
2974  std::string info;
2975 
2976  if (packet.itype() == protocol::liTS_CANDIDATE)
2977  info = "TS candidate";
2978  else if (packet.itype() == protocol::liBASE)
2979  info = "Ledger base";
2980  else if (packet.itype() == protocol::liTX_NODE)
2981  info = "TX node";
2982  else if (packet.itype() == protocol::liAS_NODE)
2983  info = "AS node";
2984 
2985  if (!packet.has_ledgerhash())
2986  info += ", no hash specified";
2987 
2988  JLOG(p_journal_.warn())
2989  << "getNodeFat( " << *mn << ") throws exception: " << info;
2990  }
2991  }
2992 
2993  JLOG(p_journal_.info())
2994  << "Got request for " << packet.nodeids().size() << " nodes at depth "
2995  << depth << ", return " << reply.nodes().size() << " nodes";
2996 
2997  auto oPacket = std::make_shared<Message>(reply, protocol::mtLEDGER_DATA);
2998  send(oPacket);
2999 }
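// Reply size is bounded in two ways: the node count is capped at
// Tuning::maxReplyNodes, and the traversal depth passed to getNodeFat() is
// the requested querydepth capped at 3 when the peer specifies one,
// otherwise 2 for high-latency peers and 1 for everyone else.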
3000 
3001 int
3002 PeerImp::getScore(bool haveItem) const
3003 {
3004  // Random component of score, used to break ties and avoid
3005  // overloading the "best" peer
3006  static const int spRandomMax = 9999;
3007 
3008  // Score for being very likely to have the thing we are
 3009  // looking for; should be roughly spRandomMax
3010  static const int spHaveItem = 10000;
3011 
3012  // Score reduction for each millisecond of latency; should
3013  // be roughly spRandomMax divided by the maximum reasonable
3014  // latency
3015  static const int spLatency = 30;
3016 
3017  // Penalty for unknown latency; should be roughly spRandomMax
3018  static const int spNoLatency = 8000;
3019 
3020  int score = rand_int(spRandomMax);
3021 
3022  if (haveItem)
3023  score += spHaveItem;
3024 
3025  boost::optional<std::chrono::milliseconds> latency;
3026  {
3027  std::lock_guard sl(recentLock_);
3028  latency = latency_;
3029  }
3030 
3031  if (latency)
3032  score -= latency->count() * spLatency;
3033  else
3034  score -= spNoLatency;
3035 
3036  return score;
3037 }
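// Worked example of the scoring above (numbers are illustrative): a peer
// believed to have the item, with a measured latency of 40ms, scores
// rand_int(9999) + 10000 - 40 * 30, i.e. somewhere in [8800, 18799], while a
// peer without the item and with unknown latency scores
// rand_int(9999) - 8000, i.e. somewhere in [-8000, 1999].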
3038 
3039 bool
3040 PeerImp::isHighLatency() const
3041 {
3042  std::lock_guard sl(recentLock_);
3043  return latency_ >= peerHighLatency;
3044 }
3045 
3046 bool
3047 PeerImp::reduceRelayReady()
3048 {
3049  if (!reduceRelayReady_)
3050  reduceRelayReady_ =
3051  reduce_relay::epoch<std::chrono::minutes>(UptimeClock::now()) >
3052  reduce_relay::WAIT_ON_BOOTUP;
3053  return vpReduceRelayEnabled_ && reduceRelayReady_;
3054 }
3055 
3056 void
3057 PeerImp::Metrics::add_message(std::uint64_t bytes)
3058 {
3059  using namespace std::chrono_literals;
3060  std::unique_lock lock{mutex_};
3061 
3062  totalBytes_ += bytes;
3063  accumBytes_ += bytes;
3064  auto const timeElapsed = clock_type::now() - intervalStart_;
3065  auto const timeElapsedInSecs =
3066  std::chrono::duration_cast<std::chrono::seconds>(timeElapsed);
3067 
3068  if (timeElapsedInSecs >= 1s)
3069  {
3070  auto const avgBytes = accumBytes_ / timeElapsedInSecs.count();
3071  rollingAvg_.push_back(avgBytes);
3072 
3073  auto const totalBytes =
3074  std::accumulate(rollingAvg_.begin(), rollingAvg_.end(), 0ull);
3075  rollingAvgBytes_ = totalBytes / rollingAvg_.size();
3076 
3077  intervalStart_ = clock_type::now();
3078  accumBytes_ = 0;
3079  }
3080 }
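// add_message() keeps a rolling throughput estimate: bytes accumulate until
// at least one second has elapsed, the per-second average for that interval
// is pushed into rollingAvg_ (presumably a bounded buffer declared in
// PeerImp.h), and rollingAvgBytes_ becomes the mean of the stored intervals.
// For example, stored intervals of 1000, 2000 and 3000 bytes/s make
// average_bytes() report 2000.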
3081 
3082 std::uint64_t
3083 PeerImp::Metrics::average_bytes() const
3084 {
3085  std::shared_lock lock{mutex_};
3086  return rollingAvgBytes_;
3087 }
3088 
3089 std::uint64_t
3090 PeerImp::Metrics::total_bytes() const
3091 {
3092  std::shared_lock lock{mutex_};
3093  return totalBytes_;
3094 }
3095 
3096 } // namespace ripple