rippled
PeerImp.cpp
1 //------------------------------------------------------------------------------
2 /*
3  This file is part of rippled: https://github.com/ripple/rippled
4  Copyright (c) 2012, 2013 Ripple Labs Inc.
5 
6  Permission to use, copy, modify, and/or distribute this software for any
7  purpose with or without fee is hereby granted, provided that the above
8  copyright notice and this permission notice appear in all copies.
9 
10  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 //==============================================================================
19 
20 #include <ripple/app/consensus/RCLValidations.h>
21 #include <ripple/app/ledger/InboundLedgers.h>
22 #include <ripple/app/ledger/InboundTransactions.h>
23 #include <ripple/app/ledger/LedgerMaster.h>
24 #include <ripple/app/misc/HashRouter.h>
25 #include <ripple/app/misc/LoadFeeTrack.h>
26 #include <ripple/app/misc/NetworkOPs.h>
27 #include <ripple/app/misc/Transaction.h>
28 #include <ripple/app/misc/ValidatorList.h>
29 #include <ripple/app/tx/apply.h>
30 #include <ripple/basics/UptimeClock.h>
31 #include <ripple/basics/base64.h>
32 #include <ripple/basics/random.h>
33 #include <ripple/basics/safe_cast.h>
34 #include <ripple/beast/core/LexicalCast.h>
35 #include <ripple/beast/core/SemanticVersion.h>
36 #include <ripple/nodestore/DatabaseShard.h>
37 #include <ripple/overlay/Cluster.h>
38 #include <ripple/overlay/impl/PeerImp.h>
39 #include <ripple/overlay/impl/Tuning.h>
40 #include <ripple/overlay/predicates.h>
41 #include <ripple/protocol/digest.h>
42 
43 #include <boost/algorithm/clamp.hpp>
44 #include <boost/algorithm/string.hpp>
45 #include <boost/algorithm/string/predicate.hpp>
46 #include <boost/beast/core/ostream.hpp>
47 
48 #include <algorithm>
49 #include <memory>
50 #include <mutex>
51 #include <numeric>
52 #include <sstream>
53 
54 using namespace std::chrono_literals;
55 
56 namespace ripple {
57 
58 namespace {
60 std::chrono::milliseconds constexpr peerHighLatency{300};
61 
63 std::chrono::seconds constexpr peerTimerInterval{60};
64 } // namespace
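// The constants above: peerHighLatency is the round-trip time beyond which a
// peer is treated as high latency, and peerTimerInterval is how often
// onTimer() below runs to ping the peer and check its send queue and
// tracking state.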
65 
66 PeerImp::PeerImp(
67  Application& app,
68  id_t id,
69  std::shared_ptr<PeerFinder::Slot> const& slot,
70  http_request_type&& request,
71  PublicKey const& publicKey,
72  ProtocolVersion protocol,
73  Resource::Consumer consumer,
74  std::unique_ptr<stream_type>&& stream_ptr,
75  OverlayImpl& overlay)
76  : Child(overlay)
77  , app_(app)
78  , id_(id)
79  , sink_(app_.journal("Peer"), makePrefix(id))
80  , p_sink_(app_.journal("Protocol"), makePrefix(id))
81  , journal_(sink_)
82  , p_journal_(p_sink_)
83  , stream_ptr_(std::move(stream_ptr))
84  , socket_(stream_ptr_->next_layer().socket())
85  , stream_(*stream_ptr_)
86  , strand_(socket_.get_executor())
87  , timer_(waitable_timer{socket_.get_executor()})
88  , remote_address_(slot->remote_endpoint())
89  , overlay_(overlay)
90  , inbound_(true)
91  , protocol_(protocol)
92  , tracking_(Tracking::unknown)
93  , trackingTime_(clock_type::now())
94  , publicKey_(publicKey)
95  , lastPingTime_(clock_type::now())
96  , creationTime_(clock_type::now())
97  , usage_(consumer)
99  , slot_(slot)
100  , request_(std::move(request))
101  , headers_(request_)
102  , compressionEnabled_(
103  headers_["X-Offer-Compression"] == "lz4" && app_.config().COMPRESSION
104  ? Compressed::On
105  : Compressed::Off)
106 {
107 }
108 
109 PeerImp::~PeerImp()
110 {
111  const bool inCluster{cluster()};
112 
117 
118  if (inCluster)
119  {
120  JLOG(journal_.warn()) << name() << " left cluster";
121  }
122 }
123 
124 // Helper function to check for valid uint256 values in protobuf buffers
125 static bool
126 stringIsUint256Sized(std::string const& pBuffStr)
127 {
128  return pBuffStr.size() == uint256::size();
129 }
130 
131 void
132 PeerImp::run()
133 {
134  if (!strand_.running_in_this_thread())
135  return post(strand_, std::bind(&PeerImp::run, shared_from_this()));
136 
137  auto parseLedgerHash =
138  [](std::string const& value) -> boost::optional<uint256> {
139  if (uint256 ret; ret.parseHex(value))
140  return ret;
141 
142  if (auto const s = base64_decode(value); s.size() == uint256::size())
143  return uint256{s};
144 
145  return boost::none;
146  };
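 // The handshake may encode the Closed-Ledger and Previous-Ledger fields
 // either as hex or as base64, so parseLedgerHash above accepts both and
 // returns boost::none for anything that is not a 256-bit hash.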
147 
148  boost::optional<uint256> closed;
149  boost::optional<uint256> previous;
150 
151  if (auto const iter = headers_.find("Closed-Ledger");
152  iter != headers_.end())
153  {
154  closed = parseLedgerHash(iter->value().to_string());
155 
156  if (!closed)
157  fail("Malformed handshake data (1)");
158  }
159 
160  if (auto const iter = headers_.find("Previous-Ledger");
161  iter != headers_.end())
162  {
163  previous = parseLedgerHash(iter->value().to_string());
164 
165  if (!previous)
166  fail("Malformed handshake data (2)");
167  }
168 
169  if (previous && !closed)
170  fail("Malformed handshake data (3)");
171 
172  {
173  std::lock_guard<std::mutex> sl(recentLock_);
174  if (closed)
175  closedLedgerHash_ = *closed;
176  if (previous)
177  previousLedgerHash_ = *previous;
178  }
179 
180  if (inbound_)
181  doAccept();
182  else
183  doProtocolStart();
184 
185  // Request shard info from peer
186  protocol::TMGetPeerShardInfo tmGPS;
187  tmGPS.set_hops(0);
188  send(std::make_shared<Message>(tmGPS, protocol::mtGET_PEER_SHARD_INFO));
189 
190  setTimer();
191 }
192 
193 void
194 PeerImp::stop()
195 {
196  if (!strand_.running_in_this_thread())
197  return post(strand_, std::bind(&PeerImp::stop, shared_from_this()));
198  if (socket_.is_open())
199  {
200  // The rationale for using different severity levels is that
201  // outbound connections are under our control and may be logged
202  // at a higher level, but inbound connections are more numerous and
203  // uncontrolled so to prevent log flooding the severity is reduced.
204  //
205  if (inbound_)
206  {
207  JLOG(journal_.debug()) << "Stop";
208  }
209  else
210  {
211  JLOG(journal_.info()) << "Stop";
212  }
213  }
214  close();
215 }
216 
217 //------------------------------------------------------------------------------
218 
219 void
220 PeerImp::send(std::shared_ptr<Message> const& m)
221 {
222  if (!strand_.running_in_this_thread())
223  return post(strand_, std::bind(&PeerImp::send, shared_from_this(), m));
224  if (gracefulClose_)
225  return;
226  if (detaching_)
227  return;
228 
229  auto validator = m->getValidatorKey();
230  if (validator && squelch_.isSquelched(*validator))
231  return;
232 
233  overlay_.reportTraffic(
234  safe_cast<TrafficCount::category>(m->getCategory()),
235  false,
236  static_cast<int>(m->getBuffer(compressionEnabled_).size()));
237 
238  auto sendq_size = send_queue_.size();
239 
240  if (sendq_size < Tuning::targetSendQueue)
241  {
242  // To detect a peer that does not read from its
243  // side of the connection, we expect it to have
244  // a small sendq periodically
245  large_sendq_ = 0;
246  }
247  else if (
248  journal_.active(beast::severities::kDebug) &&
249  (sendq_size % Tuning::sendQueueLogFreq) == 0)
250  {
251  std::string const n = name();
252  JLOG(journal_.debug()) << (n.empty() ? remote_address_.to_string() : n)
253  << " sendq: " << sendq_size;
254  }
255 
256  send_queue_.push(m);
257 
258  if (sendq_size != 0)
259  return;
260 
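 // Only the front of send_queue_ ever has an async_write outstanding:
 // onWriteMessage() pops the entry it just wrote and chains the next one,
 // so a new write is started here only when the queue was empty above.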
261  boost::asio::async_write(
262  stream_,
263  boost::asio::buffer(
264  send_queue_.front()->getBuffer(compressionEnabled_)),
265  bind_executor(
266  strand_,
267  std::bind(
268  &PeerImp::onWriteMessage,
269  shared_from_this(),
270  std::placeholders::_1,
271  std::placeholders::_2)));
272 }
273 
274 void
275 PeerImp::charge(Resource::Charge const& fee)
276 {
277  if ((usage_.charge(fee) == Resource::drop) && usage_.disconnect() &&
278  strand_.running_in_this_thread())
279  {
280  // Sever the connection
282  fail("charge: Resources");
283  }
284 }
285 
286 //------------------------------------------------------------------------------
287 
288 bool
289 PeerImp::crawl() const
290 {
291  auto const iter = headers_.find("Crawl");
292  if (iter == headers_.end())
293  return false;
294  return boost::iequals(iter->value(), "public");
295 }
296 
297 bool
298 PeerImp::cluster() const
299 {
300  return static_cast<bool>(app_.cluster().member(publicKey_));
301 }
302 
303 std::string
304 PeerImp::getVersion() const
305 {
306  if (inbound_)
307  return headers_["User-Agent"].to_string();
308  return headers_["Server"].to_string();
309 }
310 
311 Json::Value
312 PeerImp::json()
313 {
314  Json::Value ret(Json::objectValue);
315 
316  ret[jss::public_key] = toBase58(TokenType::NodePublic, publicKey_);
317  ret[jss::address] = remote_address_.to_string();
318 
319  if (inbound_)
320  ret[jss::inbound] = true;
321 
322  if (cluster())
323  {
324  ret[jss::cluster] = true;
325 
326  if (auto const n = name(); !n.empty())
327  // Could move here if Json::Value supported moving from a string
328  ret[jss::name] = n;
329  }
330 
331  if (auto const d = domain(); !d.empty())
332  ret[jss::server_domain] = domain();
333 
334  if (auto const nid = headers_["Network-ID"].to_string(); !nid.empty())
335  ret[jss::network_id] = nid;
336 
337  ret[jss::load] = usage_.balance();
338 
339  if (auto const version = getVersion(); !version.empty())
340  ret[jss::version] = version;
341 
342  ret[jss::protocol] = to_string(protocol_);
343 
344  {
345  std::lock_guard sl(recentLock_);
346  if (latency_)
347  ret[jss::latency] = static_cast<Json::UInt>(latency_->count());
348  }
349 
350  ret[jss::uptime] = static_cast<Json::UInt>(
351  std::chrono::duration_cast<std::chrono::seconds>(uptime()).count());
352 
353  std::uint32_t minSeq, maxSeq;
354  ledgerRange(minSeq, maxSeq);
355 
356  if ((minSeq != 0) || (maxSeq != 0))
357  ret[jss::complete_ledgers] =
358  std::to_string(minSeq) + " - " + std::to_string(maxSeq);
359 
360  switch (tracking_.load())
361  {
362  case Tracking::diverged:
363  ret[jss::track] = "diverged";
364  break;
365 
366  case Tracking::unknown:
367  ret[jss::track] = "unknown";
368  break;
369 
370  case Tracking::converged:
371  // Nothing to do here
372  break;
373  }
374 
375  uint256 closedLedgerHash;
376  protocol::TMStatusChange last_status;
377  {
378  std::lock_guard sl(recentLock_);
379  closedLedgerHash = closedLedgerHash_;
380  last_status = last_status_;
381  }
382 
383  if (closedLedgerHash != beast::zero)
384  ret[jss::ledger] = to_string(closedLedgerHash);
385 
386  if (last_status.has_newstatus())
387  {
388  switch (last_status.newstatus())
389  {
390  case protocol::nsCONNECTING:
391  ret[jss::status] = "connecting";
392  break;
393 
394  case protocol::nsCONNECTED:
395  ret[jss::status] = "connected";
396  break;
397 
398  case protocol::nsMONITORING:
399  ret[jss::status] = "monitoring";
400  break;
401 
402  case protocol::nsVALIDATING:
403  ret[jss::status] = "validating";
404  break;
405 
406  case protocol::nsSHUTTING:
407  ret[jss::status] = "shutting";
408  break;
409 
410  default:
411  JLOG(p_journal_.warn())
412  << "Unknown status: " << last_status.newstatus();
413  }
414  }
415 
416  ret[jss::metrics] = Json::Value(Json::objectValue);
417  ret[jss::metrics][jss::total_bytes_recv] =
418  std::to_string(metrics_.recv.total_bytes());
419  ret[jss::metrics][jss::total_bytes_sent] =
420  std::to_string(metrics_.sent.total_bytes());
421  ret[jss::metrics][jss::avg_bps_recv] =
422  std::to_string(metrics_.recv.average_bytes());
423  ret[jss::metrics][jss::avg_bps_sent] =
424  std::to_string(metrics_.sent.average_bytes());
425 
426  return ret;
427 }
428 
429 bool
430 PeerImp::supportsFeature(ProtocolFeature f) const
431 {
432  switch (f)
433  {
434  case ProtocolFeature::ValidatorListPropagation:
435  return protocol_ >= make_protocol(2, 1);
436  }
437  return false;
438 }
439 
440 //------------------------------------------------------------------------------
441 
442 bool
443 PeerImp::hasLedger(uint256 const& hash, std::uint32_t seq) const
444 {
445  {
446  std::lock_guard sl(recentLock_);
447  if ((seq != 0) && (seq >= minLedger_) && (seq <= maxLedger_) &&
448  (hash == closedLedgerHash_))
449  return true;
450  if (std::find(recentLedgers_.begin(), recentLedgers_.end(), hash) !=
451  recentLedgers_.end())
452  return true;
453  }
454 
455  return seq >= app_.getNodeStore().earliestLedgerSeq() &&
456  hasShard(NodeStore::seqToShardIndex(seq));
457 }
458 
459 void
460 PeerImp::ledgerRange(std::uint32_t& minSeq, std::uint32_t& maxSeq) const
461 {
462  std::lock_guard sl(recentLock_);
463 
464  minSeq = minLedger_;
465  maxSeq = maxLedger_;
466 }
467 
468 bool
469 PeerImp::hasShard(std::uint32_t shardIndex) const
470 {
472  auto const it{shardInfo_.find(publicKey_)};
473  if (it != shardInfo_.end())
474  return boost::icl::contains(it->second.shardIndexes, shardIndex);
475  return false;
476 }
477 
478 bool
479 PeerImp::hasTxSet(uint256 const& hash) const
480 {
482  return std::find(recentTxSets_.begin(), recentTxSets_.end(), hash) !=
483  recentTxSets_.end();
484 }
485 
486 void
487 PeerImp::cycleStatus()
488 {
489  // Operations on closedLedgerHash_ and previousLedgerHash_ must be
490  // guarded by recentLock_.
494 }
495 
496 bool
497 PeerImp::hasRange(std::uint32_t uMin, std::uint32_t uMax)
498 {
500  return (tracking_ != Tracking::diverged) && (uMin >= minLedger_) &&
501  (uMax <= maxLedger_);
502 }
503 
504 //------------------------------------------------------------------------------
505 
506 void
507 PeerImp::close()
508 {
509  assert(strand_.running_in_this_thread());
510  if (socket_.is_open())
511  {
512  detaching_ = true; // DEPRECATED
513  error_code ec;
514  timer_.cancel(ec);
515  socket_.close(ec);
517  if (inbound_)
518  {
519  JLOG(journal_.debug()) << "Closed";
520  }
521  else
522  {
523  JLOG(journal_.info()) << "Closed";
524  }
525  }
526 }
527 
528 void
529 PeerImp::fail(std::string const& reason)
530 {
531  if (!strand_.running_in_this_thread())
532  return post(
533  strand_,
534  std::bind(
535  (void (Peer::*)(std::string const&)) & PeerImp::fail,
536  shared_from_this(),
537  reason));
538  if (journal_.active(beast::severities::kWarning) && socket_.is_open())
539  {
540  std::string const n = name();
541  JLOG(journal_.warn()) << (n.empty() ? remote_address_.to_string() : n)
542  << " failed: " << reason;
543  }
544  close();
545 }
546 
547 void
548 PeerImp::fail(std::string const& name, error_code ec)
549 {
550  assert(strand_.running_in_this_thread());
551  if (socket_.is_open())
552  {
553  JLOG(journal_.warn())
554  << name << " from " << toBase58(TokenType::NodePublic, publicKey_)
555  << " at " << remote_address_.to_string() << ": " << ec.message();
556  }
557  close();
558 }
559 
560 boost::optional<RangeSet<std::uint32_t>>
561 PeerImp::getShardIndexes() const
562 {
564  auto it{shardInfo_.find(publicKey_)};
565  if (it != shardInfo_.end())
566  return it->second.shardIndexes;
567  return boost::none;
568 }
569 
570 boost::optional<hash_map<PublicKey, PeerImp::ShardInfo>>
571 PeerImp::getPeerShardInfo() const
572 {
574  if (!shardInfo_.empty())
575  return shardInfo_;
576  return boost::none;
577 }
578 
579 void
580 PeerImp::gracefulClose()
581 {
582  assert(strand_.running_in_this_thread());
583  assert(socket_.is_open());
584  assert(!gracefulClose_);
585  gracefulClose_ = true;
586 #if 0
587  // Flush messages
588  while(send_queue_.size() > 1)
589  send_queue_.pop_back();
590 #endif
591  if (send_queue_.size() > 0)
592  return;
593  setTimer();
594  stream_.async_shutdown(bind_executor(
595  strand_,
596  std::bind(
597  &PeerImp::onShutdown, shared_from_this(), std::placeholders::_1)));
598 }
599 
600 void
601 PeerImp::setTimer()
602 {
603  error_code ec;
604  timer_.expires_from_now(peerTimerInterval, ec);
605 
606  if (ec)
607  {
608  JLOG(journal_.error()) << "setTimer: " << ec.message();
609  return;
610  }
611  timer_.async_wait(bind_executor(
612  strand_,
613  std::bind(
614  &PeerImp::onTimer, shared_from_this(), std::placeholders::_1)));
615 }
616 
617 // convenience for ignoring the error code
618 void
619 PeerImp::cancelTimer()
620 {
621  error_code ec;
622  timer_.cancel(ec);
623 }
624 
625 //------------------------------------------------------------------------------
626 
627 std::string
628 PeerImp::makePrefix(id_t id)
629 {
630  std::stringstream ss;
631  ss << "[" << std::setfill('0') << std::setw(3) << id << "] ";
632  return ss.str();
633 }
634 
635 void
636 PeerImp::onTimer(error_code const& ec)
637 {
638  if (!socket_.is_open())
639  return;
640 
641  if (ec == boost::asio::error::operation_aborted)
642  return;
643 
644  if (ec)
645  {
646  // This should never happen
647  JLOG(journal_.error()) << "onTimer: " << ec.message();
648  return close();
649  }
650 
651  if (large_sendq_++ >= Tuning::sendqIntervals)
652  {
653  fail("Large send queue");
654  return;
655  }
656 
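 // Note that only outbound connections are dropped here for failing to
 // converge; inbound peers are not subjected to this particular check.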
657  if (auto const t = tracking_.load(); !inbound_ && t != Tracking::converged)
658  {
659  clock_type::duration duration;
660 
661  {
663  duration = clock_type::now() - trackingTime_;
664  }
665 
666  if ((t == Tracking::diverged &&
667  (duration > app_.config().MAX_DIVERGED_TIME)) ||
668  (t == Tracking::unknown &&
669  (duration > app_.config().MAX_UNKNOWN_TIME)))
670  {
672  fail("Not useful");
673  return;
674  }
675  }
676 
677  // Already waiting for PONG
678  if (lastPingSeq_)
679  {
680  fail("Ping Timeout");
681  return;
682  }
683 
685  lastPingSeq_ = rand_int<std::uint32_t>();
686 
687  protocol::TMPing message;
688  message.set_type(protocol::TMPing::ptPING);
689  message.set_seq(*lastPingSeq_);
690 
691  send(std::make_shared<Message>(message, protocol::mtPING));
692 
693  setTimer();
694 }
695 
696 void
697 PeerImp::onShutdown(error_code ec)
698 {
699  cancelTimer();
700  // If we don't get eof then something went wrong
701  if (!ec)
702  {
703  JLOG(journal_.error()) << "onShutdown: expected error condition";
704  return close();
705  }
706  if (ec != boost::asio::error::eof)
707  return fail("onShutdown", ec);
708  close();
709 }
710 
711 //------------------------------------------------------------------------------
712 void
713 PeerImp::doAccept()
714 {
715  assert(read_buffer_.size() == 0);
716 
717  JLOG(journal_.debug()) << "doAccept: " << remote_address_;
718 
719  auto const sharedValue = makeSharedValue(*stream_ptr_, journal_);
720 
721  // This shouldn't fail since we already computed
722  // the shared value successfully in OverlayImpl
723  if (!sharedValue)
724  return fail("makeSharedValue: Unexpected failure");
725 
726  JLOG(journal_.info()) << "Protocol: " << to_string(protocol_);
727  JLOG(journal_.info()) << "Public Key: "
728  << toBase58(TokenType::NodePublic, publicKey_);
729 
730  if (auto member = app_.cluster().member(publicKey_))
731  {
732  {
734  name_ = *member;
735  }
736  JLOG(journal_.info()) << "Cluster name: " << *member;
737  }
738 
740 
741  // XXX Set timer: connection is in grace period to be useful.
742  // XXX Set timer: connection idle (idle may vary depending on connection
743  // type.)
744 
745  auto write_buffer = [this, sharedValue]() {
746  auto buf = std::make_shared<boost::beast::multi_buffer>();
747 
748  http_response_type resp;
749  resp.result(boost::beast::http::status::switching_protocols);
750  resp.version(request_.version());
751  resp.insert("Connection", "Upgrade");
752  resp.insert("Upgrade", to_string(protocol_));
753  resp.insert("Connect-As", "Peer");
754  resp.insert("Server", BuildInfo::getFullVersionString());
755  resp.insert(
756  "Crawl",
757  overlay_.peerFinder().config().peerPrivate ? "private" : "public");
758 
759  if (request_["X-Offer-Compression"] == "lz4" &&
761  resp.insert("X-Offer-Compression", "lz4");
762 
764  resp,
765  *sharedValue,
769  app_);
770 
771  boost::beast::ostream(*buf) << resp;
772 
773  return buf;
774  }();
775 
776  // Write the whole buffer and only start protocol when that's done.
777  boost::asio::async_write(
778  stream_,
779  write_buffer->data(),
780  boost::asio::transfer_all(),
781  [this, write_buffer, self = shared_from_this()](
782  error_code ec, std::size_t bytes_transferred) {
783  if (!socket_.is_open())
784  return;
785  if (ec == boost::asio::error::operation_aborted)
786  return;
787  if (ec)
788  return fail("onWriteResponse", ec);
789  if (write_buffer->size() == bytes_transferred)
790  return doProtocolStart();
791  return fail("Failed to write header");
792  });
793 }
794 
795 std::string
796 PeerImp::name() const
797 {
798  std::shared_lock read_lock{nameMutex_};
799  return name_;
800 }
801 
802 std::string
803 PeerImp::domain() const
804 {
805  return headers_["Server-Domain"].to_string();
806 }
807 
808 //------------------------------------------------------------------------------
809 
810 // Protocol logic
811 
812 void
813 PeerImp::doProtocolStart()
814 {
815  onReadMessage(error_code(), 0);
816 
817  // Send all the validator lists that have been loaded
819  {
821  std::string const& blob,
822  std::string const& signature,
823  std::uint32_t version,
824  PublicKey const& pubKey,
825  std::size_t sequence,
826  uint256 const& hash) {
827  protocol::TMValidatorList vl;
828 
829  vl.set_manifest(manifest);
830  vl.set_blob(blob);
831  vl.set_signature(signature);
832  vl.set_version(version);
833 
834  JLOG(p_journal_.debug())
835  << "Sending validator list for " << strHex(pubKey)
836  << " with sequence " << sequence << " to "
837  << remote_address_.to_string() << " (" << id_ << ")";
838  send(std::make_shared<Message>(vl, protocol::mtVALIDATORLIST));
839  // Don't send it next time.
840  app_.getHashRouter().addSuppressionPeer(hash, id_);
841  setPublisherListSequence(pubKey, sequence);
842  });
843  }
844 
845  if (auto m = overlay_.getManifestsMessage())
846  send(m);
847 }
848 
849 // Called repeatedly with protocol message data
850 void
851 PeerImp::onReadMessage(error_code ec, std::size_t bytes_transferred)
852 {
853  if (!socket_.is_open())
854  return;
855  if (ec == boost::asio::error::operation_aborted)
856  return;
857  if (ec == boost::asio::error::eof)
858  {
859  JLOG(journal_.info()) << "EOF";
860  return gracefulClose();
861  }
862  if (ec)
863  return fail("onReadMessage", ec);
864  if (auto stream = journal_.trace())
865  {
866  if (bytes_transferred > 0)
867  stream << "onReadMessage: " << bytes_transferred << " bytes";
868  else
869  stream << "onReadMessage";
870  }
871 
872  metrics_.recv.add_message(bytes_transferred);
873 
874  read_buffer_.commit(bytes_transferred);
875 
876  auto hint = Tuning::readBufferBytes;
877 
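 // Drain the buffer: hand complete protocol messages to
 // invokeProtocolMessage() until only a partial message remains
 // (bytes_consumed == 0), then issue another read below.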
878  while (read_buffer_.size() > 0)
879  {
880  std::size_t bytes_consumed;
881  std::tie(bytes_consumed, ec) =
882  invokeProtocolMessage(read_buffer_.data(), *this, hint);
883  if (ec)
884  return fail("onReadMessage", ec);
885  if (!socket_.is_open())
886  return;
887  if (gracefulClose_)
888  return;
889  if (bytes_consumed == 0)
890  break;
891  read_buffer_.consume(bytes_consumed);
892  }
893 
894  // Timeout on writes only
895  stream_.async_read_some(
896  read_buffer_.prepare(std::max(Tuning::readBufferBytes, hint)),
897  bind_executor(
898  strand_,
899  std::bind(
900  &PeerImp::onReadMessage,
901  shared_from_this(),
902  std::placeholders::_1,
903  std::placeholders::_2)));
904 }
905 
906 void
907 PeerImp::onWriteMessage(error_code ec, std::size_t bytes_transferred)
908 {
909  if (!socket_.is_open())
910  return;
911  if (ec == boost::asio::error::operation_aborted)
912  return;
913  if (ec)
914  return fail("onWriteMessage", ec);
915  if (auto stream = journal_.trace())
916  {
917  if (bytes_transferred > 0)
918  stream << "onWriteMessage: " << bytes_transferred << " bytes";
919  else
920  stream << "onWriteMessage";
921  }
922 
923  metrics_.sent.add_message(bytes_transferred);
924 
925  assert(!send_queue_.empty());
926  send_queue_.pop();
927  if (!send_queue_.empty())
928  {
929  // Timeout on writes only
930  return boost::asio::async_write(
931  stream_,
932  boost::asio::buffer(
933  send_queue_.front()->getBuffer(compressionEnabled_)),
934  bind_executor(
935  strand_,
936  std::bind(
937  &PeerImp::onWriteMessage,
938  shared_from_this(),
939  std::placeholders::_1,
940  std::placeholders::_2)));
941  }
942 
943  if (gracefulClose_)
944  {
945  return stream_.async_shutdown(bind_executor(
946  strand_,
947  std::bind(
948  &PeerImp::onShutdown,
949  shared_from_this(),
950  std::placeholders::_1)));
951  }
952 }
953 
954 //------------------------------------------------------------------------------
955 //
956 // ProtocolHandler
957 //
958 //------------------------------------------------------------------------------
959 
960 void
962 {
963  // TODO
964 }
965 
966 void
967 PeerImp::onMessageBegin(
968  std::uint16_t type,
969  std::shared_ptr<::google::protobuf::Message> const& m,
970  std::size_t size)
971 {
972  load_event_ =
976  TrafficCount::categorize(*m, type, true), true, static_cast<int>(size));
977 }
978 
979 void
980 PeerImp::onMessageEnd(
981  std::uint16_t,
982  std::shared_ptr<::google::protobuf::Message> const&)
983 {
984  load_event_.reset();
985  charge(fee_);
986 }
987 
988 void
989 PeerImp::onMessage(std::shared_ptr<protocol::TMManifests> const& m)
990 {
991  // VFALCO What's the right job type?
992  auto that = shared_from_this();
993  app_.getJobQueue().addJob(
994  jtVALIDATION_ut, "receiveManifests", [this, that, m](Job&) {
995  overlay_.onManifests(m, that);
996  });
997 }
998 
999 void
1000 PeerImp::onMessage(std::shared_ptr<protocol::TMPing> const& m)
1001 {
1002  if (m->type() == protocol::TMPing::ptPING)
1003  {
1004  // We have received a ping request, reply with a pong
1006  m->set_type(protocol::TMPing::ptPONG);
1007  send(std::make_shared<Message>(*m, protocol::mtPING));
1008  return;
1009  }
1010 
1011  if (m->type() == protocol::TMPing::ptPONG && m->has_seq())
1012  {
1013  // Only reset the ping sequence if we actually received a
1014  // PONG with the correct cookie. That way, any peers which
1015  // respond with incorrect cookies will eventually time out.
1016  if (m->seq() == lastPingSeq_)
1017  {
1018  lastPingSeq_.reset();
1019 
1020  // Update latency estimate
1021  auto const rtt = std::chrono::round<std::chrono::milliseconds>(
1022  clock_type::now() - lastPingTime_);
1023 
1025 
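 // Exponential moving average: the previous estimate keeps a weight of
 // 7/8 and the new round-trip sample gets 1/8, so a single slow ping
 // only nudges the latency estimate.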
1026  if (latency_)
1027  latency_ = (*latency_ * 7 + rtt) / 8;
1028  else
1029  latency_ = rtt;
1030  }
1031 
1032  return;
1033  }
1034 }
1035 
1036 void
1037 PeerImp::onMessage(std::shared_ptr<protocol::TMCluster> const& m)
1038 {
1039  // VFALCO NOTE I think we should drop the peer immediately
1040  if (!cluster())
1041  {
1043  return;
1044  }
1045 
1046  for (int i = 0; i < m->clusternodes().size(); ++i)
1047  {
1048  protocol::TMClusterNode const& node = m->clusternodes(i);
1049 
1050  std::string name;
1051  if (node.has_nodename())
1052  name = node.nodename();
1053 
1054  auto const publicKey =
1055  parseBase58<PublicKey>(TokenType::NodePublic, node.publickey());
1056 
1057  // NIKB NOTE We should drop the peer immediately if
1058  // they send us a public key we can't parse
1059  if (publicKey)
1060  {
1061  auto const reportTime =
1062  NetClock::time_point{NetClock::duration{node.reporttime()}};
1063 
1064  app_.cluster().update(
1065  *publicKey, name, node.nodeload(), reportTime);
1066  }
1067  }
1068 
1069  int loadSources = m->loadsources().size();
1070  if (loadSources != 0)
1071  {
1072  Resource::Gossip gossip;
1073  gossip.items.reserve(loadSources);
1074  for (int i = 0; i < m->loadsources().size(); ++i)
1075  {
1076  protocol::TMLoadSource const& node = m->loadsources(i);
1078  item.address = beast::IP::Endpoint::from_string(node.name());
1079  item.balance = node.cost();
1080  if (item.address != beast::IP::Endpoint())
1081  gossip.items.push_back(item);
1082  }
1084  }
1085 
1086  // Calculate the cluster fee:
1087  auto const thresh = app_.timeKeeper().now() - 90s;
1088  std::uint32_t clusterFee = 0;
1089 
1090  std::vector<std::uint32_t> fees;
1091  fees.reserve(app_.cluster().size());
1092 
1093  app_.cluster().for_each([&fees, thresh](ClusterNode const& status) {
1094  if (status.getReportTime() >= thresh)
1095  fees.push_back(status.getLoadFee());
1096  });
1097 
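 // Use the (upper) median of the load fees reported by cluster nodes
 // within the last 90 seconds as the cluster fee.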
1098  if (!fees.empty())
1099  {
1100  auto const index = fees.size() / 2;
1101  std::nth_element(fees.begin(), fees.begin() + index, fees.end());
1102  clusterFee = fees[index];
1103  }
1104 
1105  app_.getFeeTrack().setClusterFee(clusterFee);
1106 }
1107 
1108 void
1110 {
1111  // DEPRECATED
1112 }
1113 
1114 void
1116 {
1117  // DEPRECATED
1118 }
1119 
1120 void
1121 PeerImp::onMessage(std::shared_ptr<protocol::TMGetPeerShardInfo> const& m)
1122 {
1123  auto badData = [&](std::string msg) {
1124  fee_ = Resource::feeBadData;
1125  JLOG(p_journal_.warn()) << msg;
1126  };
1127 
1128  if (m->hops() > csHopLimit)
1129  return badData("Invalid hops: " + std::to_string(m->hops()));
1130  if (m->peerchain_size() > csHopLimit)
1131  return badData("Invalid peer chain");
1132 
1133  // Reply with shard info we may have
1134  if (auto shardStore = app_.getShardStore())
1135  {
1137  auto shards{shardStore->getCompleteShards()};
1138  if (!shards.empty())
1139  {
1140  protocol::TMPeerShardInfo reply;
1141  reply.set_shardindexes(shards);
1142 
1143  if (m->has_lastlink())
1144  reply.set_lastlink(true);
1145 
1146  if (m->peerchain_size() > 0)
1147  {
1148  for (int i = 0; i < m->peerchain_size(); ++i)
1149  {
1150  if (!publicKeyType(makeSlice(m->peerchain(i).nodepubkey())))
1151  return badData("Invalid peer chain public key");
1152  }
1153 
1154  *reply.mutable_peerchain() = m->peerchain();
1155  }
1156 
1157  send(std::make_shared<Message>(reply, protocol::mtPEER_SHARD_INFO));
1158 
1159  JLOG(p_journal_.trace()) << "Sent shard indexes " << shards;
1160  }
1161  }
1162 
1163  // Relay request to peers
1164  if (m->hops() > 0)
1165  {
1167 
1168  m->set_hops(m->hops() - 1);
1169  if (m->hops() == 0)
1170  m->set_lastlink(true);
1171 
1172  m->add_peerchain()->set_nodepubkey(
1174 
1176  std::make_shared<Message>(*m, protocol::mtGET_PEER_SHARD_INFO),
1177  match_peer(this)));
1178  }
1179 }
1180 
1181 void
1182 PeerImp::onMessage(std::shared_ptr<protocol::TMPeerShardInfo> const& m)
1183 {
1184  auto badData = [&](std::string msg) {
1185  fee_ = Resource::feeBadData;
1186  JLOG(p_journal_.warn()) << msg;
1187  };
1188 
1189  if (m->shardindexes().empty())
1190  return badData("Missing shard indexes");
1191  if (m->peerchain_size() > csHopLimit)
1192  return badData("Invalid peer chain");
1193  if (m->has_nodepubkey() && !publicKeyType(makeSlice(m->nodepubkey())))
1194  return badData("Invalid public key");
1195 
1196  // Check if the message should be forwarded to another peer
1197  if (m->peerchain_size() > 0)
1198  {
1199  // Get the Public key of the last link in the peer chain
1200  auto const s{
1201  makeSlice(m->peerchain(m->peerchain_size() - 1).nodepubkey())};
1202  if (!publicKeyType(s))
1203  return badData("Invalid pubKey");
1204  PublicKey peerPubKey(s);
1205 
1206  if (auto peer = overlay_.findPeerByPublicKey(peerPubKey))
1207  {
1208  if (!m->has_nodepubkey())
1209  m->set_nodepubkey(publicKey_.data(), publicKey_.size());
1210 
1211  if (!m->has_endpoint())
1212  {
1213  // Check if peer will share IP publicly
1214  if (crawl())
1215  m->set_endpoint(remote_address_.address().to_string());
1216  else
1217  m->set_endpoint("0");
1218  }
1219 
1220  m->mutable_peerchain()->RemoveLast();
1221  peer->send(
1222  std::make_shared<Message>(*m, protocol::mtPEER_SHARD_INFO));
1223 
1224  JLOG(p_journal_.trace())
1225  << "Relayed TMPeerShardInfo to peer with IP "
1226  << remote_address_.address().to_string();
1227  }
1228  else
1229  {
1230  // Peer is no longer available so the relay ends
1232  JLOG(p_journal_.info()) << "Unable to route shard info";
1233  }
1234  return;
1235  }
1236 
1237  // Parse the shard indexes received in the shard info
1238  RangeSet<std::uint32_t> shardIndexes;
1239  {
1240  if (!from_string(shardIndexes, m->shardindexes()))
1241  return badData("Invalid shard indexes");
1242 
1243  std::uint32_t earliestShard;
1244  boost::optional<std::uint32_t> latestShard;
1245  {
1246  auto const curLedgerSeq{
1248  if (auto shardStore = app_.getShardStore())
1249  {
1250  earliestShard = shardStore->earliestShardIndex();
1251  if (curLedgerSeq >= shardStore->earliestLedgerSeq())
1252  latestShard = shardStore->seqToShardIndex(curLedgerSeq);
1253  }
1254  else
1255  {
1256  auto const earliestLedgerSeq{
1258  earliestShard = NodeStore::seqToShardIndex(earliestLedgerSeq);
1259  if (curLedgerSeq >= earliestLedgerSeq)
1260  latestShard = NodeStore::seqToShardIndex(curLedgerSeq);
1261  }
1262  }
1263 
1264  if (boost::icl::first(shardIndexes) < earliestShard ||
1265  (latestShard && boost::icl::last(shardIndexes) > latestShard))
1266  {
1267  return badData("Invalid shard indexes");
1268  }
1269  }
1270 
1271  // Get the IP of the node reporting the shard info
1272  beast::IP::Endpoint endpoint;
1273  if (m->has_endpoint())
1274  {
1275  if (m->endpoint() != "0")
1276  {
1277  auto result =
1279  if (!result)
1280  return badData("Invalid incoming endpoint: " + m->endpoint());
1281  endpoint = std::move(*result);
1282  }
1283  }
1284  else if (crawl()) // Check if peer will share IP publicly
1285  {
1286  endpoint = remote_address_;
1287  }
1288 
1289  // Get the Public key of the node reporting the shard info
1290  PublicKey publicKey;
1291  if (m->has_nodepubkey())
1292  publicKey = PublicKey(makeSlice(m->nodepubkey()));
1293  else
1294  publicKey = publicKey_;
1295 
1296  {
1298  auto it{shardInfo_.find(publicKey)};
1299  if (it != shardInfo_.end())
1300  {
1301  // Update the IP address for the node
1302  it->second.endpoint = std::move(endpoint);
1303 
1304  // Join the shard index range set
1305  it->second.shardIndexes += shardIndexes;
1306  }
1307  else
1308  {
1309  // Add a new node
1310  ShardInfo shardInfo;
1311  shardInfo.endpoint = std::move(endpoint);
1312  shardInfo.shardIndexes = std::move(shardIndexes);
1313  shardInfo_.emplace(publicKey, std::move(shardInfo));
1314  }
1315  }
1316 
1317  JLOG(p_journal_.trace())
1318  << "Consumed TMPeerShardInfo originating from public key "
1319  << toBase58(TokenType::NodePublic, publicKey) << " shard indexes "
1320  << m->shardindexes();
1321 
1322  if (m->has_lastlink())
1324 }
1325 
1326 void
1327 PeerImp::onMessage(std::shared_ptr<protocol::TMEndpoints> const& m)
1328 {
1329  // Don't allow endpoints from peers whose tracking status has not
1330  // converged, or that are not using a version of the message we support:
1331  if (tracking_.load() != Tracking::converged || m->version() != 2)
1332  return;
1333 
1335  endpoints.reserve(m->endpoints_v2().size());
1336 
1337  for (auto const& tm : m->endpoints_v2())
1338  {
1339  auto result = beast::IP::Endpoint::from_string_checked(tm.endpoint());
1340  if (!result)
1341  {
1342  JLOG(p_journal_.error()) << "failed to parse incoming endpoint: {"
1343  << tm.endpoint() << "}";
1344  continue;
1345  }
1346 
1347  // If hops == 0, this Endpoint describes the peer we are connected
1348  // to -- in that case, we take the remote address seen on the
1349  // socket and store that in the IP::Endpoint. If this is the first
1350  // time, then we'll verify that their listener can receive incoming
1351  // connections by performing a connectivity test. If hops > 0, then
1352  // we just take the address/port we were given.
1353 
1354  endpoints.emplace_back(
1355  tm.hops() > 0 ? *result : remote_address_.at_port(result->port()),
1356  tm.hops());
1357  }
1358 
1359  if (!endpoints.empty())
1360  overlay_.peerFinder().on_endpoints(slot_, endpoints);
1361 }
1362 
1363 void
1364 PeerImp::onMessage(std::shared_ptr<protocol::TMTransaction> const& m)
1365 {
1366  if (tracking_.load() == Tracking::diverged)
1367  return;
1368 
1369  if (app_.getOPs().isNeedNetworkLedger())
1370  {
1371  // If we've never been in sync, there's nothing we can do
1372  // with a transaction
1373  JLOG(p_journal_.debug()) << "Ignoring incoming transaction: "
1374  << "Need network ledger";
1375  return;
1376  }
1377 
1378  SerialIter sit(makeSlice(m->rawtransaction()));
1379 
1380  try
1381  {
1382  auto stx = std::make_shared<STTx const>(sit);
1383  uint256 txID = stx->getTransactionID();
1384 
1385  int flags;
1386  constexpr std::chrono::seconds tx_interval = 10s;
1387 
1388  if (!app_.getHashRouter().shouldProcess(txID, id_, flags, tx_interval))
1389  {
1390  // we have seen this transaction recently
1391  if (flags & SF_BAD)
1392  {
1394  JLOG(p_journal_.debug()) << "Ignoring known bad tx " << txID;
1395  }
1396 
1397  return;
1398  }
1399 
1400  JLOG(p_journal_.debug()) << "Got tx " << txID;
1401 
1402  bool checkSignature = true;
1403  if (cluster())
1404  {
1405  if (!m->has_deferred() || !m->deferred())
1406  {
1407  // Skip local checks if a server we trust
1408  // put the transaction in its open ledger
1409  flags |= SF_TRUSTED;
1410  }
1411 
1413  {
1414  // For now, be paranoid and have each validator
1415  // check each transaction, regardless of source
1416  checkSignature = false;
1417  }
1418  }
1419 
1422  {
1424  JLOG(p_journal_.info()) << "Transaction queue is full";
1425  }
1426  else if (app_.getLedgerMaster().getValidatedLedgerAge() > 4min)
1427  {
1428  JLOG(p_journal_.trace())
1429  << "No new transactions until synchronized";
1430  }
1431  else
1432  {
1433  app_.getJobQueue().addJob(
1434  jtTRANSACTION,
1435  "recvTransaction->checkTransaction",
1437  flags,
1438  checkSignature,
1439  stx](Job&) {
1440  if (auto peer = weak.lock())
1441  peer->checkTransaction(flags, checkSignature, stx);
1442  });
1443  }
1444  }
1445  catch (std::exception const&)
1446  {
1447  JLOG(p_journal_.warn())
1448  << "Transaction invalid: " << strHex(m->rawtransaction());
1449  }
1450 }
1451 
1452 void
1453 PeerImp::onMessage(std::shared_ptr<protocol::TMGetLedger> const& m)
1454 {
1456  std::weak_ptr<PeerImp> weak = shared_from_this();
1457  app_.getJobQueue().addJob(jtLEDGER_REQ, "recvGetLedger", [weak, m](Job&) {
1458  if (auto peer = weak.lock())
1459  peer->getLedger(m);
1460  });
1461 }
1462 
1463 void
1464 PeerImp::onMessage(std::shared_ptr<protocol::TMLedgerData> const& m)
1465 {
1466  protocol::TMLedgerData& packet = *m;
1467 
1468  if (m->nodes().size() <= 0)
1469  {
1470  JLOG(p_journal_.warn()) << "Ledger/TXset data with no nodes";
1471  return;
1472  }
1473 
1474  if (m->has_requestcookie())
1475  {
1476  std::shared_ptr<Peer> target =
1477  overlay_.findPeerByShortID(m->requestcookie());
1478  if (target)
1479  {
1480  m->clear_requestcookie();
1481  target->send(
1482  std::make_shared<Message>(packet, protocol::mtLEDGER_DATA));
1483  }
1484  else
1485  {
1486  JLOG(p_journal_.info()) << "Unable to route TX/ledger data reply";
1488  }
1489  return;
1490  }
1491 
1492  if (!stringIsUint256Sized(m->ledgerhash()))
1493  {
1494  JLOG(p_journal_.warn()) << "TX candidate reply with invalid hash size";
1496  return;
1497  }
1498 
1499  uint256 const hash{m->ledgerhash()};
1500 
1501  if (m->type() == protocol::liTS_CANDIDATE)
1502  {
1503  // got data for a candidate transaction set
1504  std::weak_ptr<PeerImp> weak = shared_from_this();
1505  app_.getJobQueue().addJob(
1506  jtTXN_DATA, "recvPeerData", [weak, hash, m](Job&) {
1507  if (auto peer = weak.lock())
1508  peer->app_.getInboundTransactions().gotData(hash, peer, m);
1509  });
1510  return;
1511  }
1512 
1514  {
1515  JLOG(p_journal_.trace()) << "Got data for unwanted ledger";
1517  }
1518 }
1519 
1520 void
1521 PeerImp::onMessage(std::shared_ptr<protocol::TMProposeSet> const& m)
1522 {
1523  protocol::TMProposeSet& set = *m;
1524 
1525  auto const sig = makeSlice(set.signature());
1526 
1527  // Preliminary check for the validity of the signature: A DER encoded
1528  // signature can't be longer than 72 bytes.
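 // Anything outside the 64-72 byte range is therefore rejected outright,
 // as is any proposal not signed with a secp256k1 key.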
1529  if ((boost::algorithm::clamp(sig.size(), 64, 72) != sig.size()) ||
1530  (publicKeyType(makeSlice(set.nodepubkey())) != KeyType::secp256k1))
1531  {
1532  JLOG(p_journal_.warn()) << "Proposal: malformed";
1534  return;
1535  }
1536 
1537  if (!stringIsUint256Sized(set.currenttxhash()) ||
1538  !stringIsUint256Sized(set.previousledger()))
1539  {
1540  JLOG(p_journal_.warn()) << "Proposal: malformed";
1542  return;
1543  }
1544 
1545  uint256 const proposeHash{set.currenttxhash()};
1546  uint256 const prevLedger{set.previousledger()};
1547 
1548  PublicKey const publicKey{makeSlice(set.nodepubkey())};
1549  NetClock::time_point const closeTime{NetClock::duration{set.closetime()}};
1550 
1551  uint256 const suppression = proposalUniqueId(
1552  proposeHash,
1553  prevLedger,
1554  set.proposeseq(),
1555  closeTime,
1556  publicKey.slice(),
1557  sig);
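 // The suppression id is a hash over the proposal's fields; the
 // HashRouter uses it below to detect and drop duplicates arriving
 // from multiple peers.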
1558 
1559  if (auto [added, relayed] =
1560  app_.getHashRouter().addSuppressionPeerWithStatus(suppression, id_);
1561  !added)
1562  {
1563  // Count unique messages (Slots has its own 'HashRouter') that a peer
1564  // receives within IDLED seconds of the message having been relayed.
1565  // Wait WAIT_ON_BOOTUP time to let the server establish connections to
1566  // peers.
1567  if (app_.config().REDUCE_RELAY_ENABLE && relayed &&
1568  (stopwatch().now() - *relayed) < squelch::IDLED &&
1569  squelch::epoch<std::chrono::minutes>(UptimeClock::now()) >
1570  squelch::WAIT_ON_BOOTUP)
1571  overlay_.updateSlotAndSquelch(
1572  suppression, publicKey, id_, protocol::mtPROPOSE_LEDGER);
1573  JLOG(p_journal_.trace()) << "Proposal: duplicate";
1574  return;
1575  }
1576 
1577  auto const isTrusted = app_.validators().trusted(publicKey);
1578 
1579  if (!isTrusted)
1580  {
1582  {
1583  JLOG(p_journal_.debug())
1584  << "Proposal: Dropping untrusted (peer divergence)";
1585  return;
1586  }
1587 
1588  if (!cluster() && app_.getFeeTrack().isLoadedLocal())
1589  {
1590  JLOG(p_journal_.debug()) << "Proposal: Dropping untrusted (load)";
1591  return;
1592  }
1593  }
1594 
1595  JLOG(p_journal_.trace())
1596  << "Proposal: " << (isTrusted ? "trusted" : "untrusted");
1597 
1598  auto proposal = RCLCxPeerPos(
1599  publicKey,
1600  sig,
1601  suppression,
1603  prevLedger,
1604  set.proposeseq(),
1605  proposeHash,
1606  closeTime,
1609 
1610  std::weak_ptr<PeerImp> weak = shared_from_this();
1611  app_.getJobQueue().addJob(
1612  isTrusted ? jtPROPOSAL_t : jtPROPOSAL_ut,
1613  "recvPropose->checkPropose",
1614  [weak, m, proposal](Job& job) {
1615  if (auto peer = weak.lock())
1616  peer->checkPropose(job, m, proposal);
1617  });
1618 }
1619 
1620 void
1621 PeerImp::onMessage(std::shared_ptr<protocol::TMStatusChange> const& m)
1622 {
1623  JLOG(p_journal_.trace()) << "Status: Change";
1624 
1625  if (!m->has_networktime())
1626  m->set_networktime(app_.timeKeeper().now().time_since_epoch().count());
1627 
1628  {
1630  if (!last_status_.has_newstatus() || m->has_newstatus())
1631  last_status_ = *m;
1632  else
1633  {
1634  // preserve old status
1635  protocol::NodeStatus status = last_status_.newstatus();
1636  last_status_ = *m;
1637  m->set_newstatus(status);
1638  }
1639  }
1640 
1641  if (m->newevent() == protocol::neLOST_SYNC)
1642  {
1643  bool outOfSync{false};
1644  {
1645  // Operations on closedLedgerHash_ and previousLedgerHash_ must be
1646  // guarded by recentLock_.
1648  if (!closedLedgerHash_.isZero())
1649  {
1650  outOfSync = true;
1652  }
1654  }
1655  if (outOfSync)
1656  {
1657  JLOG(p_journal_.debug()) << "Status: Out of sync";
1658  }
1659  return;
1660  }
1661 
1662  {
1663  uint256 closedLedgerHash{};
1664  bool const peerChangedLedgers{
1665  m->has_ledgerhash() && stringIsUint256Sized(m->ledgerhash())};
1666 
1667  {
1668  // Operations on closedLedgerHash_ and previousLedgerHash_ must be
1669  // guarded by recentLock_.
1671  if (peerChangedLedgers)
1672  {
1673  closedLedgerHash_ = m->ledgerhash();
1674  closedLedgerHash = closedLedgerHash_;
1675  addLedger(closedLedgerHash, sl);
1676  }
1677  else
1678  {
1680  }
1681 
1682  if (m->has_ledgerhashprevious() &&
1683  stringIsUint256Sized(m->ledgerhashprevious()))
1684  {
1685  previousLedgerHash_ = m->ledgerhashprevious();
1687  }
1688  else
1689  {
1691  }
1692  }
1693  if (peerChangedLedgers)
1694  {
1695  JLOG(p_journal_.debug()) << "LCL is " << closedLedgerHash;
1696  }
1697  else
1698  {
1699  JLOG(p_journal_.debug()) << "Status: No ledger";
1700  }
1701  }
1702 
1703  if (m->has_firstseq() && m->has_lastseq())
1704  {
1706 
1707  minLedger_ = m->firstseq();
1708  maxLedger_ = m->lastseq();
1709 
1710  if ((maxLedger_ < minLedger_) || (minLedger_ == 0) || (maxLedger_ == 0))
1711  minLedger_ = maxLedger_ = 0;
1712  }
1713 
1714  if (m->has_ledgerseq() &&
1716  {
1717  checkTracking(
1718  m->ledgerseq(), app_.getLedgerMaster().getValidLedgerIndex());
1719  }
1720 
1721  app_.getOPs().pubPeerStatus([=]() -> Json::Value {
1722  Json::Value j = Json::objectValue;
1723 
1724  if (m->has_newstatus())
1725  {
1726  switch (m->newstatus())
1727  {
1728  case protocol::nsCONNECTING:
1729  j[jss::status] = "CONNECTING";
1730  break;
1731  case protocol::nsCONNECTED:
1732  j[jss::status] = "CONNECTED";
1733  break;
1734  case protocol::nsMONITORING:
1735  j[jss::status] = "MONITORING";
1736  break;
1737  case protocol::nsVALIDATING:
1738  j[jss::status] = "VALIDATING";
1739  break;
1740  case protocol::nsSHUTTING:
1741  j[jss::status] = "SHUTTING";
1742  break;
1743  }
1744  }
1745 
1746  if (m->has_newevent())
1747  {
1748  switch (m->newevent())
1749  {
1750  case protocol::neCLOSING_LEDGER:
1751  j[jss::action] = "CLOSING_LEDGER";
1752  break;
1753  case protocol::neACCEPTED_LEDGER:
1754  j[jss::action] = "ACCEPTED_LEDGER";
1755  break;
1756  case protocol::neSWITCHED_LEDGER:
1757  j[jss::action] = "SWITCHED_LEDGER";
1758  break;
1759  case protocol::neLOST_SYNC:
1760  j[jss::action] = "LOST_SYNC";
1761  break;
1762  }
1763  }
1764 
1765  if (m->has_ledgerseq())
1766  {
1767  j[jss::ledger_index] = m->ledgerseq();
1768  }
1769 
1770  if (m->has_ledgerhash())
1771  {
1772  uint256 closedLedgerHash{};
1773  {
1774  std::lock_guard sl(recentLock_);
1775  closedLedgerHash = closedLedgerHash_;
1776  }
1777  j[jss::ledger_hash] = to_string(closedLedgerHash);
1778  }
1779 
1780  if (m->has_networktime())
1781  {
1782  j[jss::date] = Json::UInt(m->networktime());
1783  }
1784 
1785  if (m->has_firstseq() && m->has_lastseq())
1786  {
1787  j[jss::ledger_index_min] = Json::UInt(m->firstseq());
1788  j[jss::ledger_index_max] = Json::UInt(m->lastseq());
1789  }
1790 
1791  return j;
1792  });
1793 }
1794 
1795 void
1796 PeerImp::checkTracking(std::uint32_t validationSeq)
1797 {
1798  std::uint32_t serverSeq;
1799  {
1800  // Extract the sequence number of the highest
1801  // ledger this peer has
1802  std::lock_guard sl(recentLock_);
1803 
1804  serverSeq = maxLedger_;
1805  }
1806  if (serverSeq != 0)
1807  {
1808  // Compare the peer's ledger sequence to the
1809  // sequence of a recently-validated ledger
1810  checkTracking(serverSeq, validationSeq);
1811  }
1812 }
1813 
1814 void
1815 PeerImp::checkTracking(std::uint32_t seq1, std::uint32_t seq2)
1816 {
1817  int diff = std::max(seq1, seq2) - std::min(seq1, seq2);
1818 
1819  if (diff < Tuning::convergedLedgerLimit)
1820  {
1821  // The peer's ledger sequence is close to the validation's
1822  tracking_ = Tracking::converged;
1823  }
1824 
1825  if ((diff > Tuning::divergedLedgerLimit) &&
1826  (tracking_.load() != Tracking::diverged))
1827  {
1828  // The peer's ledger sequence is way off the validation's
1829  std::lock_guard sl(recentLock_);
1830 
1831  tracking_ = Tracking::diverged;
1832  trackingTime_ = clock_type::now();
1833  }
1834 }
1835 
1836 void
1837 PeerImp::onMessage(std::shared_ptr<protocol::TMHaveTransactionSet> const& m)
1838 {
1839  if (!stringIsUint256Sized(m->hash()))
1840  {
1841  fee_ = Resource::feeInvalidRequest;
1842  return;
1843  }
1844 
1845  uint256 const hash{m->hash()};
1846 
1847  if (m->status() == protocol::tsHAVE)
1848  {
1849  std::lock_guard sl(recentLock_);
1850 
1851  if (std::find(recentTxSets_.begin(), recentTxSets_.end(), hash) !=
1852  recentTxSets_.end())
1853  {
1854  fee_ = Resource::feeUnwantedData;
1855  return;
1856  }
1857 
1858  recentTxSets_.push_back(hash);
1859  }
1860 }
1861 
1862 void
1863 PeerImp::onMessage(std::shared_ptr<protocol::TMValidatorList> const& m)
1864 {
1865  try
1866  {
1867  if (!supportsFeature(ProtocolFeature::ValidatorListPropagation))
1868  {
1869  JLOG(p_journal_.debug())
1870  << "ValidatorList: received validator list from peer using "
1871  << "protocol version " << to_string(protocol_)
1872  << " which shouldn't support this feature.";
1873  fee_ = Resource::feeUnwantedData;
1874  return;
1875  }
1876  auto const& manifest = m->manifest();
1877  auto const& blob = m->blob();
1878  auto const& signature = m->signature();
1879  auto const version = m->version();
1880  auto const hash = sha512Half(manifest, blob, signature, version);
1881 
1882  JLOG(p_journal_.debug())
1883  << "Received validator list from " << remote_address_.to_string()
1884  << " (" << id_ << ")";
1885 
1886  if (!app_.getHashRouter().addSuppressionPeer(hash, id_))
1887  {
1888  JLOG(p_journal_.debug())
1889  << "ValidatorList: received duplicate validator list";
1890  // Charging this fee here won't hurt the peer in the normal
1891  // course of operation (i.e. refresh every 5 minutes), but
1892  // will add up if the peer is misbehaving.
1893  fee_ = Resource::feeUnwantedData;
1894  return;
1895  }
1896 
1897  auto const applyResult = app_.validators().applyListAndBroadcast(
1898  manifest,
1899  blob,
1900  signature,
1901  version,
1902  remote_address_.to_string(),
1903  hash,
1904  app_.overlay(),
1905  app_.getHashRouter());
1906  auto const disp = applyResult.disposition;
1907 
1908  JLOG(p_journal_.debug())
1909  << "Processed validator list from "
1910  << (applyResult.publisherKey ? strHex(*applyResult.publisherKey)
1911  : "unknown or invalid publisher")
1912  << " from " << remote_address_.to_string() << " (" << id_
1913  << ") with result " << to_string(disp);
1914 
1915  switch (disp)
1916  {
1917  case ListDisposition::accepted:
1918  JLOG(p_journal_.debug())
1919  << "Applied new validator list from peer "
1920  << remote_address_;
1921  {
1922  std::lock_guard<std::mutex> sl(recentLock_);
1923 
1924  assert(applyResult.sequence && applyResult.publisherKey);
1925  auto const& pubKey = *applyResult.publisherKey;
1926 #ifndef NDEBUG
1927  if (auto const iter = publisherListSequences_.find(pubKey);
1928  iter != publisherListSequences_.end())
1929  {
1930  assert(iter->second < *applyResult.sequence);
1931  }
1932 #endif
1933  publisherListSequences_[pubKey] = *applyResult.sequence;
1934  }
1935  break;
1936  case ListDisposition::same_sequence:
1937  JLOG(p_journal_.warn())
1938  << "Validator list with current sequence from peer "
1939  << remote_address_;
1940  // Charging this fee here won't hurt the peer in the normal
1941  // course of operation (i.e. refresh every 5 minutes), but
1942  // will add up if the peer is misbehaving.
1943  fee_ = Resource::feeUnwantedData;
1944 #ifndef NDEBUG
1945  {
1946  std::lock_guard<std::mutex> sl(recentLock_);
1947  assert(applyResult.sequence && applyResult.publisherKey);
1948  assert(
1949  publisherListSequences_[*applyResult.publisherKey] ==
1950  *applyResult.sequence);
1951  }
1952 #endif // !NDEBUG
1953 
1954  break;
1955  case ListDisposition::stale:
1956  JLOG(p_journal_.warn())
1957  << "Stale validator list from peer " << remote_address_;
1958  // There are very few good reasons for a peer to send an
1959  // old list, particularly more than once.
1960  fee_ = Resource::feeBadData;
1961  break;
1962  case ListDisposition::untrusted:
1963  JLOG(p_journal_.warn())
1964  << "Untrusted validator list from peer " << remote_address_;
1965  // Charging this fee here won't hurt the peer in the normal
1966  // course of operation (i.e. refresh every 5 minutes), but
1967  // will add up if the peer is misbehaving.
1968  fee_ = Resource::feeUnwantedData;
1969  break;
1970  case ListDisposition::invalid:
1971  JLOG(p_journal_.warn())
1972  << "Invalid validator list from peer " << remote_address_;
1973  // This shouldn't ever happen with a well-behaved peer
1974  fee_ = Resource::feeInvalidSignature;
1975  break;
1976  case ListDisposition::unsupported_version:
1977  JLOG(p_journal_.warn())
1978  << "Unsupported version validator list from peer "
1979  << remote_address_;
1980  // During a version transition, this may be legitimate.
1981  // If it happens frequently, that's probably bad.
1982  fee_ = Resource::feeBadData;
1983  break;
1984  default:
1985  assert(false);
1986  }
1987  }
1988  catch (std::exception const& e)
1989  {
1990  JLOG(p_journal_.warn()) << "ValidatorList: Exception, " << e.what()
1991  << " from peer " << remote_address_;
1992  fee_ = Resource::feeBadData;
1993  }
1994 }
1995 
1996 void
1997 PeerImp::onMessage(std::shared_ptr<protocol::TMValidation> const& m)
1998 {
1999  auto const closeTime = app_.timeKeeper().closeTime();
2000 
2001  if (m->validation().size() < 50)
2002  {
2003  JLOG(p_journal_.warn()) << "Validation: Too small";
2004  fee_ = Resource::feeInvalidRequest;
2005  return;
2006  }
2007 
2008  try
2009  {
2010  std::shared_ptr<STValidation> val;
2011  {
2012  SerialIter sit(makeSlice(m->validation()));
2013  val = std::make_shared<STValidation>(
2014  std::ref(sit),
2015  [this](PublicKey const& pk) {
2016  return calcNodeID(
2017  app_.validatorManifests().getMasterKey(pk));
2018  },
2019  false);
2020  val->setSeen(closeTime);
2021  }
2022 
2023  if (!isCurrent(
2024  app_.getValidations().parms(),
2025  app_.timeKeeper().closeTime(),
2026  val->getSignTime(),
2027  val->getSeenTime()))
2028  {
2029  JLOG(p_journal_.trace()) << "Validation: Not current";
2030  fee_ = Resource::feeUnwantedData;
2031  return;
2032  }
2033 
2034  auto key = sha512Half(makeSlice(m->validation()));
2035  if (auto [added, relayed] =
2036  app_.getHashRouter().addSuppressionPeerWithStatus(key, id_);
2037  !added)
2038  {
2039  // Count unique messages (Slots has its own 'HashRouter') that a
2040  // peer receives within IDLED seconds of the message having been
2041  // relayed. Wait WAIT_ON_BOOTUP time to let the server establish
2042  // connections to peers.
2043  if (app_.config().REDUCE_RELAY_ENABLE && (bool)relayed &&
2044  (stopwatch().now() - *relayed) < squelch::IDLED &&
2045  squelch::epoch<std::chrono::minutes>(UptimeClock::now()) >
2046  squelch::WAIT_ON_BOOTUP)
2047  overlay_.updateSlotAndSquelch(
2048  key, val->getSignerPublic(), id_, protocol::mtVALIDATION);
2049  JLOG(p_journal_.trace()) << "Validation: duplicate";
2050  return;
2051  }
2052 
2053  auto const isTrusted =
2054  app_.validators().trusted(val->getSignerPublic());
2055 
2056  if (!isTrusted && (tracking_.load() == Tracking::diverged))
2057  {
2058  JLOG(p_journal_.debug())
2059  << "Validation: dropping untrusted from diverged peer";
2060  }
2061  if (isTrusted || cluster() || !app_.getFeeTrack().isLoadedLocal())
2062  {
2063  std::weak_ptr<PeerImp> weak = shared_from_this();
2064  app_.getJobQueue().addJob(
2065  isTrusted ? jtVALIDATION_t : jtVALIDATION_ut,
2066  "recvValidation->checkValidation",
2067  [weak, val, m](Job&) {
2068  if (auto peer = weak.lock())
2069  peer->checkValidation(val, m);
2070  });
2071  }
2072  else
2073  {
2074  JLOG(p_journal_.debug()) << "Validation: Dropping UNTRUSTED (load)";
2075  }
2076  }
2077  catch (std::exception const& e)
2078  {
2079  JLOG(p_journal_.warn())
2080  << "Exception processing validation: " << e.what();
2081  fee_ = Resource::feeInvalidRequest;
2082  }
2083 }
2084 
2085 void
2086 PeerImp::onMessage(std::shared_ptr<protocol::TMGetObjectByHash> const& m)
2087 {
2088  protocol::TMGetObjectByHash& packet = *m;
2089 
2090  if (packet.query())
2091  {
2092  // this is a query
2093  if (send_queue_.size() >= Tuning::dropSendQueue)
2094  {
2095  JLOG(p_journal_.debug()) << "GetObject: Large send queue";
2096  return;
2097  }
2098 
2099  if (packet.type() == protocol::TMGetObjectByHash::otFETCH_PACK)
2100  {
2101  doFetchPack(m);
2102  return;
2103  }
2104 
2105  fee_ = Resource::feeMediumBurdenPeer;
2106 
2107  protocol::TMGetObjectByHash reply;
2108 
2109  reply.set_query(false);
2110 
2111  if (packet.has_seq())
2112  reply.set_seq(packet.seq());
2113 
2114  reply.set_type(packet.type());
2115 
2116  if (packet.has_ledgerhash())
2117  {
2118  if (!stringIsUint256Sized(packet.ledgerhash()))
2119  {
2120  fee_ = Resource::feeInvalidRequest;
2121  return;
2122  }
2123 
2124  reply.set_ledgerhash(packet.ledgerhash());
2125  }
2126 
2127  // This is a very minimal implementation
2128  for (int i = 0; i < packet.objects_size(); ++i)
2129  {
2130  auto const& obj = packet.objects(i);
2131  if (obj.has_hash() && stringIsUint256Sized(obj.hash()))
2132  {
2133  uint256 const hash{obj.hash()};
2134  // VFALCO TODO Move this someplace more sensible so we don't
2135  // need to inject the NodeStore interfaces.
2136  std::uint32_t seq{obj.has_ledgerseq() ? obj.ledgerseq() : 0};
2137  auto nodeObject{app_.getNodeStore().fetchNodeObject(hash, seq)};
2138  if (!nodeObject)
2139  {
2140  if (auto shardStore = app_.getShardStore())
2141  {
2142  if (seq >= shardStore->earliestLedgerSeq())
2143  nodeObject = shardStore->fetchNodeObject(hash, seq);
2144  }
2145  }
2146  if (nodeObject)
2147  {
2148  protocol::TMIndexedObject& newObj = *reply.add_objects();
2149  newObj.set_hash(hash.begin(), hash.size());
2150  newObj.set_data(
2151  &nodeObject->getData().front(),
2152  nodeObject->getData().size());
2153 
2154  if (obj.has_nodeid())
2155  newObj.set_index(obj.nodeid());
2156  if (obj.has_ledgerseq())
2157  newObj.set_ledgerseq(obj.ledgerseq());
2158 
2159  // VFALCO NOTE "seq" in the message is obsolete
2160  }
2161  }
2162  }
2163 
2164  JLOG(p_journal_.trace()) << "GetObj: " << reply.objects_size() << " of "
2165  << packet.objects_size();
2166  send(std::make_shared<Message>(reply, protocol::mtGET_OBJECTS));
2167  }
2168  else
2169  {
2170  // this is a reply
2171  std::uint32_t pLSeq = 0;
2172  bool pLDo = true;
2173  bool progress = false;
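 // pLSeq is the ledger sequence of the objects currently being processed
 // and pLDo records whether that ledger's fetch-pack data is still wanted
 // (i.e. we do not already have the ledger locally).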
2174 
2175  for (int i = 0; i < packet.objects_size(); ++i)
2176  {
2177  const protocol::TMIndexedObject& obj = packet.objects(i);
2178 
2179  if (obj.has_hash() && stringIsUint256Sized(obj.hash()))
2180  {
2181  if (obj.has_ledgerseq())
2182  {
2183  if (obj.ledgerseq() != pLSeq)
2184  {
2185  if (pLDo && (pLSeq != 0))
2186  {
2187  JLOG(p_journal_.debug())
2188  << "GetObj: Full fetch pack for " << pLSeq;
2189  }
2190  pLSeq = obj.ledgerseq();
2191  pLDo = !app_.getLedgerMaster().haveLedger(pLSeq);
2192 
2193  if (!pLDo)
2194  {
2195  JLOG(p_journal_.debug())
2196  << "GetObj: Late fetch pack for " << pLSeq;
2197  }
2198  else
2199  progress = true;
2200  }
2201  }
2202 
2203  if (pLDo)
2204  {
2205  uint256 const hash{obj.hash()};
2206 
2207  app_.getLedgerMaster().addFetchPack(
2208  hash,
2209  std::make_shared<Blob>(
2210  obj.data().begin(), obj.data().end()));
2211  }
2212  }
2213  }
2214 
2215  if (pLDo && (pLSeq != 0))
2216  {
2217  JLOG(p_journal_.debug())
2218  << "GetObj: Partial fetch pack for " << pLSeq;
2219  }
2220  if (packet.type() == protocol::TMGetObjectByHash::otFETCH_PACK)
2221  app_.getLedgerMaster().gotFetchPack(progress, pLSeq);
2222  }
2223 }
2224 
2225 void
2226 PeerImp::onMessage(std::shared_ptr<protocol::TMSquelch> const& m)
2227 {
2228  if (!m->has_validatorpubkey())
2229  {
2230  charge(Resource::feeBadData);
2231  return;
2232  }
2233  auto validator = m->validatorpubkey();
2234  auto const slice{makeSlice(validator)};
2235  if (!publicKeyType(slice))
2236  {
2237  charge(Resource::feeBadData);
2238  return;
2239  }
2240  PublicKey key(slice);
2241  auto squelch = m->squelch();
2242  auto duration = m->has_squelchduration() ? m->squelchduration() : 0;
2243  auto sp = shared_from_this();
2244 
2245  // Ignore the squelch for validator's own messages.
2246  if (key == app_.getValidationPublicKey())
2247  {
2248  JLOG(p_journal_.debug())
2249  << "onMessage: TMSquelch discarding validator's squelch " << slice;
2250  return;
2251  }
2252 
2253  if (!strand_.running_in_this_thread())
2254  return post(strand_, [sp, key, squelch, duration]() {
2255  sp->squelch_.squelch(key, squelch, duration);
2256  });
2257 
2258  JLOG(p_journal_.debug())
2259  << "onMessage: TMSquelch " << slice << " " << id() << " " << duration;
2260 
2261  squelch_.squelch(key, squelch, duration);
2262 }
2263 
2264 //--------------------------------------------------------------------------
2265 
2266 void
2267 PeerImp::addLedger(
2268  uint256 const& hash,
2269  std::lock_guard<std::mutex> const& lockedRecentLock)
2270 {
2271  // lockedRecentLock is passed as a reminder that recentLock_ must be
2272  // locked by the caller.
2273  (void)lockedRecentLock;
2274 
2275  if (std::find(recentLedgers_.begin(), recentLedgers_.end(), hash) !=
2276  recentLedgers_.end())
2277  return;
2278 
2279  recentLedgers_.push_back(hash);
2280 }
2281 
2282 void
2283 PeerImp::doFetchPack(const std::shared_ptr<protocol::TMGetObjectByHash>& packet)
2284 {
2285  // VFALCO TODO Invert this dependency using an observer and shared state
2286  // object. Don't queue fetch pack jobs if we're under load or we already
2287  // have some queued.
2288  if (app_.getFeeTrack().isLoadedLocal() ||
2289  (app_.getLedgerMaster().getValidatedLedgerAge() > 40s) ||
2290  (app_.getJobQueue().getJobCount(jtPACK) > 10))
2291  {
2292  JLOG(p_journal_.info()) << "Too busy to make fetch pack";
2293  return;
2294  }
2295 
2296  if (!stringIsUint256Sized(packet->ledgerhash()))
2297  {
2298  JLOG(p_journal_.warn()) << "FetchPack hash size malformed";
2299  fee_ = Resource::feeInvalidRequest;
2300  return;
2301  }
2302 
2303  fee_ = Resource::feeHighBurdenPeer;
2304 
2305  uint256 const hash{packet->ledgerhash()};
2306 
2307  std::weak_ptr<PeerImp> weak = shared_from_this();
2308  auto elapsed = UptimeClock::now();
2309  auto const pap = &app_;
2310  app_.getJobQueue().addJob(
2311  jtPACK, "MakeFetchPack", [pap, weak, packet, hash, elapsed](Job&) {
2312  pap->getLedgerMaster().makeFetchPack(weak, packet, hash, elapsed);
2313  });
2314 }
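// Illustrative sketch (assumed field names, consistent with the accessors used
// in this handler and in onMessage(TMGetObjectByHash) above): the requesting
// peer asks for a fetch pack with a query keyed on the wanted ledger hash.
//
//     protocol::TMGetObjectByHash request;
//     request.set_query(true);
//     request.set_type(protocol::TMGetObjectByHash::otFETCH_PACK);
//     request.set_ledgerhash(wantedHash.begin(), wantedHash.size());
//     send(std::make_shared<Message>(request, protocol::mtGET_OBJECTS));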
2315 
2316 void
2317 PeerImp::checkTransaction(
2318  int flags,
2319  bool checkSignature,
2320  std::shared_ptr<STTx const> const& stx)
2321 {
2322  // VFALCO TODO Rewrite to not use exceptions
2323  try
2324  {
2325  // Expired?
2326  if (stx->isFieldPresent(sfLastLedgerSequence) &&
2327  (stx->getFieldU32(sfLastLedgerSequence) <
2328  app_.getLedgerMaster().getValidLedgerIndex()))
2329  {
2330  app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2331  charge(Resource::feeUnwantedData);
2332  return;
2333  }
2334 
2335  if (checkSignature)
2336  {
2337  // Check the signature before handing off to the job queue.
2338  if (auto [valid, validReason] = checkValidity(
2339  app_.getHashRouter(),
2340  *stx,
2341  app_.getLedgerMaster().getValidatedRules(),
2342  app_.config());
2343  valid != Validity::Valid)
2344  {
2345  if (!validReason.empty())
2346  {
2347  JLOG(p_journal_.trace())
2348  << "Exception checking transaction: " << validReason;
2349  }
2350 
2351  // Probably not necessary to set SF_BAD, but doesn't hurt.
2352  app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2353  charge(Resource::feeInvalidSignature);
2354  return;
2355  }
2356  }
2357  else
2358  {
2359  forceValidity(
2360  app_.getHashRouter(), stx->getTransactionID(), Validity::Valid);
2361  }
2362 
2363  std::string reason;
2364  auto tx = std::make_shared<Transaction>(stx, reason, app_);
2365 
2366  if (tx->getStatus() == INVALID)
2367  {
2368  if (!reason.empty())
2369  {
2370  JLOG(p_journal_.trace())
2371  << "Exception checking transaction: " << reason;
2372  }
2373  app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2374  charge(Resource::feeInvalidSignature);
2375  return;
2376  }
2377 
2378  bool const trusted(flags & SF_TRUSTED);
2379  app_.getOPs().processTransaction(
2380  tx, trusted, false, NetworkOPs::FailHard::no);
2381  }
2382  catch (std::exception const&)
2383  {
2384  app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2385  charge(Resource::feeBadData);
2386  }
2387 }
2388 
2389 // Called from our JobQueue
2390 void
2391 PeerImp::checkPropose(
2392  Job& job,
2393  std::shared_ptr<protocol::TMProposeSet> const& packet,
2394  RCLCxPeerPos peerPos)
2395 {
2396  bool isTrusted = (job.getType() == jtPROPOSAL_t);
2397 
2398  JLOG(p_journal_.trace())
2399  << "Checking " << (isTrusted ? "trusted" : "UNTRUSTED") << " proposal";
2400 
2401  assert(packet);
2402 
2403  if (!cluster() && !peerPos.checkSign())
2404  {
2405  JLOG(p_journal_.warn()) << "Proposal fails sig check";
2406  charge(Resource::feeInvalidSignature);
2407  return;
2408  }
2409 
2410  bool relay;
2411 
2412  if (isTrusted)
2413  relay = app_.getOPs().processTrustedProposal(peerPos);
2414  else
2415  relay = app_.config().RELAY_UNTRUSTED_PROPOSALS || cluster();
2416 
2417  if (relay)
2418  {
2419  // haveMessage contains peers, which are suppressed; i.e. the peers
2420  // are the source of the message, consequently the message should
2421  // not be relayed to these peers. But the message must be counted
2422  // as part of the squelch logic.
2423  auto haveMessage = app_.overlay().relay(
2424  *packet, peerPos.suppressionID(), peerPos.publicKey());
2425  if (app_.config().REDUCE_RELAY_ENABLE && !haveMessage.empty() &&
2426  squelch::epoch<std::chrono::minutes>(UptimeClock::now()) >
2427  squelch::WAIT_ON_BOOTUP)
2428  overlay_.updateSlotAndSquelch(
2429  peerPos.suppressionID(),
2430  peerPos.publicKey(),
2431  std::move(haveMessage),
2432  protocol::mtPROPOSE_LEDGER);
2433  }
2434 }
2435 
2436 void
2437 PeerImp::checkValidation(
2438  std::shared_ptr<STValidation> const& val,
2439  std::shared_ptr<protocol::TMValidation> const& packet)
2440 {
2441  try
2442  {
2443  // VFALCO Which functions throw?
2444  if (!cluster() && !val->isValid())
2445  {
2446  JLOG(p_journal_.warn()) << "Validation is invalid";
2447  charge(Resource::feeInvalidRequest);
2448  return;
2449  }
2450 
2451  if (app_.getOPs().recvValidation(val, std::to_string(id())) ||
2452  cluster())
2453  {
2454  auto const suppression =
2455  sha512Half(makeSlice(val->getSerialized()));
2456  // haveMessage contains peers, which are suppressed; i.e. the peers
2457  // are the source of the message, consequently the message should
2458  // not be relayed to these peers. But the message must be counted
2459  // as part of the squelch logic.
2460  auto haveMessage =
2461  overlay_.relay(*packet, suppression, val->getSignerPublic());
2462  if (app_.config().REDUCE_RELAY_ENABLE && !haveMessage.empty() &&
2463  squelch::epoch<std::chrono::minutes>(UptimeClock::now()) >
2464  squelch::WAIT_ON_BOOTUP)
2465  {
2466  overlay_.updateSlotAndSquelch(
2467  suppression,
2468  val->getSignerPublic(),
2469  std::move(haveMessage),
2470  protocol::mtVALIDATION);
2471  }
2472  }
2473  }
2474  catch (std::exception const&)
2475  {
2476  JLOG(p_journal_.trace()) << "Exception processing validation";
2477  charge(Resource::feeInvalidRequest);
2478  }
2479 }
2480 
2481 // Returns the set of peers that can help us get
2482 // the TX tree with the specified root hash.
2483 //
2484 static std::shared_ptr<PeerImp>
2485 getPeerWithTree(OverlayImpl& ov, uint256 const& rootHash, PeerImp const* skip)
2486 {
2487  std::shared_ptr<PeerImp> ret;
2488  int retScore = 0;
2489 
2490  ov.for_each([&](std::shared_ptr<PeerImp>&& p) {
2491  if (p->hasTxSet(rootHash) && p.get() != skip)
2492  {
2493  auto score = p->getScore(true);
2494  if (!ret || (score > retScore))
2495  {
2496  ret = std::move(p);
2497  retScore = score;
2498  }
2499  }
2500  });
2501 
2502  return ret;
2503 }
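// getLedger() below uses this helper to route a transaction-set request it
// cannot serve itself, stamping the forwarded packet with a request cookie so
// the eventual reply can be matched back to the original requester.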
2504 
2505 // Returns a random peer weighted by how likely to
2506 // have the ledger and how responsive it is.
2507 //
2508 static std::shared_ptr<PeerImp>
2509 getPeerWithLedger(
2510  OverlayImpl& ov,
2511  uint256 const& ledgerHash,
2512  LedgerIndex ledger,
2513  PeerImp const* skip)
2514 {
2515  std::shared_ptr<PeerImp> ret;
2516  int retScore = 0;
2517 
2518  ov.for_each([&](std::shared_ptr<PeerImp>&& p) {
2519  if (p->hasLedger(ledgerHash, ledger) && p.get() != skip)
2520  {
2521  auto score = p->getScore(true);
2522  if (!ret || (score > retScore))
2523  {
2524  ret = std::move(p);
2525  retScore = score;
2526  }
2527  }
2528  });
2529 
2530  return ret;
2531 }
2532 
2533 // VFALCO NOTE This function is way too big and cumbersome.
2534 void
2535 PeerImp::getLedger(std::shared_ptr<protocol::TMGetLedger> const& m)
2536 {
2537  protocol::TMGetLedger& packet = *m;
2538  std::shared_ptr<SHAMap> shared;
2539  SHAMap const* map = nullptr;
2540  protocol::TMLedgerData reply;
2541  bool fatLeaves = true;
2542  std::shared_ptr<Ledger const> ledger;
2543 
2544  if (packet.has_requestcookie())
2545  reply.set_requestcookie(packet.requestcookie());
2546 
2547  std::string logMe;
2548 
2549  if (packet.itype() == protocol::liTS_CANDIDATE)
2550  {
2551  // Request is for a transaction candidate set
2552  JLOG(p_journal_.trace()) << "GetLedger: Tx candidate set";
2553 
2554  if (!packet.has_ledgerhash() ||
2555  !stringIsUint256Sized(packet.ledgerhash()))
2556  {
2557  charge(Resource::feeInvalidRequest);
2558  JLOG(p_journal_.warn()) << "GetLedger: Tx candidate set invalid";
2559  return;
2560  }
2561 
2562  uint256 const txHash{packet.ledgerhash()};
2563 
2564  shared = app_.getInboundTransactions().getSet(txHash, false);
2565  map = shared.get();
2566 
2567  if (!map)
2568  {
2569  if (packet.has_querytype() && !packet.has_requestcookie())
2570  {
2571  JLOG(p_journal_.debug()) << "GetLedger: Routing Tx set request";
2572 
2573  if (auto const v = getPeerWithTree(overlay_, txHash, this))
2574  {
2575  packet.set_requestcookie(id());
2576  v->send(std::make_shared<Message>(
2577  packet, protocol::mtGET_LEDGER));
2578  return;
2579  }
2580 
2581  JLOG(p_journal_.info()) << "GetLedger: Route TX set failed";
2582  return;
2583  }
2584 
2585  JLOG(p_journal_.debug()) << "GetLedger: Can't provide map ";
2586  charge(Resource::feeInvalidRequest);
2587  return;
2588  }
2589 
2590  reply.set_ledgerseq(0);
2591  reply.set_ledgerhash(txHash.begin(), txHash.size());
2592  reply.set_type(protocol::liTS_CANDIDATE);
2593  fatLeaves = false; // We'll already have most transactions
2594  }
2595  else
2596  {
2597  if (send_queue_.size() >= Tuning::dropSendQueue)
2598  {
2599  JLOG(p_journal_.debug()) << "GetLedger: Large send queue";
2600  return;
2601  }
2602 
2603  if (app_.getFeeTrack().isLoadedLocal() && !cluster())
2604  {
2605  JLOG(p_journal_.debug()) << "GetLedger: Too busy";
2606  return;
2607  }
2608 
2609  // Figure out what ledger they want
2610  JLOG(p_journal_.trace()) << "GetLedger: Received";
2611 
2612  if (packet.has_ledgerhash())
2613  {
2614  if (!stringIsUint256Sized(packet.ledgerhash()))
2615  {
2616  charge(Resource::feeInvalidRequest);
2617  JLOG(p_journal_.warn()) << "GetLedger: Invalid request";
2618  return;
2619  }
2620 
2621  uint256 const ledgerhash{packet.ledgerhash()};
2622  logMe += "LedgerHash:";
2623  logMe += to_string(ledgerhash);
2624  ledger = app_.getLedgerMaster().getLedgerByHash(ledgerhash);
2625 
2626  if (!ledger && packet.has_ledgerseq())
2627  {
2628  if (auto shardStore = app_.getShardStore())
2629  {
2630  auto seq = packet.ledgerseq();
2631  if (seq >= shardStore->earliestLedgerSeq())
2632  ledger = shardStore->fetchLedger(ledgerhash, seq);
2633  }
2634  }
2635 
2636  if (!ledger)
2637  {
2638  JLOG(p_journal_.trace())
2639  << "GetLedger: Don't have " << ledgerhash;
2640  }
2641 
2642  if (!ledger &&
2643  (packet.has_querytype() && !packet.has_requestcookie()))
2644  {
2645  // We don't have the requested ledger
2646  // Search for a peer who might
2647  auto const v = getPeerWithLedger(
2648  overlay_,
2649  ledgerhash,
2650  packet.has_ledgerseq() ? packet.ledgerseq() : 0,
2651  this);
2652  if (!v)
2653  {
2654  JLOG(p_journal_.trace()) << "GetLedger: Cannot route";
2655  return;
2656  }
2657 
2658  packet.set_requestcookie(id());
2659  v->send(
2660  std::make_shared<Message>(packet, protocol::mtGET_LEDGER));
2661  JLOG(p_journal_.debug()) << "GetLedger: Request routed";
2662  return;
2663  }
2664  }
2665  else if (packet.has_ledgerseq())
2666  {
2667  if (packet.ledgerseq() < app_.getLedgerMaster().getEarliestFetch())
2668  {
2669  JLOG(p_journal_.debug()) << "GetLedger: Early ledger request";
2670  return;
2671  }
2672  ledger = app_.getLedgerMaster().getLedgerBySeq(packet.ledgerseq());
2673  if (!ledger)
2674  {
2675  JLOG(p_journal_.debug())
2676  << "GetLedger: Don't have " << packet.ledgerseq();
2677  }
2678  }
2679  else if (packet.has_ltype() && (packet.ltype() == protocol::ltCLOSED))
2680  {
2681  ledger = app_.getLedgerMaster().getClosedLedger();
2682  assert(!ledger->open());
2683  // VFALCO ledger should never be null!
2684  // VFALCO How can the closed ledger be open?
2685 #if 0
2686  if (ledger && ledger->info().open)
2687  ledger = app_.getLedgerMaster ().getLedgerBySeq (
2688  ledger->info().seq - 1);
2689 #endif
2690  }
2691  else
2692  {
2693  charge(Resource::feeInvalidRequest);
2694  JLOG(p_journal_.warn()) << "GetLedger: Unknown request";
2695  return;
2696  }
2697 
2698  if ((!ledger) ||
2699  (packet.has_ledgerseq() &&
2700  (packet.ledgerseq() != ledger->info().seq)))
2701  {
2702  charge(Resource::feeInvalidRequest);
2703 
2704  if (ledger)
2705  {
2706  JLOG(p_journal_.warn()) << "GetLedger: Invalid sequence";
2707  }
2708  return;
2709  }
2710 
2711  if (!packet.has_ledgerseq() &&
2712  (ledger->info().seq < app_.getLedgerMaster().getEarliestFetch()))
2713  {
2714  JLOG(p_journal_.debug()) << "GetLedger: Early ledger request";
2715  return;
2716  }
2717 
2718  // Fill out the reply
2719  auto const lHash = ledger->info().hash;
2720  reply.set_ledgerhash(lHash.begin(), lHash.size());
2721  reply.set_ledgerseq(ledger->info().seq);
2722  reply.set_type(packet.itype());
2723 
2724  if (packet.itype() == protocol::liBASE)
2725  {
2726  // they want the ledger base data
2727  JLOG(p_journal_.trace()) << "GetLedger: Base data";
2728  Serializer nData(128);
2729  addRaw(ledger->info(), nData);
2730  reply.add_nodes()->set_nodedata(
2731  nData.getDataPtr(), nData.getLength());
2732 
2733  auto const& stateMap = ledger->stateMap();
2734  if (stateMap.getHash() != beast::zero)
2735  {
2736  // return account state root node if possible
2737  Serializer rootNode(768);
2738 
2739  stateMap.serializeRoot(rootNode);
2740  reply.add_nodes()->set_nodedata(
2741  rootNode.getDataPtr(), rootNode.getLength());
2742 
2743  if (ledger->info().txHash != beast::zero)
2744  {
2745  auto const& txMap = ledger->txMap();
2746  if (txMap.getHash() != beast::zero)
2747  {
2748  rootNode.erase();
2749 
2750  txMap.serializeRoot(rootNode);
2751  reply.add_nodes()->set_nodedata(
2752  rootNode.getDataPtr(), rootNode.getLength());
2753  }
2754  }
2755  }
2756 
2757  auto oPacket =
2758  std::make_shared<Message>(reply, protocol::mtLEDGER_DATA);
2759  send(oPacket);
2760  return;
2761  }
2762 
2763  if (packet.itype() == protocol::liTX_NODE)
2764  {
2765  map = &ledger->txMap();
2766  logMe += " TX:";
2767  logMe += to_string(map->getHash());
2768  }
2769  else if (packet.itype() == protocol::liAS_NODE)
2770  {
2771  map = &ledger->stateMap();
2772  logMe += " AS:";
2773  logMe += to_string(map->getHash());
2774  }
2775  }
2776 
2777  if (!map || (packet.nodeids_size() == 0))
2778  {
2779  JLOG(p_journal_.warn()) << "GetLedger: Can't find map or empty request";
2780  charge(Resource::feeInvalidRequest);
2781  return;
2782  }
2783 
2784  JLOG(p_journal_.trace()) << "GetLedger: " << logMe;
2785 
2786  auto const depth = packet.has_querydepth()
2787  ? (std::min(packet.querydepth(), 3u))
2788  : (isHighLatency() ? 2 : 1);
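    // Example: a request advertising querydepth 5 is clamped to 3; a request
    // with no querydepth gets depth 2 from a high-latency peer, else 1.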
2789 
2790  for (int i = 0;
2791  (i < packet.nodeids().size() &&
2792  (reply.nodes().size() < Tuning::maxReplyNodes));
2793  ++i)
2794  {
2795  auto const mn = deserializeSHAMapNodeID(packet.nodeids(i));
2796 
2797  if (!mn)
2798  {
2799  JLOG(p_journal_.warn()) << "GetLedger: Invalid node " << logMe;
2800  charge(Resource::feeBadData);
2801  return;
2802  }
2803 
2804  std::vector<SHAMapNodeID> nodeIDs;
2805  std::vector<Blob> rawNodes;
2806 
2807  try
2808  {
2809  if (map->getNodeFat(*mn, nodeIDs, rawNodes, fatLeaves, depth))
2810  {
2811  assert(nodeIDs.size() == rawNodes.size());
2812  JLOG(p_journal_.trace()) << "GetLedger: getNodeFat got "
2813  << rawNodes.size() << " nodes";
2814  std::vector<SHAMapNodeID>::iterator nodeIDIterator;
2815  std::vector<Blob>::iterator rawNodeIterator;
2816 
2817  for (nodeIDIterator = nodeIDs.begin(),
2818  rawNodeIterator = rawNodes.begin();
2819  nodeIDIterator != nodeIDs.end();
2820  ++nodeIDIterator, ++rawNodeIterator)
2821  {
2822  protocol::TMLedgerNode* node = reply.add_nodes();
2823  node->set_nodeid(nodeIDIterator->getRawString());
2824  node->set_nodedata(
2825  &rawNodeIterator->front(), rawNodeIterator->size());
2826  }
2827  }
2828  else
2829  {
2830  JLOG(p_journal_.warn())
2831  << "GetLedger: getNodeFat returns false";
2832  }
2833  }
2834  catch (std::exception&)
2835  {
2836  std::string info;
2837 
2838  if (packet.itype() == protocol::liTS_CANDIDATE)
2839  info = "TS candidate";
2840  else if (packet.itype() == protocol::liBASE)
2841  info = "Ledger base";
2842  else if (packet.itype() == protocol::liTX_NODE)
2843  info = "TX node";
2844  else if (packet.itype() == protocol::liAS_NODE)
2845  info = "AS node";
2846 
2847  if (!packet.has_ledgerhash())
2848  info += ", no hash specified";
2849 
2850  JLOG(p_journal_.warn())
2851  << "getNodeFat( " << *mn << ") throws exception: " << info;
2852  }
2853  }
2854 
2855  JLOG(p_journal_.info())
2856  << "Got request for " << packet.nodeids().size() << " nodes at depth "
2857  << depth << ", return " << reply.nodes().size() << " nodes";
2858 
2859  auto oPacket = std::make_shared<Message>(reply, protocol::mtLEDGER_DATA);
2860  send(oPacket);
2861 }
2862 
2863 int
2864 PeerImp::getScore(bool haveItem) const
2865 {
2866  // Random component of score, used to break ties and avoid
2867  // overloading the "best" peer
2868  static const int spRandomMax = 9999;
2869 
2870  // Score for being very likely to have the thing we are
2871  // looking for; should be roughly spRandomMax
2872  static const int spHaveItem = 10000;
2873 
2874  // Score reduction for each millisecond of latency; should
2875  // be roughly spRandomMax divided by the maximum reasonable
2876  // latency
2877  static const int spLatency = 30;
2878 
2879  // Penalty for unknown latency; should be roughly spRandomMax
2880  static const int spNoLatency = 8000;
2881 
2882  int score = rand_int(spRandomMax);
2883 
2884  if (haveItem)
2885  score += spHaveItem;
2886 
2887  boost::optional<std::chrono::milliseconds> latency;
2888  {
2889  std::lock_guard sl(recentLock_);
2890  latency = latency_;
2891  }
2892 
2893  if (latency)
2894  score -= latency->count() * spLatency;
2895  else
2896  score -= spNoLatency;
2897 
2898  return score;
2899 }
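// Worked example (illustrative): with haveItem == true and a measured latency
// of 100ms the score is rand_int(9999) + 10000 - 100 * 30, i.e. somewhere in
// roughly 7000-17000; with unknown latency it is rand_int(9999) + 10000 - 8000,
// i.e. roughly 2000-12000.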
2900 
2901 bool
2902 PeerImp::isHighLatency() const
2903 {
2904  std::lock_guard sl(recentLock_);
2905  return latency_ >= peerHighLatency;
2906 }
2907 
2908 void
2909 PeerImp::Metrics::add_message(std::uint64_t bytes)
2910 {
2911  using namespace std::chrono_literals;
2912  std::unique_lock lock{mutex_};
2913 
2914  totalBytes_ += bytes;
2915  accumBytes_ += bytes;
2916  auto const timeElapsed = clock_type::now() - intervalStart_;
2917  auto const timeElapsedInSecs =
2918  std::chrono::duration_cast<std::chrono::seconds>(timeElapsed);
2919 
2920  if (timeElapsedInSecs >= 1s)
2921  {
2922  auto const avgBytes = accumBytes_ / timeElapsedInSecs.count();
2923  rollingAvg_.push_back(avgBytes);
2924 
2925  auto const totalBytes =
2926  std::accumulate(rollingAvg_.begin(), rollingAvg_.end(), 0ull);
2927  rollingAvgBytes_ = totalBytes / rollingAvg_.size();
2928 
2929  intervalStart_ = clock_type::now();
2930  accumBytes_ = 0;
2931  }
2932 }
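// Illustrative: if 5000 bytes accumulate over an interval that ends up spanning
// 2 elapsed seconds, 2500 (bytes/second) is pushed into rollingAvg_, and
// rollingAvgBytes_ becomes the mean of the samples currently held there.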
2933 
2934 std::uint64_t
2935 PeerImp::Metrics::average_bytes() const
2936 {
2937  std::shared_lock lock{mutex_};
2938  return rollingAvgBytes_;
2939 }
2940 
2941 std::uint64_t
2942 PeerImp::Metrics::total_bytes() const
2943 {
2944  std::shared_lock lock{mutex_};
2945  return totalBytes_;
2946 }
2947 
2948 } // namespace ripple