// rippled
// PeerImp.cpp
1 //------------------------------------------------------------------------------
2 /*
3  This file is part of rippled: https://github.com/ripple/rippled
4  Copyright (c) 2012, 2013 Ripple Labs Inc.
5 
6  Permission to use, copy, modify, and/or distribute this software for any
7  purpose with or without fee is hereby granted, provided that the above
8  copyright notice and this permission notice appear in all copies.
9 
10  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 //==============================================================================
19 
20 #include <ripple/app/consensus/RCLValidations.h>
21 #include <ripple/app/ledger/InboundLedgers.h>
22 #include <ripple/app/ledger/InboundTransactions.h>
23 #include <ripple/app/ledger/LedgerMaster.h>
24 #include <ripple/app/misc/HashRouter.h>
25 #include <ripple/app/misc/LoadFeeTrack.h>
26 #include <ripple/app/misc/NetworkOPs.h>
27 #include <ripple/app/misc/Transaction.h>
28 #include <ripple/app/misc/ValidatorList.h>
29 #include <ripple/app/tx/apply.h>
30 #include <ripple/basics/UptimeClock.h>
31 #include <ripple/basics/base64.h>
32 #include <ripple/basics/random.h>
33 #include <ripple/basics/safe_cast.h>
34 #include <ripple/beast/core/LexicalCast.h>
35 #include <ripple/beast/core/SemanticVersion.h>
36 #include <ripple/nodestore/DatabaseShard.h>
37 #include <ripple/overlay/Cluster.h>
38 #include <ripple/overlay/impl/PeerImp.h>
39 #include <ripple/overlay/impl/Tuning.h>
40 #include <ripple/overlay/predicates.h>
41 #include <ripple/protocol/digest.h>
42 
43 #include <boost/algorithm/clamp.hpp>
44 #include <boost/algorithm/string.hpp>
45 #include <boost/algorithm/string/predicate.hpp>
46 #include <boost/beast/core/ostream.hpp>
47 
48 #include <algorithm>
49 #include <memory>
50 #include <mutex>
51 #include <numeric>
52 #include <sstream>
53 
54 using namespace std::chrono_literals;
55 
56 namespace ripple {
57 
namespace {
// Round-trip latency above which a peer is considered high-latency.
// NOTE(review): not referenced within this chunk — presumably used when
// ranking/culling peers; confirm against the rest of the file.
std::chrono::milliseconds constexpr peerHighLatency{300};

// Interval at which the per-peer maintenance timer fires (see setTimer).
std::chrono::seconds constexpr peerTimerInterval{60};
} // namespace
65 
// Constructs a peer from a completed inbound HTTP upgrade handshake.
// NOTE(review): this chunk appears to have a parameter line elided — the
// initializers below reference `slot` (remote_address_, slot_) but no such
// parameter is visible in the signature. Confirm against upstream.
PeerImp::PeerImp(
    Application& app,
    id_t id,
    http_request_type&& request,
    PublicKey const& publicKey,
    ProtocolVersion protocol,
    Resource::Consumer consumer,
    std::unique_ptr<stream_type>&& stream_ptr,
    OverlayImpl& overlay)
    : Child(overlay)
    , app_(app)
    , id_(id)
    , sink_(app_.journal("Peer"), makePrefix(id))
    , p_sink_(app_.journal("Protocol"), makePrefix(id))
    , journal_(sink_)
    , p_journal_(p_sink_)
    , stream_ptr_(std::move(stream_ptr))
    , socket_(stream_ptr_->next_layer().socket())
    , stream_(*stream_ptr_)
    , strand_(socket_.get_executor())
    , timer_(waitable_timer{socket_.get_executor()})
    , remote_address_(slot->remote_endpoint())
    , overlay_(overlay)
    , inbound_(true)  // this constructor is the inbound path
    , protocol_(protocol)
    , tracking_(Tracking::unknown)
    , trackingTime_(clock_type::now())
    , publicKey_(publicKey)
    , lastPingTime_(clock_type::now())
    , creationTime_(clock_type::now())
    , usage_(consumer)
    , slot_(slot)
    , request_(std::move(request))
    , headers_(request_)
    // Compression is enabled only if the peer offered lz4 in the handshake.
    , compressionEnabled_(
          headers_["X-Offer-Compression"] == "lz4" ? Compressed::On
                                                   : Compressed::Off)
{
}
107 
// NOTE(review): the destructor's signature line (`PeerImp::~PeerImp()`) and
// several teardown statements between the two visible statements appear to
// have been elided from this chunk; confirm against upstream.
{
    // Capture cluster membership before teardown so departure can be logged.
    const bool inCluster{cluster()};

    if (inCluster)
    {
        JLOG(journal_.warn()) << name() << " left cluster";
    }
}
122 
// Helper function to check for valid uint256 values in protobuf buffers
// Returns true iff the buffer holds exactly uint256::size() (32) bytes.
// NOTE(review): the line carrying this helper's name and parameter list
// appears to have been elided from this chunk (the body references
// `pBuffStr`); confirm against upstream.
static bool
{
    return pBuffStr.size() == uint256::size();
}
129 
130 void
132 {
133  if (!strand_.running_in_this_thread())
134  return post(strand_, std::bind(&PeerImp::run, shared_from_this()));
135 
136  auto parseLedgerHash =
137  [](std::string const& value) -> boost::optional<uint256> {
138  if (uint256 ret; ret.parseHex(value))
139  return ret;
140 
141  if (auto const s = base64_decode(value); s.size() == uint256::size())
142  return uint256{s};
143 
144  return boost::none;
145  };
146 
147  boost::optional<uint256> closed;
148  boost::optional<uint256> previous;
149 
150  if (auto const iter = headers_.find("Closed-Ledger");
151  iter != headers_.end())
152  {
153  closed = parseLedgerHash(iter->value().to_string());
154 
155  if (!closed)
156  fail("Malformed handshake data (1)");
157  }
158 
159  if (auto const iter = headers_.find("Previous-Ledger");
160  iter != headers_.end())
161  {
162  previous = parseLedgerHash(iter->value().to_string());
163 
164  if (!previous)
165  fail("Malformed handshake data (2)");
166  }
167 
168  if (previous && !closed)
169  fail("Malformed handshake data (3)");
170 
171  {
173  if (closed)
174  closedLedgerHash_ = *closed;
175  if (previous)
176  previousLedgerHash_ = *previous;
177  }
178 
179  if (inbound_)
180  doAccept();
181  else
182  doProtocolStart();
183 
184  // Request shard info from peer
185  protocol::TMGetPeerShardInfo tmGPS;
186  tmGPS.set_hops(0);
187  send(std::make_shared<Message>(tmGPS, protocol::mtGET_PEER_SHARD_INFO));
188 
189  setTimer();
190 }
191 
192 void
194 {
195  if (!strand_.running_in_this_thread())
196  return post(strand_, std::bind(&PeerImp::stop, shared_from_this()));
197  if (socket_.is_open())
198  {
199  // The rationale for using different severity levels is that
200  // outbound connections are under our control and may be logged
201  // at a higher level, but inbound connections are more numerous and
202  // uncontrolled so to prevent log flooding the severity is reduced.
203  //
204  if (inbound_)
205  {
206  JLOG(journal_.debug()) << "Stop";
207  }
208  else
209  {
210  JLOG(journal_.info()) << "Stop";
211  }
212  }
213  close();
214 }
215 
216 //------------------------------------------------------------------------------
217 
218 void
220 {
221  if (!strand_.running_in_this_thread())
222  return post(strand_, std::bind(&PeerImp::send, shared_from_this(), m));
223  if (gracefulClose_)
224  return;
225  if (detaching_)
226  return;
227 
228  auto validator = m->getValidatorKey();
229  if (validator && squelch_.isSquelched(*validator))
230  return;
231 
233  safe_cast<TrafficCount::category>(m->getCategory()),
234  false,
235  static_cast<int>(m->getBuffer(compressionEnabled_).size()));
236 
237  auto sendq_size = send_queue_.size();
238 
239  if (sendq_size < Tuning::targetSendQueue)
240  {
241  // To detect a peer that does not read from their
242  // side of the connection, we expect a peer to have
243  // a small senq periodically
244  large_sendq_ = 0;
245  }
246  else if (
248  (sendq_size % Tuning::sendQueueLogFreq) == 0)
249  {
250  std::string const n = name();
251  JLOG(journal_.debug()) << (n.empty() ? remote_address_.to_string() : n)
252  << " sendq: " << sendq_size;
253  }
254 
255  send_queue_.push(m);
256 
257  if (sendq_size != 0)
258  return;
259 
260  boost::asio::async_write(
261  stream_,
262  boost::asio::buffer(
263  send_queue_.front()->getBuffer(compressionEnabled_)),
264  bind_executor(
265  strand_,
266  std::bind(
269  std::placeholders::_1,
270  std::placeholders::_2)));
271 }
272 
// Applies a resource fee to this peer's consumer; if the consumer crosses
// the drop threshold (and we are on our strand) the connection is failed.
// NOTE(review): the signature line (presumably taking a Resource::Charge)
// and a statement inside the branch appear elided from this chunk.
void
{
    if ((usage_.charge(fee) == Resource::drop) && usage_.disconnect() &&
        strand_.running_in_this_thread())
    {
        // Sever the connection
        fail("charge: Resources");
    }
}
284 
285 //------------------------------------------------------------------------------
286 
287 bool
289 {
290  auto const iter = headers_.find("Crawl");
291  if (iter == headers_.end())
292  return false;
293  return boost::iequals(iter->value(), "public");
294 }
295 
296 bool
298 {
299  return static_cast<bool>(app_.cluster().member(publicKey_));
300 }
301 
304 {
305  if (inbound_)
306  return headers_["User-Agent"].to_string();
307  return headers_["Server"].to_string();
308 }
309 
// Builds the JSON status object for this peer (admin "peers" reporting).
// NOTE(review): elided from this chunk: the `Json::Value PeerImp::json()`
// signature, the declaration of `ret`, and the lock statements guarding
// latency_ and the last-status/closed-ledger members (the bare scopes
// below). Confirm against upstream.
{

    ret[jss::public_key] = toBase58(TokenType::NodePublic, publicKey_);
    ret[jss::address] = remote_address_.to_string();

    if (inbound_)
        ret[jss::inbound] = true;

    if (cluster())
    {
        ret[jss::cluster] = true;

        if (auto const n = name(); !n.empty())
            // Could move here if Json::Value supported moving from a string
            ret[jss::name] = n;
    }

    if (auto const d = domain(); !d.empty())
        ret[jss::server_domain] = domain();

    if (auto const nid = headers_["Network-ID"].to_string(); !nid.empty())
        ret[jss::network_id] = nid;

    ret[jss::load] = usage_.balance();

    if (auto const version = getVersion(); !version.empty())
        ret[jss::version] = version;

    ret[jss::protocol] = to_string(protocol_);

    {
        if (latency_)
            ret[jss::latency] = static_cast<Json::UInt>(latency_->count());
    }

    ret[jss::uptime] = static_cast<Json::UInt>(
        std::chrono::duration_cast<std::chrono::seconds>(uptime()).count());

    std::uint32_t minSeq, maxSeq;
    ledgerRange(minSeq, maxSeq);

    if ((minSeq != 0) || (maxSeq != 0))
        ret[jss::complete_ledgers] =
            std::to_string(minSeq) + " - " + std::to_string(maxSeq);

    switch (tracking_.load())
    {
        case Tracking::diverged:
            ret[jss::track] = "diverged";
            break;

        case Tracking::unknown:
            ret[jss::track] = "unknown";
            break;

        case Tracking::converged:
            // Nothing to do here
            break;
    }

    uint256 closedLedgerHash;
    protocol::TMStatusChange last_status;
    {
        closedLedgerHash = closedLedgerHash_;
        last_status = last_status_;
    }

    if (closedLedgerHash != beast::zero)
        ret[jss::ledger] = to_string(closedLedgerHash);

    if (last_status.has_newstatus())
    {
        switch (last_status.newstatus())
        {
            case protocol::nsCONNECTING:
                ret[jss::status] = "connecting";
                break;

            case protocol::nsCONNECTED:
                ret[jss::status] = "connected";
                break;

            case protocol::nsMONITORING:
                ret[jss::status] = "monitoring";
                break;

            case protocol::nsVALIDATING:
                ret[jss::status] = "validating";
                break;

            case protocol::nsSHUTTING:
                ret[jss::status] = "shutting";
                break;

            default:
                JLOG(p_journal_.warn())
                    << "Unknown status: " << last_status.newstatus();
        }
    }

    ret[jss::metrics] = Json::Value(Json::objectValue);
    ret[jss::metrics][jss::total_bytes_recv] =
        std::to_string(metrics_.recv.total_bytes());
    ret[jss::metrics][jss::total_bytes_sent] =
        std::to_string(metrics_.sent.total_bytes());
    ret[jss::metrics][jss::avg_bps_recv] =
        std::to_string(metrics_.recv.average_bytes());
    ret[jss::metrics][jss::avg_bps_sent] =
        std::to_string(metrics_.sent.average_bytes());

    return ret;
}
427 
// Reports whether this peer's negotiated protocol version supports the
// given optional feature.
// NOTE(review): the signature line (taking the feature enum `f`) and the
// `case` label inside the switch appear to have been elided from this
// chunk; confirm against upstream.
bool
{
    switch (f)
    {
            // Feature available from protocol version 2.1 onward.
            return protocol_ >= make_protocol(2, 1);
    }
    return false;
}
438 
439 //------------------------------------------------------------------------------
440 
// Reports whether this peer is believed to have the ledger with the given
// hash/sequence: either within its advertised range, among recently seen
// ledgers, or (by sequence) in a shard it holds.
bool
PeerImp::hasLedger(uint256 const& hash, std::uint32_t seq) const
{
    {
        // NOTE(review): a lock statement and the trailing clause of this
        // condition appear to have been elided from this chunk (the `&&`
        // dangles); confirm against upstream before compiling.
        if ((seq != 0) && (seq >= minLedger_) && (seq <= maxLedger_) &&
            return true;
        if (std::find(recentLedgers_.begin(), recentLedgers_.end(), hash) !=
            recentLedgers_.end())
            return true;
    }

    // NOTE(review): the second operand of this `&&` is also elided.
    return seq >= app_.getNodeStore().earliestLedgerSeq() &&
}
457 
458 void
460 {
462 
463  minSeq = minLedger_;
464  maxSeq = maxLedger_;
465 }
466 
// Reports whether this peer has advertised holding the given shard index.
// NOTE(review): the signature line (presumably taking `shardIndex`) and a
// lock guarding shardInfo_ appear to have been elided from this chunk.
bool
{
    auto const it{shardInfo_.find(publicKey_)};
    if (it != shardInfo_.end())
        return boost::icl::contains(it->second.shardIndexes, shardIndex);
    return false;
}
476 
477 bool
478 PeerImp::hasTxSet(uint256 const& hash) const
479 {
481  return std::find(recentTxSets_.begin(), recentTxSets_.end(), hash) !=
482  recentTxSets_.end();
483 }
484 
// NOTE(review): the signature line and the body statements of this function
// appear to have been elided from this chunk; per the comment below it
// manipulates the closed/previous ledger hashes under recentLock_. Confirm
// against upstream.
void
{
    // Operations on closedLedgerHash_ and previousLedgerHash_ must be
    // guarded by recentLock_.
}
494 
// Reports whether this peer usefully covers a requested ledger range:
// it must not be diverged and the bounds must fall inside its advertised
// min/max ledgers.
// NOTE(review): the signature line (two sequence bounds, `uMin`/`uMax`)
// and a lock statement appear to have been elided from this chunk.
bool
{
    return (tracking_ != Tracking::diverged) && (uMin >= minLedger_) &&
        (uMax <= maxLedger_);
}
502 
503 //------------------------------------------------------------------------------
504 
505 void
507 {
508  assert(strand_.running_in_this_thread());
509  if (socket_.is_open())
510  {
511  detaching_ = true; // DEPRECATED
512  error_code ec;
513  timer_.cancel(ec);
514  socket_.close(ec);
516  if (inbound_)
517  {
518  JLOG(journal_.debug()) << "Closed";
519  }
520  else
521  {
522  JLOG(journal_.info()) << "Closed";
523  }
524  }
525 }
526 
527 void
529 {
530  if (!strand_.running_in_this_thread())
531  return post(
532  strand_,
533  std::bind(
534  (void (Peer::*)(std::string const&)) & PeerImp::fail,
536  reason));
538  {
539  std::string const n = name();
540  JLOG(journal_.warn()) << (n.empty() ? remote_address_.to_string() : n)
541  << " failed: " << reason;
542  }
543  close();
544 }
545 
546 void
548 {
549  assert(strand_.running_in_this_thread());
550  if (socket_.is_open())
551  {
552  JLOG(journal_.warn())
553  << name << " from " << toBase58(TokenType::NodePublic, publicKey_)
554  << " at " << remote_address_.to_string() << ": " << ec.message();
555  }
556  close();
557 }
558 
// Returns the shard index ranges this peer has advertised for itself, or
// boost::none if we have none recorded.
// NOTE(review): the signature line and a lock guarding shardInfo_ appear
// to have been elided from this chunk.
boost::optional<RangeSet<std::uint32_t>>
{
    auto it{shardInfo_.find(publicKey_)};
    if (it != shardInfo_.end())
        return it->second.shardIndexes;
    return boost::none;
}
568 
// Returns a copy of all shard info collected from/through this peer, or
// boost::none if empty.
// NOTE(review): the signature line and a lock guarding shardInfo_ appear
// to have been elided from this chunk.
boost::optional<hash_map<PublicKey, PeerImp::ShardInfo>>
{
    if (!shardInfo_.empty())
        return shardInfo_;
    return boost::none;
}
577 
578 void
580 {
581  assert(strand_.running_in_this_thread());
582  assert(socket_.is_open());
583  assert(!gracefulClose_);
584  gracefulClose_ = true;
585 #if 0
586  // Flush messages
587  while(send_queue_.size() > 1)
588  send_queue_.pop_back();
589 #endif
590  if (send_queue_.size() > 0)
591  return;
592  setTimer();
593  stream_.async_shutdown(bind_executor(
594  strand_,
595  std::bind(
596  &PeerImp::onShutdown, shared_from_this(), std::placeholders::_1)));
597 }
598 
599 void
601 {
602  error_code ec;
603  timer_.expires_from_now(peerTimerInterval, ec);
604 
605  if (ec)
606  {
607  JLOG(journal_.error()) << "setTimer: " << ec.message();
608  return;
609  }
610  timer_.async_wait(bind_executor(
611  strand_,
612  std::bind(
613  &PeerImp::onTimer, shared_from_this(), std::placeholders::_1)));
614 }
615 
616 // convenience for ignoring the error code
617 void
619 {
620  error_code ec;
621  timer_.cancel(ec);
622 }
623 
624 //------------------------------------------------------------------------------
625 
628 {
630  ss << "[" << std::setfill('0') << std::setw(3) << id << "] ";
631  return ss.str();
632 }
633 
634 void
636 {
637  if (!socket_.is_open())
638  return;
639 
640  if (ec == boost::asio::error::operation_aborted)
641  return;
642 
643  if (ec)
644  {
645  // This should never happen
646  JLOG(journal_.error()) << "onTimer: " << ec.message();
647  return close();
648  }
649 
651  {
652  fail("Large send queue");
653  return;
654  }
655 
656  if (auto const t = tracking_.load(); !inbound_ && t != Tracking::converged)
657  {
658  clock_type::duration duration;
659 
660  {
662  duration = clock_type::now() - trackingTime_;
663  }
664 
665  if ((t == Tracking::diverged &&
666  (duration > app_.config().MAX_DIVERGED_TIME)) ||
667  (t == Tracking::unknown &&
668  (duration > app_.config().MAX_UNKNOWN_TIME)))
669  {
671  fail("Not useful");
672  return;
673  }
674  }
675 
676  // Already waiting for PONG
677  if (lastPingSeq_)
678  {
679  fail("Ping Timeout");
680  return;
681  }
682 
684  lastPingSeq_ = rand_int<std::uint32_t>();
685 
686  protocol::TMPing message;
687  message.set_type(protocol::TMPing::ptPING);
688  message.set_seq(*lastPingSeq_);
689 
690  send(std::make_shared<Message>(message, protocol::mtPING));
691 
692  setTimer();
693 }
694 
695 void
697 {
698  cancelTimer();
699  // If we don't get eof then something went wrong
700  if (!ec)
701  {
702  JLOG(journal_.error()) << "onShutdown: expected error condition";
703  return close();
704  }
705  if (ec != boost::asio::error::eof)
706  return fail("onShutdown", ec);
707  close();
708 }
709 
710 //------------------------------------------------------------------------------
// Completes the inbound side of the handshake: builds and writes the HTTP
// "switching protocols" response, then starts the peer protocol.
// NOTE(review): several lines appear elided from this chunk — the
// `PeerImp::doAccept()` signature, the value streamed after "Public Key:",
// a (write) lock around the name_ assignment, a statement after the
// cluster block, the second operand of the compression check, and the
// head/remaining arguments of the handshake-response builder call.
// Confirm against upstream.
void
{
    assert(read_buffer_.size() == 0);

    JLOG(journal_.debug()) << "doAccept: " << remote_address_;

    auto const sharedValue = makeSharedValue(*stream_ptr_, journal_);

    // This shouldn't fail since we already computed
    // the shared value successfully in OverlayImpl
    if (!sharedValue)
        return fail("makeSharedValue: Unexpected failure");

    JLOG(journal_.info()) << "Protocol: " << to_string(protocol_);
    JLOG(journal_.info()) << "Public Key: "

    if (auto member = app_.cluster().member(publicKey_))
    {
        {
            name_ = *member;
        }
        JLOG(journal_.info()) << "Cluster name: " << *member;
    }

    // XXX Set timer: connection is in grace period to be useful.
    // XXX Set timer: connection idle (idle may vary depending on connection
    // type.)

    // Build the full upgrade response into a buffer before writing.
    auto write_buffer = [this, sharedValue]() {
        auto buf = std::make_shared<boost::beast::multi_buffer>();

        http_response_type resp;
        resp.result(boost::beast::http::status::switching_protocols);
        resp.version(request_.version());
        resp.insert("Connection", "Upgrade");
        resp.insert("Upgrade", to_string(protocol_));
        resp.insert("Connect-As", "Peer");
        resp.insert("Server", BuildInfo::getFullVersionString());
        resp.insert(
            "Crawl",
            overlay_.peerFinder().config().peerPrivate ? "private" : "public");

        if (request_["X-Offer-Compression"] == "lz4" &&
            resp.insert("X-Offer-Compression", "lz4");

            resp,
            *sharedValue,
            app_);

        boost::beast::ostream(*buf) << resp;

        return buf;
    }();

    // Write the whole buffer and only start protocol when that's done.
    boost::asio::async_write(
        stream_,
        write_buffer->data(),
        boost::asio::transfer_all(),
        [this, write_buffer, self = shared_from_this()](
            error_code ec, std::size_t bytes_transferred) {
            if (!socket_.is_open())
                return;
            if (ec == boost::asio::error::operation_aborted)
                return;
            if (ec)
                return fail("onWriteResponse", ec);
            if (write_buffer->size() == bytes_transferred)
                return doProtocolStart();
            return fail("Failed to write header");
        });
}
793 
796 {
797  std::shared_lock read_lock{nameMutex_};
798  return name_;
799 }
800 
803 {
804  return headers_["Server-Domain"].to_string();
805 }
806 
807 //------------------------------------------------------------------------------
808 
809 // Protocol logic
810 
// Starts the peer protocol after the handshake: pushes loaded validator
// lists and our cached manifests to the peer.
// NOTE(review): elided from this chunk: the `PeerImp::doProtocolStart()`
// signature, the statement that kicks off the read loop, a guard around
// the validator-list block, and the head of the call this lambda is passed
// to (including its first parameter line). Confirm against upstream.
void
{

    // Send all the validator lists that have been loaded
    {
            std::string const& blob,
            std::string const& signature,
            std::uint32_t version,
            PublicKey const& pubKey,
            std::size_t sequence,
            uint256 const& hash) {
            protocol::TMValidatorList vl;

            vl.set_manifest(manifest);
            vl.set_blob(blob);
            vl.set_signature(signature);
            vl.set_version(version);

            JLOG(p_journal_.debug())
                << "Sending validator list for " << strHex(pubKey)
                << " with sequence " << sequence << " to "
                << remote_address_.to_string() << " (" << id_ << ")";
            send(std::make_shared<Message>(vl, protocol::mtVALIDATORLIST));
            // Don't send it next time.
            setPublisherListSequence(pubKey, sequence);
        });
    }

    if (auto m = overlay_.getManifestsMessage())
        send(m);
}
847 
848 // Called repeatedly with protocol message data
849 void
851 {
852  if (!socket_.is_open())
853  return;
854  if (ec == boost::asio::error::operation_aborted)
855  return;
856  if (ec == boost::asio::error::eof)
857  {
858  JLOG(journal_.info()) << "EOF";
859  return gracefulClose();
860  }
861  if (ec)
862  return fail("onReadMessage", ec);
863  if (auto stream = journal_.trace())
864  {
865  if (bytes_transferred > 0)
866  stream << "onReadMessage: " << bytes_transferred << " bytes";
867  else
868  stream << "onReadMessage";
869  }
870 
871  metrics_.recv.add_message(bytes_transferred);
872 
873  read_buffer_.commit(bytes_transferred);
874 
875  auto hint = Tuning::readBufferBytes;
876 
877  while (read_buffer_.size() > 0)
878  {
879  std::size_t bytes_consumed;
880  std::tie(bytes_consumed, ec) =
881  invokeProtocolMessage(read_buffer_.data(), *this, hint);
882  if (ec)
883  return fail("onReadMessage", ec);
884  if (!socket_.is_open())
885  return;
886  if (gracefulClose_)
887  return;
888  if (bytes_consumed == 0)
889  break;
890  read_buffer_.consume(bytes_consumed);
891  }
892 
893  // Timeout on writes only
894  stream_.async_read_some(
896  bind_executor(
897  strand_,
898  std::bind(
901  std::placeholders::_1,
902  std::placeholders::_2)));
903 }
904 
905 void
907 {
908  if (!socket_.is_open())
909  return;
910  if (ec == boost::asio::error::operation_aborted)
911  return;
912  if (ec)
913  return fail("onWriteMessage", ec);
914  if (auto stream = journal_.trace())
915  {
916  if (bytes_transferred > 0)
917  stream << "onWriteMessage: " << bytes_transferred << " bytes";
918  else
919  stream << "onWriteMessage";
920  }
921 
922  metrics_.sent.add_message(bytes_transferred);
923 
924  assert(!send_queue_.empty());
925  send_queue_.pop();
926  if (!send_queue_.empty())
927  {
928  // Timeout on writes only
929  return boost::asio::async_write(
930  stream_,
931  boost::asio::buffer(
932  send_queue_.front()->getBuffer(compressionEnabled_)),
933  bind_executor(
934  strand_,
935  std::bind(
938  std::placeholders::_1,
939  std::placeholders::_2)));
940  }
941 
942  if (gracefulClose_)
943  {
944  return stream_.async_shutdown(bind_executor(
945  strand_,
946  std::bind(
949  std::placeholders::_1)));
950  }
951 }
952 
953 //------------------------------------------------------------------------------
954 //
955 // ProtocolHandler
956 //
957 //------------------------------------------------------------------------------
958 
// Invoked for protocol messages with an unrecognized type code.
// NOTE(review): the signature line (taking the raw message type) appears
// to have been elided from this chunk.
void
{
    // TODO
}
964 
// Bookkeeping run before dispatching each protocol message: records a load
// event and traffic statistics.
// NOTE(review): elided from this chunk: the `PeerImp::onMessageBegin(`
// line, a protobuf-message parameter (the body uses `m`), and the
// statements between `load_event_ =` and the traffic-count expression.
// Confirm against upstream.
void
    std::uint16_t type,
    std::size_t size)
{
    load_event_ =
        TrafficCount::categorize(*m, type, true), true, static_cast<int>(size));
}
977 
// Bookkeeping run after dispatching each protocol message: releases the
// load event and applies the fee accumulated while handling the message.
// NOTE(review): the `PeerImp::onMessageEnd(...)` signature lines appear to
// have been elided from this chunk.
void
{
    load_event_.reset();
    charge(fee_);
}
986 
// Handles a TMManifests message by queueing it for the overlay to process
// on the job queue.
// NOTE(review): the onMessage(TMManifests) signature and the job-queue
// `addJob(`-style call head appear to have been elided from this chunk.
void
{
    // VFALCO What's the right job type?
    auto that = shared_from_this();
        jtVALIDATION_ut, "receiveManifests", [this, that, m](Job&) {
            overlay_.onManifests(m, that);
        });
}
997 
// Handles a TMPing: answers PINGs with a PONG; uses a matching PONG to
// clear the outstanding-ping state and update the latency estimate.
// NOTE(review): elided from this chunk: the onMessage(TMPing) signature, a
// statement before the PONG reply, the expression completing the RTT
// computation (presumably involving lastPingTime_), and a line guarding
// latency_. Confirm against upstream.
void
{
    if (m->type() == protocol::TMPing::ptPING)
    {
        // We have received a ping request, reply with a pong
        m->set_type(protocol::TMPing::ptPONG);
        send(std::make_shared<Message>(*m, protocol::mtPING));
        return;
    }

    if (m->type() == protocol::TMPing::ptPONG && m->has_seq())
    {
        // Only reset the ping sequence if we actually received a
        // PONG with the correct cookie. That way, any peers which
        // respond with incorrect cookies will eventually time out.
        if (m->seq() == lastPingSeq_)
        {
            lastPingSeq_.reset();

            // Update latency estimate
            auto const rtt = std::chrono::round<std::chrono::milliseconds>(

            // Exponential moving average: 7/8 previous + 1/8 new sample.
            if (latency_)
                latency_ = (*latency_ * 7 + rtt) / 8;
            else
                latency_ = rtt;
        }

        return;
    }
}
1034 
// Handles a TMCluster: updates cluster node status, imports gossiped load
// sources, and recomputes the cluster fee (median of recently reported
// fees).
// NOTE(review): elided from this chunk: the onMessage(TMCluster) signature,
// a statement in the !cluster() branch, the declaration of `item`, the
// consumer of `gossip`, and the declaration of `fees`. Confirm upstream.
void
{
    // VFALCO NOTE I think we should drop the peer immediately
    if (!cluster())
    {
        return;
    }

    for (int i = 0; i < m->clusternodes().size(); ++i)
    {
        protocol::TMClusterNode const& node = m->clusternodes(i);

        std::string name;
        if (node.has_nodename())
            name = node.nodename();

        auto const publicKey =
            parseBase58<PublicKey>(TokenType::NodePublic, node.publickey());

        // NIKB NOTE We should drop the peer immediately if
        // they send us a public key we can't parse
        if (publicKey)
        {
            auto const reportTime =
                NetClock::time_point{NetClock::duration{node.reporttime()}};

            app_.cluster().update(
                *publicKey, name, node.nodeload(), reportTime);
        }
    }

    int loadSources = m->loadsources().size();
    if (loadSources != 0)
    {
        Resource::Gossip gossip;
        gossip.items.reserve(loadSources);
        for (int i = 0; i < m->loadsources().size(); ++i)
        {
            protocol::TMLoadSource const& node = m->loadsources(i);
            item.address = beast::IP::Endpoint::from_string(node.name());
            item.balance = node.cost();
            if (item.address != beast::IP::Endpoint())
                gossip.items.push_back(item);
        }
    }

    // Calculate the cluster fee:
    auto const thresh = app_.timeKeeper().now() - 90s;
    std::uint32_t clusterFee = 0;

    fees.reserve(app_.cluster().size());

    app_.cluster().for_each([&fees, thresh](ClusterNode const& status) {
        if (status.getReportTime() >= thresh)
            fees.push_back(status.getLoadFee());
    });

    if (!fees.empty())
    {
        // Upper median of the recently reported fees.
        auto const index = fees.size() / 2;
        std::nth_element(fees.begin(), fees.begin() + index, fees.end());
        clusterFee = fees[index];
    }

    app_.getFeeTrack().setClusterFee(clusterFee);
}
1106 
// Deprecated message handler; intentionally does nothing.
// NOTE(review): the onMessage signature line appears to have been elided
// from this chunk.
void
{
    // DEPRECATED
}
1112 
// Deprecated message handler; intentionally does nothing.
// NOTE(review): the onMessage signature line appears to have been elided
// from this chunk.
void
{
    // DEPRECATED
}
1118 
// Handles a TMGetPeerShardInfo: replies with our complete shard indexes
// (if any), then relays the request to other peers while hops remain.
// NOTE(review): elided from this chunk: the onMessage signature, a
// statement inside badData (presumably a fee charge), a line after the
// shard-store check, a line before the hops decrement, the argument list
// of add_peerchain()->set_nodepubkey(, and the head of the relay/broadcast
// call. Confirm against upstream.
void
{
    // Log (and presumably penalize) peers that send malformed data.
    auto badData = [&](std::string msg) {
        JLOG(p_journal_.warn()) << msg;
    };

    if (m->hops() > csHopLimit)
        return badData("Invalid hops: " + std::to_string(m->hops()));
    if (m->peerchain_size() > csHopLimit)
        return badData("Invalid peer chain");

    // Reply with shard info we may have
    if (auto shardStore = app_.getShardStore())
    {
        auto shards{shardStore->getCompleteShards()};
        if (!shards.empty())
        {
            protocol::TMPeerShardInfo reply;
            reply.set_shardindexes(shards);

            if (m->has_lastlink())
                reply.set_lastlink(true);

            if (m->peerchain_size() > 0)
            {
                for (int i = 0; i < m->peerchain_size(); ++i)
                {
                    if (!publicKeyType(makeSlice(m->peerchain(i).nodepubkey())))
                        return badData("Invalid peer chain public key");
                }

                *reply.mutable_peerchain() = m->peerchain();
            }

            send(std::make_shared<Message>(reply, protocol::mtPEER_SHARD_INFO));

            JLOG(p_journal_.trace()) << "Sent shard indexes " << shards;
        }
    }

    // Relay request to peers
    if (m->hops() > 0)
    {
        m->set_hops(m->hops() - 1);
        if (m->hops() == 0)
            m->set_lastlink(true);

        m->add_peerchain()->set_nodepubkey(
            std::make_shared<Message>(*m, protocol::mtGET_PEER_SHARD_INFO),
            match_peer(this)));
    }
}
1179 
// Handles a TMPeerShardInfo: either relays it backwards along the recorded
// peer chain, or consumes it by validating and storing the reported shard
// index ranges for the originating node.
// NOTE(review): elided from this chunk: the onMessage signature, a fee
// charge in badData, the initializers of curLedgerSeq/earliestLedgerSeq,
// the endpoint-parsing call, a lock guarding shardInfo_, and the statement
// under the final has_lastlink() check. Confirm against upstream.
void
{
    auto badData = [&](std::string msg) {
        JLOG(p_journal_.warn()) << msg;
    };

    if (m->shardindexes().empty())
        return badData("Missing shard indexes");
    if (m->peerchain_size() > csHopLimit)
        return badData("Invalid peer chain");
    if (m->has_nodepubkey() && !publicKeyType(makeSlice(m->nodepubkey())))
        return badData("Invalid public key");

    // Check if the message should be forwarded to another peer
    if (m->peerchain_size() > 0)
    {
        // Get the Public key of the last link in the peer chain
        auto const s{
            makeSlice(m->peerchain(m->peerchain_size() - 1).nodepubkey())};
        if (!publicKeyType(s))
            return badData("Invalid pubKey");
        PublicKey peerPubKey(s);

        if (auto peer = overlay_.findPeerByPublicKey(peerPubKey))
        {
            if (!m->has_nodepubkey())
                m->set_nodepubkey(publicKey_.data(), publicKey_.size());

            if (!m->has_endpoint())
            {
                // Check if peer will share IP publicly
                if (crawl())
                    m->set_endpoint(remote_address_.address().to_string());
                else
                    m->set_endpoint("0");
            }

            m->mutable_peerchain()->RemoveLast();
            peer->send(
                std::make_shared<Message>(*m, protocol::mtPEER_SHARD_INFO));

            JLOG(p_journal_.trace())
                << "Relayed TMPeerShardInfo to peer with IP "
                << remote_address_.address().to_string();
        }
        else
        {
            // Peer is no longer available so the relay ends
            JLOG(p_journal_.info()) << "Unable to route shard info";
        }
        return;
    }

    // Parse the shard indexes received in the shard info
    RangeSet<std::uint32_t> shardIndexes;
    {
        if (!from_string(shardIndexes, m->shardindexes()))
            return badData("Invalid shard indexes");

        // Validate the claimed indexes against the node's possible range.
        std::uint32_t earliestShard;
        boost::optional<std::uint32_t> latestShard;
        {
            auto const curLedgerSeq{
            if (auto shardStore = app_.getShardStore())
            {
                earliestShard = shardStore->earliestShardIndex();
                if (curLedgerSeq >= shardStore->earliestLedgerSeq())
                    latestShard = shardStore->seqToShardIndex(curLedgerSeq);
            }
            else
            {
                auto const earliestLedgerSeq{
                earliestShard = NodeStore::seqToShardIndex(earliestLedgerSeq);
                if (curLedgerSeq >= earliestLedgerSeq)
                    latestShard = NodeStore::seqToShardIndex(curLedgerSeq);
            }
        }

        if (boost::icl::first(shardIndexes) < earliestShard ||
            (latestShard && boost::icl::last(shardIndexes) > latestShard))
        {
            return badData("Invalid shard indexes");
        }
    }

    // Get the IP of the node reporting the shard info
    beast::IP::Endpoint endpoint;
    if (m->has_endpoint())
    {
        if (m->endpoint() != "0")
        {
            auto result =
            if (!result)
                return badData("Invalid incoming endpoint: " + m->endpoint());
            endpoint = std::move(*result);
        }
    }
    else if (crawl())  // Check if peer will share IP publicly
    {
        endpoint = remote_address_;
    }

    // Get the Public key of the node reporting the shard info
    PublicKey publicKey;
    if (m->has_nodepubkey())
        publicKey = PublicKey(makeSlice(m->nodepubkey()));
    else
        publicKey = publicKey_;

    {
        auto it{shardInfo_.find(publicKey)};
        if (it != shardInfo_.end())
        {
            // Update the IP address for the node
            it->second.endpoint = std::move(endpoint);

            // Join the shard index range set
            it->second.shardIndexes += shardIndexes;
        }
        else
        {
            // Add a new node
            ShardInfo shardInfo;
            shardInfo.endpoint = std::move(endpoint);
            shardInfo.shardIndexes = std::move(shardIndexes);
            shardInfo_.emplace(publicKey, std::move(shardInfo));
        }
    }

    JLOG(p_journal_.trace())
        << "Consumed TMPeerShardInfo originating from public key "
        << toBase58(TokenType::NodePublic, publicKey) << " shard indexes "
        << m->shardindexes();

    if (m->has_lastlink())
}
1324 
// Handles a TMEndpoints (v2 only, from converged peers): validates each
// endpoint and forwards the batch to PeerFinder. Hops==0 entries are
// rewritten to the address actually seen on the socket.
// NOTE(review): the onMessage signature and the declaration of `endpoints`
// (a PeerFinder endpoint container) appear to have been elided from this
// chunk.
void
{
    // Don't allow endpoints from peers that are not known tracking or are
    // not using a version of the message that we support:
    if (tracking_.load() != Tracking::converged || m->version() != 2)
        return;

    endpoints.reserve(m->endpoints_v2().size());

    for (auto const& tm : m->endpoints_v2())
    {
        auto result = beast::IP::Endpoint::from_string_checked(tm.endpoint());
        if (!result)
        {
            JLOG(p_journal_.error()) << "failed to parse incoming endpoint: {"
                                     << tm.endpoint() << "}";
            continue;
        }

        // If hops == 0, this Endpoint describes the peer we are connected
        // to -- in that case, we take the remote address seen on the
        // socket and store that in the IP::Endpoint. If this is the first
        // time, then we'll verify that their listener can receive incoming
        // by performing a connectivity test. if hops > 0, then we just
        // take the address/port we were given

        endpoints.emplace_back(
            tm.hops() > 0 ? *result : remote_address_.at_port(result->port()),
            tm.hops());
    }

    if (!endpoints.empty())
        overlay_.peerFinder().on_endpoints(slot_, endpoints);
}
1361 
// Handles an incoming relayed transaction: deserializes it, suppresses
// duplicates via the HashRouter, and queues signature/validity checking
// on the job queue. NOTE(review): several guard lines (signature, early
// checks, queue-full condition, addJob call head) are missing from this
// view of the file — verify against the repository before relying on
// the exact control flow.
void
{
        return;

    {
        // If we've never been in synch, there's nothing we can do
        // with a transaction
        JLOG(p_journal_.debug()) << "Ignoring incoming transaction: "
                                 << "Need network ledger";
        return;
    }

    SerialIter sit(makeSlice(m->rawtransaction()));

    try
    {
        auto stx = std::make_shared<STTx const>(sit);
        uint256 txID = stx->getTransactionID();

        int flags;
        constexpr std::chrono::seconds tx_interval = 10s;

        // shouldProcess suppresses transactions seen within tx_interval
        // and reports any flags (e.g. SF_BAD) recorded for this tx.
        if (!app_.getHashRouter().shouldProcess(txID, id_, flags, tx_interval))
        {
            // we have seen this transaction recently
            if (flags & SF_BAD)
            {
                JLOG(p_journal_.debug()) << "Ignoring known bad tx " << txID;
            }

            return;
        }

        JLOG(p_journal_.debug()) << "Got tx " << txID;

        bool checkSignature = true;
        if (cluster())
        {
            if (!m->has_deferred() || !m->deferred())
            {
                // Skip local checks if a server we trust
                // put the transaction in its open ledger
                flags |= SF_TRUSTED;
            }

            {
                // For now, be paranoid and have each validator
                // check each transaction, regardless of source
                checkSignature = false;
            }
        }

        {
            JLOG(p_journal_.info()) << "Transaction queue is full";
        }
        else if (app_.getLedgerMaster().getValidatedLedgerAge() > 4min)
        {
            // Don't accept new transactions while we are far behind.
            JLOG(p_journal_.trace())
                << "No new transactions until synchronized";
        }
        else
        {
                jtTRANSACTION,
                "recvTransaction->checkTransaction",
                flags,
                checkSignature,
                stx](Job&) {
                // Weak capture: the peer may disconnect before the job runs.
                if (auto peer = weak.lock())
                    peer->checkTransaction(flags, checkSignature, stx);
            });
        }
    }
    catch (std::exception const&)
    {
        JLOG(p_journal_.warn())
            << "Transaction invalid: " << strHex(m->rawtransaction());
    }
}
1450 
// Queues servicing of a ledger data request on the job queue; the peer
// is captured weakly so a disconnect cancels the work.
// NOTE(review): the signature and `weak` declaration lines are not
// visible in this chunk — confirm against the repository.
void
{
    app_.getJobQueue().addJob(jtLEDGER_REQ, "recvGetLedger", [weak, m](Job&) {
        if (auto peer = weak.lock())
            peer->getLedger(m);
    });
}
1461 
// Handles ledger or transaction-set data: routes replies carrying a
// request cookie back to the requesting peer, dispatches TX-candidate
// data to InboundTransactions, and otherwise forwards to the inbound
// ledger machinery. NOTE(review): some interior lines (fee charges,
// addJob head, final condition) are missing from this view.
void
{
    protocol::TMLedgerData& packet = *m;

    if (m->nodes().size() <= 0)
    {
        JLOG(p_journal_.warn()) << "Ledger/TXset data with no nodes";
        return;
    }

    if (m->has_requestcookie())
    {
        // This reply was requested on behalf of another peer; relay it
        // to that peer (identified by its short id) and stop.
        std::shared_ptr<Peer> target =
            overlay_.findPeerByShortID(m->requestcookie());
        if (target)
        {
            m->clear_requestcookie();
            target->send(
                std::make_shared<Message>(packet, protocol::mtLEDGER_DATA));
        }
        else
        {
            JLOG(p_journal_.info()) << "Unable to route TX/ledger data reply";
        }
        return;
    }

    if (!stringIsUint256Sized(m->ledgerhash()))
    {
        JLOG(p_journal_.warn()) << "TX candidate reply with invalid hash size";
        return;
    }

    uint256 const hash{m->ledgerhash()};

    if (m->type() == protocol::liTS_CANDIDATE)
    {
        // got data for a candidate transaction set
            jtTXN_DATA, "recvPeerData", [weak, hash, m](Job&) {
                if (auto peer = weak.lock())
                    peer->app_.getInboundTransactions().gotData(hash, peer, m);
            });
        return;
    }

    {
        JLOG(p_journal_.trace()) << "Got data for unwanted ledger";
    }
}
1518 
// Handles a consensus proposal: performs cheap structural validation,
// suppresses duplicates, applies untrusted-source throttling, then
// queues signature checking and processing on the job queue.
// NOTE(review): several lines (signature, fee charges, the suppression
// call, squelch call head, RCLCxPeerPos tail, addJob head) are missing
// from this view — verify exact flow against the repository.
void
{
    protocol::TMProposeSet& set = *m;

    auto const sig = makeSlice(set.signature());

    // Preliminary check for the validity of the signature: A DER encoded
    // signature can't be longer than 72 bytes.
    if ((boost::algorithm::clamp(sig.size(), 64, 72) != sig.size()) ||
        (publicKeyType(makeSlice(set.nodepubkey())) != KeyType::secp256k1))
    {
        JLOG(p_journal_.warn()) << "Proposal: malformed";
        return;
    }

    if (!stringIsUint256Sized(set.currenttxhash()) ||
        !stringIsUint256Sized(set.previousledger()))
    {
        JLOG(p_journal_.warn()) << "Proposal: malformed";
        return;
    }

    uint256 const proposeHash{set.currenttxhash()};
    uint256 const prevLedger{set.previousledger()};

    PublicKey const publicKey{makeSlice(set.nodepubkey())};
    NetClock::time_point const closeTime{NetClock::duration{set.closetime()}};

    // Deterministic id used to suppress duplicate relays of this proposal.
    uint256 const suppression = proposalUniqueId(
        proposeHash,
        prevLedger,
        set.proposeseq(),
        closeTime,
        publicKey.slice(),
        sig);

    if (auto [added, relayed] =
        !added)
    {
        // Count unique messages (Slots has it's own 'HashRouter'), which a peer
        // receives within IDLED seconds since the message has been relayed.
        // Wait WAIT_ON_BOOTUP time to let the server establish connections to
        // peers.
        if (app_.config().REDUCE_RELAY_ENABLE && relayed &&
            (stopwatch().now() - *relayed) < squelch::IDLED &&
            squelch::epoch<std::chrono::minutes>(UptimeClock::now()) >
                suppression, publicKey, id_, protocol::mtPROPOSE_LEDGER);
        JLOG(p_journal_.trace()) << "Proposal: duplicate";
        return;
    }

    auto const isTrusted = app_.validators().trusted(publicKey);

    if (!isTrusted)
    {
        {
            JLOG(p_journal_.debug())
                << "Proposal: Dropping untrusted (peer divergence)";
            return;
        }

        // Shed untrusted proposals when we are under local load.
        if (!cluster() && app_.getFeeTrack().isLoadedLocal())
        {
            JLOG(p_journal_.debug()) << "Proposal: Dropping untrusted (load)";
            return;
        }
    }

    JLOG(p_journal_.trace())
        << "Proposal: " << (isTrusted ? "trusted" : "untrusted");

    auto proposal = RCLCxPeerPos(
        publicKey,
        sig,
        suppression,
        prevLedger,
        set.proposeseq(),
        proposeHash,
        closeTime,

        isTrusted ? jtPROPOSAL_t : jtPROPOSAL_ut,
        "recvPropose->checkPropose",
        [weak, m, proposal](Job& job) {
            if (auto peer = weak.lock())
                peer->checkPropose(job, m, proposal);
        });
}
1618 
// Handles a peer status-change message: records the peer's last status,
// tracks its closed/previous ledger hashes and ledger range, updates
// tracking state, and publishes the status to subscribers.
// NOTE(review): several lock-acquisition and branch lines are missing
// from this view (e.g. the recentLock_ guards and some else bodies) —
// verify exact locking against the repository.
void
{
    JLOG(p_journal_.trace()) << "Status: Change";

    if (!m->has_networktime())
        m->set_networktime(app_.timeKeeper().now().time_since_epoch().count());

    {
        if (!last_status_.has_newstatus() || m->has_newstatus())
            last_status_ = *m;
        else
        {
            // preserve old status
            protocol::NodeStatus status = last_status_.newstatus();
            last_status_ = *m;
            m->set_newstatus(status);
        }
    }

    if (m->newevent() == protocol::neLOST_SYNC)
    {
        bool outOfSync{false};
        {
            // Operations on closedLedgerHash_ and previousLedgerHash_ must be
            // guarded by recentLock_.
            if (!closedLedgerHash_.isZero())
            {
                outOfSync = true;
            }
        }
        if (outOfSync)
        {
            JLOG(p_journal_.debug()) << "Status: Out of sync";
        }
        return;
    }

    {
        uint256 closedLedgerHash{};
        bool const peerChangedLedgers{
            m->has_ledgerhash() && stringIsUint256Sized(m->ledgerhash())};

        {
            // Operations on closedLedgerHash_ and previousLedgerHash_ must be
            // guarded by recentLock_.
            if (peerChangedLedgers)
            {
                closedLedgerHash_ = m->ledgerhash();
                closedLedgerHash = closedLedgerHash_;
                addLedger(closedLedgerHash, sl);
            }
            else
            {
            }

            if (m->has_ledgerhashprevious() &&
                stringIsUint256Sized(m->ledgerhashprevious()))
            {
                previousLedgerHash_ = m->ledgerhashprevious();
            }
            else
            {
            }
        }
        if (peerChangedLedgers)
        {
            JLOG(p_journal_.debug()) << "LCL is " << closedLedgerHash;
        }
        else
        {
            JLOG(p_journal_.debug()) << "Status: No ledger";
        }
    }

    if (m->has_firstseq() && m->has_lastseq())
    {

        minLedger_ = m->firstseq();
        maxLedger_ = m->lastseq();

        // A reversed or zero-bounded range is nonsensical; clear it.
        if ((maxLedger_ < minLedger_) || (minLedger_ == 0) || (maxLedger_ == 0))
            minLedger_ = maxLedger_ = 0;
    }

    if (m->has_ledgerseq() &&
    {
        checkTracking(
            m->ledgerseq(), app_.getLedgerMaster().getValidLedgerIndex());
    }

    // Publish a JSON summary of the peer's new status to subscribers.
    app_.getOPs().pubPeerStatus([=]() -> Json::Value {

        if (m->has_newstatus())
        {
            switch (m->newstatus())
            {
                case protocol::nsCONNECTING:
                    j[jss::status] = "CONNECTING";
                    break;
                case protocol::nsCONNECTED:
                    j[jss::status] = "CONNECTED";
                    break;
                case protocol::nsMONITORING:
                    j[jss::status] = "MONITORING";
                    break;
                case protocol::nsVALIDATING:
                    j[jss::status] = "VALIDATING";
                    break;
                case protocol::nsSHUTTING:
                    j[jss::status] = "SHUTTING";
                    break;
            }
        }

        if (m->has_newevent())
        {
            switch (m->newevent())
            {
                case protocol::neCLOSING_LEDGER:
                    j[jss::action] = "CLOSING_LEDGER";
                    break;
                case protocol::neACCEPTED_LEDGER:
                    j[jss::action] = "ACCEPTED_LEDGER";
                    break;
                case protocol::neSWITCHED_LEDGER:
                    j[jss::action] = "SWITCHED_LEDGER";
                    break;
                case protocol::neLOST_SYNC:
                    j[jss::action] = "LOST_SYNC";
                    break;
            }
        }

        if (m->has_ledgerseq())
        {
            j[jss::ledger_index] = m->ledgerseq();
        }

        if (m->has_ledgerhash())
        {
            uint256 closedLedgerHash{};
            {
                std::lock_guard sl(recentLock_);
                closedLedgerHash = closedLedgerHash_;
            }
            j[jss::ledger_hash] = to_string(closedLedgerHash);
        }

        if (m->has_networktime())
        {
            j[jss::date] = Json::UInt(m->networktime());
        }

        if (m->has_firstseq() && m->has_lastseq())
        {
            j[jss::ledger_index_min] = Json::UInt(m->firstseq());
            j[jss::ledger_index_max] = Json::UInt(m->lastseq());
        }

        return j;
    });
}
1793 
1794 void
1795 PeerImp::checkTracking(std::uint32_t validationSeq)
1796 {
1797  std::uint32_t serverSeq;
1798  {
1799  // Extract the sequence number of the highest
1800  // ledger this peer has
1801  std::lock_guard sl(recentLock_);
1802 
1803  serverSeq = maxLedger_;
1804  }
1805  if (serverSeq != 0)
1806  {
1807  // Compare the peer's ledger sequence to the
1808  // sequence of a recently-validated ledger
1809  checkTracking(serverSeq, validationSeq);
1810  }
1811 }
1812 
1813 void
1814 PeerImp::checkTracking(std::uint32_t seq1, std::uint32_t seq2)
1815 {
1816  int diff = std::max(seq1, seq2) - std::min(seq1, seq2);
1817 
1818  if (diff < Tuning::convergedLedgerLimit)
1819  {
1820  // The peer's ledger sequence is close to the validation's
1821  tracking_ = Tracking::converged;
1822  }
1823 
1824  if ((diff > Tuning::divergedLedgerLimit) &&
1825  (tracking_.load() != Tracking::diverged))
1826  {
1827  // The peer's ledger sequence is way off the validation's
1828  std::lock_guard sl(recentLock_);
1829 
1830  tracking_ = Tracking::diverged;
1831  trackingTime_ = clock_type::now();
1832  }
1833 }
1834 
// Records that this peer claims to have a transaction set, charging a
// fee for malformed hashes or redundant announcements.
// NOTE(review): the signature line is not visible in this chunk.
void
{
    if (!stringIsUint256Sized(m->hash()))
    {
        fee_ = Resource::feeInvalidRequest;
        return;
    }

    uint256 const hash{m->hash()};

    if (m->status() == protocol::tsHAVE)
    {
        // recentTxSets_ is guarded by recentLock_.
        std::lock_guard sl(recentLock_);

        // Repeating the same announcement is unwanted data.
        if (std::find(recentTxSets_.begin(), recentTxSets_.end(), hash) !=
            recentTxSets_.end())
        {
            fee_ = Resource::feeUnwantedData;
            return;
        }

        recentTxSets_.push_back(hash);
    }
}
1860 
// Handles a relayed validator list (UNL): verifies the peer advertises
// the propagation feature, suppresses duplicates by content hash,
// applies/broadcasts the list, and charges fees according to the
// resulting disposition. NOTE(review): the signature line is not
// visible in this chunk.
void
{
    try
    {
        if (!supportsFeature(ProtocolFeature::ValidatorListPropagation))
        {
            JLOG(p_journal_.debug())
                << "ValidatorList: received validator list from peer using "
                << "protocol version " << to_string(protocol_)
                << " which shouldn't support this feature.";
            fee_ = Resource::feeUnwantedData;
            return;
        }
        auto const& manifest = m->manifest();
        auto const& blob = m->blob();
        auto const& signature = m->signature();
        auto const version = m->version();
        // Content hash used for duplicate suppression and relay tracking.
        auto const hash = sha512Half(manifest, blob, signature, version);

        JLOG(p_journal_.debug())
            << "Received validator list from " << remote_address_.to_string()
            << " (" << id_ << ")";

        if (!app_.getHashRouter().addSuppressionPeer(hash, id_))
        {
            JLOG(p_journal_.debug())
                << "ValidatorList: received duplicate validator list";
            // Charging this fee here won't hurt the peer in the normal
            // course of operation (ie. refresh every 5 minutes), but
            // will add up if the peer is misbehaving.
            fee_ = Resource::feeUnwantedData;
            return;
        }

        auto const applyResult = app_.validators().applyListAndBroadcast(
            manifest,
            blob,
            signature,
            version,
            remote_address_.to_string(),
            hash,
            app_.overlay(),
            app_.getHashRouter());
        auto const disp = applyResult.disposition;

        JLOG(p_journal_.debug())
            << "Processed validator list from "
            << (applyResult.publisherKey ? strHex(*applyResult.publisherKey)
                                         : "unknown or invalid publisher")
            << " from " << remote_address_.to_string() << " (" << id_
            << ") with result " << to_string(disp);

        switch (disp)
        {
            case ListDisposition::accepted:
                JLOG(p_journal_.debug())
                    << "Applied new validator list from peer "
                    << remote_address_;
                {
                    // publisherListSequences_ is guarded by recentLock_.
                    std::lock_guard<std::mutex> sl(recentLock_);

                    assert(applyResult.sequence && applyResult.publisherKey);
                    auto const& pubKey = *applyResult.publisherKey;
#ifndef NDEBUG
                    // Debug-only invariant: sequences strictly increase.
                    if (auto const iter = publisherListSequences_.find(pubKey);
                        iter != publisherListSequences_.end())
                    {
                        assert(iter->second < *applyResult.sequence);
                    }
#endif
                    publisherListSequences_[pubKey] = *applyResult.sequence;
                }
                break;
            case ListDisposition::same_sequence:
                JLOG(p_journal_.warn())
                    << "Validator list with current sequence from peer "
                    << remote_address_;
                // Charging this fee here won't hurt the peer in the normal
                // course of operation (ie. refresh every 5 minutes), but
                // will add up if the peer is misbehaving.
                fee_ = Resource::feeUnwantedData;
#ifndef NDEBUG
                {
                    std::lock_guard<std::mutex> sl(recentLock_);
                    assert(applyResult.sequence && applyResult.publisherKey);
                    assert(
                        publisherListSequences_[*applyResult.publisherKey] ==
                        *applyResult.sequence);
                }
#endif  // !NDEBUG

                break;
            case ListDisposition::stale:
                JLOG(p_journal_.warn())
                    << "Stale validator list from peer " << remote_address_;
                // There are very few good reasons for a peer to send an
                // old list, particularly more than once.
                fee_ = Resource::feeBadData;
                break;
            case ListDisposition::untrusted:
                JLOG(p_journal_.warn())
                    << "Untrusted validator list from peer " << remote_address_;
                // Charging this fee here won't hurt the peer in the normal
                // course of operation (ie. refresh every 5 minutes), but
                // will add up if the peer is misbehaving.
                fee_ = Resource::feeUnwantedData;
                break;
            case ListDisposition::invalid:
                JLOG(p_journal_.warn())
                    << "Invalid validator list from peer " << remote_address_;
                // This shouldn't ever happen with a well-behaved peer
                fee_ = Resource::feeInvalidSignature;
                break;
            case ListDisposition::unsupported_version:
                JLOG(p_journal_.warn())
                    << "Unsupported version validator list from peer "
                    << remote_address_;
                // During a version transition, this may be legitimate.
                // If it happens frequently, that's probably bad.
                fee_ = Resource::feeBadData;
                break;
            default:
                assert(false);
        }
    }
    catch (std::exception const& e)
    {
        JLOG(p_journal_.warn()) << "ValidatorList: Exception, " << e.what()
                                << " from peer " << remote_address_;
        fee_ = Resource::feeBadData;
    }
}
1994 
// Handles a relayed validation: deserializes it, drops stale or
// duplicate validations, and queues full checking for trusted
// validators (or when lightly loaded). NOTE(review): the declaration
// of `val` is not visible in this chunk — presumably a
// std::shared_ptr<STValidation>; confirm against the repository.
void
PeerImp::onMessage(std::shared_ptr<protocol::TMValidation> const& m)
{
    auto const closeTime = app_.timeKeeper().closeTime();

    // A serialized validation smaller than this cannot be well-formed.
    if (m->validation().size() < 50)
    {
        JLOG(p_journal_.warn()) << "Validation: Too small";
        fee_ = Resource::feeInvalidRequest;
        return;
    }

    try
    {
        {
            SerialIter sit(makeSlice(m->validation()));
            val = std::make_shared<STValidation>(
                std::ref(sit),
                [this](PublicKey const& pk) {
                    return calcNodeID(
                        app_.validatorManifests().getMasterKey(pk));
                },
                false);
            val->setSeen(closeTime);
        }

        // Drop validations whose sign/seen times fall outside the
        // currently acceptable window.
        if (!isCurrent(
                app_.getValidations().parms(),
                app_.timeKeeper().closeTime(),
                val->getSignTime(),
                val->getSeenTime()))
        {
            JLOG(p_journal_.trace()) << "Validation: Not current";
            fee_ = Resource::feeUnwantedData;
            return;
        }

        auto key = sha512Half(makeSlice(m->validation()));
        if (auto [added, relayed] =
                app_.getHashRouter().addSuppressionPeerWithStatus(key, id_);
            !added)
        {
            // Count unique messages (Slots has it's own 'HashRouter'), which a
            // peer receives within IDLED seconds since the message has been
            // relayed. Wait WAIT_ON_BOOTUP time to let the server establish
            // connections to peers.
            if (app_.config().REDUCE_RELAY_ENABLE && (bool)relayed &&
                (stopwatch().now() - *relayed) < squelch::IDLED &&
                squelch::epoch<std::chrono::minutes>(UptimeClock::now()) >
                    squelch::WAIT_ON_BOOTUP)
                overlay_.updateSlotAndSquelch(
                    key, val->getSignerPublic(), id_, protocol::mtVALIDATION);
            JLOG(p_journal_.trace()) << "Validation: duplicate";
            return;
        }

        auto const isTrusted =
            app_.validators().trusted(val->getSignerPublic());

        if (!isTrusted && (tracking_.load() == Tracking::diverged))
        {
            JLOG(p_journal_.debug())
                << "Validation: dropping untrusted from diverged peer";
        }
        if (isTrusted || cluster() || !app_.getFeeTrack().isLoadedLocal())
        {
            std::weak_ptr<PeerImp> weak = shared_from_this();
            app_.getJobQueue().addJob(
                isTrusted ? jtVALIDATION_t : jtVALIDATION_ut,
                "recvValidation->checkValidation",
                [weak, val, m](Job&) {
                    if (auto peer = weak.lock())
                        peer->checkValidation(val, m);
                });
        }
        else
        {
            JLOG(p_journal_.debug()) << "Validation: Dropping UNTRUSTED (load)";
        }
    }
    catch (std::exception const& e)
    {
        JLOG(p_journal_.warn())
            << "Exception processing validation: " << e.what();
        fee_ = Resource::feeInvalidRequest;
    }
}
2083 
// Handles TMGetObjectByHash in both directions: as a query it serves
// node objects (or a fetch pack) from our stores; as a reply it feeds
// the received objects into the fetch-pack machinery.
// NOTE(review): the signature line is not visible in this chunk.
void
{
    protocol::TMGetObjectByHash& packet = *m;

    if (packet.query())
    {
        // this is a query
        if (send_queue_.size() >= Tuning::dropSendQueue)
        {
            // Don't pile more replies onto an already-congested queue.
            JLOG(p_journal_.debug()) << "GetObject: Large send queue";
            return;
        }

        if (packet.type() == protocol::TMGetObjectByHash::otFETCH_PACK)
        {
            doFetchPack(m);
            return;
        }

        fee_ = Resource::feeMediumBurdenPeer;

        protocol::TMGetObjectByHash reply;

        reply.set_query(false);

        if (packet.has_seq())
            reply.set_seq(packet.seq());

        reply.set_type(packet.type());

        if (packet.has_ledgerhash())
        {
            if (!stringIsUint256Sized(packet.ledgerhash()))
            {
                fee_ = Resource::feeInvalidRequest;
                return;
            }

            reply.set_ledgerhash(packet.ledgerhash());
        }

        // This is a very minimal implementation
        for (int i = 0; i < packet.objects_size(); ++i)
        {
            auto const& obj = packet.objects(i);
            if (obj.has_hash() && stringIsUint256Sized(obj.hash()))
            {
                uint256 const hash{obj.hash()};
                // VFALCO TODO Move this someplace more sensible so we dont
                // need to inject the NodeStore interfaces.
                std::uint32_t seq{obj.has_ledgerseq() ? obj.ledgerseq() : 0};
                auto nodeObject{app_.getNodeStore().fetchNodeObject(hash, seq)};
                if (!nodeObject)
                {
                    // Fall back to the shard store for old enough ledgers.
                    if (auto shardStore = app_.getShardStore())
                    {
                        if (seq >= shardStore->earliestLedgerSeq())
                            nodeObject = shardStore->fetchNodeObject(hash, seq);
                    }
                }
                if (nodeObject)
                {
                    protocol::TMIndexedObject& newObj = *reply.add_objects();
                    newObj.set_hash(hash.begin(), hash.size());
                    newObj.set_data(
                        &nodeObject->getData().front(),
                        nodeObject->getData().size());

                    if (obj.has_nodeid())
                        newObj.set_index(obj.nodeid());
                    if (obj.has_ledgerseq())
                        newObj.set_ledgerseq(obj.ledgerseq());

                    // VFALCO NOTE "seq" in the message is obsolete
                }
            }
        }

        JLOG(p_journal_.trace()) << "GetObj: " << reply.objects_size() << " of "
                                 << packet.objects_size();
        send(std::make_shared<Message>(reply, protocol::mtGET_OBJECTS));
    }
    else
    {
        // this is a reply
        std::uint32_t pLSeq = 0;
        bool pLDo = true;
        bool progress = false;

        for (int i = 0; i < packet.objects_size(); ++i)
        {
            const protocol::TMIndexedObject& obj = packet.objects(i);

            if (obj.has_hash() && stringIsUint256Sized(obj.hash()))
            {
                if (obj.has_ledgerseq())
                {
                    // Objects arrive grouped by ledger sequence; detect
                    // transitions to decide whether we still want them.
                    if (obj.ledgerseq() != pLSeq)
                    {
                        if (pLDo && (pLSeq != 0))
                        {
                            JLOG(p_journal_.debug())
                                << "GetObj: Full fetch pack for " << pLSeq;
                        }
                        pLSeq = obj.ledgerseq();
                        pLDo = !app_.getLedgerMaster().haveLedger(pLSeq);

                        if (!pLDo)
                        {
                            JLOG(p_journal_.debug())
                                << "GetObj: Late fetch pack for " << pLSeq;
                        }
                        else
                            progress = true;
                    }
                }

                if (pLDo)
                {
                    uint256 const hash{obj.hash()};

                    app_.getLedgerMaster().addFetchPack(
                        hash,
                        std::make_shared<Blob>(
                            obj.data().begin(), obj.data().end()));
                }
            }
        }

        if (pLDo && (pLSeq != 0))
        {
            JLOG(p_journal_.debug())
                << "GetObj: Partial fetch pack for " << pLSeq;
        }
        if (packet.type() == protocol::TMGetObjectByHash::otFETCH_PACK)
            app_.getLedgerMaster().gotFetchPack(progress, pLSeq);
    }
}
2223 
2224 void
2225 PeerImp::onMessage(std::shared_ptr<protocol::TMSquelch> const& m)
2226 {
2227  if (!m->has_validatorpubkey())
2228  {
2229  charge(Resource::feeBadData);
2230  return;
2231  }
2232  auto validator = m->validatorpubkey();
2233  auto const slice{makeSlice(validator)};
2234  if (!publicKeyType(slice))
2235  {
2236  charge(Resource::feeBadData);
2237  return;
2238  }
2239  PublicKey key(slice);
2240  auto squelch = m->squelch();
2241  auto duration = m->has_squelchduration() ? m->squelchduration() : 0;
2242  auto sp = shared_from_this();
2243 
2244  // Ignore the squelch for validator's own messages.
2245  if (key == app_.getValidationPublicKey())
2246  {
2247  JLOG(p_journal_.debug())
2248  << "onMessage: TMSquelch discarding validator's squelch " << slice;
2249  return;
2250  }
2251 
2252  if (!strand_.running_in_this_thread())
2253  return post(strand_, [sp, key, squelch, duration]() {
2254  sp->squelch_.squelch(key, squelch, duration);
2255  });
2256 
2257  JLOG(p_journal_.debug())
2258  << "onMessage: TMSquelch " << slice << " " << id() << " " << duration;
2259 
2260  squelch_.squelch(key, squelch, duration);
2261 }
2262 
2263 //--------------------------------------------------------------------------
2264 
2265 void
2266 PeerImp::addLedger(
2267  uint256 const& hash,
2268  std::lock_guard<std::mutex> const& lockedRecentLock)
2269 {
2270  // lockedRecentLock is passed as a reminder that recentLock_ must be
2271  // locked by the caller.
2272  (void)lockedRecentLock;
2273 
2274  if (std::find(recentLedgers_.begin(), recentLedgers_.end(), hash) !=
2275  recentLedgers_.end())
2276  return;
2277 
2278  recentLedgers_.push_back(hash);
2279 }
2280 
2281 void
2282 PeerImp::doFetchPack(const std::shared_ptr<protocol::TMGetObjectByHash>& packet)
2283 {
2284  // VFALCO TODO Invert this dependency using an observer and shared state
2285  // object. Don't queue fetch pack jobs if we're under load or we already
2286  // have some queued.
2287  if (app_.getFeeTrack().isLoadedLocal() ||
2288  (app_.getLedgerMaster().getValidatedLedgerAge() > 40s) ||
2289  (app_.getJobQueue().getJobCount(jtPACK) > 10))
2290  {
2291  JLOG(p_journal_.info()) << "Too busy to make fetch pack";
2292  return;
2293  }
2294 
2295  if (!stringIsUint256Sized(packet->ledgerhash()))
2296  {
2297  JLOG(p_journal_.warn()) << "FetchPack hash size malformed";
2298  fee_ = Resource::feeInvalidRequest;
2299  return;
2300  }
2301 
2302  fee_ = Resource::feeHighBurdenPeer;
2303 
2304  uint256 const hash{packet->ledgerhash()};
2305 
2306  std::weak_ptr<PeerImp> weak = shared_from_this();
2307  auto elapsed = UptimeClock::now();
2308  auto const pap = &app_;
2309  app_.getJobQueue().addJob(
2310  jtPACK, "MakeFetchPack", [pap, weak, packet, hash, elapsed](Job&) {
2311  pap->getLedgerMaster().makeFetchPack(weak, packet, hash, elapsed);
2312  });
2313 }
2314 
// Validates a transaction received from this peer (runs on the job
// queue). Flags expired or invalid transactions as SF_BAD in the
// HashRouter and charges the peer; otherwise forwards the transaction
// to NetworkOPs for processing. `flags` carries HashRouter flags
// (SF_TRUSTED set for cluster-originated transactions); when
// `checkSignature` is false the signature is force-marked valid.
void
PeerImp::checkTransaction(
    int flags,
    bool checkSignature,
    std::shared_ptr<STTx const> const& stx)
{
    // VFALCO TODO Rewrite to not use exceptions
    try
    {
        // Expired?
        if (stx->isFieldPresent(sfLastLedgerSequence) &&
            (stx->getFieldU32(sfLastLedgerSequence) <
             app_.getLedgerMaster().getValidLedgerIndex()))
        {
            // Can never be included in a ledger; remember it as bad.
            app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
            charge(Resource::feeUnwantedData);
            return;
        }

        if (checkSignature)
        {
            // Check the signature before handing off to the job queue.
            if (auto [valid, validReason] = checkValidity(
                    app_.getHashRouter(),
                    *stx,
                    app_.getLedgerMaster().getValidatedRules(),
                    app_.config());
                valid != Validity::Valid)
            {
                if (!validReason.empty())
                {
                    JLOG(p_journal_.trace())
                        << "Exception checking transaction: " << validReason;
                }

                // Probably not necessary to set SF_BAD, but doesn't hurt.
                app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
                charge(Resource::feeInvalidSignature);
                return;
            }
        }
        else
        {
            // Caller vouched for the signature; record it as valid.
            forceValidity(
                app_.getHashRouter(), stx->getTransactionID(), Validity::Valid);
        }

        std::string reason;
        auto tx = std::make_shared<Transaction>(stx, reason, app_);

        if (tx->getStatus() == INVALID)
        {
            if (!reason.empty())
            {
                JLOG(p_journal_.trace())
                    << "Exception checking transaction: " << reason;
            }
            app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
            charge(Resource::feeInvalidSignature);
            return;
        }

        bool const trusted(flags & SF_TRUSTED);
        app_.getOPs().processTransaction(
            tx, trusted, false, NetworkOPs::FailHard::no);
    }
    catch (std::exception const&)
    {
        app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
        charge(Resource::feeBadData);
    }
}
2387 
// Called from our JobQueue
// Verifies a consensus proposal's signature and relays it when
// appropriate, feeding relay results into the squelch logic.
// NOTE(review): one parameter line (presumably the TMProposeSet
// `packet`) is not visible in this chunk — confirm the signature
// against the repository.
void
PeerImp::checkPropose(
    Job& job,
    RCLCxPeerPos peerPos)
{
    // Trust is encoded in the job type chosen by the enqueuing code.
    bool isTrusted = (job.getType() == jtPROPOSAL_t);

    JLOG(p_journal_.trace())
        << "Checking " << (isTrusted ? "trusted" : "UNTRUSTED") << " proposal";

    assert(packet);

    // Cluster members skip the signature check.
    if (!cluster() && !peerPos.checkSign())
    {
        JLOG(p_journal_.warn()) << "Proposal fails sig check";
        charge(Resource::feeInvalidSignature);
        return;
    }

    bool relay;

    if (isTrusted)
        relay = app_.getOPs().processTrustedProposal(peerPos);
    else
        relay = app_.config().RELAY_UNTRUSTED_PROPOSALS || cluster();

    if (relay)
    {
        // haveMessage contains peers, which are suppressed; i.e. the peers
        // are the source of the message, consequently the message should
        // not be relayed to these peers. But the message must be counted
        // as part of the squelch logic.
        auto haveMessage = app_.overlay().relay(
            *packet, peerPos.suppressionID(), peerPos.publicKey());
        if (app_.config().REDUCE_RELAY_ENABLE && !haveMessage.empty() &&
            squelch::epoch<std::chrono::minutes>(UptimeClock::now()) >
                squelch::WAIT_ON_BOOTUP)
            overlay_.updateSlotAndSquelch(
                peerPos.suppressionID(),
                peerPos.publicKey(),
                std::move(haveMessage),
                protocol::mtPROPOSE_LEDGER);
    }
}
2434 
// Verifies a relayed validation and, if accepted by NetworkOPs (or
// from a cluster member), relays it and updates the squelch logic.
// NOTE(review): one parameter line (presumably the TMValidation
// `packet`) is not visible in this chunk — confirm the signature
// against the repository.
void
PeerImp::checkValidation(
    std::shared_ptr<STValidation> const& val,
{
    try
    {
        // VFALCO Which functions throw?
        if (!cluster() && !val->isValid())
        {
            JLOG(p_journal_.warn()) << "Validation is invalid";
            charge(Resource::feeInvalidRequest);
            return;
        }

        if (app_.getOPs().recvValidation(val, std::to_string(id())) ||
            cluster())
        {
            // Suppression id is the hash of the serialized validation.
            auto const suppression =
                sha512Half(makeSlice(val->getSerialized()));
            // haveMessage contains peers, which are suppressed; i.e. the peers
            // are the source of the message, consequently the message should
            // not be relayed to these peers. But the message must be counted
            // as part of the squelch logic.
            auto haveMessage =
                overlay_.relay(*packet, suppression, val->getSignerPublic());
            if (app_.config().REDUCE_RELAY_ENABLE && !haveMessage.empty() &&
                squelch::epoch<std::chrono::minutes>(UptimeClock::now()) >
                    squelch::WAIT_ON_BOOTUP)
            {
                overlay_.updateSlotAndSquelch(
                    suppression,
                    val->getSignerPublic(),
                    std::move(haveMessage),
                    protocol::mtVALIDATION);
            }
        }
    }
    catch (std::exception const&)
    {
        JLOG(p_journal_.trace()) << "Exception processing validation";
        charge(Resource::feeInvalidRequest);
    }
}
2479 
// Returns the set of peers that can help us get
// the TX tree with the specified root hash.
//
// Picks the highest-scoring peer (excluding `skip`) that has the tx
// set. NOTE(review): the return-type line and the declaration of `ret`
// are not visible in this chunk — presumably std::shared_ptr<PeerImp>;
// confirm against the repository.
getPeerWithTree(OverlayImpl& ov, uint256 const& rootHash, PeerImp const* skip)
{
    int retScore = 0;

    ov.for_each([&](std::shared_ptr<PeerImp>&& p) {
        if (p->hasTxSet(rootHash) && p.get() != skip)
        {
            auto score = p->getScore(true);
            if (!ret || (score > retScore))
            {
                ret = std::move(p);
                retScore = score;
            }
        }
    });

    return ret;
}
2503 
// Returns a random peer weighted by how likely to
// have the ledger and how responsive it is.
//
// Picks the highest-scoring peer (excluding `skip`) that has the
// requested ledger. NOTE(review): the return-type/name lines and the
// declaration of `ret` are not visible in this chunk — confirm against
// the repository.
    OverlayImpl& ov,
    uint256 const& ledgerHash,
    LedgerIndex ledger,
    PeerImp const* skip)
{
    int retScore = 0;

    ov.for_each([&](std::shared_ptr<PeerImp>&& p) {
        if (p->hasLedger(ledgerHash, ledger) && p.get() != skip)
        {
            auto score = p->getScore(true);
            if (!ret || (score > retScore))
            {
                ret = std::move(p);
                retScore = score;
            }
        }
    });

    return ret;
}
2531 
2532 // VFALCO NOTE This function is way too big and cumbersome.
2533 void
2534 PeerImp::getLedger(std::shared_ptr<protocol::TMGetLedger> const& m)
2535 {
2536  protocol::TMGetLedger& packet = *m;
2537  std::shared_ptr<SHAMap> shared;
2538  SHAMap const* map = nullptr;
2539  protocol::TMLedgerData reply;
2540  bool fatLeaves = true;
2542 
2543  if (packet.has_requestcookie())
2544  reply.set_requestcookie(packet.requestcookie());
2545 
2546  std::string logMe;
2547 
2548  if (packet.itype() == protocol::liTS_CANDIDATE)
2549  {
2550  // Request is for a transaction candidate set
2551  JLOG(p_journal_.trace()) << "GetLedger: Tx candidate set";
2552 
2553  if (!packet.has_ledgerhash() ||
2554  !stringIsUint256Sized(packet.ledgerhash()))
2555  {
2556  charge(Resource::feeInvalidRequest);
2557  JLOG(p_journal_.warn()) << "GetLedger: Tx candidate set invalid";
2558  return;
2559  }
2560 
2561  uint256 const txHash{packet.ledgerhash()};
2562 
2563  shared = app_.getInboundTransactions().getSet(txHash, false);
2564  map = shared.get();
2565 
2566  if (!map)
2567  {
2568  if (packet.has_querytype() && !packet.has_requestcookie())
2569  {
2570  JLOG(p_journal_.debug()) << "GetLedger: Routing Tx set request";
2571 
2572  if (auto const v = getPeerWithTree(overlay_, txHash, this))
2573  {
2574  packet.set_requestcookie(id());
2575  v->send(std::make_shared<Message>(
2576  packet, protocol::mtGET_LEDGER));
2577  return;
2578  }
2579 
2580  JLOG(p_journal_.info()) << "GetLedger: Route TX set failed";
2581  return;
2582  }
2583 
2584  JLOG(p_journal_.debug()) << "GetLedger: Can't provide map ";
2585  charge(Resource::feeInvalidRequest);
2586  return;
2587  }
2588 
2589  reply.set_ledgerseq(0);
2590  reply.set_ledgerhash(txHash.begin(), txHash.size());
2591  reply.set_type(protocol::liTS_CANDIDATE);
2592  fatLeaves = false; // We'll already have most transactions
2593  }
2594  else
2595  {
2596  if (send_queue_.size() >= Tuning::dropSendQueue)
2597  {
2598  JLOG(p_journal_.debug()) << "GetLedger: Large send queue";
2599  return;
2600  }
2601 
2602  if (app_.getFeeTrack().isLoadedLocal() && !cluster())
2603  {
2604  JLOG(p_journal_.debug()) << "GetLedger: Too busy";
2605  return;
2606  }
2607 
2608  // Figure out what ledger they want
2609  JLOG(p_journal_.trace()) << "GetLedger: Received";
2610 
2611  if (packet.has_ledgerhash())
2612  {
2613  if (!stringIsUint256Sized(packet.ledgerhash()))
2614  {
2615  charge(Resource::feeInvalidRequest);
2616  JLOG(p_journal_.warn()) << "GetLedger: Invalid request";
2617  return;
2618  }
2619 
2620  uint256 const ledgerhash{packet.ledgerhash()};
2621  logMe += "LedgerHash:";
2622  logMe += to_string(ledgerhash);
2623  ledger = app_.getLedgerMaster().getLedgerByHash(ledgerhash);
2624 
2625  if (!ledger && packet.has_ledgerseq())
2626  {
2627  if (auto shardStore = app_.getShardStore())
2628  {
2629  auto seq = packet.ledgerseq();
2630  if (seq >= shardStore->earliestLedgerSeq())
2631  ledger = shardStore->fetchLedger(ledgerhash, seq);
2632  }
2633  }
2634 
2635  if (!ledger)
2636  {
2637  JLOG(p_journal_.trace())
2638  << "GetLedger: Don't have " << ledgerhash;
2639  }
2640 
2641  if (!ledger &&
2642  (packet.has_querytype() && !packet.has_requestcookie()))
2643  {
2644  // We don't have the requested ledger
2645  // Search for a peer who might
2646  auto const v = getPeerWithLedger(
2647  overlay_,
2648  ledgerhash,
2649  packet.has_ledgerseq() ? packet.ledgerseq() : 0,
2650  this);
2651  if (!v)
2652  {
2653  JLOG(p_journal_.trace()) << "GetLedger: Cannot route";
2654  return;
2655  }
2656 
2657  packet.set_requestcookie(id());
2658  v->send(
2659  std::make_shared<Message>(packet, protocol::mtGET_LEDGER));
2660  JLOG(p_journal_.debug()) << "GetLedger: Request routed";
2661  return;
2662  }
2663  }
2664  else if (packet.has_ledgerseq())
2665  {
2666  if (packet.ledgerseq() < app_.getLedgerMaster().getEarliestFetch())
2667  {
2668  JLOG(p_journal_.debug()) << "GetLedger: Early ledger request";
2669  return;
2670  }
2671  ledger = app_.getLedgerMaster().getLedgerBySeq(packet.ledgerseq());
2672  if (!ledger)
2673  {
2674  JLOG(p_journal_.debug())
2675  << "GetLedger: Don't have " << packet.ledgerseq();
2676  }
2677  }
2678  else if (packet.has_ltype() && (packet.ltype() == protocol::ltCLOSED))
2679  {
2680  ledger = app_.getLedgerMaster().getClosedLedger();
2681  assert(!ledger->open());
2682  // VFALCO ledger should never be null!
2683  // VFALCO How can the closed ledger be open?
2684 #if 0
2685  if (ledger && ledger->info().open)
2686  ledger = app_.getLedgerMaster ().getLedgerBySeq (
2687  ledger->info().seq - 1);
2688 #endif
2689  }
2690  else
2691  {
2692  charge(Resource::feeInvalidRequest);
2693  JLOG(p_journal_.warn()) << "GetLedger: Unknown request";
2694  return;
2695  }
2696 
2697  if ((!ledger) ||
2698  (packet.has_ledgerseq() &&
2699  (packet.ledgerseq() != ledger->info().seq)))
2700  {
2701  charge(Resource::feeInvalidRequest);
2702 
2703  if (ledger)
2704  {
2705  JLOG(p_journal_.warn()) << "GetLedger: Invalid sequence";
2706  }
2707  return;
2708  }
2709 
2710  if (!packet.has_ledgerseq() &&
2711  (ledger->info().seq < app_.getLedgerMaster().getEarliestFetch()))
2712  {
2713  JLOG(p_journal_.debug()) << "GetLedger: Early ledger request";
2714  return;
2715  }
2716 
2717  // Fill out the reply
2718  auto const lHash = ledger->info().hash;
2719  reply.set_ledgerhash(lHash.begin(), lHash.size());
2720  reply.set_ledgerseq(ledger->info().seq);
2721  reply.set_type(packet.itype());
2722 
2723  if (packet.itype() == protocol::liBASE)
2724  {
2725  // they want the ledger base data
2726  JLOG(p_journal_.trace()) << "GetLedger: Base data";
2727  Serializer nData(128);
2728  addRaw(ledger->info(), nData);
2729  reply.add_nodes()->set_nodedata(
2730  nData.getDataPtr(), nData.getLength());
2731 
2732  auto const& stateMap = ledger->stateMap();
2733  if (stateMap.getHash() != beast::zero)
2734  {
2735  // return account state root node if possible
2736  Serializer rootNode(768);
2737 
2738  stateMap.serializeRoot(rootNode);
2739  reply.add_nodes()->set_nodedata(
2740  rootNode.getDataPtr(), rootNode.getLength());
2741 
2742  if (ledger->info().txHash != beast::zero)
2743  {
2744  auto const& txMap = ledger->txMap();
2745  if (txMap.getHash() != beast::zero)
2746  {
2747  rootNode.erase();
2748 
2749  txMap.serializeRoot(rootNode);
2750  reply.add_nodes()->set_nodedata(
2751  rootNode.getDataPtr(), rootNode.getLength());
2752  }
2753  }
2754  }
2755 
2756  auto oPacket =
2757  std::make_shared<Message>(reply, protocol::mtLEDGER_DATA);
2758  send(oPacket);
2759  return;
2760  }
2761 
2762  if (packet.itype() == protocol::liTX_NODE)
2763  {
2764  map = &ledger->txMap();
2765  logMe += " TX:";
2766  logMe += to_string(map->getHash());
2767  }
2768  else if (packet.itype() == protocol::liAS_NODE)
2769  {
2770  map = &ledger->stateMap();
2771  logMe += " AS:";
2772  logMe += to_string(map->getHash());
2773  }
2774  }
2775 
2776  if (!map || (packet.nodeids_size() == 0))
2777  {
2778  JLOG(p_journal_.warn()) << "GetLedger: Can't find map or empty request";
2779  charge(Resource::feeInvalidRequest);
2780  return;
2781  }
2782 
2783  JLOG(p_journal_.trace()) << "GetLedger: " << logMe;
2784 
2785  auto const depth = packet.has_querydepth()
2786  ? (std::min(packet.querydepth(), 3u))
2787  : (isHighLatency() ? 2 : 1);
2788 
2789  for (int i = 0;
2790  (i < packet.nodeids().size() &&
2791  (reply.nodes().size() < Tuning::maxReplyNodes));
2792  ++i)
2793  {
2794  auto const mn = deserializeSHAMapNodeID(packet.nodeids(i));
2795 
2796  if (!mn)
2797  {
2798  JLOG(p_journal_.warn()) << "GetLedger: Invalid node " << logMe;
2799  charge(Resource::feeBadData);
2800  return;
2801  }
2802 
2803  std::vector<SHAMapNodeID> nodeIDs;
2804  std::vector<Blob> rawNodes;
2805 
2806  try
2807  {
2808  if (map->getNodeFat(*mn, nodeIDs, rawNodes, fatLeaves, depth))
2809  {
2810  assert(nodeIDs.size() == rawNodes.size());
2811  JLOG(p_journal_.trace()) << "GetLedger: getNodeFat got "
2812  << rawNodes.size() << " nodes";
2813  std::vector<SHAMapNodeID>::iterator nodeIDIterator;
2814  std::vector<Blob>::iterator rawNodeIterator;
2815 
2816  for (nodeIDIterator = nodeIDs.begin(),
2817  rawNodeIterator = rawNodes.begin();
2818  nodeIDIterator != nodeIDs.end();
2819  ++nodeIDIterator, ++rawNodeIterator)
2820  {
2821  protocol::TMLedgerNode* node = reply.add_nodes();
2822  node->set_nodeid(nodeIDIterator->getRawString());
2823  node->set_nodedata(
2824  &rawNodeIterator->front(), rawNodeIterator->size());
2825  }
2826  }
2827  else
2828  {
2829  JLOG(p_journal_.warn())
2830  << "GetLedger: getNodeFat returns false";
2831  }
2832  }
2833  catch (std::exception&)
2834  {
2835  std::string info;
2836 
2837  if (packet.itype() == protocol::liTS_CANDIDATE)
2838  info = "TS candidate";
2839  else if (packet.itype() == protocol::liBASE)
2840  info = "Ledger base";
2841  else if (packet.itype() == protocol::liTX_NODE)
2842  info = "TX node";
2843  else if (packet.itype() == protocol::liAS_NODE)
2844  info = "AS node";
2845 
2846  if (!packet.has_ledgerhash())
2847  info += ", no hash specified";
2848 
2849  JLOG(p_journal_.warn())
2850  << "getNodeFat( " << *mn << ") throws exception: " << info;
2851  }
2852  }
2853 
2854  JLOG(p_journal_.info())
2855  << "Got request for " << packet.nodeids().size() << " nodes at depth "
2856  << depth << ", return " << reply.nodes().size() << " nodes";
2857 
2858  auto oPacket = std::make_shared<Message>(reply, protocol::mtLEDGER_DATA);
2859  send(oPacket);
2860 }
2861 
2862 int
2863 PeerImp::getScore(bool haveItem) const
2864 {
2865  // Random component of score, used to break ties and avoid
2866  // overloading the "best" peer
2867  static const int spRandomMax = 9999;
2868 
2869  // Score for being very likely to have the thing we are
2870  // look for; should be roughly spRandomMax
2871  static const int spHaveItem = 10000;
2872 
2873  // Score reduction for each millisecond of latency; should
2874  // be roughly spRandomMax divided by the maximum reasonable
2875  // latency
2876  static const int spLatency = 30;
2877 
2878  // Penalty for unknown latency; should be roughly spRandomMax
2879  static const int spNoLatency = 8000;
2880 
2881  int score = rand_int(spRandomMax);
2882 
2883  if (haveItem)
2884  score += spHaveItem;
2885 
2886  boost::optional<std::chrono::milliseconds> latency;
2887  {
2888  std::lock_guard sl(recentLock_);
2889  latency = latency_;
2890  }
2891 
2892  if (latency)
2893  score -= latency->count() * spLatency;
2894  else
2895  score -= spNoLatency;
2896 
2897  return score;
2898 }
2899 
2900 bool
2901 PeerImp::isHighLatency() const
2902 {
2903  std::lock_guard sl(recentLock_);
2904  return latency_ >= peerHighLatency;
2905 }
2906 
2907 void
2908 PeerImp::Metrics::add_message(std::uint64_t bytes)
2909 {
2910  using namespace std::chrono_literals;
2911  std::unique_lock lock{mutex_};
2912 
2913  totalBytes_ += bytes;
2914  accumBytes_ += bytes;
2915  auto const timeElapsed = clock_type::now() - intervalStart_;
2916  auto const timeElapsedInSecs =
2917  std::chrono::duration_cast<std::chrono::seconds>(timeElapsed);
2918 
2919  if (timeElapsedInSecs >= 1s)
2920  {
2921  auto const avgBytes = accumBytes_ / timeElapsedInSecs.count();
2922  rollingAvg_.push_back(avgBytes);
2923 
2924  auto const totalBytes =
2925  std::accumulate(rollingAvg_.begin(), rollingAvg_.end(), 0ull);
2926  rollingAvgBytes_ = totalBytes / rollingAvg_.size();
2927 
2928  intervalStart_ = clock_type::now();
2929  accumBytes_ = 0;
2930  }
2931 }
2932 
2934 PeerImp::Metrics::average_bytes() const
2935 {
2936  std::shared_lock lock{mutex_};
2937  return rollingAvgBytes_;
2938 }
2939 
2941 PeerImp::Metrics::total_bytes() const
2942 {
2943  std::shared_lock lock{mutex_};
2944  return totalBytes_;
2945 }
2946 
2947 } // namespace ripple
ripple::PublicKey::data
std::uint8_t const * data() const noexcept
Definition: PublicKey.h:81
ripple::PeerImp::ledgerRange
void ledgerRange(std::uint32_t &minSeq, std::uint32_t &maxSeq) const override
Definition: PeerImp.cpp:459
ripple::PeerImp::uptime
clock_type::duration uptime() const
Definition: PeerImp.h:322
ripple::Resource::feeInvalidRequest
const Charge feeInvalidRequest
Schedule of fees charged for imposing load on the server.
ripple::Application
Definition: Application.h:97
ripple::ClusterNode
Definition: ClusterNode.h:30
ripple::jtTRANSACTION
@ jtTRANSACTION
Definition: Job.h:51
ripple::PeerImp::inbound_
const bool inbound_
Definition: PeerImp.h:91
ripple::TrafficCount::categorize
static category categorize(::google::protobuf::Message const &message, int type, bool inbound)
Given a protocol message, determine which traffic category it belongs to.
Definition: TrafficCount.cpp:25
sstream
ripple::Tuning::sendQueueLogFreq
@ sendQueueLogFreq
How often to log send queue size.
Definition: overlay/impl/Tuning.h:53
ripple::PeerImp::recentLock_
std::mutex recentLock_
Definition: PeerImp.h:148
ripple::HashRouter::addSuppressionPeerWithStatus
std::pair< bool, std::optional< Stopwatch::time_point > > addSuppressionPeerWithStatus(uint256 const &key, PeerShortID peer)
Add a suppression peer and get message's relay status.
Definition: HashRouter.cpp:57
ripple::RCLCxPeerPos
A peer's signed, proposed position for use in RCLConsensus.
Definition: RCLCxPeerPos.h:42
std::weak_ptr::lock
T lock(T... args)
ripple::PeerImp::stream_ptr_
std::unique_ptr< stream_type > stream_ptr_
Definition: PeerImp.h:78
ripple::makeSlice
std::enable_if_t< std::is_same< T, char >::value||std::is_same< T, unsigned char >::value, Slice > makeSlice(std::array< T, N > const &a)
Definition: Slice.h:240
ripple::PeerImp::onMessageBegin
void onMessageBegin(std::uint16_t type, std::shared_ptr<::google::protobuf::Message > const &m, std::size_t size)
Definition: PeerImp.cpp:966
ripple::csHopLimit
static constexpr std::uint32_t csHopLimit
Definition: ripple/overlay/Peer.h:36
ripple::Application::cluster
virtual Cluster & cluster()=0
ripple::PeerImp::socket_
socket_type & socket_
Definition: PeerImp.h:79
std::bind
T bind(T... args)
ripple::PeerImp::trackingTime_
clock_type::time_point trackingTime_
Definition: PeerImp.h:97
ripple::HashRouter::addSuppressionPeer
bool addSuppressionPeer(uint256 const &key, PeerShortID peer)
Definition: HashRouter.cpp:51
std::string
STL class.
ripple::Resource::feeMediumBurdenPeer
const Charge feeMediumBurdenPeer
std::shared_ptr
STL class.
ripple::PeerImp::onMessage
void onMessage(std::shared_ptr< protocol::TMManifests > const &m)
Definition: PeerImp.cpp:988
ripple::ManifestCache::getMasterKey
PublicKey getMasterKey(PublicKey const &pk) const
Returns ephemeral signing key's master public key.
Definition: app/misc/impl/Manifest.cpp:280
ripple::SHAMap::getHash
SHAMapHash getHash() const
Definition: SHAMap.cpp:778
std::exception
STL class.
ripple::PeerImp::hasTxSet
bool hasTxSet(uint256 const &hash) const override
Definition: PeerImp.cpp:478
ripple::calcNodeID
NodeID calcNodeID(PublicKey const &pk)
Calculate the 160-bit node ID from a node public key.
Definition: PublicKey.cpp:299
beast::Journal::trace
Stream trace() const
Severity stream access functions.
Definition: Journal.h:309
ripple::publicKeyType
boost::optional< KeyType > publicKeyType(Slice const &slice)
Returns the type of public key.
Definition: PublicKey.cpp:203
ripple::PeerImp::strand_
boost::asio::strand< boost::asio::executor > strand_
Definition: PeerImp.h:81
ripple::Tuning::targetSendQueue
@ targetSendQueue
How many messages we consider reasonable sustained on a send queue.
Definition: overlay/impl/Tuning.h:50
ripple::PeerImp::recentLedgers_
boost::circular_buffer< uint256 > recentLedgers_
Definition: PeerImp.h:111
ripple::deserializeSHAMapNodeID
std::optional< SHAMapNodeID > deserializeSHAMapNodeID(void const *data, std::size_t size)
Return an object representing a serialized SHAMap Node ID.
Definition: SHAMapNodeID.cpp:101
ripple::PeerImp::request_
http_request_type request_
Definition: PeerImp.h:154
ripple::Resource::Gossip
Data format for exchanging consumption information across peers.
Definition: Gossip.h:29
ripple::PeerImp::~PeerImp
virtual ~PeerImp()
Definition: PeerImp.cpp:108
ripple::PeerImp::getShardIndexes
boost::optional< RangeSet< std::uint32_t > > getShardIndexes() const
Return a range set of known shard indexes from this peer.
Definition: PeerImp.cpp:560
ripple::Serializer::erase
void erase()
Definition: Serializer.h:207
beast::IP::Endpoint::to_string
std::string to_string() const
Returns a string representing the endpoint.
Definition: IPEndpoint.cpp:54
std::pair
ripple::http_request_type
boost::beast::http::request< boost::beast::http::dynamic_body > http_request_type
Definition: Handoff.h:31
ripple::PeerImp::doAccept
void doAccept()
Definition: PeerImp.cpp:712
std::vector::reserve
T reserve(T... args)
ripple::OverlayImpl::updateSlotAndSquelch
void updateSlotAndSquelch(uint256 const &key, PublicKey const &validator, std::set< Peer::id_t > &&peers, protocol::MessageType type)
Updates message count for validator/peer.
Definition: OverlayImpl.cpp:1407
ripple::HashRouter::shouldProcess
bool shouldProcess(uint256 const &key, PeerShortID peer, int &flags, std::chrono::seconds tx_interval)
Definition: HashRouter.cpp:78
ripple::HashPrefix::manifest
@ manifest
Manifest.
ripple::PeerImp::metrics_
struct ripple::PeerImp::@14 metrics_
ripple::LedgerMaster::getValidLedgerIndex
LedgerIndex getValidLedgerIndex()
Definition: LedgerMaster.cpp:212
ripple::Config::REDUCE_RELAY_ENABLE
bool REDUCE_RELAY_ENABLE
Definition: Config.h:197
Json::UInt
unsigned int UInt
Definition: json_forwards.h:27
ripple::PeerImp::doProtocolStart
void doProtocolStart()
Definition: PeerImp.cpp:812
std::vector
STL class.
std::find
T find(T... args)
std::string::size
T size(T... args)
ripple::PeerImp::recentTxSets_
boost::circular_buffer< uint256 > recentTxSets_
Definition: PeerImp.h:112
ripple::PublicKey::empty
bool empty() const noexcept
Definition: PublicKey.h:117
ripple::Tuning::sendqIntervals
@ sendqIntervals
How many timer intervals a sendq has to stay large before we disconnect.
Definition: overlay/impl/Tuning.h:44
ripple::make_protocol
constexpr ProtocolVersion make_protocol(std::uint16_t major, std::uint16_t minor)
Definition: ProtocolVersion.h:40
std::chrono::milliseconds
ripple::PeerImp::setTimer
void setTimer()
Definition: PeerImp.cpp:600
ripple::OverlayImpl::incPeerDisconnectCharges
void incPeerDisconnectCharges() override
Definition: OverlayImpl.h:357
ripple::toBase58
std::string toBase58(AccountID const &v)
Convert AccountID to base58 checked string.
Definition: AccountID.cpp:29
beast::IP::Endpoint::address
Address const & address() const
Returns the address portion of this endpoint.
Definition: IPEndpoint.h:77
ripple::PeerImp::getVersion
std::string getVersion() const
Return the version of rippled that the peer is running, if reported.
Definition: PeerImp.cpp:303
std::stringstream
STL class.
beast::Journal::warn
Stream warn() const
Definition: Journal.h:327
std::shared_ptr::get
T get(T... args)
std::lock_guard
STL class.
ripple::SBoxCmp::diff
@ diff
ripple::Application::getShardStore
virtual NodeStore::DatabaseShard * getShardStore()=0
ripple::PeerImp::close
void close()
Definition: PeerImp.cpp:506
ripple::PeerImp::charge
void charge(Resource::Charge const &fee) override
Adjust this peer's load balance based on the type of load imposed.
Definition: PeerImp.cpp:274
ripple::match_peer
Select the specific peer.
Definition: predicates.h:115
ripple::PeerImp::onMessageUnknown
void onMessageUnknown(std::uint16_t type)
Definition: PeerImp.cpp:960
ripple::addRaw
void addRaw(LedgerInfo const &info, Serializer &s)
Definition: View.cpp:43
ripple::from_string
bool from_string(RangeSet< T > &rs, std::string const &s)
Convert the given styled string to a RangeSet.
Definition: RangeSet.h:126
ripple::PeerImp::squelch_
squelch::Squelch< UptimeClock > squelch_
Definition: PeerImp.h:119
ripple::JobQueue::addJob
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition: JobQueue.h:166
ripple::stopwatch
Stopwatch & stopwatch()
Returns an instance of a wall clock.
Definition: chrono.h:86
std::setfill
T setfill(T... args)
ripple::PeerImp::ShardInfo::endpoint
beast::IP::Endpoint endpoint
Definition: PeerImp.h:56
ripple::to_string
std::string to_string(ListDisposition disposition)
Definition: ValidatorList.cpp:42
ripple::PeerImp::journal_
const beast::Journal journal_
Definition: PeerImp.h:76
ripple::PeerImp::send
void send(std::shared_ptr< Message > const &m) override
Definition: PeerImp.cpp:219
ripple::Application::timeKeeper
virtual TimeKeeper & timeKeeper()=0
ripple::buildHandshake
void buildHandshake(boost::beast::http::fields &h, ripple::uint256 const &sharedValue, boost::optional< std::uint32_t > networkID, beast::IP::Address public_ip, beast::IP::Address remote_ip, Application &app)
Insert fields headers necessary for upgrading the link to the peer protocol.
Definition: Handshake.cpp:102
ripple::OverlayImpl::setup
Setup const & setup() const
Definition: OverlayImpl.h:177
ripple::ProtocolFeature
ProtocolFeature
Definition: ripple/overlay/Peer.h:38
ripple::PeerImp::onTimer
void onTimer(boost::system::error_code const &ec)
Definition: PeerImp.cpp:635
ripple::Cluster::update
bool update(PublicKey const &identity, std::string name, std::uint32_t loadFee=0, NetClock::time_point reportTime=NetClock::time_point{})
Store information about the state of a cluster node.
Definition: Cluster.cpp:58
ripple::PeerImp::lastPingTime_
clock_type::time_point lastPingTime_
Definition: PeerImp.h:116
ripple::OverlayImpl::incJqTransOverflow
void incJqTransOverflow() override
Increment and retrieve counter for transaction job queue overflows.
Definition: OverlayImpl.h:333
ripple::PeerImp
Definition: PeerImp.h:46
ripple::PeerFinder::Config::peerPrivate
bool peerPrivate
true if we want our IP address kept private.
Definition: PeerfinderManager.h:62
ripple::Config::MAX_TRANSACTIONS
int MAX_TRANSACTIONS
Definition: Config.h:185
ripple::PeerImp::previousLedgerHash_
uint256 previousLedgerHash_
Definition: PeerImp.h:109
std::vector::front
T front(T... args)
algorithm
ripple::Application::getOPs
virtual NetworkOPs & getOPs()=0
ripple::PeerImp::name_
std::string name_
Definition: PeerImp.h:101
ripple::PeerFinder::Manager::on_endpoints
virtual void on_endpoints(std::shared_ptr< Slot > const &slot, Endpoints const &endpoints)=0
Called when mtENDPOINTS is received.
ripple::forceValidity
void forceValidity(HashRouter &router, uint256 const &txid, Validity validity)
Sets the validity of a given transaction in the cache.
Definition: apply.cpp:89
ripple::Application::getInboundLedgers
virtual InboundLedgers & getInboundLedgers()=0
ripple::Application::getFeeTrack
virtual LoadFeeTrack & getFeeTrack()=0
ripple::base_uint< 256 >::size
constexpr static std::size_t size()
Definition: base_uint.h:426
ripple::getPeerWithLedger
static std::shared_ptr< PeerImp > getPeerWithLedger(OverlayImpl &ov, uint256 const &ledgerHash, LedgerIndex ledger, PeerImp const *skip)
Definition: PeerImp.cpp:2508
ripple::PeerImp::publicKey_
const PublicKey publicKey_
Definition: PeerImp.h:100
ripple::protocolMessageName
std::string protocolMessageName(int type)
Returns the name of a protocol message given its type.
Definition: ProtocolMessage.h:42
ripple::PeerImp::read_buffer_
boost::beast::multi_buffer read_buffer_
Definition: PeerImp.h:153
ripple::PeerImp::error_code
boost::system::error_code error_code
Definition: PeerImp.h:62
ripple::JobQueue::getJobCount
int getJobCount(JobType t) const
Jobs waiting at this priority.
Definition: JobQueue.cpp:121
std::tie
T tie(T... args)
ripple::PeerImp::remote_address_
const beast::IP::Endpoint remote_address_
Definition: PeerImp.h:86
ripple::Cluster::member
boost::optional< std::string > member(PublicKey const &node) const
Determines whether a node belongs in the cluster.
Definition: Cluster.cpp:39
ripple::jtTXN_DATA
@ jtTXN_DATA
Definition: Job.h:55
ripple::PeerFinder::Manager::on_closed
virtual void on_closed(std::shared_ptr< Slot > const &slot)=0
Called when the slot is closed.
ripple::OverlayImpl::peerFinder
PeerFinder::Manager & peerFinder()
Definition: OverlayImpl.h:159
ripple::getPeerWithTree
static std::shared_ptr< PeerImp > getPeerWithTree(OverlayImpl &ov, uint256 const &rootHash, PeerImp const *skip)
Definition: PeerImp.cpp:2484
ripple::base_uint< 256 >
ripple::LoadFeeTrack::isLoadedLocal
bool isLoadedLocal() const
Definition: LoadFeeTrack.h:123
ripple::PeerImp::addLedger
void addLedger(uint256 const &hash, std::lock_guard< std::mutex > const &lockedRecentLock)
Definition: PeerImp.cpp:2266
ripple::http_response_type
boost::beast::http::response< boost::beast::http::dynamic_body > http_response_type
Definition: Handoff.h:34
ripple::Resource::feeInvalidSignature
const Charge feeInvalidSignature
ripple::OverlayImpl::onManifests
void onManifests(std::shared_ptr< protocol::TMManifests > const &m, std::shared_ptr< PeerImp > const &from)
Definition: OverlayImpl.cpp:657
ripple::Overlay::Setup::public_ip
beast::IP::Address public_ip
Definition: Overlay.h:75
std::enable_shared_from_this< PeerImp >::shared_from_this
T shared_from_this(T... args)
ripple::UptimeClock::now
static time_point now()
Definition: UptimeClock.cpp:63
ripple::rand_int
std::enable_if_t< std::is_integral< Integral >::value &&detail::is_engine< Engine >::value, Integral > rand_int(Engine &engine, Integral min, Integral max)
Return a uniformly distributed random integer.
Definition: ripple/basics/random.h:115
ripple::NetworkOPs::isNeedNetworkLedger
virtual bool isNeedNetworkLedger()=0
ripple::Resource::drop
@ drop
Definition: Disposition.h:37
ripple::checkValidity
std::pair< Validity, std::string > checkValidity(HashRouter &router, STTx const &tx, Rules const &rules, Config const &config)
Checks transaction signature and local checks.
Definition: apply.cpp:37
ripple::jtPROPOSAL_t
@ jtPROPOSAL_t
Definition: Job.h:60
ripple::base_uint::isZero
bool isZero() const
Definition: base_uint.h:439
ripple::OverlayImpl::resourceManager
Resource::Manager & resourceManager()
Definition: OverlayImpl.h:165
Json::objectValue
@ objectValue
object value (collection of name/value pairs).
Definition: json_value.h:43
ripple::PeerImp::gracefulClose
void gracefulClose()
Definition: PeerImp.cpp:579
ripple::Application::getLedgerMaster
virtual LedgerMaster & getLedgerMaster()=0
ripple::PublicKey
A public key.
Definition: PublicKey.h:59
std::atomic::load
T load(T... args)
ripple::Resource::feeBadData
const Charge feeBadData
ripple::PublicKey::size
std::size_t size() const noexcept
Definition: PublicKey.h:87
ripple::PeerImp::shardInfo_
hash_map< PublicKey, ShardInfo > shardInfo_
Definition: PeerImp.h:166
ripple::Serializer::getDataPtr
const void * getDataPtr() const
Definition: Serializer.h:187
ripple::Resource::Manager::importConsumers
virtual void importConsumers(std::string const &origin, Gossip const &gossip)=0
Import packaged consumer information.
ripple::PeerImp::closedLedgerHash_
uint256 closedLedgerHash_
Definition: PeerImp.h:108
ripple::PeerImp::lastPingSeq_
boost::optional< std::uint32_t > lastPingSeq_
Definition: PeerImp.h:115
ripple::PeerImp::detaching_
bool detaching_
Definition: PeerImp.h:98
ripple::PeerImp::onMessageEnd
void onMessageEnd(std::uint16_t type, std::shared_ptr<::google::protobuf::Message > const &m)
Definition: PeerImp.cpp:979
ripple::Application::config
virtual Config & config()=0
ripple::isCurrent
bool isCurrent(ValidationParms const &p, NetClock::time_point now, NetClock::time_point signTime, NetClock::time_point seenTime)
Whether a validation is still current.
Definition: Validations.h:145
beast::Journal::active
bool active(Severity level) const
Returns true if any message would be logged at this severity level.
Definition: Journal.h:301
ripple::PeerImp::stream_
stream_type & stream_
Definition: PeerImp.h:80
ripple::PeerImp::onWriteMessage
void onWriteMessage(error_code ec, std::size_t bytes_transferred)
Definition: PeerImp.cpp:906
std::unique_lock
STL class.
ripple::SHAMap
A SHAMap is both a radix tree with a fan-out of 16 and a Merkle tree.
Definition: SHAMap.h:95
ripple::InfoSub::Source::pubPeerStatus
virtual void pubPeerStatus(std::function< Json::Value(void)> const &)=0
ripple::jtVALIDATION_t
@ jtVALIDATION_t
Definition: Job.h:57
ripple::PeerImp::hasRange
bool hasRange(std::uint32_t uMin, std::uint32_t uMax) override
Definition: PeerImp.cpp:496
ripple::Resource::feeUnwantedData
const Charge feeUnwantedData
std::to_string
T to_string(T... args)
ripple::Application::getJobQueue
virtual JobQueue & getJobQueue()=0
ripple::Resource::Gossip::items
std::vector< Item > items
Definition: Gossip.h:42
ripple::PeerImp::cycleStatus
void cycleStatus() override
Definition: PeerImp.cpp:486
ripple::set
bool set(T &target, std::string const &name, Section const &section)
Set a value from a configuration Section If the named value is not found or doesn't parse as a T,...
Definition: BasicConfig.h:276
ripple::PeerImp::app_
Application & app_
Definition: PeerImp.h:72
ripple::PeerImp::crawl
bool crawl() const
Returns true if this connection will publicly share its IP address.
Definition: PeerImp.cpp:288
ripple::PeerImp::minLedger_
LedgerIndex minLedger_
Definition: PeerImp.h:106
ripple::makeSharedValue
boost::optional< uint256 > makeSharedValue(stream_type &ssl, beast::Journal journal)
Computes a shared value based on the SSL connection state.
Definition: Handshake.cpp:70
ripple::base64_decode
std::string base64_decode(std::string const &data)
Definition: base64.cpp:245
beast::Journal::error
Stream error() const
Definition: Journal.h:333
beast::Journal::info
Stream info() const
Definition: Journal.h:321
std::chrono::time_point
ripple::PeerImp::hasLedger
bool hasLedger(uint256 const &hash, std::uint32_t seq) const override
Definition: PeerImp.cpp:442
ripple::PeerImp::Tracking::unknown
@ unknown
ripple::Resource::Consumer::balance
int balance()
Returns the credit balance representing consumption.
Definition: Consumer.cpp:124
ripple::HashPrefix::proposal
@ proposal
proposal for signing
ripple::TimeKeeper::closeTime
virtual time_point closeTime() const =0
Returns the close time, in network time.
ripple::Job
Definition: Job.h:82
ripple::PeerImp::headers_
boost::beast::http::fields const & headers_
Definition: PeerImp.h:156
std::accumulate
T accumulate(T... args)
ripple::SerialIter
Definition: Serializer.h:308
ripple::NodeStore::seqToShardIndex
constexpr std::uint32_t seqToShardIndex(std::uint32_t ledgerSeq, std::uint32_t ledgersPerShard=DatabaseShard::ledgersPerShardDefault)
Definition: DatabaseShard.h:183
std::uint32_t
ripple::PeerImp::send_queue_
std::queue< std::shared_ptr< Message > > send_queue_
Definition: PeerImp.h:157
ripple::NodeStore::Database::earliestLedgerSeq
std::uint32_t earliestLedgerSeq() const
Definition: Database.h:241
ripple::PeerImp::slot_
const std::shared_ptr< PeerFinder::Slot > slot_
Definition: PeerImp.h:152
ripple::Overlay::foreach
void foreach(Function f) const
Visit every active peer.
Definition: Overlay.h:178
ripple::PeerImp::load_event_
std::unique_ptr< LoadEvent > load_event_
Definition: PeerImp.h:160
ripple::PeerImp::protocol_
ProtocolVersion protocol_
Definition: PeerImp.h:94
ripple::Application::getValidationPublicKey
virtual PublicKey const & getValidationPublicKey() const =0
ripple::Cluster::size
std::size_t size() const
The number of nodes in the cluster list.
Definition: Cluster.cpp:50
std::nth_element
T nth_element(T... args)
memory
ripple::PeerImp::waitable_timer
boost::asio::basic_waitable_timer< std::chrono::steady_clock > waitable_timer
Definition: PeerImp.h:69
ripple::jtPEER
@ jtPEER
Definition: Job.h:67
ripple::PeerImp::onShutdown
void onShutdown(error_code ec)
Definition: PeerImp.cpp:696
ripple::proposalUniqueId
uint256 proposalUniqueId(uint256 const &proposeHash, uint256 const &previousLedger, std::uint32_t proposeSeq, NetClock::time_point closeTime, Slice const &publicKey, Slice const &signature)
Calculate a unique identifier for a signed proposal.
Definition: RCLCxPeerPos.cpp:72
ripple::PeerImp::name
std::string name() const
Definition: PeerImp.cpp:795
ripple::Application::validators
virtual ValidatorList & validators()=0
ripple::KeyType::secp256k1
@ secp256k1
ripple::RCLCxPeerPos::publicKey
PublicKey const & publicKey() const
Public key of peer that sent the proposal.
Definition: RCLCxPeerPos.h:81
std::weak_ptr
STL class.
ripple::PeerImp::timer_
waitable_timer timer_
Definition: PeerImp.h:82
std::min
T min(T... args)
ripple::Serializer
Definition: Serializer.h:39
ripple::BuildInfo::getFullVersionString
std::string const & getFullVersionString()
Full server version string.
Definition: BuildInfo.cpp:74
ripple::LedgerMaster::getValidatedLedgerAge
std::chrono::seconds getValidatedLedgerAge()
Definition: LedgerMaster.cpp:268
ripple::Resource::Gossip::Item
Describes a single consumer.
Definition: Gossip.h:34
ripple::PeerImp::ShardInfo
Definition: PeerImp.h:54
ripple::OverlayImpl::deletePeer
void deletePeer(Peer::id_t id)
Called when the peer is deleted.
Definition: OverlayImpl.cpp:1440
ripple::PeerImp::Tracking::diverged
@ diverged
ripple::jtPACK
@ jtPACK
Definition: Job.h:41
ripple::PeerImp::gracefulClose_
bool gracefulClose_
Definition: PeerImp.h:158
ripple::PeerImp::latency_
boost::optional< std::chrono::milliseconds > latency_
Definition: PeerImp.h:114
std::vector::emplace_back
T emplace_back(T... args)
ripple
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: RCLCensorshipDetector.h:29
ripple::InboundLedgers::gotLedgerData
virtual bool gotLedgerData(LedgerHash const &ledgerHash, std::shared_ptr< Peer >, std::shared_ptr< protocol::TMLedgerData >)=0
ripple::Application::getNodeStore
virtual NodeStore::Database & getNodeStore()=0
ripple::Application::validatorManifests
virtual ManifestCache & validatorManifests()=0
ripple::ValidatorList::for_each_available
void for_each_available(std::function< void(std::string const &manifest, std::string const &blob, std::string const &signature, std::uint32_t version, PublicKey const &pubKey, std::size_t sequence, uint256 const &hash)> func) const
Invokes the callback once for every available publisher list's raw data members.
Definition: ValidatorList.cpp:774
ripple::OverlayImpl::getManifestsMessage
std::shared_ptr< Message > getManifestsMessage()
Definition: OverlayImpl.cpp:1267
ripple::send_if_not
send_if_not_pred< Predicate > send_if_not(std::shared_ptr< Message > const &m, Predicate const &f)
Helper function to aid in type deduction.
Definition: predicates.h:107
ripple::jtVALIDATION_ut
@ jtVALIDATION_ut
Definition: Job.h:43
ripple::INVALID
@ INVALID
Definition: Transaction.h:46
ripple::base_uint::parseHex
bool parseHex(std::string_view sv)
Parse a hex string into a base_uint.
Definition: base_uint.h:384
ripple::OverlayImpl::remove
void remove(std::shared_ptr< PeerFinder::Slot > const &slot)
Definition: OverlayImpl.cpp:474
ripple::squelch::IDLED
static constexpr seconds IDLED
Definition: SquelchCommon.h:35
ripple::base_uint::zero
void zero()
Definition: base_uint.h:449
std::vector::begin
T begin(T... args)
ripple::PeerFinder::Manager::config
virtual Config config()=0
Returns the configuration for the manager.
std
STL namespace.
ripple::Resource::Consumer::disconnect
bool disconnect()
Returns true if the consumer should be disconnected.
Definition: Consumer.cpp:117
beast::severities::kWarning
@ kWarning
Definition: Journal.h:37
ripple::sha512Half
sha512_half_hasher::result_type sha512Half(Args const &... args)
Returns the SHA512-Half of a series of objects.
Definition: digest.h:216
beast::IP::Endpoint::from_string
static Endpoint from_string(std::string const &s)
Definition: IPEndpoint.cpp:46
ripple::OverlayImpl::activate
void activate(std::shared_ptr< PeerImp > const &peer)
Called when a peer has connected successfully This is called after the peer handshake has been comple...
Definition: OverlayImpl.cpp:626
ripple::OverlayImpl::onPeerDeactivate
void onPeerDeactivate(Peer::id_t id)
Definition: OverlayImpl.cpp:650
ripple::PeerImp::hasShard
bool hasShard(std::uint32_t shardIndex) const override
Definition: PeerImp.cpp:468
ripple::Tuning::readBufferBytes
constexpr std::size_t readBufferBytes
Size of buffer used to read from the socket.
Definition: overlay/impl/Tuning.h:60
ripple::Overlay::Setup::networkID
boost::optional< std::uint32_t > networkID
Definition: Overlay.h:78
ripple::Resource::Gossip::Item::address
beast::IP::Endpoint address
Definition: Gossip.h:39
ripple::LedgerMaster::getCurrentLedgerIndex
LedgerIndex getCurrentLedgerIndex()
Definition: LedgerMaster.cpp:206
ripple::Resource::Consumer
An endpoint that consumes resources.
Definition: Consumer.h:33
ripple::Resource::Charge
A consumption charge.
Definition: Charge.h:30
ripple::Resource::Gossip::Item::balance
int balance
Definition: Gossip.h:38
ripple::TimeKeeper::now
virtual time_point now() const override=0
Returns the estimate of wall time, in network time.
ripple::OverlayImpl::lastLink
void lastLink(std::uint32_t id)
Called when the last link from a peer chain is received.
Definition: OverlayImpl.cpp:829
ripple::PeerImp::maxLedger_
LedgerIndex maxLedger_
Definition: PeerImp.h:107
ripple::PeerImp::run
void run()
Definition: PeerImp.cpp:131
ripple::Config::COMPRESSION
bool COMPRESSION
Definition: Config.h:182
ripple::LoadFeeTrack::setClusterFee
void setClusterFee(std::uint32_t fee)
Definition: LoadFeeTrack.h:111
ripple::PeerImp::checkTracking
void checkTracking(std::uint32_t validationSeq)
Check if the peer is tracking.
Definition: PeerImp.cpp:1795
ripple::PeerImp::large_sendq_
int large_sendq_
Definition: PeerImp.h:159
beast::severities::kDebug
@ kDebug
Definition: Journal.h:35
ripple::PeerImp::domain
std::string domain() const
Definition: PeerImp.cpp:802
std::string::empty
T empty(T... args)
ripple::squelch::WAIT_ON_BOOTUP
static constexpr minutes WAIT_ON_BOOTUP
Definition: SquelchCommon.h:46
ripple::Resource::feeLightPeer
const Charge feeLightPeer
ripple::jtPROPOSAL_ut
@ jtPROPOSAL_ut
Definition: Job.h:46
ripple::TokenType::NodePublic
@ NodePublic
ripple::PeerImp::last_status_
protocol::TMStatusChange last_status_
Definition: PeerImp.h:149
ripple::PeerImp::setPublisherListSequence
void setPublisherListSequence(PublicKey const &pubKey, std::size_t const seq) override
Definition: PeerImp.h:345
ripple::RCLCxPeerPos::suppressionID
uint256 const & suppressionID() const
Unique id used by hash router to suppress duplicates.
Definition: RCLCxPeerPos.h:88
ripple::PeerImp::supportsFeature
bool supportsFeature(ProtocolFeature f) const override
Definition: PeerImp.cpp:429
ripple::OverlayImpl::findPeerByPublicKey
std::shared_ptr< Peer > findPeerByPublicKey(PublicKey const &pubKey) override
Returns the peer with the matching public key, or null.
Definition: OverlayImpl.cpp:1200
mutex
std::stringstream::str
T str(T... args)
beast::Journal::debug
Stream debug() const
Definition: Journal.h:315
std::size_t
ripple::PeerImp::json
Json::Value json() override
Definition: PeerImp.cpp:311
ripple::Cluster::for_each
void for_each(std::function< void(ClusterNode const &)> func) const
Invokes the callback once for every cluster node.
Definition: Cluster.cpp:84
ripple::PeerImp::compressionEnabled_
Compressed compressionEnabled_
Definition: PeerImp.h:168
ripple::ProtocolFeature::ValidatorListPropagation
@ ValidatorListPropagation
beast::IP::Endpoint
A version-independent IP address and port combination.
Definition: IPEndpoint.h:39
ripple::OverlayImpl::incPeerDisconnect
void incPeerDisconnect() override
Increment and retrieve counters for total peer disconnects, and disconnects we initiate for excessive...
Definition: OverlayImpl.h:345
ripple::strHex
std::string strHex(FwdIt begin, FwdIt end)
Definition: strHex.h:45
std::vector::end
T end(T... args)
ripple::PeerFinder::Manager::on_failure
virtual void on_failure(std::shared_ptr< Slot > const &slot)=0
Called when an outbound connection is deemed to have failed.
ripple::Job::getType
JobType getType() const
Definition: Job.cpp:52
ripple::PeerImp::makePrefix
static std::string makePrefix(id_t id)
Definition: PeerImp.cpp:627
ripple::PeerImp::usage_
Resource::Consumer usage_
Definition: PeerImp.h:150
ripple::RangeSet
boost::icl::interval_set< T, std::less, ClosedInterval< T > > RangeSet
A set of closed intervals over the domain T.
Definition: RangeSet.h:69
std::setw
T setw(T... args)
numeric
ripple::OverlayImpl
Definition: OverlayImpl.h:57
beast::IP::Endpoint::from_string_checked
static boost::optional< Endpoint > from_string_checked(std::string const &s)
Create an Endpoint from a string.
Definition: IPEndpoint.cpp:35
std::max
T max(T... args)
beast::IP::Endpoint::at_port
Endpoint at_port(Port port) const
Returns a new Endpoint with a different port.
Definition: IPEndpoint.h:70
ripple::ValidatorList::trusted
bool trusted(PublicKey const &identity) const
Returns true if public key is trusted.
Definition: ValidatorList.cpp:561
ripple::OverlayImpl::findPeerByShortID
std::shared_ptr< Peer > findPeerByShortID(Peer::id_t const &id) const override
Returns the peer with the matching short id, or null.
Definition: OverlayImpl.cpp:1188
ripple::Serializer::getLength
int getLength() const
Definition: Serializer.h:197
ripple::OverlayImpl::reportTraffic
void reportTraffic(TrafficCount::category cat, bool isInbound, int bytes)
Definition: OverlayImpl.cpp:735
ripple::sfLastLedgerSequence
const SF_UINT32 sfLastLedgerSequence
ripple::JobQueue::makeLoadEvent
std::unique_ptr< LoadEvent > makeLoadEvent(JobType t, std::string const &name)
Return a scoped LoadEvent.
Definition: JobQueue.cpp:181
ripple::PeerImp::getPeerShardInfo
boost::optional< hash_map< PublicKey, ShardInfo > > getPeerShardInfo() const
Return any known shard info from this peer and its sub peers.
Definition: PeerImp.cpp:570
ripple::PeerImp::shardInfoMutex_
std::mutex shardInfoMutex_
Definition: PeerImp.h:165
ripple::Resource::Consumer::charge
Disposition charge(Charge const &fee)
Apply a load charge to the consumer.
Definition: Consumer.cpp:99
ripple::PeerImp::overlay_
OverlayImpl & overlay_
Definition: PeerImp.h:90
ripple::SHAMap::getNodeFat
bool getNodeFat(SHAMapNodeID const &wanted, std::vector< SHAMapNodeID > &nodeIDs, std::vector< Blob > &rawNodes, bool fatLeaves, std::uint32_t depth) const
Definition: SHAMapSync.cpp:426
std::unique_ptr< stream_type >
ripple::PeerImp::tracking_
std::atomic< Tracking > tracking_
Definition: PeerImp.h:96
ripple::PeerImp::nameMutex_
boost::shared_mutex nameMutex_
Definition: PeerImp.h:102
ripple::PeerImp::cancelTimer
void cancelTimer()
Definition: PeerImp.cpp:618
ripple::invokeProtocolMessage
std::pair< std::size_t, boost::system::error_code > invokeProtocolMessage(Buffers const &buffers, Handler &handler, std::size_t &hint)
Calls the handler for up to one protocol message in the passed buffers.
Definition: ProtocolMessage.h:270
ripple::PeerImp::fee_
Resource::Charge fee_
Definition: PeerImp.h:151
ripple::stringIsUint256Sized
static bool stringIsUint256Sized(std::string const &pBuffStr)
Definition: PeerImp.cpp:125
ripple::PeerImp::stop
void stop() override
Definition: PeerImp.cpp:193
ripple::Application::getHashRouter
virtual HashRouter & getHashRouter()=0
ripple::PeerImp::Tracking::converged
@ converged
ripple::PeerImp::id_
const id_t id_
Definition: PeerImp.h:73
ripple::OverlayImpl::for_each
void for_each(UnaryFunc &&f) const
Definition: OverlayImpl.h:260
std::ref
T ref(T... args)
ripple::RCLCxPeerPos::checkSign
bool checkSign() const
Verify the signing hash of the proposal.
Definition: RCLCxPeerPos.cpp:55
std::exception::what
T what(T... args)
std::shared_lock
STL class.
ripple::PeerImp::fail
void fail(std::string const &reason)
Definition: PeerImp.cpp:528
ripple::PeerImp::cluster
bool cluster() const override
Returns true if this connection is a member of the cluster.
Definition: PeerImp.cpp:297
ripple::PeerImp::p_journal_
const beast::Journal p_journal_
Definition: PeerImp.h:77
Json::Value
Represents a JSON value.
Definition: json_value.h:145
ripple::Config::MAX_UNKNOWN_TIME
std::chrono::seconds MAX_UNKNOWN_TIME
Definition: Config.h:209
ripple::Peer
Represents a peer connection in the overlay.
Definition: ripple/overlay/Peer.h:43
ripple::Config::MAX_DIVERGED_TIME
std::chrono::seconds MAX_DIVERGED_TIME
Definition: Config.h:212
ripple::PeerImp::ShardInfo::shardIndexes
RangeSet< std::uint32_t > shardIndexes
Definition: PeerImp.h:57
ripple::jtLEDGER_REQ
@ jtLEDGER_REQ
Definition: Job.h:45
ripple::PeerImp::onReadMessage
void onReadMessage(error_code ec, std::size_t bytes_transferred)
Definition: PeerImp.cpp:850
ripple::ConsensusProposal< NodeID, uint256, uint256 >
std::chrono::steady_clock::now
T now(T... args)