rippled
PeerImp.cpp
1 //------------------------------------------------------------------------------
2 /*
3  This file is part of rippled: https://github.com/ripple/rippled
4  Copyright (c) 2012, 2013 Ripple Labs Inc.
5 
6  Permission to use, copy, modify, and/or distribute this software for any
7  purpose with or without fee is hereby granted, provided that the above
8  copyright notice and this permission notice appear in all copies.
9 
10  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 //==============================================================================
19 
20 #include <ripple/overlay/impl/PeerImp.h>
21 #include <ripple/overlay/impl/Tuning.h>
22 #include <ripple/app/consensus/RCLValidations.h>
23 #include <ripple/app/ledger/InboundLedgers.h>
24 #include <ripple/app/ledger/LedgerMaster.h>
25 #include <ripple/app/ledger/InboundTransactions.h>
26 #include <ripple/app/misc/HashRouter.h>
27 #include <ripple/app/misc/LoadFeeTrack.h>
28 #include <ripple/app/misc/NetworkOPs.h>
29 #include <ripple/app/misc/Transaction.h>
30 #include <ripple/app/misc/ValidatorList.h>
31 #include <ripple/app/tx/apply.h>
32 #include <ripple/basics/base64.h>
33 #include <ripple/basics/random.h>
34 #include <ripple/basics/safe_cast.h>
35 #include <ripple/basics/UptimeClock.h>
36 #include <ripple/beast/core/LexicalCast.h>
37 #include <ripple/beast/core/SemanticVersion.h>
38 #include <ripple/nodestore/DatabaseShard.h>
39 #include <ripple/overlay/Cluster.h>
40 #include <ripple/overlay/predicates.h>
41 #include <ripple/protocol/digest.h>
42 
43 #include <boost/algorithm/clamp.hpp>
44 #include <boost/algorithm/string/predicate.hpp>
45 #include <boost/algorithm/string.hpp>
46 #include <boost/beast/core/ostream.hpp>
47 
48 #include <algorithm>
49 #include <memory>
50 #include <sstream>
51 #include <numeric>
52 
53 using namespace std::chrono_literals;
54 
55 namespace ripple {
56 
57 PeerImp::PeerImp (Application& app, id_t id,
58  std::shared_ptr<PeerFinder::Slot> const& slot, http_request_type&& request,
59  PublicKey const& publicKey,
60  ProtocolVersion protocol, Resource::Consumer consumer,
61  std::unique_ptr<stream_type>&& stream_ptr,
62  OverlayImpl& overlay)
63  : Child (overlay)
64  , app_ (app)
65  , id_(id)
66  , sink_(app_.journal("Peer"), makePrefix(id))
67  , p_sink_(app_.journal("Protocol"), makePrefix(id))
68  , journal_ (sink_)
69  , p_journal_(p_sink_)
70  , stream_ptr_(std::move(stream_ptr))
71  , socket_ (stream_ptr_->next_layer().socket())
72  , stream_ (*stream_ptr_)
73  , strand_ (socket_.get_executor())
74  , timer_ (waitable_timer{socket_.get_executor()})
75  , remote_address_ (slot->remote_endpoint())
76  , overlay_ (overlay)
77  , m_inbound (true)
78  , protocol_ (protocol)
79  , state_ (State::active)
80  , sanity_ (Sanity::unknown)
81  , insaneTime_ (clock_type::now())
82  , publicKey_(publicKey)
83  , creationTime_ (clock_type::now())
84  , usage_(consumer)
85  , fee_ (Resource::feeLightPeer)
86  , slot_ (slot)
87  , request_(std::move(request))
88  , headers_(request_)
89  , compressionEnabled_(headers_["X-Offer-Compression"] == "lz4" ? Compressed::On : Compressed::Off)
90 {
91 }
92 
93 PeerImp::~PeerImp ()
94 {
95  const bool inCluster {cluster()};
96 
97  if (state_ == State::active)
98  overlay_.onPeerDeactivate(id_);
101 
102  if (inCluster)
103  {
104  JLOG(journal_.warn()) << getName() << " left cluster";
105  }
106 }
107 
108 // Helper function to check for valid uint256 values in protobuf buffers
109 static
110 bool
111 stringIsUint256Sized (std::string const& pBuffStr)
112 {
113  return pBuffStr.size() == uint256::size();
114 }
115 
116 void
117 PeerImp::run ()
118 {
119  if(! strand_.running_in_this_thread())
120  return post(strand_, std::bind(&PeerImp::run, shared_from_this()));
121 
122  // We need to decipher the ledger hashes sent in the handshake headers; they may be hex or base64 encoded.
123  auto parseLedgerHash = [](std::string const& value) -> boost::optional<uint256>
124  {
125  uint256 ret;
126  if (ret.SetHexExact(value))
127  return { ret };
128 
129  auto const s = base64_decode(value);
130  if (s.size() != uint256::size())
131  return boost::none;
132  return uint256{ s };
133  };
134 
135  boost::optional<uint256> closed;
136  boost::optional<uint256> previous;
137 
138  if (auto const iter = headers_.find("Closed-Ledger"); iter != headers_.end())
139  {
140  closed = parseLedgerHash(iter->value().to_string());
141 
142  if (!closed)
143  fail("Malformed handshake data (1)");
144  }
145 
146  if (auto const iter = headers_.find("Previous-Ledger"); iter != headers_.end())
147  {
148  previous = parseLedgerHash(iter->value().to_string());
149 
150  if (!previous)
151  fail("Malformed handshake data (2)");
152  }
153 
154  if (previous && !closed)
155  fail("Malformed handshake data (3)");
156 
157  {
158  std::lock_guard sl(recentLock_);
159  if (closed)
160  closedLedgerHash_ = *closed;
161  if (previous)
162  previousLedgerHash_ = *previous;
163  }
164 
165  if (m_inbound)
166  {
167  doAccept();
168  }
169  else
170  {
171  assert (state_ == State::active);
172  // XXX Set timer: connection is in grace period to be useful.
173  // XXX Set timer: connection idle (idle may vary depending on connection type.)
174  doProtocolStart();
175  }
176 
177  // Request shard info from peer
178  protocol::TMGetPeerShardInfo tmGPS;
179  tmGPS.set_hops(0);
180  send(std::make_shared<Message>(tmGPS, protocol::mtGET_PEER_SHARD_INFO));
181 
182  setTimer();
183 }
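// Illustrative note (not part of the original source): the Closed-Ledger and
// Previous-Ledger handshake headers parsed above may carry the 256-bit hash
// either as 64 hexadecimal characters or as base64 (44 characters, including
// padding, for the 32 raw bytes). Anything that decodes to a different size
// fails the handshake with "Malformed handshake data".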
184 
185 void
186 PeerImp::stop ()
187 {
188  if(! strand_.running_in_this_thread())
189  return post(strand_, std::bind(&PeerImp::stop, shared_from_this()));
190  if (socket_.is_open())
191  {
192  // The rationale for using different severity levels is that
193  // outbound connections are under our control and may be logged
194  // at a higher level, but inbound connections are more numerous and
195  // uncontrolled so to prevent log flooding the severity is reduced.
196  //
197  if(m_inbound)
198  {
199  JLOG(journal_.debug()) << "Stop";
200  }
201  else
202  {
203  JLOG(journal_.info()) << "Stop";
204  }
205  }
206  close();
207 }
208 
209 //------------------------------------------------------------------------------
210 
211 void
212 PeerImp::send (std::shared_ptr<Message> const& m)
213 {
214  if (! strand_.running_in_this_thread())
215  return post(strand_, std::bind(&PeerImp::send, shared_from_this(), m));
216  if(gracefulClose_)
217  return;
218  if(detaching_)
219  return;
220 
221  overlay_.reportTraffic (
222  safe_cast<TrafficCount::category>(m->getCategory()),
223  false, static_cast<int>(m->getBuffer(compressionEnabled_).size()));
224 
225  auto sendq_size = send_queue_.size();
226 
227  if (sendq_size < Tuning::targetSendQueue)
228  {
229  // To detect a peer that does not read from their
230  // side of the connection, we expect a peer to have
231  // a small send queue periodically
232  large_sendq_ = 0;
233  }
234  else if (journal_.active(beast::severities::kDebug) &&
235  (sendq_size % Tuning::sendQueueLogFreq) == 0)
236  {
237  std::string const name {getName()};
238  JLOG (journal_.debug()) <<
239  (name.empty() ? remote_address_.to_string() : name) <<
240  " sendq: " << sendq_size;
241  }
242 
243  send_queue_.push(m);
244 
245  if(sendq_size != 0)
246  return;
247 
248  boost::asio::async_write(
249  stream_,
250  boost::asio::buffer(send_queue_.front()->getBuffer(compressionEnabled_)),
251  bind_executor(
252  strand_,
253  std::bind(
254  &PeerImp::onWriteMessage,
255  shared_from_this(),
256  std::placeholders::_1,
257  std::placeholders::_2)));
258 }
259 
260 void
261 PeerImp::charge (Resource::Charge fee)
262 {
263  if ((usage_.charge(fee) == Resource::drop) &&
264  usage_.disconnect() && strand_.running_in_this_thread())
265  {
266  // Sever the connection
268  fail("charge: Resources");
269  }
270 }
271 
272 //------------------------------------------------------------------------------
273 
274 bool
275 PeerImp::crawl() const
276 {
277  auto const iter = headers_.find("Crawl");
278  if (iter == headers_.end())
279  return false;
280  return boost::iequals(iter->value(), "public");
281 }
282 
283 bool
284 PeerImp::cluster() const
285 {
286  return static_cast<bool>(app_.cluster().member(publicKey_));
287 }
288 
289 std::string
290 PeerImp::getVersion () const
291 {
292  if (m_inbound)
293  return headers_["User-Agent"].to_string();
294  return headers_["Server"].to_string();
295 }
296 
297 Json::Value
298 PeerImp::json()
299 {
300  Json::Value ret (Json::objectValue);
301 
302  ret[jss::public_key] = toBase58 (
303  TokenType::NodePublic, publicKey_);
304  ret[jss::address] = remote_address_.to_string();
305 
306  if (m_inbound)
307  ret[jss::inbound] = true;
308 
309  if (cluster())
310  {
311  ret[jss::cluster] = true;
312 
313  std::string name {getName()};
314  if (!name.empty ())
315  ret[jss::name] = std::move(name);
316  }
317 
318  ret[jss::load] = usage_.balance ();
319 
320  {
321  auto const version = getVersion();
322  if (!version.empty())
323  ret[jss::version] = version;
324  }
325 
326  ret[jss::protocol] = to_string (protocol_);
327 
328  {
329  std::lock_guard sl(recentLock_);
330  if (latency_)
331  ret[jss::latency] = static_cast<Json::UInt> (latency_->count());
332  }
333 
334  ret[jss::uptime] = static_cast<Json::UInt>(
335  std::chrono::duration_cast<std::chrono::seconds>(uptime()).count());
336 
337  std::uint32_t minSeq, maxSeq;
338  ledgerRange(minSeq, maxSeq);
339 
340  if ((minSeq != 0) || (maxSeq != 0))
341  ret[jss::complete_ledgers] = std::to_string(minSeq) +
342  " - " + std::to_string(maxSeq);
343 
344  switch (sanity_.load ())
345  {
346  case Sanity::insane:
347  ret[jss::sanity] = "insane";
348  break;
349 
350  case Sanity::unknown:
351  ret[jss::sanity] = "unknown";
352  break;
353 
354  case Sanity::sane:
355  // Nothing to do here
356  break;
357  }
358 
359  uint256 closedLedgerHash;
360  protocol::TMStatusChange last_status;
361  {
362  std::lock_guard sl(recentLock_);
363  closedLedgerHash = closedLedgerHash_;
364  last_status = last_status_;
365  }
366 
367  if (closedLedgerHash != beast::zero)
368  ret[jss::ledger] = to_string (closedLedgerHash);
369 
370  if (last_status.has_newstatus ())
371  {
372  switch (last_status.newstatus ())
373  {
374  case protocol::nsCONNECTING:
375  ret[jss::status] = "connecting";
376  break;
377 
378  case protocol::nsCONNECTED:
379  ret[jss::status] = "connected";
380  break;
381 
382  case protocol::nsMONITORING:
383  ret[jss::status] = "monitoring";
384  break;
385 
386  case protocol::nsVALIDATING:
387  ret[jss::status] = "validating";
388  break;
389 
390  case protocol::nsSHUTTING:
391  ret[jss::status] = "shutting";
392  break;
393 
394  default:
395  JLOG(p_journal_.warn()) <<
396  "Unknown status: " << last_status.newstatus ();
397  }
398  }
399 
400  ret[jss::metrics] = Json::Value(Json::objectValue);
401  ret[jss::metrics][jss::total_bytes_recv] = std::to_string(metrics_.recv.total_bytes());
402  ret[jss::metrics][jss::total_bytes_sent] = std::to_string(metrics_.sent.total_bytes());
403  ret[jss::metrics][jss::avg_bps_recv] = std::to_string(metrics_.recv.average_bytes());
404  ret[jss::metrics][jss::avg_bps_sent] = std::to_string(metrics_.sent.average_bytes());
405 
406  return ret;
407 }
408 
409 bool
410 PeerImp::supportsFeature (ProtocolFeature f) const
411 {
412  switch (f)
413  {
414  case ProtocolFeature::ValidatorListPropagation:
415  return protocol_ >= make_protocol(2, 1);
416  }
417  return false;
418 }
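// Illustrative note (not part of the original source): the only feature gated
// here is ProtocolFeature::ValidatorListPropagation, which requires protocol
// version 2.1 or later. onMessage(TMValidatorList) below performs the same
// check to decide whether a validator list from this peer is expected.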
419 
420 //------------------------------------------------------------------------------
421 
422 bool
423 PeerImp::hasLedger (uint256 const& hash, std::uint32_t seq) const
424 {
425  {
426  std::lock_guard sl(recentLock_);
427  if ((seq != 0) && (seq >= minLedger_) && (seq <= maxLedger_) &&
428  (sanity_.load() == Sanity::sane))
429  return true;
430  if (std::find(recentLedgers_.begin(),
431  recentLedgers_.end(), hash) != recentLedgers_.end())
432  return true;
433  }
434 
435  return seq >= app_.getNodeStore().earliestLedgerSeq() &&
436  hasShard(NodeStore::seqToShardIndex(seq));
437 }
438 
439 void
440 PeerImp::ledgerRange (std::uint32_t& minSeq,
441  std::uint32_t& maxSeq) const
442 {
443  std::lock_guard sl(recentLock_);
444 
445  minSeq = minLedger_;
446  maxSeq = maxLedger_;
447 }
448 
449 bool
450 PeerImp::hasShard (std::uint32_t shardIndex) const
451 {
453  auto const it {shardInfo_.find(publicKey_)};
454  if (it != shardInfo_.end())
455  return boost::icl::contains(it->second.shardIndexes, shardIndex);
456  return false;
457 }
458 
459 bool
460 PeerImp::hasTxSet (uint256 const& hash) const
461 {
462  std::lock_guard sl(recentLock_);
463  return std::find (recentTxSets_.begin(),
464  recentTxSets_.end(), hash) != recentTxSets_.end();
465 }
466 
467 void
468 PeerImp::cycleStatus ()
469 {
470  // Operations on closedLedgerHash_ and previousLedgerHash_ must be
471  // guarded by recentLock_.
472  std::lock_guard sl(recentLock_);
473  previousLedgerHash_ = closedLedgerHash_;
474  closedLedgerHash_.zero ();
475 }
476 
477 bool
478 PeerImp::hasRange (std::uint32_t uMin, std::uint32_t uMax)
479 {
480  std::lock_guard sl(recentLock_);
481  return (sanity_ != Sanity::insane) &&
482  (uMin >= minLedger_) &&
483  (uMax <= maxLedger_);
484 }
485 
486 //------------------------------------------------------------------------------
487 
488 void
489 PeerImp::close()
490 {
491  assert(strand_.running_in_this_thread());
492  if (socket_.is_open())
493  {
494  detaching_ = true; // DEPRECATED
495  error_code ec;
496  timer_.cancel(ec);
497  socket_.close(ec);
499  if(m_inbound)
500  {
501  JLOG(journal_.debug()) << "Closed";
502  }
503  else
504  {
505  JLOG(journal_.info()) << "Closed";
506  }
507  }
508 }
509 
510 void
511 PeerImp::fail(std::string const& reason)
512 {
513  if(! strand_.running_in_this_thread())
514  return post(
515  strand_,
516  std::bind(
517  (void (Peer::*)(std::string const&)) & PeerImp::fail,
518  shared_from_this(),
519  reason));
520  if (journal_.active (beast::severities::kWarning) && socket_.is_open())
521  {
522  std::string const name {getName()};
523  JLOG (journal_.warn()) <<
524  (name.empty() ? remote_address_.to_string() : name) <<
525  " failed: " << reason;
526  }
527  close();
528 }
529 
530 void
531 PeerImp::fail(std::string const& name, error_code ec)
532 {
533  assert(strand_.running_in_this_thread());
534  if (socket_.is_open())
535  {
536  JLOG(journal_.warn()) << name << " from " <<
538  " at " << remote_address_.to_string() <<
539  ": " << ec.message();
540  }
541  close();
542 }
543 
544 boost::optional<RangeSet<std::uint32_t>>
545 PeerImp::getShardIndexes() const
546 {
548  auto it{shardInfo_.find(publicKey_)};
549  if (it != shardInfo_.end())
550  return it->second.shardIndexes;
551  return boost::none;
552 }
553 
554 boost::optional<hash_map<PublicKey, PeerImp::ShardInfo>>
555 PeerImp::getPeerShardInfo() const
556 {
558  if (!shardInfo_.empty())
559  return shardInfo_;
560  return boost::none;
561 }
562 
563 void
564 PeerImp::gracefulClose()
565 {
566  assert(strand_.running_in_this_thread());
567  assert(socket_.is_open());
568  assert(! gracefulClose_);
569  gracefulClose_ = true;
570 #if 0
571  // Flush messages
572  while(send_queue_.size() > 1)
573  send_queue_.pop_back();
574 #endif
575  if (send_queue_.size() > 0)
576  return;
577  setTimer();
578  stream_.async_shutdown(bind_executor(
579  strand_,
580  std::bind(
581  &PeerImp::onShutdown, shared_from_this(), std::placeholders::_1)));
582 }
583 
584 void
585 PeerImp::setTimer()
586 {
587  error_code ec;
588  timer_.expires_from_now( std::chrono::seconds(
589  Tuning::timerSeconds), ec);
590 
591  if (ec)
592  {
593  JLOG(journal_.error()) << "setTimer: " << ec.message();
594  return;
595  }
596  timer_.async_wait(bind_executor(
597  strand_,
598  std::bind(
599  &PeerImp::onTimer, shared_from_this(), std::placeholders::_1)));
600 }
601 
602 // convenience for ignoring the error code
603 void
604 PeerImp::cancelTimer()
605 {
606  error_code ec;
607  timer_.cancel(ec);
608 }
609 
610 //------------------------------------------------------------------------------
611 
612 std::string
613 PeerImp::makePrefix(id_t id)
614 {
615  std::stringstream ss;
616  ss << "[" << std::setfill('0') << std::setw(3) << id << "] ";
617  return ss.str();
618 }
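// Example (illustrative): makePrefix(7) yields "[007] ", so every line this
// peer writes through journal_ or p_journal_ is tagged with its peer id,
// e.g. "[007] Protocol: ...".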
619 
620 void
621 PeerImp::onTimer (error_code const& ec)
622 {
623  if (! socket_.is_open())
624  return;
625 
626  if (ec == boost::asio::error::operation_aborted)
627  return;
628 
629  if (ec)
630  {
631  // This should never happen
632  JLOG(journal_.error()) << "onTimer: " << ec.message();
633  return close();
634  }
635 
636  if (large_sendq_++ >= Tuning::sendqIntervals)
637  {
638  fail ("Large send queue");
639  return;
640  }
641 
642  bool failedNoPing {false};
643  boost::optional<std::uint32_t> pingSeq;
644  // Operations on lastPingSeq_, lastPingTime_, no_ping_, and latency_
645  // must be guarded by recentLock_.
646  {
647  std::lock_guard sl(recentLock_);
648  if (no_ping_++ >= Tuning::noPing)
649  {
650  failedNoPing = true;
651  }
652  else if (!lastPingSeq_)
653  {
654  // Make the sequence unpredictable enough to prevent guessing
655  lastPingSeq_ = rand_int<std::uint32_t>();
656  lastPingTime_ = clock_type::now();
657  pingSeq = lastPingSeq_;
658  }
659  else
660  {
661  // We have an outstanding ping, raise latency
662  auto const minLatency =
663  std::chrono::duration_cast<std::chrono::milliseconds>
664  (clock_type::now() - lastPingTime_);
665 
666  if (latency_ < minLatency)
667  latency_ = minLatency;
668  }
669  }
670 
671  if (failedNoPing)
672  {
673  fail ("No ping reply received");
674  return;
675  }
676 
677  if (pingSeq)
678  {
679  protocol::TMPing message;
680  message.set_type (protocol::TMPing::ptPING);
681  message.set_seq (*pingSeq);
682 
683  send (std::make_shared<Message> (message, protocol::mtPING));
684  }
685 
686  setTimer();
687 }
688 
689 void
690 PeerImp::onShutdown(error_code ec)
691 {
692  cancelTimer();
693  // If we don't get eof then something went wrong
694  if (! ec)
695  {
696  JLOG(journal_.error()) << "onShutdown: expected error condition";
697  return close();
698  }
699  if (ec != boost::asio::error::eof)
700  return fail("onShutdown", ec);
701  close();
702 }
703 
704 //------------------------------------------------------------------------------
705 
706 void PeerImp::doAccept()
707 {
708  assert(read_buffer_.size() == 0);
709 
710  JLOG(journal_.debug()) << "doAccept: " << remote_address_;
711 
712  auto const sharedValue = makeSharedValue(*stream_ptr_, journal_);
713 
714  // This shouldn't fail since we already computed
715  // the shared value successfully in OverlayImpl
716  if(! sharedValue)
717  return fail("makeSharedValue: Unexpected failure");
718 
719  // TODO Apply headers to connection state.
720 
721  boost::beast::ostream(write_buffer_) << makeResponse(
723  request_, remote_address_.address(), *sharedValue);
724 
725  JLOG(journal_.info()) << "Protocol: " <<
727  JLOG(journal_.info()) << "Public Key: " <<
729 
730  if (auto member = app_.cluster().member(publicKey_))
731  {
732  {
734  name_ = *member;
735  }
736  JLOG(journal_.info()) << "Cluster name: " << *member;
737  }
738 
740 
741  // XXX Set timer: connection is in grace period to be useful.
742  // XXX Set timer: connection idle (idle may vary depending on connection type.)
743 
744  onWriteResponse(error_code(), 0);
745 }
746 
747 http_response_type
748 PeerImp::makeResponse (bool crawl,
749  http_request_type const& req,
750  beast::IP::Address remote_ip,
751  uint256 const& sharedValue)
752 {
753  http_response_type resp;
754  resp.result(boost::beast::http::status::switching_protocols);
755  resp.version(req.version());
756  resp.insert("Connection", "Upgrade");
757  resp.insert("Upgrade", to_string(protocol_));
758  resp.insert("Connect-As", "Peer");
759  resp.insert("Server", BuildInfo::getFullVersionString());
760  resp.insert("Crawl", crawl ? "public" : "private");
761  if (req["X-Offer-Compression"] == "lz4" && app_.config().COMPRESSION)
762  resp.insert("X-Offer-Compression", "lz4");
763 
764  buildHandshake(resp, sharedValue, overlay_.setup().networkID,
765  overlay_.setup().public_ip, remote_ip, app_);
766 
767  return resp;
768 }
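// Illustrative sketch (not part of the original source) of the upgrade
// response assembled above; exact values depend on the build, configuration,
// and negotiated protocol:
//
//   HTTP/1.1 101 Switching Protocols
//   Connection: Upgrade
//   Upgrade: <to_string(protocol_)>
//   Connect-As: Peer
//   Server: <BuildInfo::getFullVersionString()>
//   Crawl: public                    (or "private")
//   X-Offer-Compression: lz4         (only if the peer offered lz4 and
//                                     COMPRESSION is enabled)
//
// followed by the handshake fields appended by buildHandshake().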
769 
770 // Called repeatedly to send the bytes in the response
771 void
772 PeerImp::onWriteResponse (error_code ec, std::size_t bytes_transferred)
773 {
774  if(! socket_.is_open())
775  return;
776  if(ec == boost::asio::error::operation_aborted)
777  return;
778  if(ec)
779  return fail("onWriteResponse", ec);
780  if(auto stream = journal_.trace())
781  {
782  if (bytes_transferred > 0)
783  stream <<
784  "onWriteResponse: " << bytes_transferred << " bytes";
785  else
786  stream << "onWriteResponse";
787  }
788 
789  write_buffer_.consume (bytes_transferred);
790  if (write_buffer_.size() == 0)
791  return doProtocolStart();
792 
793  stream_.async_write_some(
794  write_buffer_.data(),
795  bind_executor(
796  strand_,
797  std::bind(
798  &PeerImp::onWriteResponse,
799  shared_from_this(),
800  std::placeholders::_1,
801  std::placeholders::_2)));
802 }
803 
804 std::string
805 PeerImp::getName() const
806 {
808  return name_;
809 }
810 
811 //------------------------------------------------------------------------------
812 
813 // Protocol logic
814 
815 void
816 PeerImp::doProtocolStart()
817 {
818  onReadMessage(error_code(), 0);
819 
820  // Send all the validator lists that have been loaded
822  {
824  [&](std::string const& manifest,
825  std::string const& blob, std::string const& signature,
826  std::uint32_t version,
827  PublicKey const& pubKey, std::size_t sequence,
828  uint256 const& hash)
829  {
830  protocol::TMValidatorList vl;
831 
832  vl.set_manifest(manifest);
833  vl.set_blob(blob);
834  vl.set_signature(signature);
835  vl.set_version(version);
836 
837  JLOG(p_journal_.debug()) << "Sending validator list for " <<
838  strHex(pubKey) << " with sequence " <<
839  sequence << " to " <<
840  remote_address_.to_string() << " (" << id_ << ")";
841  auto m = std::make_shared<Message>(vl, protocol::mtVALIDATORLIST);
842  send(m);
843  // Don't send it next time.
845  setPublisherListSequence(pubKey, sequence);
846  }
847  );
848  }
849 
850  protocol::TMManifests tm;
851 
852  app_.validatorManifests().for_each_manifest (
853  [&tm](std::size_t s){tm.mutable_list()->Reserve(s);},
854  [&tm, &hr = app_.getHashRouter()](Manifest const& manifest)
855  {
856  auto const& s = manifest.serialized;
857  auto& tm_e = *tm.add_list();
858  tm_e.set_stobject(s.data(), s.size());
859  hr.addSuppression(manifest.hash());
860  });
861 
862  if (tm.list_size() > 0)
863  {
864  auto m = std::make_shared<Message>(tm, protocol::mtMANIFESTS);
865  send (m);
866  }
867 }
868 
869 // Called repeatedly with protocol message data
870 void
871 PeerImp::onReadMessage (error_code ec, std::size_t bytes_transferred)
872 {
873  if(! socket_.is_open())
874  return;
875  if(ec == boost::asio::error::operation_aborted)
876  return;
877  if(ec == boost::asio::error::eof)
878  {
879  JLOG(journal_.info()) << "EOF";
880  return gracefulClose();
881  }
882  if(ec)
883  return fail("onReadMessage", ec);
884  if(auto stream = journal_.trace())
885  {
886  if (bytes_transferred > 0)
887  stream <<
888  "onReadMessage: " << bytes_transferred << " bytes";
889  else
890  stream << "onReadMessage";
891  }
892 
893  metrics_.recv.add_message(bytes_transferred);
894 
895  read_buffer_.commit (bytes_transferred);
896 
897  while (read_buffer_.size() > 0)
898  {
899  std::size_t bytes_consumed;
900  std::tie(bytes_consumed, ec) = invokeProtocolMessage(
901  read_buffer_.data(), *this);
902  if (ec)
903  return fail("onReadMessage", ec);
904  if (! socket_.is_open())
905  return;
906  if(gracefulClose_)
907  return;
908  if (bytes_consumed == 0)
909  break;
910  read_buffer_.consume (bytes_consumed);
911  }
912  // Timeout on writes only
913  stream_.async_read_some(
914  read_buffer_.prepare (Tuning::readBufferBytes),
915  bind_executor(
916  strand_,
917  std::bind(
918  &PeerImp::onReadMessage,
919  shared_from_this(),
920  std::placeholders::_1,
921  std::placeholders::_2)));
922 }
923 
924 void
925 PeerImp::onWriteMessage (error_code ec, std::size_t bytes_transferred)
926 {
927  if(! socket_.is_open())
928  return;
929  if(ec == boost::asio::error::operation_aborted)
930  return;
931  if(ec)
932  return fail("onWriteMessage", ec);
933  if(auto stream = journal_.trace())
934  {
935  if (bytes_transferred > 0)
936  stream <<
937  "onWriteMessage: " << bytes_transferred << " bytes";
938  else
939  stream << "onWriteMessage";
940  }
941 
942  metrics_.sent.add_message(bytes_transferred);
943 
944  assert(! send_queue_.empty());
945  send_queue_.pop();
946  if (! send_queue_.empty())
947  {
948  // Timeout on writes only
949  return boost::asio::async_write(
950  stream_,
951  boost::asio::buffer(send_queue_.front()->getBuffer(compressionEnabled_)),
952  bind_executor(
953  strand_,
954  std::bind(
955  &PeerImp::onWriteMessage,
956  shared_from_this(),
957  std::placeholders::_1,
958  std::placeholders::_2)));
959  }
960 
961  if (gracefulClose_)
962  {
963  return stream_.async_shutdown(bind_executor(
964  strand_,
965  std::bind(
966  &PeerImp::onShutdown,
967  shared_from_this(),
968  std::placeholders::_1)));
969  }
970 }
971 
972 //------------------------------------------------------------------------------
973 //
974 // ProtocolHandler
975 //
976 //------------------------------------------------------------------------------
977 
978 void
979 PeerImp::onMessageUnknown (std::uint16_t type)
980 {
981  // TODO
982 }
983 
984 void
985 PeerImp::onMessageBegin (std::uint16_t type,
986  std::shared_ptr <::google::protobuf::Message> const& m,
987  std::size_t size)
988 {
989  load_event_ = app_.getJobQueue().makeLoadEvent (
990  jtPEER, protocolMessageName(type));
991  fee_ = Resource::feeLightPeer;
992  overlay_.reportTraffic (TrafficCount::categorize(*m, type, true),
993  true, static_cast<int>(size));
994 }
995 
996 void
997 PeerImp::onMessageEnd (std::uint16_t,
998  std::shared_ptr <::google::protobuf::Message> const&)
999 {
1000  load_event_.reset();
1001  charge (fee_);
1002 }
1003 
1004 void
1005 PeerImp::onMessage (std::shared_ptr <protocol::TMManifests> const& m)
1006 {
1007  // VFALCO What's the right job type?
1008  auto that = shared_from_this();
1009  app_.getJobQueue().addJob (
1010  jtVALIDATION_ut, "receiveManifests",
1011  [this, that, m] (Job&) { overlay_.onManifests(m, that); });
1012 }
1013 
1014 void
1015 PeerImp::onMessage (std::shared_ptr <protocol::TMPing> const& m)
1016 {
1017  if (m->type () == protocol::TMPing::ptPING)
1018  {
1019  // We have received a ping request, reply with a pong
1021  m->set_type (protocol::TMPing::ptPONG);
1022  send (std::make_shared<Message> (*m, protocol::mtPING));
1023  return;
1024  }
1025 
1026  if (m->type () == protocol::TMPing::ptPONG)
1027  {
1028  // Operations on lastPingSeq_, lastPingTime_, no_ping_, and latency_
1029  // must be guarded by recentLock_.
1030  std::lock_guard sl(recentLock_);
1031 
1032  if (m->has_seq() && m->seq() == lastPingSeq_)
1033  {
1034  no_ping_ = 0;
1035 
1036  // Only reset the ping sequence if we actually received a
1037  // PONG with the correct cookie. That way, any peers which
1038  // respond with incorrect cookies will eventually time out.
1039  lastPingSeq_.reset();
1040 
1041  // Update latency estimate
1042  auto const estimate =
1043  std::chrono::duration_cast<std::chrono::milliseconds>
1044  (clock_type::now() - lastPingTime_);
1045 
1046  // Calculate the cumulative moving average of the latency:
1047  if (latency_)
1048  latency_ = (*latency_ * 7 + estimate) / 8;
1049  else
1050  latency_ = estimate;
1051  }
1052 
1053  return;
1054  }
1055 }
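// Illustrative arithmetic (not part of the original source): latency_ is a
// cumulative moving average weighted 7:1 toward the previous estimate. With
// a previous estimate of 80ms and a new round-trip sample of 160ms, the
// update above gives (80 * 7 + 160) / 8 = 90ms, so a single slow ping nudges
// the estimate rather than replacing it.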
1056 
1057 void
1058 PeerImp::onMessage (std::shared_ptr <protocol::TMCluster> const& m)
1059 {
1060  // VFALCO NOTE I think we should drop the peer immediately
1061  if (! cluster())
1062  {
1063  fee_ = Resource::feeUnwantedData;
1064  return;
1065  }
1066 
1067  for (int i = 0; i < m->clusternodes().size(); ++i)
1068  {
1069  protocol::TMClusterNode const& node = m->clusternodes(i);
1070 
1071  std::string name;
1072  if (node.has_nodename())
1073  name = node.nodename();
1074 
1075  auto const publicKey = parseBase58<PublicKey>(
1076  TokenType::NodePublic, node.publickey());
1077 
1078  // NIKB NOTE We should drop the peer immediately if
1079  // they send us a public key we can't parse
1080  if (publicKey)
1081  {
1082  auto const reportTime =
1083  NetClock::time_point{
1084  NetClock::duration{node.reporttime()}};
1085 
1086  app_.cluster().update(
1087  *publicKey,
1088  name,
1089  node.nodeload(),
1090  reportTime);
1091  }
1092  }
1093 
1094  int loadSources = m->loadsources().size();
1095  if (loadSources != 0)
1096  {
1097  Resource::Gossip gossip;
1098  gossip.items.reserve (loadSources);
1099  for (int i = 0; i < m->loadsources().size(); ++i)
1100  {
1101  protocol::TMLoadSource const& node = m->loadsources (i);
1102  Resource::Gossip::Item item;
1103  item.address = beast::IP::Endpoint::from_string (node.name());
1104  item.balance = node.cost();
1105  if (item.address != beast::IP::Endpoint())
1106  gossip.items.push_back(item);
1107  }
1108  overlay_.resourceManager().importConsumers (getName(), gossip);
1109  }
1110 
1111  // Calculate the cluster fee:
1112  auto const thresh = app_.timeKeeper().now() - 90s;
1113  std::uint32_t clusterFee = 0;
1114 
1115  std::vector<std::uint32_t> fees;
1116  fees.reserve (app_.cluster().size());
1117 
1118  app_.cluster().for_each(
1119  [&fees,thresh](ClusterNode const& status)
1120  {
1121  if (status.getReportTime() >= thresh)
1122  fees.push_back (status.getLoadFee ());
1123  });
1124 
1125  if (!fees.empty())
1126  {
1127  auto const index = fees.size() / 2;
1128  std::nth_element (
1129  fees.begin(),
1130  fees.begin () + index,
1131  fees.end());
1132  clusterFee = fees[index];
1133  }
1134 
1135  app_.getFeeTrack().setClusterFee(clusterFee);
1136 }
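// Worked example (illustrative): if the recent cluster reports collected
// above are {256, 512, 1024}, then index = 3 / 2 = 1 and std::nth_element
// places the median, 512, at fees[1], so the cluster fee becomes 512. With
// an even count such as {256, 512, 1024, 2048}, index = 2 selects the upper
// of the two middle values, 1024.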
1137 
1138 void
1139 PeerImp::onMessage (std::shared_ptr <protocol::TMGetShardInfo> const& m)
1140 {
1141  // DEPRECATED
1142 }
1143 
1144 void
1145 PeerImp::onMessage (std::shared_ptr <protocol::TMShardInfo> const& m)
1146 {
1147  // DEPRECATED
1148 }
1149 
1150 void
1151 PeerImp::onMessage (std::shared_ptr <protocol::TMGetPeerShardInfo> const& m)
1152 {
1153  auto badData = [&](std::string msg) {
1154  fee_ = Resource::feeBadData;
1155  JLOG(p_journal_.warn()) << msg;
1156  };
1157 
1158  if (m->hops() > csHopLimit)
1159  return badData("Invalid hops: " + std::to_string(m->hops()));
1160  if (m->peerchain_size() > csHopLimit)
1161  return badData("Invalid peer chain");
1162 
1163  // Reply with shard info we may have
1164  if (auto shardStore = app_.getShardStore())
1165  {
1167  auto shards {shardStore->getCompleteShards()};
1168  if (!shards.empty())
1169  {
1170  protocol::TMPeerShardInfo reply;
1171  reply.set_shardindexes(shards);
1172 
1173  if (m->has_lastlink())
1174  reply.set_lastlink(true);
1175 
1176  if (m->peerchain_size() > 0)
1177  *reply.mutable_peerchain() = m->peerchain();
1178 
1179  send(std::make_shared<Message>(
1180  reply, protocol::mtPEER_SHARD_INFO));
1181 
1182  JLOG(p_journal_.trace()) <<
1183  "Sent shard indexes " << shards;
1184  }
1185  }
1186 
1187  // Relay request to peers
1188  if (m->hops() > 0)
1189  {
1191 
1192  m->set_hops(m->hops() - 1);
1193  if (m->hops() == 0)
1194  m->set_lastlink(true);
1195 
1196  m->add_peerchain()->set_nodepubkey(
1197  publicKey_.data(), publicKey_.size());
1198 
1199  overlay_.foreach (send_if_not (
1200  std::make_shared<Message>(*m, protocol::mtGET_PEER_SHARD_INFO),
1201  match_peer(this)));
1202  }
1203 }
1204 
1205 void
1206 PeerImp::onMessage (std::shared_ptr <protocol::TMPeerShardInfo> const& m)
1207 {
1208  auto badData = [&](std::string msg) {
1209  fee_ = Resource::feeBadData;
1210  JLOG(p_journal_.warn()) << msg;
1211  };
1212 
1213  if (m->shardindexes().empty())
1214  return badData("Missing shard indexes");
1215  if (m->peerchain_size() > csHopLimit)
1216  return badData("Invalid peer chain");
1217  if (m->has_nodepubkey() && !publicKeyType(makeSlice(m->nodepubkey())))
1218  return badData("Invalid public key");
1219 
1220  // Check if the message should be forwarded to another peer
1221  if (m->peerchain_size() > 0)
1222  {
1223  // Get the Public key of the last link in the peer chain
1224  auto const s {makeSlice(m->peerchain(
1225  m->peerchain_size() - 1).nodepubkey())};
1226  if (!publicKeyType(s))
1227  return badData("Invalid pubKey");
1228  PublicKey peerPubKey(s);
1229 
1230  if (auto peer = overlay_.findPeerByPublicKey(peerPubKey))
1231  {
1232  if (!m->has_nodepubkey())
1233  m->set_nodepubkey(publicKey_.data(), publicKey_.size());
1234 
1235  if (!m->has_endpoint())
1236  {
1237  // Check if peer will share IP publicly
1238  if (crawl())
1239  m->set_endpoint(remote_address_.address().to_string());
1240  else
1241  m->set_endpoint("0");
1242  }
1243 
1244  m->mutable_peerchain()->RemoveLast();
1245  peer->send(std::make_shared<Message>(
1246  *m, protocol::mtPEER_SHARD_INFO));
1247 
1248  JLOG(p_journal_.trace()) <<
1249  "Relayed TMPeerShardInfo to peer with IP " <<
1250  remote_address_.address().to_string();
1251  }
1252  else
1253  {
1254  // Peer is no longer available so the relay ends
1256  JLOG(p_journal_.info()) <<
1257  "Unable to route shard info";
1258  }
1259  return;
1260  }
1261 
1262  // Parse the shard indexes received in the shard info
1263  RangeSet<std::uint32_t> shardIndexes;
1264  {
1265  if (!from_string(shardIndexes, m->shardindexes()))
1266  return badData("Invalid shard indexes");
1267 
1268  std::uint32_t earliestShard;
1269  boost::optional<std::uint32_t> latestShard;
1270  {
1271  auto const curLedgerSeq {
1272  app_.getLedgerMaster().getCurrentLedgerIndex()};
1273  if (auto shardStore = app_.getShardStore())
1274  {
1275  earliestShard = shardStore->earliestShardIndex();
1276  if (curLedgerSeq >= shardStore->earliestLedgerSeq())
1277  latestShard = shardStore->seqToShardIndex(curLedgerSeq);
1278  }
1279  else
1280  {
1281  auto const earliestLedgerSeq {
1282  app_.getNodeStore().earliestLedgerSeq()};
1283  earliestShard = NodeStore::seqToShardIndex(earliestLedgerSeq);
1284  if (curLedgerSeq >= earliestLedgerSeq)
1285  latestShard = NodeStore::seqToShardIndex(curLedgerSeq);
1286  }
1287  }
1288 
1289  if (boost::icl::first(shardIndexes) < earliestShard ||
1290  (latestShard && boost::icl::last(shardIndexes) > latestShard))
1291  {
1292  return badData("Invalid shard indexes");
1293  }
1294  }
1295 
1296  // Get the IP of the node reporting the shard info
1297  beast::IP::Endpoint endpoint;
1298  if (m->has_endpoint())
1299  {
1300  if (m->endpoint() != "0")
1301  {
1302  auto result =
1303  beast::IP::Endpoint::from_string_checked(m->endpoint());
1304  if (!result)
1305  return badData("Invalid incoming endpoint: " + m->endpoint());
1306  endpoint = std::move(*result);
1307  }
1308  }
1309  else if (crawl()) // Check if peer will share IP publicly
1310  {
1311  endpoint = remote_address_;
1312  }
1313 
1314  // Get the Public key of the node reporting the shard info
1315  PublicKey publicKey;
1316  if (m->has_nodepubkey())
1317  publicKey = PublicKey(makeSlice(m->nodepubkey()));
1318  else
1319  publicKey = publicKey_;
1320 
1321  {
1323  auto it {shardInfo_.find(publicKey)};
1324  if (it != shardInfo_.end())
1325  {
1326  // Update the IP address for the node
1327  it->second.endpoint = std::move(endpoint);
1328 
1329  // Join the shard index range set
1330  it->second.shardIndexes += shardIndexes;
1331  }
1332  else
1333  {
1334  // Add a new node
1335  ShardInfo shardInfo;
1336  shardInfo.endpoint = std::move(endpoint);
1337  shardInfo.shardIndexes = std::move(shardIndexes);
1338  shardInfo_.emplace(publicKey, std::move(shardInfo));
1339  }
1340  }
1341 
1342  JLOG(p_journal_.trace()) <<
1343  "Consumed TMPeerShardInfo originating from public key " <<
1344  toBase58(TokenType::NodePublic, publicKey) <<
1345  " shard indexes " << m->shardindexes();
1346 
1347  if (m->has_lastlink())
1349 }
1350 
1351 void
1352 PeerImp::onMessage (std::shared_ptr <protocol::TMEndpoints> const& m)
1353 {
1354  if (sanity_.load() != Sanity::sane)
1355  {
1356  // Don't allow endpoints from peer not known sane
1357  return;
1358  }
1359 
1361 
1362  if (m->endpoints_v2().size())
1363  {
1364  endpoints.reserve (m->endpoints_v2().size());
1365  for (auto const& tm : m->endpoints_v2 ())
1366  {
1367  // these endpoint strings support ipv4 and ipv6
1368  auto result = beast::IP::Endpoint::from_string_checked(tm.endpoint());
1369  if (! result)
1370  {
1371  JLOG(p_journal_.error()) <<
1372  "failed to parse incoming endpoint: {" <<
1373  tm.endpoint() << "}";
1374  continue;
1375  }
1376 
1377  // If hops == 0, this Endpoint describes the peer we are connected
1378  // to -- in that case, we take the remote address seen on the
1379  // socket and store that in the IP::Endpoint. If this is the first
1380  // time, then we'll verify that their listener can receive incoming
1381  // by performing a connectivity test. If hops > 0, then we just
1382  // take the address/port we were given
1383 
1384  endpoints.emplace_back(
1385  tm.hops() > 0 ?
1386  *result :
1387  remote_address_.at_port(result->port()),
1388  tm.hops());
1389  JLOG(p_journal_.trace()) <<
1390  "got v2 EP: " << endpoints.back().address <<
1391  ", hops = " << endpoints.back().hops;
1392  }
1393  }
1394  else
1395  {
1396  // this branch can be removed once the entire network is operating with
1397  // endpoint_v2() items (strings)
1398  endpoints.reserve (m->endpoints().size());
1399  for (int i = 0; i < m->endpoints ().size (); ++i)
1400  {
1401  PeerFinder::Endpoint endpoint;
1402  protocol::TMEndpoint const& tm (m->endpoints(i));
1403 
1404  // hops
1405  endpoint.hops = tm.hops();
1406 
1407  // ipv4
1408  if (endpoint.hops > 0)
1409  {
1410  in_addr addr;
1411  addr.s_addr = tm.ipv4().ipv4();
1412  beast::IP::AddressV4 v4 (ntohl (addr.s_addr));
1413  endpoint.address = beast::IP::Endpoint (v4, tm.ipv4().ipv4port ());
1414  }
1415  else
1416  {
1417  // This Endpoint describes the peer we are connected to.
1418  // We will take the remote address seen on the socket and
1419  // store that in the IP::Endpoint. If this is the first time,
1420  // then we'll verify that their listener can receive incoming
1421  // by performing a connectivity test.
1422  //
1423  endpoint.address = remote_address_.at_port (
1424  tm.ipv4().ipv4port ());
1425  }
1426  endpoints.push_back (endpoint);
1427  JLOG(p_journal_.trace()) <<
1428  "got v1 EP: " << endpoints.back().address <<
1429  ", hops = " << endpoints.back().hops;
1430  }
1431  }
1432 
1433  if (! endpoints.empty())
1434  overlay_.peerFinder().on_endpoints (slot_, endpoints);
1435 }
1436 
1437 void
1438 PeerImp::onMessage (std::shared_ptr <protocol::TMTransaction> const& m)
1439 {
1440 
1441  if (sanity_.load() == Sanity::insane)
1442  return;
1443 
1444  if (app_.getOPs().isNeedNetworkLedger ())
1445  {
1446  // If we've never been in synch, there's nothing we can do
1447  // with a transaction
1448  JLOG(p_journal_.debug()) << "Ignoring incoming transaction: " <<
1449  "Need network ledger";
1450  return;
1451  }
1452 
1453  SerialIter sit (makeSlice(m->rawtransaction()));
1454 
1455  try
1456  {
1457  auto stx = std::make_shared<STTx const>(sit);
1458  uint256 txID = stx->getTransactionID ();
1459 
1460  int flags;
1461  constexpr std::chrono::seconds tx_interval = 10s;
1462 
1463  if (! app_.getHashRouter ().shouldProcess (txID, id_, flags,
1464  tx_interval))
1465  {
1466  // we have seen this transaction recently
1467  if (flags & SF_BAD)
1468  {
1470  JLOG(p_journal_.debug()) << "Ignoring known bad tx " <<
1471  txID;
1472  }
1473 
1474  return;
1475  }
1476 
1477  JLOG(p_journal_.debug()) << "Got tx " << txID;
1478 
1479  bool checkSignature = true;
1480  if (cluster())
1481  {
1482  if (! m->has_deferred () || ! m->deferred ())
1483  {
1484  // Skip local checks if a server we trust
1485  // put the transaction in its open ledger
1486  flags |= SF_TRUSTED;
1487  }
1488 
1490  {
1491  // For now, be paranoid and have each validator
1492  // check each transaction, regardless of source
1493  checkSignature = false;
1494  }
1495  }
1496 
1497  // The maximum number of transactions to have in the job queue.
1498  constexpr int max_transactions = 250;
1499  if (app_.getJobQueue().getJobCount(jtTRANSACTION) > max_transactions)
1500  {
1501  overlay_.incJqTransOverflow();
1502  JLOG(p_journal_.info()) << "Transaction queue is full";
1503  }
1504  else if (app_.getLedgerMaster().getValidatedLedgerAge() > 4min)
1505  {
1506  JLOG(p_journal_.trace()) << "No new transactions until synchronized";
1507  }
1508  else
1509  {
1510  app_.getJobQueue ().addJob (
1511  jtTRANSACTION, "recvTransaction->checkTransaction",
1512  [weak = std::weak_ptr<PeerImp>(shared_from_this()),
1513  flags, checkSignature, stx] (Job&) {
1514  if (auto peer = weak.lock())
1515  peer->checkTransaction(flags,
1516  checkSignature, stx);
1517  });
1518  }
1519  }
1520  catch (std::exception const&)
1521  {
1522  JLOG(p_journal_.warn()) << "Transaction invalid: " <<
1523  strHex(m->rawtransaction ());
1524  }
1525 }
1526 
1527 void
1528 PeerImp::onMessage (std::shared_ptr <protocol::TMGetLedger> const& m)
1529 {
1530  fee_ = Resource::feeMediumBurdenPeer;
1531  std::weak_ptr<PeerImp> weak = shared_from_this();
1532  app_.getJobQueue().addJob (
1533  jtLEDGER_REQ, "recvGetLedger",
1534  [weak, m] (Job&) {
1535  if (auto peer = weak.lock())
1536  peer->getLedger(m);
1537  });
1538 }
1539 
1540 void
1541 PeerImp::onMessage (std::shared_ptr <protocol::TMLedgerData> const& m)
1542 {
1543  protocol::TMLedgerData& packet = *m;
1544 
1545  if (m->nodes ().size () <= 0)
1546  {
1547  JLOG(p_journal_.warn()) << "Ledger/TXset data with no nodes";
1548  return;
1549  }
1550 
1551  if (m->has_requestcookie ())
1552  {
1553  std::shared_ptr<Peer> target = overlay_.findPeerByShortID (m->requestcookie ());
1554  if (target)
1555  {
1556  m->clear_requestcookie ();
1557  target->send (std::make_shared<Message> (
1558  packet, protocol::mtLEDGER_DATA));
1559  }
1560  else
1561  {
1562  JLOG(p_journal_.info()) << "Unable to route TX/ledger data reply";
1563  fee_ = Resource::feeUnwantedData;
1564  }
1565  return;
1566  }
1567 
1568  if (! stringIsUint256Sized (m->ledgerhash()))
1569  {
1570  JLOG(p_journal_.warn()) << "TX candidate reply with invalid hash size";
1572  return;
1573  }
1574 
1575  uint256 const hash {m->ledgerhash()};
1576 
1577  if (m->type () == protocol::liTS_CANDIDATE)
1578  {
1579  // got data for a candidate transaction set
1580  std::weak_ptr<PeerImp> weak = shared_from_this();
1581  auto& journal = p_journal_;
1582  app_.getJobQueue().addJob (
1583  jtTXN_DATA, "recvPeerData",
1584  [weak, hash, journal, m] (Job&) {
1585  if (auto peer = weak.lock())
1586  peer->peerTXData(hash, m, journal);
1587  });
1588  return;
1589  }
1590 
1591  if (!app_.getInboundLedgers().gotLedgerData(
1592  hash, shared_from_this(), m))
1593  {
1594  JLOG(p_journal_.trace()) << "Got data for unwanted ledger";
1595  fee_ = Resource::feeUnwantedData;
1596  }
1597 }
1598 
1599 void
1600 PeerImp::onMessage (std::shared_ptr <protocol::TMProposeSet> const& m)
1601 {
1602  protocol::TMProposeSet& set = *m;
1603 
1604  if (set.has_hops() && ! cluster())
1605  set.set_hops(set.hops() + 1);
1606 
1607  auto const sig = makeSlice(set.signature());
1608 
1609  // Preliminary check for the validity of the signature: A DER encoded
1610  // signature can't be longer than 72 bytes.
1611  if ((boost::algorithm::clamp(sig.size(), 64, 72) != sig.size()) ||
1612  (publicKeyType(makeSlice(set.nodepubkey())) != KeyType::secp256k1))
1613  {
1614  JLOG(p_journal_.warn()) << "Proposal: malformed";
1616  return;
1617  }
1618 
1619  if (! stringIsUint256Sized (set.currenttxhash()) ||
1620  ! stringIsUint256Sized (set.previousledger()))
1621  {
1622  JLOG(p_journal_.warn()) << "Proposal: malformed";
1624  return;
1625  }
1626 
1627  uint256 const proposeHash{set.currenttxhash()};
1628  uint256 const prevLedger{set.previousledger()};
1629 
1630  PublicKey const publicKey {makeSlice(set.nodepubkey())};
1631  NetClock::time_point const closeTime { NetClock::duration{set.closetime()} };
1632 
1633  uint256 const suppression = proposalUniqueId (
1634  proposeHash, prevLedger, set.proposeseq(),
1635  closeTime, publicKey.slice(), sig);
1636 
1637  if (! app_.getHashRouter ().addSuppressionPeer (suppression, id_))
1638  {
1639  JLOG(p_journal_.trace()) << "Proposal: duplicate";
1640  return;
1641  }
1642 
1643  auto const isTrusted = app_.validators().trusted (publicKey);
1644 
1645  if (!isTrusted)
1646  {
1647  if (sanity_.load() == Sanity::insane)
1648  {
1649  JLOG(p_journal_.debug()) << "Proposal: Dropping UNTRUSTED (insane)";
1650  return;
1651  }
1652 
1653  if (! cluster() && app_.getFeeTrack ().isLoadedLocal())
1654  {
1655  JLOG(p_journal_.debug()) << "Proposal: Dropping UNTRUSTED (load)";
1656  return;
1657  }
1658  }
1659 
1660  JLOG(p_journal_.trace()) <<
1661  "Proposal: " << (isTrusted ? "trusted" : "UNTRUSTED");
1662 
1663  auto proposal = RCLCxPeerPos(
1664  publicKey,
1665  sig,
1666  suppression,
1668  prevLedger,
1669  set.proposeseq(),
1670  proposeHash,
1671  closeTime,
1674 
1675  std::weak_ptr<PeerImp> weak = shared_from_this();
1676  app_.getJobQueue ().addJob (
1677  isTrusted ? jtPROPOSAL_t : jtPROPOSAL_ut, "recvPropose->checkPropose",
1678  [weak, m, proposal] (Job& job) {
1679  if (auto peer = weak.lock())
1680  peer->checkPropose(job, m, proposal);
1681  });
1682 }
1683 
1684 void
1685 PeerImp::onMessage (std::shared_ptr <protocol::TMStatusChange> const& m)
1686 {
1687  JLOG(p_journal_.trace()) << "Status: Change";
1688 
1689  if (!m->has_networktime ())
1690  m->set_networktime (app_.timeKeeper().now().time_since_epoch().count());
1691 
1692  {
1693  std::lock_guard sl(recentLock_);
1694  if (!last_status_.has_newstatus () || m->has_newstatus ())
1695  last_status_ = *m;
1696  else
1697  {
1698  // preserve old status
1699  protocol::NodeStatus status = last_status_.newstatus ();
1700  last_status_ = *m;
1701  m->set_newstatus (status);
1702  }
1703  }
1704 
1705  if (m->newevent () == protocol::neLOST_SYNC)
1706  {
1707  bool outOfSync {false};
1708  {
1709  // Operations on closedLedgerHash_ and previousLedgerHash_ must be
1710  // guarded by recentLock_.
1711  std::lock_guard sl(recentLock_);
1712  if (!closedLedgerHash_.isZero ())
1713  {
1714  outOfSync = true;
1715  closedLedgerHash_.zero ();
1716  }
1717  previousLedgerHash_.zero ();
1718  }
1719  if (outOfSync)
1720  {
1721  JLOG(p_journal_.debug()) << "Status: Out of sync";
1722  }
1723  return;
1724  }
1725 
1726  {
1727  uint256 closedLedgerHash {};
1728  bool const peerChangedLedgers {
1729  m->has_ledgerhash() && stringIsUint256Sized (m->ledgerhash())};
1730 
1731  {
1732  // Operations on closedLedgerHash_ and previousLedgerHash_ must be
1733  // guarded by recentLock_.
1734  std::lock_guard sl(recentLock_);
1735  if (peerChangedLedgers)
1736  {
1737  closedLedgerHash_ = m->ledgerhash();
1738  closedLedgerHash = closedLedgerHash_;
1739  addLedger (closedLedgerHash, sl);
1740  }
1741  else
1742  {
1743  closedLedgerHash_.zero ();
1744  }
1745 
1746  if (m->has_ledgerhashprevious() &&
1747  stringIsUint256Sized (m->ledgerhashprevious()))
1748  {
1749  previousLedgerHash_ = m->ledgerhashprevious();
1750  addLedger (previousLedgerHash_, sl);
1751  }
1752  else
1753  {
1754  previousLedgerHash_.zero ();
1755  }
1756  }
1757  if (peerChangedLedgers)
1758  {
1759  JLOG(p_journal_.debug()) << "LCL is " << closedLedgerHash;
1760  }
1761  else
1762  {
1763  JLOG(p_journal_.debug()) << "Status: No ledger";
1764  }
1765  }
1766 
1767 
1768  if (m->has_firstseq () && m->has_lastseq())
1769  {
1770  std::lock_guard sl(recentLock_);
1771 
1772  minLedger_ = m->firstseq ();
1773  maxLedger_ = m->lastseq ();
1774 
1775  if ((maxLedger_ < minLedger_) || (minLedger_ == 0) || (maxLedger_ == 0))
1776  minLedger_ = maxLedger_ = 0;
1777  }
1778 
1779  if (m->has_ledgerseq() &&
1781  {
1782  checkSanity (m->ledgerseq(), app_.getLedgerMaster().getValidLedgerIndex());
1783  }
1784 
1785  app_.getOPs().pubPeerStatus (
1786  [=]() -> Json::Value
1787  {
1788  Json::Value j = Json::objectValue;
1789 
1790  if (m->has_newstatus ())
1791  {
1792  switch (m->newstatus ())
1793  {
1794  case protocol::nsCONNECTING:
1795  j[jss::status] = "CONNECTING";
1796  break;
1797  case protocol::nsCONNECTED:
1798  j[jss::status] = "CONNECTED";
1799  break;
1800  case protocol::nsMONITORING:
1801  j[jss::status] = "MONITORING";
1802  break;
1803  case protocol::nsVALIDATING:
1804  j[jss::status] = "VALIDATING";
1805  break;
1806  case protocol::nsSHUTTING:
1807  j[jss::status] = "SHUTTING";
1808  break;
1809  }
1810  }
1811 
1812  if (m->has_newevent())
1813  {
1814  switch (m->newevent ())
1815  {
1816  case protocol::neCLOSING_LEDGER:
1817  j[jss::action] = "CLOSING_LEDGER";
1818  break;
1819  case protocol::neACCEPTED_LEDGER:
1820  j[jss::action] = "ACCEPTED_LEDGER";
1821  break;
1822  case protocol::neSWITCHED_LEDGER:
1823  j[jss::action] = "SWITCHED_LEDGER";
1824  break;
1825  case protocol::neLOST_SYNC:
1826  j[jss::action] = "LOST_SYNC";
1827  break;
1828  }
1829  }
1830 
1831  if (m->has_ledgerseq ())
1832  {
1833  j[jss::ledger_index] = m->ledgerseq();
1834  }
1835 
1836  if (m->has_ledgerhash ())
1837  {
1838  uint256 closedLedgerHash {};
1839  {
1840  std::lock_guard sl(recentLock_);
1841  closedLedgerHash = closedLedgerHash_;
1842  }
1843  j[jss::ledger_hash] = to_string (closedLedgerHash);
1844  }
1845 
1846  if (m->has_networktime ())
1847  {
1848  j[jss::date] = Json::UInt (m->networktime());
1849  }
1850 
1851  if (m->has_firstseq () && m->has_lastseq ())
1852  {
1853  j[jss::ledger_index_min] =
1854  Json::UInt (m->firstseq ());
1855  j[jss::ledger_index_max] =
1856  Json::UInt (m->lastseq ());
1857  }
1858 
1859  return j;
1860  });
1861 }
1862 
1863 void
1864 PeerImp::checkSanity (std::uint32_t validationSeq)
1865 {
1866  std::uint32_t serverSeq;
1867  {
1868  // Extract the sequence number of the highest
1869  // ledger this peer has
1870  std::lock_guard sl (recentLock_);
1871 
1872  serverSeq = maxLedger_;
1873  }
1874  if (serverSeq != 0)
1875  {
1876  // Compare the peer's ledger sequence to the
1877  // sequence of a recently-validated ledger
1878  checkSanity (serverSeq, validationSeq);
1879  }
1880 }
1881 
1882 void
1883 PeerImp::checkSanity (std::uint32_t seq1, std::uint32_t seq2)
1884 {
1885  int diff = std::max (seq1, seq2) - std::min (seq1, seq2);
1886 
1887  if (diff < Tuning::saneLedgerLimit)
1888  {
1889  // The peer's ledger sequence is close to the validation's
1890  sanity_ = Sanity::sane;
1891  }
1892 
1893  if ((diff > Tuning::insaneLedgerLimit) && (sanity_.load() != Sanity::insane))
1894  {
1895  // The peer's ledger sequence is way off the validation's
1896  std::lock_guard sl(recentLock_);
1897 
1898  sanity_ = Sanity::insane;
1899  insaneTime_ = clock_type::now();
1900  }
1901 }
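// Illustrative example (the actual limits are defined in overlay/impl/Tuning.h
// and are not shown here): with the default tuning, a peer whose reported
// ledger is, say, 5 sequence numbers away from a recent validation falls
// under Tuning::saneLedgerLimit and is marked sane, while a peer stuck
// several hundred ledgers behind exceeds Tuning::insaneLedgerLimit and is
// marked insane; check() below uses that state to decide whether the
// connection is worth keeping.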
1902 
1903 // Should this connection be rejected
1904 // and considered a failure
1905 void PeerImp::check ()
1906 {
1907  if (m_inbound || (sanity_.load() == Sanity::sane))
1908  return;
1909 
1910  clock_type::time_point insaneTime;
1911  {
1912  std::lock_guard sl(recentLock_);
1913 
1914  insaneTime = insaneTime_;
1915  }
1916 
1917  bool reject = false;
1918 
1919  if (sanity_.load() == Sanity::insane)
1920  reject = (insaneTime - clock_type::now())
1921  > std::chrono::seconds (Tuning::maxInsaneTime);
1922 
1923  if (sanity_.load() == Sanity::unknown)
1924  reject = (insaneTime - clock_type::now())
1925  > std::chrono::seconds (Tuning::maxUnknownTime);
1926 
1927  if (reject)
1928  {
1929  overlay_.peerFinder().on_failure (slot_);
1930  post(
1931  strand_,
1932  std::bind(
1933  (void (PeerImp::*)(std::string const&)) & PeerImp::fail,
1934  shared_from_this(),
1935  "Not useful"));
1936  }
1937 }
1938 
1939 void
1941 {
1942  if (! stringIsUint256Sized (m->hash()))
1943  {
1944  fee_ = Resource::feeInvalidRequest;
1945  return;
1946  }
1947 
1948  uint256 const hash {m->hash()};
1949 
1950  if (m->status () == protocol::tsHAVE)
1951  {
1952  std::lock_guard sl(recentLock_);
1953 
1954  if (std::find (recentTxSets_.begin (),
1955  recentTxSets_.end (), hash) != recentTxSets_.end ())
1956  {
1957  fee_ = Resource::feeUnwantedData;
1958  return;
1959  }
1960 
1961  if (recentTxSets_.size () == 128)
1962  recentTxSets_.pop_front ();
1963 
1964  recentTxSets_.push_back (hash);
1965  }
1966 }
1967 
1968 void
1969 PeerImp::onMessage (std::shared_ptr <protocol::TMValidatorList> const& m)
1970 {
1971  try
1972  {
1973  if (!supportsFeature(ProtocolFeature::ValidatorListPropagation))
1974  {
1975  JLOG(p_journal_.debug())
1976  << "ValidatorList: received validator list from peer using "
1977  << "protocol version " << to_string(protocol_)
1978  << " which shouldn't support this feature.";
1979  fee_ = Resource::feeUnwantedData;
1980  return;
1981  }
1982  auto const& manifest = m->manifest();
1983  auto const& blob = m->blob();
1984  auto const& signature = m->signature();
1985  auto const version = m->version();
1986  auto const hash = sha512Half(manifest, blob, signature, version);
1987 
1988  JLOG(p_journal_.debug()) << "Received validator list from " <<
1989  remote_address_.to_string() << " (" << id_ << ")";
1990 
1991  if (! app_.getHashRouter ().addSuppressionPeer(hash, id_))
1992  {
1993  JLOG(p_journal_.debug()) <<
1994  "ValidatorList: received duplicate validator list";
1995  // Charging this fee here won't hurt the peer in the normal
1996  // course of operation (ie. refresh every 5 minutes), but
1997  // will add up if the peer is misbehaving.
1998  fee_ = Resource::feeUnwantedData;
1999  return;
2000  }
2001 
2002  auto const applyResult = app_.validators().applyListAndBroadcast (
2003  manifest,
2004  blob,
2005  signature,
2006  version,
2007  remote_address_.to_string(),
2008  hash,
2009  app_.overlay(),
2010  app_.getHashRouter());
2011  auto const disp = applyResult.disposition;
2012 
2013  JLOG(p_journal_.debug()) << "Processed validator list from " <<
2014  (applyResult.publisherKey ? strHex(*applyResult.publisherKey) :
2015  "unknown or invalid publisher") << " from " <<
2016  remote_address_.to_string() << " (" << id_ << ") with result " <<
2017  to_string(disp);
2018 
2019  switch (disp)
2020  {
2021  case ListDisposition::accepted:
2022  JLOG (p_journal_.debug()) <<
2023  "Applied new validator list from peer " << remote_address_;
2024  {
2025  std::lock_guard<std::mutex> sl(recentLock_);
2026 
2027  assert(applyResult.sequence && applyResult.publisherKey);
2028  auto const& pubKey = *applyResult.publisherKey;
2029 #ifndef NDEBUG
2030  if (auto const iter = publisherListSequences_.find(pubKey);
2031  iter != publisherListSequences_.end())
2032  {
2033  assert(iter->second < *applyResult.sequence);
2034  }
2035 #endif
2036  publisherListSequences_[pubKey] = *applyResult.sequence;
2037  }
2038  break;
2039  case ListDisposition::same_sequence:
2040  JLOG (p_journal_.warn()) <<
2041  "Validator list with current sequence from peer " <<
2042  remote_address_;
2043  // Charging this fee here won't hurt the peer in the normal
2044  // course of operation (ie. refresh every 5 minutes), but
2045  // will add up if the peer is misbehaving.
2046  fee_ = Resource::feeUnwantedData;
2047 #ifndef NDEBUG
2048  {
2049  std::lock_guard<std::mutex> sl(recentLock_);
2050  assert(applyResult.sequence && applyResult.publisherKey);
2051  assert(publisherListSequences_[*applyResult.publisherKey]
2052  == *applyResult.sequence);
2053  }
2054 #endif // !NDEBUG
2055 
2056  break;
2057  case ListDisposition::stale:
2058  JLOG (p_journal_.warn()) <<
2059  "Stale validator list from peer " << remote_address_;
2060  // There are very few good reasons for a peer to send an
2061  // old list, particularly more than once.
2062  fee_ = Resource::feeBadData;
2063  break;
2064  case ListDisposition::untrusted:
2065  JLOG (p_journal_.warn()) <<
2066  "Untrusted validator list from peer " << remote_address_;
2067  // Charging this fee here won't hurt the peer in the normal
2068  // course of operation (ie. refresh every 5 minutes), but
2069  // will add up if the peer is misbehaving.
2070  fee_ = Resource::feeUnwantedData;
2071  break;
2072  case ListDisposition::invalid:
2073  JLOG (p_journal_.warn()) <<
2074  "Invalid validator list from peer " << remote_address_;
2075  // This shouldn't ever happen with a well-behaved peer
2076  fee_ = Resource::feeInvalidSignature;
2077  break;
2078  case ListDisposition::unsupported_version:
2079  JLOG (p_journal_.warn()) <<
2080  "Unsupported version validator list from peer " <<
2081  remote_address_;
2082  // During a version transition, this may be legitimate.
2083  // If it happens frequently, that's probably bad.
2084  fee_ = Resource::feeBadData;
2085  break;
2086  default:
2087  assert(false);
2088  }
2089  }
2090  catch (std::exception const& e)
2091  {
2092  JLOG(p_journal_.warn()) <<
2093  "ValidatorList: Exception, " << e.what() <<
2094  " from peer " << remote_address_;
2095  fee_ = Resource::feeBadData;
2096  }
2097 }
2098 
2099 void
2100 PeerImp::onMessage (std::shared_ptr <protocol::TMValidation> const& m)
2101 {
2102  auto const closeTime = app_.timeKeeper().closeTime();
2103 
2104  if (m->has_hops() && ! cluster())
2105  m->set_hops(m->hops() + 1);
2106 
2107  if (m->validation ().size () < 50)
2108  {
2109  JLOG(p_journal_.warn()) << "Validation: Too small";
2110  fee_ = Resource::feeInvalidRequest;
2111  return;
2112  }
2113 
2114  try
2115  {
2116  STValidation::pointer val;
2117  {
2118  SerialIter sit (makeSlice(m->validation()));
2119  val = std::make_shared<STValidation>(
2120  std::ref(sit),
2121  [this](PublicKey const& pk) {
2122  return calcNodeID(
2123  app_.validatorManifests().getMasterKey(pk));
2124  },
2125  false);
2126  val->setSeen (closeTime);
2127  }
2128 
2129  if (! isCurrent(app_.getValidations().parms(),
2130  app_.timeKeeper().closeTime(),
2131  val->getSignTime(),
2132  val->getSeenTime()))
2133  {
2134  JLOG(p_journal_.trace()) << "Validation: Not current";
2135  fee_ = Resource::feeUnwantedData;
2136  return;
2137  }
2138 
2139  if (! app_.getHashRouter ().addSuppressionPeer(
2140  sha512Half(makeSlice(m->validation())), id_))
2141  {
2142  JLOG(p_journal_.trace()) << "Validation: duplicate";
2143  return;
2144  }
2145 
2146  auto const isTrusted =
2147  app_.validators().trusted(val->getSignerPublic ());
2148 
2149  if (!isTrusted && (sanity_.load () == Sanity::insane))
2150  {
2151  JLOG(p_journal_.debug()) <<
2152  "Validation: dropping untrusted from insane peer";
2153  }
2154  if (isTrusted || cluster() ||
2155  ! app_.getFeeTrack ().isLoadedLocal ())
2156  {
2157  std::weak_ptr<PeerImp> weak = shared_from_this();
2158  app_.getJobQueue ().addJob (
2159  isTrusted ? jtVALIDATION_t : jtVALIDATION_ut,
2160  "recvValidation->checkValidation",
2161  [weak, val, m] (Job&)
2162  {
2163  if (auto peer = weak.lock())
2164  peer->checkValidation(val, m);
2165  });
2166  }
2167  else
2168  {
2169  JLOG(p_journal_.debug()) <<
2170  "Validation: Dropping UNTRUSTED (load)";
2171  }
2172  }
2173  catch (std::exception const& e)
2174  {
2175  JLOG(p_journal_.warn()) <<
2176  "Validation: Exception, " << e.what();
2177  fee_ = Resource::feeInvalidRequest;
2178  }
2179 }
2180 
2181 void
2182 PeerImp::onMessage (std::shared_ptr <protocol::TMGetObjectByHash> const& m)
2183 {
2184  protocol::TMGetObjectByHash& packet = *m;
2185 
2186  if (packet.query ())
2187  {
2188  // this is a query
2189  if (send_queue_.size() >= Tuning::dropSendQueue)
2190  {
2191  JLOG(p_journal_.debug()) << "GetObject: Large send queue";
2192  return;
2193  }
2194 
2195  if (packet.type () == protocol::TMGetObjectByHash::otFETCH_PACK)
2196  {
2197  doFetchPack (m);
2198  return;
2199  }
2200 
2201  fee_ = Resource::feeMediumBurdenPeer;
2202 
2203  protocol::TMGetObjectByHash reply;
2204 
2205  reply.set_query (false);
2206 
2207  if (packet.has_seq())
2208  reply.set_seq(packet.seq());
2209 
2210  reply.set_type (packet.type ());
2211 
2212  if (packet.has_ledgerhash ())
2213  reply.set_ledgerhash (packet.ledgerhash ());
2214 
2215  // This is a very minimal implementation
2216  for (int i = 0; i < packet.objects_size (); ++i)
2217  {
2218  auto const& obj = packet.objects (i);
2219  if (obj.has_hash() && stringIsUint256Sized (obj.hash()))
2220  {
2221  uint256 const hash {obj.hash()};
2222  // VFALCO TODO Move this someplace more sensible so we dont
2223  // need to inject the NodeStore interfaces.
2224  std::uint32_t seq {obj.has_ledgerseq() ? obj.ledgerseq() : 0};
2225  auto hObj {app_.getNodeStore().fetch (hash, seq)};
2226  if (!hObj)
2227  {
2228  if (auto shardStore = app_.getShardStore())
2229  {
2230  if (seq >= shardStore->earliestLedgerSeq())
2231  hObj = shardStore->fetch(hash, seq);
2232  }
2233  }
2234  if (hObj)
2235  {
2236  protocol::TMIndexedObject& newObj = *reply.add_objects ();
2237  newObj.set_hash (hash.begin (), hash.size ());
2238  newObj.set_data (&hObj->getData ().front (),
2239  hObj->getData ().size ());
2240 
2241  if (obj.has_nodeid ())
2242  newObj.set_index (obj.nodeid ());
2243  if (obj.has_ledgerseq())
2244  newObj.set_ledgerseq(obj.ledgerseq());
2245 
2246  // VFALCO NOTE "seq" in the message is obsolete
2247  }
2248  }
2249  }
2250 
2251  JLOG(p_journal_.trace()) <<
2252  "GetObj: " << reply.objects_size () <<
2253  " of " << packet.objects_size ();
2254  send (std::make_shared<Message> (reply, protocol::mtGET_OBJECTS));
2255  }
2256  else
2257  {
2258  // this is a reply
2259  std::uint32_t pLSeq = 0;
2260  bool pLDo = true;
2261  bool progress = false;
2262 
2263  for (int i = 0; i < packet.objects_size(); ++i)
2264  {
2265  const protocol::TMIndexedObject& obj = packet.objects (i);
2266 
2267  if (obj.has_hash() && stringIsUint256Sized (obj.hash()))
2268  {
2269  if (obj.has_ledgerseq())
2270  {
2271  if (obj.ledgerseq() != pLSeq)
2272  {
2273  if (pLDo && (pLSeq != 0))
2274  {
2275  JLOG(p_journal_.debug()) <<
2276  "GetObj: Full fetch pack for " << pLSeq;
2277  }
2278  pLSeq = obj.ledgerseq();
2279  pLDo = !app_.getLedgerMaster().haveLedger (pLSeq);
2280 
2281  if (!pLDo)
2282  {
2283  JLOG(p_journal_.debug()) <<
2284  "GetObj: Late fetch pack for " << pLSeq;
2285  }
2286  else
2287  progress = true;
2288  }
2289  }
2290 
2291  if (pLDo)
2292  {
2293  uint256 const hash {obj.hash()};
2294 
2295  std::shared_ptr< Blob > data (
2296  std::make_shared< Blob > (
2297  obj.data().begin(), obj.data().end()));
2298 
2299  app_.getLedgerMaster().addFetchPack (hash, data);
2300  }
2301  }
2302  }
2303 
2304  if (pLDo && (pLSeq != 0))
2305  {
2306  JLOG(p_journal_.debug()) <<
2307  "GetObj: Partial fetch pack for " << pLSeq;
2308  }
2309  if (packet.type () == protocol::TMGetObjectByHash::otFETCH_PACK)
2310  app_.getLedgerMaster ().gotFetchPack (progress, pLSeq);
2311  }
2312 }
2313 
2314 //--------------------------------------------------------------------------
2315 
2316 void
2317 PeerImp::addLedger (uint256 const& hash,
2318  std::lock_guard<std::mutex> const& lockedRecentLock)
2319 {
2320  // lockedRecentLock is passed as a reminder that recentLock_ must be
2321  // locked by the caller.
2322  (void) lockedRecentLock;
2323 
2324  if (std::find (recentLedgers_.begin(),
2325  recentLedgers_.end(), hash) != recentLedgers_.end())
2326  return;
2327 
2328  // VFALCO TODO See if a sorted vector would be better.
2329 
2330  if (recentLedgers_.size () == 128)
2331  recentLedgers_.pop_front ();
2332 
2333  recentLedgers_.push_back (hash);
2334 }
2335 
2336 void
2337 PeerImp::doFetchPack (const std::shared_ptr<protocol::TMGetObjectByHash>& packet)
2338 {
2339  // VFALCO TODO Invert this dependency using an observer and shared state object.
2340  // Don't queue fetch pack jobs if we're under load or we already have
2341  // some queued.
2342  if (app_.getFeeTrack ().isLoadedLocal () ||
2343  (app_.getLedgerMaster().getValidatedLedgerAge() > 40s) ||
2344  (app_.getJobQueue().getJobCount(jtPACK) > 10))
2345  {
2346  JLOG(p_journal_.info()) << "Too busy to make fetch pack";
2347  return;
2348  }
2349 
2350  if (! stringIsUint256Sized (packet->ledgerhash()))
2351  {
2352  JLOG(p_journal_.warn()) << "FetchPack hash size malformed";
2353  fee_ = Resource::feeInvalidRequest;
2354  return;
2355  }
2356 
2357  fee_ = Resource::feeHighBurdenPeer;
2358 
2359  uint256 const hash {packet->ledgerhash()};
2360 
2361  std::weak_ptr<PeerImp> weak = shared_from_this();
2362  auto elapsed = UptimeClock::now();
2363  auto const pap = &app_;
2364  app_.getJobQueue ().addJob (
2365  jtPACK, "MakeFetchPack",
2366  [pap, weak, packet, hash, elapsed] (Job&) {
2367  pap->getLedgerMaster().makeFetchPack(
2368  weak, packet, hash, elapsed);
2369  });
2370 }
2371 
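// Verifies a relayed transaction: rejects it if it has already expired or
// fails signature/local validity checks, otherwise hands it to NetworkOPs.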
2372 void
2373 PeerImp::checkTransaction (int flags,
2374  bool checkSignature, std::shared_ptr<STTx const> const& stx)
2375 {
2376  // VFALCO TODO Rewrite to not use exceptions
2377  try
2378  {
2379  // Expired?
2380  if (stx->isFieldPresent(sfLastLedgerSequence) &&
2381  (stx->getFieldU32 (sfLastLedgerSequence) <
2382  app_.getLedgerMaster().getValidLedgerIndex()))
2383  {
2384  app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2385  charge (Resource::feeUnwantedData);
2386  return;
2387  }
2388 
2389  if (checkSignature)
2390  {
2391  // Check the signature before handing off to the job queue.
2392  if (auto [valid, validReason] = checkValidity(
2393  app_.getHashRouter(),
2394  *stx,
2395  app_.getLedgerMaster().getValidatedRules(),
2396  app_.config());
2397  valid != Validity::Valid)
2398  {
2399  if (!validReason.empty())
2400  {
2401  JLOG(p_journal_.trace()) <<
2402  "Exception checking transaction: " <<
2403  validReason;
2404  }
2405 
2406  // Probably not necessary to set SF_BAD, but doesn't hurt.
2407  app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2408  charge(Resource::feeInvalidSignature);
2409  return;
2410  }
2411  }
2412  else
2413  {
2414  forceValidity(app_.getHashRouter(),
2415  stx->getTransactionID(), Validity::Valid);
2416  }
2417 
2418  std::string reason;
2419  auto tx = std::make_shared<Transaction> (
2420  stx, reason, app_);
2421 
2422  if (tx->getStatus () == INVALID)
2423  {
2424  if (! reason.empty ())
2425  {
2426  JLOG(p_journal_.trace()) <<
2427  "Exception checking transaction: " << reason;
2428  }
2429  app_.getHashRouter ().setFlags (stx->getTransactionID (), SF_BAD);
2430  charge (Resource::feeInvalidSignature);
2431  return;
2432  }
2433 
2434  bool const trusted (flags & SF_TRUSTED);
2435  app_.getOPs ().processTransaction (
2436  tx, trusted, false, NetworkOPs::FailHard::no);
2437  }
2438  catch (std::exception const&)
2439  {
2440  app_.getHashRouter ().setFlags (stx->getTransactionID (), SF_BAD);
2441  charge (Resource::feeBadData);
2442  }
2443 }
2444 
2445 // Called from our JobQueue
2446 void
2447 PeerImp::checkPropose (Job& job,
2448  std::shared_ptr<protocol::TMProposeSet> const& packet,
2449  RCLCxPeerPos peerPos)
2450 {
2451  bool isTrusted = (job.getType () == jtPROPOSAL_t);
2452 
2453  JLOG(p_journal_.trace()) <<
2454  "Checking " << (isTrusted ? "trusted" : "UNTRUSTED") << " proposal";
2455 
2456  assert (packet);
2457  protocol::TMProposeSet& set = *packet;
2458 
2459  if (! cluster() && !peerPos.checkSign ())
2460  {
2461  JLOG(p_journal_.warn()) <<
2462  "Proposal fails sig check";
2463  charge (Resource::feeInvalidSignature);
2464  return;
2465  }
2466 
2467  if (isTrusted)
2468  {
2469  app_.getOPs ().processTrustedProposal (peerPos, packet);
2470  }
2471  else
2472  {
2473  if (cluster() ||
2474  (app_.getOPs().getConsensusLCL() == peerPos.proposal().prevLedger()))
2475  {
2476  // relay untrusted proposal
2477  JLOG(p_journal_.trace()) <<
2478  "relaying UNTRUSTED proposal";
2479  overlay_.relay(set, peerPos.suppressionID());
2480  }
2481  else
2482  {
2483  JLOG(p_journal_.debug()) <<
2484  "Not relaying UNTRUSTED proposal";
2485  }
2486  }
2487 }
2488 
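// Verifies a relayed validation and, if NetworkOPs accepts it (or the peer
// is in our cluster), relays it to other peers.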
2489 void
2490 PeerImp::checkValidation (STValidation::pointer val,
2491  std::shared_ptr<protocol::TMValidation> const& packet)
2492 {
2493  try
2494  {
2495  // VFALCO Which functions throw?
2496  if (! cluster() && !val->isValid())
2497  {
2498  JLOG(p_journal_.warn()) <<
2499  "Validation is invalid";
2500  charge (Resource::feeInvalidRequest);
2501  return;
2502  }
2503 
2504  if (app_.getOPs ().recvValidation(val, std::to_string(id())) ||
2505  cluster())
2506  {
2507  auto const suppression = sha512Half(
2508  makeSlice(val->getSerialized()));
2509  overlay_.relay(*packet, suppression);
2510  }
2511  }
2512  catch (std::exception const&)
2513  {
2514  JLOG(p_journal_.trace()) <<
2515  "Exception processing validation";
2516  charge (Resource::feeInvalidRequest);
2517  }
2518 }
2519 
2520 // Returns the best-scoring peer that can help us get
2521 // the TX tree with the specified root hash.
2522 //
2523 static
2524 std::shared_ptr<PeerImp>
2525 getPeerWithTree (OverlayImpl& ov,
2526  uint256 const& rootHash, PeerImp const* skip)
2527 {
2528  std::shared_ptr<PeerImp> ret;
2529  int retScore = 0;
2530 
2531  ov.for_each([&](std::shared_ptr<PeerImp>&& p)
2532  {
2533  if (p->hasTxSet(rootHash) && p.get() != skip)
2534  {
2535  auto score = p->getScore (true);
2536  if (! ret || (score > retScore))
2537  {
2538  ret = std::move(p);
2539  retScore = score;
2540  }
2541  }
2542  });
2543 
2544  return ret;
2545 }
2546 
2547 // Returns the best-scoring peer, weighted by how likely it is
2548 // to have the ledger and how responsive it is.
2549 //
2550 static
2551 std::shared_ptr<PeerImp>
2552 getPeerWithLedger (OverlayImpl& ov,
2553  uint256 const& ledgerHash, LedgerIndex ledger,
2554  PeerImp const* skip)
2555 {
2556  std::shared_ptr<PeerImp> ret;
2557  int retScore = 0;
2558 
2559  ov.for_each([&](std::shared_ptr<PeerImp>&& p)
2560  {
2561  if (p->hasLedger(ledgerHash, ledger) &&
2562  p.get() != skip)
2563  {
2564  auto score = p->getScore (true);
2565  if (! ret || (score > retScore))
2566  {
2567  ret = std::move(p);
2568  retScore = score;
2569  }
2570  }
2571  });
2572 
2573  return ret;
2574 }
2575 
2576 // VFALCO NOTE This function is way too big and cumbersome.
2577 void
2578 PeerImp::getLedger (std::shared_ptr<protocol::TMGetLedger> const& m)
2579 {
2580  protocol::TMGetLedger& packet = *m;
2581  std::shared_ptr<SHAMap> shared;
2582  SHAMap const* map = nullptr;
2583  protocol::TMLedgerData reply;
2584  bool fatLeaves = true;
2585  std::shared_ptr<Ledger const> ledger;
2586 
2587  if (packet.has_requestcookie ())
2588  reply.set_requestcookie (packet.requestcookie ());
2589 
2590  std::string logMe;
2591 
2592  if (packet.itype () == protocol::liTS_CANDIDATE)
2593  {
2594  // Request is for a transaction candidate set
2595  JLOG(p_journal_.trace()) << "GetLedger: Tx candidate set";
2596 
2597  if (!packet.has_ledgerhash() ||
2598  !stringIsUint256Sized (packet.ledgerhash()))
2599  {
2600  charge (Resource::feeInvalidRequest);
2601  JLOG(p_journal_.warn()) << "GetLedger: Tx candidate set invalid";
2602  return;
2603  }
2604 
2605  uint256 const txHash {packet.ledgerhash()};
2606 
2607  shared = app_.getInboundTransactions().getSet (txHash, false);
2608  map = shared.get();
2609 
2610  if (! map)
2611  {
2612  if (packet.has_querytype () && !packet.has_requestcookie ())
2613  {
2614  JLOG(p_journal_.debug()) << "GetLedger: Routing Tx set request";
2615 
2616  auto const v = getPeerWithTree(
2617  overlay_, txHash, this);
2618  if (! v)
2619  {
2620  JLOG(p_journal_.info()) << "GetLedger: Route TX set failed";
2621  return;
2622  }
2623 
2624  packet.set_requestcookie (id ());
2625  v->send (std::make_shared<Message> (
2626  packet, protocol::mtGET_LEDGER));
2627  return;
2628  }
2629 
2630  JLOG(p_journal_.debug()) << "GetLedger: Can't provide map ";
2631  charge (Resource::feeInvalidRequest);
2632  return;
2633  }
2634 
2635  reply.set_ledgerseq (0);
2636  reply.set_ledgerhash (txHash.begin (), txHash.size ());
2637  reply.set_type (protocol::liTS_CANDIDATE);
2638  fatLeaves = false; // We'll already have most transactions
2639  }
2640  else
2641  {
2642  if (send_queue_.size() >= Tuning::dropSendQueue)
2643  {
2644  JLOG(p_journal_.debug()) << "GetLedger: Large send queue";
2645  return;
2646  }
2647 
2648  if (app_.getFeeTrack().isLoadedLocal() && ! cluster())
2649  {
2650  JLOG(p_journal_.debug()) << "GetLedger: Too busy";
2651  return;
2652  }
2653 
2654  // Figure out what ledger they want
2655  JLOG(p_journal_.trace()) << "GetLedger: Received";
2656 
2657  if (packet.has_ledgerhash ())
2658  {
2659  if (! stringIsUint256Sized (packet.ledgerhash()))
2660  {
2661  charge (Resource::feeInvalidRequest);
2662  JLOG(p_journal_.warn()) << "GetLedger: Invalid request";
2663  return;
2664  }
2665 
2666  uint256 const ledgerhash {packet.ledgerhash()};
2667  logMe += "LedgerHash:";
2668  logMe += to_string (ledgerhash);
2669  ledger = app_.getLedgerMaster ().getLedgerByHash (ledgerhash);
2670 
2671  if (!ledger && packet.has_ledgerseq())
2672  {
2673  if (auto shardStore = app_.getShardStore())
2674  {
2675  auto seq = packet.ledgerseq();
2676  if (seq >= shardStore->earliestLedgerSeq())
2677  ledger = shardStore->fetchLedger(ledgerhash, seq);
2678  }
2679  }
2680 
2681  if (!ledger)
2682  {
2683  JLOG(p_journal_.trace()) <<
2684  "GetLedger: Don't have " << ledgerhash;
2685  }
2686 
2687  if (!ledger && (packet.has_querytype () &&
2688  !packet.has_requestcookie ()))
2689  {
2690  // We don't have the requested ledger
2691  // Search for a peer who might
2692  auto const v = getPeerWithLedger(overlay_, ledgerhash,
2693  packet.has_ledgerseq() ? packet.ledgerseq() : 0, this);
2694  if (!v)
2695  {
2696  JLOG(p_journal_.trace()) << "GetLedger: Cannot route";
2697  return;
2698  }
2699 
2700  packet.set_requestcookie (id ());
2701  v->send (std::make_shared<Message>(
2702  packet, protocol::mtGET_LEDGER));
2703  JLOG(p_journal_.debug()) << "GetLedger: Request routed";
2704  return;
2705  }
2706  }
2707  else if (packet.has_ledgerseq ())
2708  {
2709  if (packet.ledgerseq() <
2710  app_.getLedgerMaster().getEarliestFetch())
2711  {
2712  JLOG(p_journal_.debug()) << "GetLedger: Early ledger request";
2713  return;
2714  }
2715  ledger = app_.getLedgerMaster ().getLedgerBySeq (
2716  packet.ledgerseq ());
2717  if (! ledger)
2718  {
2719  JLOG(p_journal_.debug()) <<
2720  "GetLedger: Don't have " << packet.ledgerseq ();
2721  }
2722  }
2723  else if (packet.has_ltype () && (packet.ltype () == protocol::ltCLOSED) )
2724  {
2725  ledger = app_.getLedgerMaster ().getClosedLedger ();
2726  assert(! ledger->open());
2727  // VFALCO ledger should never be null!
2728  // VFALCO How can the closed ledger be open?
2729  #if 0
2730  if (ledger && ledger->info().open)
2731  ledger = app_.getLedgerMaster ().getLedgerBySeq (
2732  ledger->info().seq - 1);
2733  #endif
2734  }
2735  else
2736  {
2737  charge (Resource::feeInvalidRequest);
2738  JLOG(p_journal_.warn()) << "GetLedger: Unknown request";
2739  return;
2740  }
2741 
2742  if ((!ledger) || (packet.has_ledgerseq () && (
2743  packet.ledgerseq () != ledger->info().seq)))
2744  {
2745  charge (Resource::feeInvalidRequest);
2746 
2747  if (ledger)
2748  {
2749  JLOG(p_journal_.warn()) << "GetLedger: Invalid sequence";
2750  }
2751  return;
2752  }
2753 
2754  if (!packet.has_ledgerseq() && (ledger->info().seq <
2755  app_.getLedgerMaster().getEarliestFetch()))
2756  {
2757  JLOG(p_journal_.debug()) << "GetLedger: Early ledger request";
2758  return;
2759  }
2760 
2761  // Fill out the reply
2762  auto const lHash = ledger->info().hash;
2763  reply.set_ledgerhash (lHash.begin (), lHash.size ());
2764  reply.set_ledgerseq (ledger->info().seq);
2765  reply.set_type (packet.itype ());
2766 
2767  if (packet.itype () == protocol::liBASE)
2768  {
2769  // they want the ledger base data
2770  JLOG(p_journal_.trace()) << "GetLedger: Base data";
2771  Serializer nData (128);
2772  addRaw(ledger->info(), nData);
2773  reply.add_nodes ()->set_nodedata (
2774  nData.getDataPtr (), nData.getLength ());
2775 
2776  auto const& stateMap = ledger->stateMap ();
2777  if (stateMap.getHash() != beast::zero)
2778  {
2779  // return account state root node if possible
2780  Serializer rootNode (768);
2781  if (stateMap.getRootNode(rootNode, snfWIRE))
2782  {
2783  reply.add_nodes ()->set_nodedata (
2784  rootNode.getDataPtr (), rootNode.getLength ());
2785 
2786  if (ledger->info().txHash != beast::zero)
2787  {
2788  auto const& txMap = ledger->txMap ();
2789 
2790  if (txMap.getHash() != beast::zero)
2791  {
2792  rootNode.erase ();
2793 
2794  if (txMap.getRootNode (rootNode, snfWIRE))
2795  reply.add_nodes ()->set_nodedata (
2796  rootNode.getDataPtr (),
2797  rootNode.getLength ());
2798  }
2799  }
2800  }
2801  }
2802 
2803  auto oPacket = std::make_shared<Message> (
2804  reply, protocol::mtLEDGER_DATA);
2805  send (oPacket);
2806  return;
2807  }
2808 
2809  if (packet.itype () == protocol::liTX_NODE)
2810  {
2811  map = &ledger->txMap ();
2812  logMe += " TX:";
2813  logMe += to_string (map->getHash ());
2814  }
2815  else if (packet.itype () == protocol::liAS_NODE)
2816  {
2817  map = &ledger->stateMap ();
2818  logMe += " AS:";
2819  logMe += to_string (map->getHash ());
2820  }
2821  }
2822 
2823  if (!map || (packet.nodeids_size () == 0))
2824  {
2825  JLOG(p_journal_.warn()) <<
2826  "GetLedger: Can't find map or empty request";
2827  charge (Resource::feeInvalidRequest);
2828  return;
2829  }
2830 
2831  JLOG(p_journal_.trace()) << "GetLedger: " << logMe;
2832 
2833  auto const depth =
2834  packet.has_querydepth() ?
2835  (std::min(packet.querydepth(), 3u)) :
2836  (isHighLatency() ? 2 : 1);
2837 
2838  for (int i = 0;
2839  (i < packet.nodeids().size() &&
2840  (reply.nodes().size() < Tuning::maxReplyNodes)); ++i)
2841  {
2842  SHAMapNodeID mn (packet.nodeids (i).data (), packet.nodeids (i).size ());
2843 
2844  if (!mn.isValid ())
2845  {
2846  JLOG(p_journal_.warn()) << "GetLedger: Invalid node " << logMe;
2847  charge (Resource::feeInvalidRequest);
2848  return;
2849  }
2850 
2851  std::vector<SHAMapNodeID> nodeIDs;
2852  std::vector< Blob > rawNodes;
2853 
2854  try
2855  {
2856  if (map->getNodeFat(mn, nodeIDs, rawNodes, fatLeaves, depth))
2857  {
2858  assert (nodeIDs.size () == rawNodes.size ());
2859  JLOG(p_journal_.trace()) <<
2860  "GetLedger: getNodeFat got " << rawNodes.size () << " nodes";
2861  std::vector<SHAMapNodeID>::iterator nodeIDIterator;
2862  std::vector< Blob >::iterator rawNodeIterator;
2863 
2864  for (nodeIDIterator = nodeIDs.begin (),
2865  rawNodeIterator = rawNodes.begin ();
2866  nodeIDIterator != nodeIDs.end ();
2867  ++nodeIDIterator, ++rawNodeIterator)
2868  {
2869  Serializer nID (33);
2870  nodeIDIterator->addIDRaw (nID);
2871  protocol::TMLedgerNode* node = reply.add_nodes ();
2872  node->set_nodeid (nID.getDataPtr (), nID.getLength ());
2873  node->set_nodedata (&rawNodeIterator->front (),
2874  rawNodeIterator->size ());
2875  }
2876  }
2877  else
2878  {
2879  JLOG(p_journal_.warn()) <<
2880  "GetLedger: getNodeFat returns false";
2881  }
2882  }
2883  catch (std::exception&)
2884  {
2885  std::string info;
2886 
2887  if (packet.itype () == protocol::liTS_CANDIDATE)
2888  info = "TS candidate";
2889  else if (packet.itype () == protocol::liBASE)
2890  info = "Ledger base";
2891  else if (packet.itype () == protocol::liTX_NODE)
2892  info = "TX node";
2893  else if (packet.itype () == protocol::liAS_NODE)
2894  info = "AS node";
2895 
2896  if (!packet.has_ledgerhash ())
2897  info += ", no hash specified";
2898 
2899  JLOG(p_journal_.warn()) <<
2900  "getNodeFat( " << mn << ") throws exception: " << info;
2901  }
2902  }
2903 
2904  JLOG(p_journal_.info()) <<
2905  "Got request for " << packet.nodeids().size() << " nodes at depth " <<
2906  depth << ", return " << reply.nodes().size() << " nodes";
2907 
2908  auto oPacket = std::make_shared<Message> (
2909  reply, protocol::mtLEDGER_DATA);
2910  send (oPacket);
2911 }
2912 
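// Forwards received transaction set data to the InboundTransactions collector.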
2913 void
2914 PeerImp::peerTXData (uint256 const& hash,
2915  std::shared_ptr<protocol::TMLedgerData> const& pPacket,
2916  beast::Journal journal)
2917 {
2918  app_.getInboundTransactions().gotData (hash, shared_from_this(), pPacket);
2919 }
2920 
2921 int
2922 PeerImp::getScore (bool haveItem) const
2923 {
2924  // Random component of score, used to break ties and avoid
2925  // overloading the "best" peer
2926  static const int spRandomMax = 9999;
2927 
2928  // Score for being very likely to have the thing we are
2929  // looking for; should be roughly spRandomMax
2930  static const int spHaveItem = 10000;
2931 
2932  // Score reduction for each millisecond of latency; should
2933  // be roughly spRandomMax divided by the maximum reasonable
2934  // latency
2935  static const int spLatency = 30;
2936 
2937  // Penalty for unknown latency; should be roughly spRandomMax
2938  static const int spNoLatency = 8000;
2939 
2940  int score = rand_int(spRandomMax);
2941 
2942  if (haveItem)
2943  score += spHaveItem;
2944 
2945  boost::optional<std::chrono::milliseconds> latency;
2946  {
2947  std::lock_guard sl (recentLock_);
2948  latency = latency_;
2949  }
2950 
2951  if (latency)
2952  score -= latency->count() * spLatency;
2953  else
2954  score -= spNoLatency;
2955 
2956  return score;
2957 }
2958 
2959 bool
2960 PeerImp::isHighLatency() const
2961 {
2962  std::lock_guard sl (recentLock_);
2963  return latency_ >= Tuning::peerHighLatency;
2964 }
2965 
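// Records a message's size and refreshes the rolling per-second byte
// average, recomputed at most once per second.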
2966 void
2967 PeerImp::Metrics::add_message(std::uint64_t bytes)
2968 {
2969  using namespace std::chrono_literals;
2970  std::unique_lock lock{ mutex_ };
2971 
2972  totalBytes_ += bytes;
2973  accumBytes_ += bytes;
2974  auto const timeElapsed = clock_type::now() - intervalStart_;
2975  auto const timeElapsedInSecs = std::chrono::duration_cast<std::chrono::seconds>(timeElapsed);
2976 
2977  if (timeElapsedInSecs >= 1s)
2978  {
2979  auto const avgBytes = accumBytes_ / timeElapsedInSecs.count();
2980  rollingAvg_.push_back(avgBytes);
2981 
2982  auto const totalBytes = std::accumulate(rollingAvg_.begin(), rollingAvg_.end(), 0ull);
2983  rollingAvgBytes_ = totalBytes / rollingAvg_.size();
2984 
2985  intervalStart_ = clock_type::now();
2986  accumBytes_ = 0;
2987  }
2988 }
2989 
2990 std::uint64_t
2991 PeerImp::Metrics::average_bytes() const {
2992  std::shared_lock lock{ mutex_ };
2993  return rollingAvgBytes_;
2994 }
2995 
2996 std::uint64_t
2997 PeerImp::Metrics::total_bytes() const {
2998  std::shared_lock lock{ mutex_ };
2999  return totalBytes_;
3000 }
3001 
3002 
3003 } // ripple