rippled
InboundLedger.cpp
1 //------------------------------------------------------------------------------
2 /*
3  This file is part of rippled: https://github.com/ripple/rippled
4  Copyright (c) 2012, 2013 Ripple Labs Inc.
5 
6  Permission to use, copy, modify, and/or distribute this software for any
7  purpose with or without fee is hereby granted, provided that the above
8  copyright notice and this permission notice appear in all copies.
9 
10  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 //==============================================================================
19 
20 #include <ripple/app/ledger/AccountStateSF.h>
21 #include <ripple/app/ledger/InboundLedger.h>
22 #include <ripple/app/ledger/InboundLedgers.h>
23 #include <ripple/app/ledger/LedgerMaster.h>
24 #include <ripple/app/ledger/TransactionStateSF.h>
25 #include <ripple/app/main/Application.h>
26 #include <ripple/app/misc/NetworkOPs.h>
27 #include <ripple/basics/Log.h>
28 #include <ripple/core/JobQueue.h>
29 #include <ripple/nodestore/DatabaseShard.h>
30 #include <ripple/overlay/Overlay.h>
31 #include <ripple/protocol/HashPrefix.h>
32 #include <ripple/protocol/jss.h>
33 #include <ripple/resource/Fees.h>
34 #include <ripple/shamap/SHAMapNodeID.h>
35 
36 #include <algorithm>
37 
38 namespace ripple {
39 
40 using namespace std::chrono_literals;
41 
42 enum {
43  // Number of peers to start with
44  peerCountStart = 5
45 
46  // Number of peers to add on a timeout
47  ,
48  peerCountAdd = 3
49 
50  // how many timeouts before we give up
51  ,
52  ledgerTimeoutRetriesMax = 6
53 
54  // how many timeouts before we get aggressive
55  ,
56  ledgerBecomeAggressiveThreshold = 4
57 
58  // Number of nodes to find initially
59  ,
60  missingNodesFind = 256
61 
62  // Number of nodes to request for a reply
63  ,
64  reqNodesReply = 128
65 
66  // Number of nodes to request blindly
67  ,
68  reqNodes = 12
69 };
70 
71 // Timeout interval for each ledger acquisition attempt, in milliseconds
72 auto constexpr ledgerAcquireTimeout = 2500ms;
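// Worked example (editorial sketch; the arithmetic assumes the constant
// values reconstructed above): a stalled acquisition times out every
// 2.5 seconds. onTimer() switches to aggressive fetching (qtINDIRECT
// queries and by-hash object requests) once timeouts_ exceeds
// ledgerBecomeAggressiveThreshold, and abandons the acquisition once
// timeouts_ exceeds ledgerTimeoutRetriesMax, i.e. after roughly
// 2500ms * 6 = 15 seconds without completion.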
73 
74 InboundLedger::InboundLedger(
75  Application& app,
76  uint256 const& hash,
77  std::uint32_t seq,
78  Reason reason,
79  clock_type& clock,
80  std::unique_ptr<PeerSet> peerSet)
81  : TimeoutCounter(
82  app,
83  hash,
84  ledgerAcquireTimeout,
85  {jtLEDGER_DATA, "InboundLedger", 5},
86  app.journal("InboundLedger"))
87  , m_clock(clock)
88  , mHaveHeader(false)
89  , mHaveState(false)
90  , mHaveTransactions(false)
91  , mSignaled(false)
92  , mByHash(true)
93  , mSeq(seq)
94  , mReason(reason)
95  , mReceiveDispatched(false)
96  , mPeerSet(std::move(peerSet))
97 {
98  JLOG(journal_.trace()) << "Acquiring ledger " << hash_;
99  touch();
100 }
101 
102 void
103 InboundLedger::init(ScopedLockType& collectionLock)
104 {
105  ScopedLockType sl(mtx_);
106  collectionLock.unlock();
107 
108  tryDB(app_.getNodeFamily().db());
109  if (failed_)
110  return;
111 
112  if (!complete_)
113  {
114  auto shardStore = app_.getShardStore();
115  if (mReason == Reason::SHARD)
116  {
117  if (!shardStore)
118  {
119  JLOG(journal_.error())
120  << "Acquiring shard with no shard store available";
121  failed_ = true;
122  return;
123  }
124 
125  mHaveHeader = false;
126  mHaveTransactions = false;
127  mHaveState = false;
128  mLedger.reset();
129 
130  tryDB(app_.getShardFamily()->db());
131  if (failed_)
132  return;
133  }
134  else if (shardStore && mSeq >= shardStore->earliestLedgerSeq())
135  {
136  if (auto l = shardStore->fetchLedger(hash_, mSeq))
137  {
138  mHaveHeader = true;
139  mHaveTransactions = true;
140  mHaveState = true;
141  complete_ = true;
142  mLedger = std::move(l);
143  }
144  }
145  }
146  if (!complete_)
147  {
148  addPeers();
149  queueJob(sl);
150  return;
151  }
152 
153  JLOG(journal_.debug()) << "Acquiring ledger we already have in "
154  << " local store. " << hash_;
155  mLedger->setImmutable(app_.config());
156 
157  if (mReason == Reason::HISTORY || mReason == Reason::SHARD)
158  return;
159 
160  app_.getLedgerMaster().storeLedger(mLedger);
161 
162  // Check if this could be a newer fully-validated ledger
163  if (mReason == Reason::CONSENSUS)
164  app_.getInboundLedgers().onLedgerFetched();
165 }
166 
167 std::size_t
168 InboundLedger::getPeerCount() const
169 {
170  auto const& peerIds = mPeerSet->getPeerIds();
171  return std::count_if(peerIds.begin(), peerIds.end(), [this](auto id) {
172  return (app_.overlay().findPeerByShortID(id) != nullptr);
173  });
174 }
175 
176 void
177 InboundLedger::update(std::uint32_t seq)
178 {
179  ScopedLockType sl(mtx_);
180 
181  // If we didn't know the sequence number, but now do, save it
182  if ((seq != 0) && (mSeq == 0))
183  mSeq = seq;
184 
185  // Prevent this from being swept
186  touch();
187 }
188 
189 bool
190 InboundLedger::checkLocal()
191 {
192  ScopedLockType sl(mtx_);
193  if (!isDone())
194  {
195  if (mLedger)
196  tryDB(mLedger->stateMap().family().db());
197  else if (mReason == Reason::SHARD)
198  tryDB(app_.getShardFamily()->db());
199  else
200  tryDB(app_.getNodeFamily().db());
201  if (failed_ || complete_)
202  {
203  done();
204  return true;
205  }
206  }
207  return false;
208 }
209 
210 InboundLedger::~InboundLedger()
211 {
212  // Save any received AS data not processed. It could be useful
213  // for populating a different ledger
214  for (auto& entry : mReceivedData)
215  {
216  if (entry.second->type() == protocol::liAS_NODE)
217  app_.getInboundLedgers().gotStaleData(entry.second);
218  }
219  if (!isDone())
220  {
221  JLOG(journal_.debug())
222  << "Acquire " << hash_ << " abort "
223  << ((timeouts_ == 0) ? std::string()
224  : (std::string("timeouts:") +
225  std::to_string(timeouts_) + " "))
226  << mStats.get();
227  }
228 }
229 
230 static std::vector<uint256>
231 neededHashes(
232  uint256 const& root,
233  SHAMap& map,
234  int max,
235  SHAMapSyncFilter* filter)
236 {
237  std::vector<uint256> ret;
238 
239  if (!root.isZero())
240  {
241  if (map.getHash().isZero())
242  ret.push_back(root);
243  else
244  {
245  auto mn = map.getMissingNodes(max, filter);
246  ret.reserve(mn.size());
247  for (auto const& n : mn)
248  ret.push_back(n.second);
249  }
250  }
251 
252  return ret;
253 }
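// Note (editorial): a zero map hash means the map has no root yet, so the
// root hash itself is reported as the single needed node; otherwise the
// SHAMap is walked for up to `max` concrete gaps via getMissingNodes().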
254 
255 std::vector<uint256>
256 InboundLedger::neededTxHashes(int max, SHAMapSyncFilter* filter) const
257 {
258  return neededHashes(mLedger->info().txHash, mLedger->txMap(), max, filter);
259 }
260 
261 std::vector<uint256>
262 InboundLedger::neededStateHashes(int max, SHAMapSyncFilter* filter) const
263 {
264  return neededHashes(
265  mLedger->info().accountHash, mLedger->stateMap(), max, filter);
266 }
267 
267 
268 LedgerInfo
269 deserializeHeader(Slice data, bool hasHash)
270 {
271  SerialIter sit(data.data(), data.size());
272 
273  LedgerInfo info;
274 
275  info.seq = sit.get32();
276  info.drops = sit.get64();
277  info.parentHash = sit.get256();
278  info.txHash = sit.get256();
279  info.accountHash = sit.get256();
280  info.parentCloseTime =
281  NetClock::time_point{NetClock::duration{sit.get32()}};
282  info.closeTime = NetClock::time_point{NetClock::duration{sit.get32()}};
283  info.closeTimeResolution = NetClock::duration{sit.get8()};
284  info.closeFlags = sit.get8();
285 
286  if (hasHash)
287  info.hash = sit.get256();
288 
289  return info;
290 }
291 
292 LedgerInfo
293 deserializePrefixedHeader(Slice data, bool hasHash)
294 {
295  return deserializeHeader(data + 4, hasHash);
296 }
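// Byte layout consumed by deserializeHeader() above, derived from its
// get32/get64/get256/get8 calls (the offsets are editorial annotations,
// not part of the original file):
//
//   offset  size  field
//        0     4  seq
//        4     8  drops
//       12    32  parentHash
//       44    32  txHash
//       76    32  accountHash
//      108     4  parentCloseTime
//      112     4  closeTime
//      116     1  closeTimeResolution
//      117     1  closeFlags
//    [118]  [32]  hash (present only when hasHash is true)
//
// The fixed fields total 118 bytes (150 with the optional hash).
// deserializePrefixedHeader() additionally skips a 4-byte HashPrefix,
// which is why it forwards data + 4.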
297 
298 // See how much of the ledger data is stored locally
299 // Data found in a fetch pack will be stored
300 void
301 InboundLedger::tryDB(NodeStore::Database& srcDB)
302 {
303  if (!mHaveHeader)
304  {
305  auto makeLedger = [&, this](Blob const& data) {
306  JLOG(journal_.trace()) << "Ledger header found in fetch pack";
307  mLedger = std::make_shared<Ledger>(
308  deserializePrefixedHeader(makeSlice(data)),
309  app_.config(),
310  mReason == Reason::SHARD ? *app_.getShardFamily()
311  : app_.getNodeFamily());
312  if (mLedger->info().hash != hash_ ||
313  (mSeq != 0 && mSeq != mLedger->info().seq))
314  {
315  // We know for a fact the ledger can never be acquired
316  JLOG(journal_.warn())
317  << "hash " << hash_ << " seq " << std::to_string(mSeq)
318  << " cannot be a ledger";
319  mLedger.reset();
320  failed_ = true;
321  }
322  };
323 
324  // Try to fetch the ledger header from the DB
325  if (auto nodeObject = srcDB.fetchNodeObject(hash_, mSeq))
326  {
327  JLOG(journal_.trace()) << "Ledger header found in local store";
328 
329  makeLedger(nodeObject->getData());
330  if (failed_)
331  return;
332 
333  // Store the ledger header if the source and destination differ
334  auto& dstDB{mLedger->stateMap().family().db()};
335  if (std::addressof(dstDB) != std::addressof(srcDB))
336  {
337  Blob blob{nodeObject->getData()};
338  dstDB.store(
339  hotLEDGER, std::move(blob), hash_, mLedger->info().seq);
340  }
341  }
342  else
343  {
344  // Try to fetch the ledger header from a fetch pack
345  auto data = app_.getLedgerMaster().getFetchPack(hash_);
346  if (!data)
347  return;
348 
349  JLOG(journal_.trace()) << "Ledger header found in fetch pack";
350 
351  makeLedger(*data);
352  if (failed_)
353  return;
354 
355  // Store the ledger header in the ledger's database
356  mLedger->stateMap().family().db().store(
357  hotLEDGER, std::move(*data), hash_, mLedger->info().seq);
358  }
359 
360  if (mSeq == 0)
361  mSeq = mLedger->info().seq;
362  mLedger->stateMap().setLedgerSeq(mSeq);
363  mLedger->txMap().setLedgerSeq(mSeq);
364  mHaveHeader = true;
365  }
366 
367  if (!mHaveTransactions)
368  {
369  if (mLedger->info().txHash.isZero())
370  {
371  JLOG(journal_.trace()) << "No TXNs to fetch";
372  mHaveTransactions = true;
373  }
374  else
375  {
376  TransactionStateSF filter(
377  mLedger->txMap().family().db(), app_.getLedgerMaster());
378  if (mLedger->txMap().fetchRoot(
379  SHAMapHash{mLedger->info().txHash}, &filter))
380  {
381  if (neededTxHashes(1, &filter).empty())
382  {
383  JLOG(journal_.trace()) << "Had full txn map locally";
384  mHaveTransactions = true;
385  }
386  }
387  }
388  }
389 
390  if (!mHaveState)
391  {
392  if (mLedger->info().accountHash.isZero())
393  {
394  JLOG(journal_.fatal())
395  << "We are acquiring a ledger with a zero account hash";
396  failed_ = true;
397  return;
398  }
399  AccountStateSF filter(
400  mLedger->stateMap().family().db(), app_.getLedgerMaster());
401  if (mLedger->stateMap().fetchRoot(
402  SHAMapHash{mLedger->info().accountHash}, &filter))
403  {
404  if (neededStateHashes(1, &filter).empty())
405  {
406  JLOG(journal_.trace()) << "Had full AS map locally";
407  mHaveState = true;
408  }
409  }
410  }
411 
412  if (mHaveTransactions && mHaveState)
413  {
414  JLOG(journal_.debug()) << "Had everything locally";
415  complete_ = true;
416  mLedger->setImmutable(app_.config());
417  }
418 }
419 
420 /** Called with a lock by the PeerSet when the timer expires
421 */
422 void
423 InboundLedger::onTimer(bool wasProgress, ScopedLockType&)
424 {
425  mRecentNodes.clear();
426 
427  if (isDone())
428  {
429  JLOG(journal_.info()) << "Already done " << hash_;
430  return;
431  }
432 
433  if (timeouts_ > ledgerTimeoutRetriesMax)
434  {
435  if (mSeq != 0)
436  {
437  JLOG(journal_.warn())
438  << timeouts_ << " timeouts for ledger " << mSeq;
439  }
440  else
441  {
442  JLOG(journal_.warn())
443  << timeouts_ << " timeouts for ledger " << hash_;
444  }
445  failed_ = true;
446  done();
447  return;
448  }
449 
450  if (!wasProgress)
451  {
452  checkLocal();
453 
454  mByHash = true;
455 
456  std::size_t pc = getPeerCount();
457  JLOG(journal_.debug())
458  << "No progress(" << pc << ") for ledger " << hash_;
459 
460  // addPeers triggers if the reason is not HISTORY
461  // So if the reason IS HISTORY, need to trigger after we add
462  // otherwise, we need to trigger before we add
463  // so each peer gets triggered once
464  if (mReason != Reason::HISTORY)
465  trigger(nullptr, TriggerReason::timeout);
466  addPeers();
467  if (mReason == Reason::HISTORY)
468  trigger(nullptr, TriggerReason::timeout);
469  }
470 }
471 
472 /** Add more peers to the set, if possible */
473 void
474 InboundLedger::addPeers()
475 {
476  mPeerSet->addPeers(
477  (getPeerCount() == 0) ? peerCountStart : peerCountAdd,
478  [this](auto peer) { return peer->hasLedger(hash_, mSeq); },
479  [this](auto peer) {
480  // For historical nodes, do not trigger too soon
481  // since a fetch pack is probably coming
482  if (mReason != Reason::HISTORY)
483  trigger(peer, TriggerReason::added);
484  });
485 }
486 
487 std::weak_ptr<TimeoutCounter>
488 InboundLedger::pmDowncast()
489 {
490  return shared_from_this();
491 }
492 
493 void
494 InboundLedger::done()
495 {
496  if (mSignaled)
497  return;
498 
499  mSignaled = true;
500  touch();
501 
502  JLOG(journal_.debug()) << "Acquire " << hash_ << (failed_ ? " fail " : " ")
503  << ((timeouts_ == 0)
504  ? std::string()
505  : (std::string("timeouts:") +
506  std::to_string(timeouts_) + " "))
507  << mStats.get();
508 
509  assert(complete_ || failed_);
510 
511  if (complete_ && !failed_ && mLedger)
512  {
513  mLedger->setImmutable(app_.config());
514  switch (mReason)
515  {
516  case Reason::SHARD:
517  app_.getShardStore()->setStored(mLedger);
518  [[fallthrough]];
519  case Reason::HISTORY:
520  app_.getLedgerMaster().storeLedger(mLedger);
521  break;
522  default:
523  app_.getLedgerMaster().storeLedger(mLedger);
524  break;
525  }
526  }
527 
528  // We hold the PeerSet lock, so must dispatch
529  app_.getJobQueue().addJob(
530  jtLEDGER_DATA, "AcquisitionDone", [self = shared_from_this()](Job&) {
531  if (self->complete_ && !self->failed_)
532  {
533  self->app_.getLedgerMaster().checkAccept(self->getLedger());
534  self->app_.getLedgerMaster().tryAdvance();
535  }
536  else
537  self->app_.getInboundLedgers().logFailure(
538  self->hash_, self->mSeq);
539  });
540 }
541 
542 /** Request more nodes, perhaps from a specific peer
543 */
544 void
545 InboundLedger::trigger(std::shared_ptr<Peer> const& peer, TriggerReason reason)
546 {
547  ScopedLockType sl(mtx_);
548 
549  if (isDone())
550  {
551  JLOG(journal_.debug())
552  << "Trigger on ledger: " << hash_ << (complete_ ? " completed" : "")
553  << (failed_ ? " failed" : "");
554  return;
555  }
556 
557  if (auto stream = journal_.trace())
558  {
559  if (peer)
560  stream << "Trigger acquiring ledger " << hash_ << " from " << peer;
561  else
562  stream << "Trigger acquiring ledger " << hash_;
563 
564  if (complete_ || failed_)
565  stream << "complete=" << complete_ << " failed=" << failed_;
566  else
567  stream << "header=" << mHaveHeader << " tx=" << mHaveTransactions
568  << " as=" << mHaveState;
569  }
570 
571  if (!mHaveHeader)
572  {
573  tryDB(
574  mReason == Reason::SHARD ? app_.getShardFamily()->db()
575  : app_.getNodeFamily().db());
576  if (failed_)
577  {
578  JLOG(journal_.warn()) << " failed local for " << hash_;
579  return;
580  }
581  }
582 
583  protocol::TMGetLedger tmGL;
584  tmGL.set_ledgerhash(hash_.begin(), hash_.size());
585 
586  if (timeouts_ != 0)
587  {
588  // Be more aggressive if we've timed out at least once
589  tmGL.set_querytype(protocol::qtINDIRECT);
590 
591  if (!progress_ && !failed_ && mByHash &&
592  (timeouts_ > ledgerBecomeAggressiveThreshold))
593  {
594  auto need = getNeededHashes();
595 
596  if (!need.empty())
597  {
598  protocol::TMGetObjectByHash tmBH;
599  bool typeSet = false;
600  tmBH.set_query(true);
601  tmBH.set_ledgerhash(hash_.begin(), hash_.size());
602  for (auto const& p : need)
603  {
604  JLOG(journal_.warn()) << "Want: " << p.second;
605 
606  if (!typeSet)
607  {
608  tmBH.set_type(p.first);
609  typeSet = true;
610  }
611 
612  if (p.first == tmBH.type())
613  {
614  protocol::TMIndexedObject* io = tmBH.add_objects();
615  io->set_hash(p.second.begin(), p.second.size());
616  if (mSeq != 0)
617  io->set_ledgerseq(mSeq);
618  }
619  }
620 
621  auto packet =
622  std::make_shared<Message>(tmBH, protocol::mtGET_OBJECTS);
623  auto const& peerIds = mPeerSet->getPeerIds();
624  std::for_each(
625  peerIds.begin(), peerIds.end(), [this, &packet](auto id) {
626  if (auto p = app_.overlay().findPeerByShortID(id))
627  {
628  mByHash = false;
629  p->send(packet);
630  }
631  });
632  }
633  else
634  {
635  JLOG(journal_.info())
636  << "getNeededHashes says acquire is complete";
637  mHaveHeader = true;
638  mHaveTransactions = true;
639  mHaveState = true;
640  complete_ = true;
641  }
642  }
643  }
644 
645  // We can't do much without the header data because we don't know the
646  // state or transaction root hashes.
647  if (!mHaveHeader && !failed_)
648  {
649  tmGL.set_itype(protocol::liBASE);
650  if (mSeq != 0)
651  tmGL.set_ledgerseq(mSeq);
652  JLOG(journal_.trace()) << "Sending header request to "
653  << (peer ? "selected peer" : "all peers");
654  mPeerSet->sendRequest(tmGL, peer);
655  return;
656  }
657 
658  if (mLedger)
659  tmGL.set_ledgerseq(mLedger->info().seq);
660 
661  if (reason != TriggerReason::reply)
662  {
663  // If we're querying blind, don't query deep
664  tmGL.set_querydepth(0);
665  }
666  else if (peer && peer->isHighLatency())
667  {
668  // If the peer has high latency, query extra deep
669  tmGL.set_querydepth(2);
670  }
671  else
672  tmGL.set_querydepth(1);
673 
674  // Get the state data first because it's the most likely to be useful
675  // if we wind up abandoning this fetch.
676  if (mHaveHeader && !mHaveState && !failed_)
677  {
678  assert(mLedger);
679 
680  if (!mLedger->stateMap().isValid())
681  {
682  failed_ = true;
683  }
684  else if (mLedger->stateMap().getHash().isZero())
685  {
686  // we need the root node
687  tmGL.set_itype(protocol::liAS_NODE);
688  *tmGL.add_nodeids() = SHAMapNodeID().getRawString();
689  JLOG(journal_.trace()) << "Sending AS root request to "
690  << (peer ? "selected peer" : "all peers");
691  mPeerSet->sendRequest(tmGL, peer);
692  return;
693  }
694  else
695  {
696  AccountStateSF filter(
697  mLedger->stateMap().family().db(), app_.getLedgerMaster());
698 
699  // Release the lock while we process the large state map
700  sl.unlock();
701  auto nodes =
702  mLedger->stateMap().getMissingNodes(missingNodesFind, &filter);
703  sl.lock();
704 
705  // Make sure nothing happened while we released the lock
706  if (!failed_ && !complete_ && !mHaveState)
707  {
708  if (nodes.empty())
709  {
710  if (!mLedger->stateMap().isValid())
711  failed_ = true;
712  else
713  {
714  mHaveState = true;
715 
716  if (mHaveTransactions)
717  complete_ = true;
718  }
719  }
720  else
721  {
722  filterNodes(nodes, reason);
723 
724  if (!nodes.empty())
725  {
726  tmGL.set_itype(protocol::liAS_NODE);
727  for (auto const& id : nodes)
728  {
729  *(tmGL.add_nodeids()) = id.first.getRawString();
730  }
731 
732  JLOG(journal_.trace())
733  << "Sending AS node request (" << nodes.size()
734  << ") to "
735  << (peer ? "selected peer" : "all peers");
736  mPeerSet->sendRequest(tmGL, peer);
737  return;
738  }
739  else
740  {
741  JLOG(journal_.trace()) << "All AS nodes filtered";
742  }
743  }
744  }
745  }
746  }
747 
748  if (mHaveHeader && !mHaveTransactions && !failed_)
749  {
750  assert(mLedger);
751 
752  if (!mLedger->txMap().isValid())
753  {
754  failed_ = true;
755  }
756  else if (mLedger->txMap().getHash().isZero())
757  {
758  // we need the root node
759  tmGL.set_itype(protocol::liTX_NODE);
760  *(tmGL.add_nodeids()) = SHAMapNodeID().getRawString();
761  JLOG(journal_.trace()) << "Sending TX root request to "
762  << (peer ? "selected peer" : "all peers");
763  mPeerSet->sendRequest(tmGL, peer);
764  return;
765  }
766  else
767  {
768  TransactionStateSF filter(
769  mLedger->txMap().family().db(), app_.getLedgerMaster());
770 
771  auto nodes =
772  mLedger->txMap().getMissingNodes(missingNodesFind, &filter);
773 
774  if (nodes.empty())
775  {
776  if (!mLedger->txMap().isValid())
777  failed_ = true;
778  else
779  {
780  mHaveTransactions = true;
781 
782  if (mHaveState)
783  complete_ = true;
784  }
785  }
786  else
787  {
788  filterNodes(nodes, reason);
789 
790  if (!nodes.empty())
791  {
792  tmGL.set_itype(protocol::liTX_NODE);
793  for (auto const& n : nodes)
794  {
795  *(tmGL.add_nodeids()) = n.first.getRawString();
796  }
797  JLOG(journal_.trace())
798  << "Sending TX node request (" << nodes.size()
799  << ") to " << (peer ? "selected peer" : "all peers");
800  mPeerSet->sendRequest(tmGL, peer);
801  return;
802  }
803  else
804  {
805  JLOG(journal_.trace()) << "All TX nodes filtered";
806  }
807  }
808  }
809  }
810 
811  if (complete_ || failed_)
812  {
813  JLOG(journal_.debug())
814  << "Done:" << (complete_ ? " complete" : "")
815  << (failed_ ? " failed " : " ") << mLedger->info().seq;
816  sl.unlock();
817  done();
818  }
819 }
820 
821 void
822 InboundLedger::filterNodes(
823  std::vector<std::pair<SHAMapNodeID, uint256>>& nodes,
824  TriggerReason reason)
825 {
826  // Sort nodes so that the ones we haven't recently
827  // requested come before the ones we have.
828  auto dup = std::stable_partition(
829  nodes.begin(), nodes.end(), [this](auto const& item) {
830  return mRecentNodes.count(item.second) == 0;
831  });
832 
833  // If everything is a duplicate we don't want to send
834  // any query at all except on a timeout where we need
835  // to query everyone:
836  if (dup == nodes.begin())
837  {
838  JLOG(journal_.trace()) << "filterNodes: all duplicates";
839 
840  if (reason != TriggerReason::timeout)
841  {
842  nodes.clear();
843  return;
844  }
845  }
846  else
847  {
848  JLOG(journal_.trace()) << "filterNodes: pruning duplicates";
849 
850  nodes.erase(dup, nodes.end());
851  }
852 
853  std::size_t const limit =
854  (reason == TriggerReason::reply) ? reqNodesReply : reqNodes;
855 
856  if (nodes.size() > limit)
857  nodes.resize(limit);
858 
859  for (auto const& n : nodes)
860  mRecentNodes.insert(n.second);
861 }
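// Standalone sketch (editorial, not part of this file) of the
// stable_partition trick used above: entries absent from mRecentNodes are
// moved to the front with their relative order preserved, so truncating
// to `limit` keeps fresh requests in preference to repeats.
//
//   std::vector<int> v{1, 2, 3, 4};
//   std::set<int> recent{2, 4};             // stand-in for mRecentNodes
//   auto dup = std::stable_partition(
//       v.begin(), v.end(), [&](int n) { return recent.count(n) == 0; });
//   // v is now {1, 3, 2, 4}; dup points at the first duplicate (2)
//   v.erase(dup, v.end());                  // prune previously requested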
862 
863 /** Take ledger header data
864  Call with a lock
865 */
866 // data must not have hash prefix
867 bool
868 InboundLedger::takeHeader(std::string const& data)
869 {
870  // Return value: true=normal, false=bad data
871  JLOG(journal_.trace()) << "got header acquiring ledger " << hash_;
872 
873  if (complete_ || failed_ || mHaveHeader)
874  return true;
875 
876  auto* f = mReason == Reason::SHARD ? app_.getShardFamily()
877  : &app_.getNodeFamily();
878  mLedger = std::make_shared<Ledger>(
879  deserializeHeader(makeSlice(data)), app_.config(), *f);
880  if (mLedger->info().hash != hash_ ||
881  (mSeq != 0 && mSeq != mLedger->info().seq))
882  {
883  JLOG(journal_.warn())
884  << "Acquire hash mismatch: " << mLedger->info().hash
885  << "!=" << hash_;
886  mLedger.reset();
887  return false;
888  }
889  if (mSeq == 0)
890  mSeq = mLedger->info().seq;
891  mLedger->stateMap().setLedgerSeq(mSeq);
892  mLedger->txMap().setLedgerSeq(mSeq);
893  mHaveHeader = true;
894 
895  Serializer s(data.size() + 4);
896  s.add32(HashPrefix::ledgerMaster);
897  s.addRaw(data.data(), data.size());
898  f->db().store(hotLEDGER, std::move(s.modData()), hash_, mSeq);
899 
900  if (mLedger->info().txHash.isZero())
901  mHaveTransactions = true;
902 
903  if (mLedger->info().accountHash.isZero())
904  mHaveState = true;
905 
906  mLedger->txMap().setSynching();
907  mLedger->stateMap().setSynching();
908 
909  return true;
910 }
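// Note (editorial): the Serializer above prepends HashPrefix::ledgerMaster
// before storing, so the blob written to the node store is exactly the
// form deserializePrefixedHeader() consumes: a 4-byte prefix followed by
// the same raw header bytes that takeHeader() received.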
911 
912 /** Process node data received from a peer
913  Call with a lock
914 */
915 void
916 InboundLedger::receiveNode(protocol::TMLedgerData& packet, SHAMapAddNode& san)
917 {
918  if (!mHaveHeader)
919  {
920  JLOG(journal_.warn()) << "Missing ledger header";
921  san.incInvalid();
922  return;
923  }
924  if (packet.type() == protocol::liTX_NODE)
925  {
926  if (mHaveTransactions || failed_)
927  {
928  san.incDuplicate();
929  return;
930  }
931  }
932  else if (mHaveState || failed_)
933  {
934  san.incDuplicate();
935  return;
936  }
937 
938  auto [map, rootHash, filter] = [&]()
939  -> std::tuple<SHAMap&, SHAMapHash, std::unique_ptr<SHAMapSyncFilter>> {
940  if (packet.type() == protocol::liTX_NODE)
941  return {
942  mLedger->txMap(),
943  SHAMapHash{mLedger->info().txHash},
944  std::make_unique<TransactionStateSF>(
945  mLedger->txMap().family().db(), app_.getLedgerMaster())};
946  return {
947  mLedger->stateMap(),
948  SHAMapHash{mLedger->info().accountHash},
949  std::make_unique<AccountStateSF>(
950  mLedger->stateMap().family().db(), app_.getLedgerMaster())};
951  }();
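// Note (editorial): the immediately-invoked lambda above selects the tx or
// account-state map exactly once, binding the SHAMap& together with the
// root hash it must match and the appropriate sync filter, so the
// node-processing loop below is written only once for both node types.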
952 
953  try
954  {
955  for (auto const& node : packet.nodes())
956  {
957  auto const nodeID = deserializeSHAMapNodeID(node.nodeid());
958 
959  if (!nodeID)
960  {
961  san.incInvalid();
962  return;
963  }
964 
965  if (nodeID->isRoot())
966  san += map.addRootNode(
967  rootHash, makeSlice(node.nodedata()), filter.get());
968  else
969  san += map.addKnownNode(
970  *nodeID, makeSlice(node.nodedata()), filter.get());
971 
972  if (!san.isGood())
973  {
974  JLOG(journal_.warn()) << "Received bad node data";
975  return;
976  }
977  }
978  }
979  catch (std::exception const& e)
980  {
981  JLOG(journal_.error()) << "Received bad node data: " << e.what();
982  san.incInvalid();
983  return;
984  }
985 
986  if (!map.isSynching())
987  {
988  if (packet.type() == protocol::liTX_NODE)
989  mHaveTransactions = true;
990  else
991  mHaveState = true;
992 
993  if (mHaveTransactions && mHaveState)
994  {
995  complete_ = true;
996  done();
997  }
998  }
999 }
1000 
1001 /** Process AS root node received from a peer
1002  Call with a lock
1003 */
1004 bool
1005 InboundLedger::takeAsRootNode(Slice const& data, SHAMapAddNode& san)
1006 {
1007  if (failed_ || mHaveState)
1008  {
1009  san.incDuplicate();
1010  return true;
1011  }
1012 
1013  if (!mHaveHeader)
1014  {
1015  assert(false);
1016  return false;
1017  }
1018 
1019  AccountStateSF filter(
1020  mLedger->stateMap().family().db(), app_.getLedgerMaster());
1021  san += mLedger->stateMap().addRootNode(
1022  SHAMapHash{mLedger->info().accountHash}, data, &filter);
1023  return san.isGood();
1024 }
1025 
1026 /** Process TX root node received from a peer
1027  Call with a lock
1028 */
1029 bool
1030 InboundLedger::takeTxRootNode(Slice const& data, SHAMapAddNode& san)
1031 {
1032  if (failed_ || mHaveTransactions)
1033  {
1034  san.incDuplicate();
1035  return true;
1036  }
1037 
1038  if (!mHaveHeader)
1039  {
1040  assert(false);
1041  return false;
1042  }
1043 
1044  TransactionStateSF filter(
1045  mLedger->txMap().family().db(), app_.getLedgerMaster());
1046  san += mLedger->txMap().addRootNode(
1047  SHAMapHash{mLedger->info().txHash}, data, &filter);
1048  return san.isGood();
1049 }
1050 
1051 std::vector<InboundLedger::neededHash_t>
1052 InboundLedger::getNeededHashes()
1053 {
1054  std::vector<neededHash_t> ret;
1055 
1056  if (!mHaveHeader)
1057  {
1058  ret.push_back(
1059  std::make_pair(protocol::TMGetObjectByHash::otLEDGER, hash_));
1060  return ret;
1061  }
1062 
1063  if (!mHaveState)
1064  {
1065  AccountStateSF filter(
1066  mLedger->stateMap().family().db(), app_.getLedgerMaster());
1067  for (auto const& h : neededStateHashes(4, &filter))
1068  {
1069  ret.push_back(
1070  std::make_pair(protocol::TMGetObjectByHash::otSTATE_NODE, h));
1071  }
1072  }
1073 
1074  if (!mHaveTransactions)
1075  {
1076  TransactionStateSF filter(
1077  mLedger->txMap().family().db(), app_.getLedgerMaster());
1078  for (auto const& h : neededTxHashes(4, &filter))
1079  {
1080  ret.push_back(std::make_pair(
1081  protocol::TMGetObjectByHash::otTRANSACTION_NODE, h));
1082  }
1083  }
1084 
1085  return ret;
1086 }
1087 
1088 /** Stash a TMLedgerData received from a peer for later processing
1089  Returns 'true' if we need to dispatch
1090 */
1091 bool
1092 InboundLedger::gotData(
1093  std::weak_ptr<Peer> peer,
1094  std::shared_ptr<protocol::TMLedgerData> const& data)
1095 {
1096  std::lock_guard sl(mReceivedDataLock);
1097 
1098  if (isDone())
1099  return false;
1100 
1101  mReceivedData.emplace_back(peer, data);
1102 
1103  if (mReceiveDispatched)
1104  return false;
1105 
1106  mReceiveDispatched = true;
1107  return true;
1108 }
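// Usage sketch (editorial; `acquire` and `packet` are hypothetical caller
// variables): a return of true means no consumer is scheduled yet, so the
// caller should dispatch one; later packets are merely queued until
// runData() drains them.
//
//   if (acquire->gotData(peer, packet))
//       app.getJobQueue().addJob(
//           jtLEDGER_DATA, "processLedgerData",
//           [acquire](Job&) { acquire->runData(); });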
1109 
1110 /** Process one TMLedgerData
1111  Returns the number of useful nodes
1112 */
1113 // VFALCO NOTE, it is not necessary to pass the entire Peer,
1114 // we can get away with just a Resource::Consumer endpoint.
1115 //
1116 // TODO Change peer to Consumer
1117 //
1118 int
1119 InboundLedger::processData(
1120  std::shared_ptr<Peer> peer,
1121  protocol::TMLedgerData& packet)
1122 {
1123  ScopedLockType sl(mtx_);
1124 
1125  if (packet.type() == protocol::liBASE)
1126  {
1127  if (packet.nodes_size() < 1)
1128  {
1129  JLOG(journal_.warn()) << "Got empty header data";
1130  peer->charge(Resource::feeInvalidRequest);
1131  return -1;
1132  }
1133 
1134  SHAMapAddNode san;
1135 
1136  try
1137  {
1138  if (!mHaveHeader)
1139  {
1140  if (!takeHeader(packet.nodes(0).nodedata()))
1141  {
1142  JLOG(journal_.warn()) << "Got invalid header data";
1143  peer->charge(Resource::feeInvalidRequest);
1144  return -1;
1145  }
1146 
1147  san.incUseful();
1148  }
1149 
1150  if (!mHaveState && (packet.nodes().size() > 1) &&
1151  !takeAsRootNode(makeSlice(packet.nodes(1).nodedata()), san))
1152  {
1153  JLOG(journal_.warn()) << "Included AS root invalid";
1154  }
1155 
1156  if (!mHaveTransactions && (packet.nodes().size() > 2) &&
1157  !takeTxRootNode(makeSlice(packet.nodes(2).nodedata()), san))
1158  {
1159  JLOG(journal_.warn()) << "Included TX root invalid";
1160  }
1161  }
1162  catch (std::exception const& ex)
1163  {
1164  JLOG(journal_.warn())
1165  << "Included AS/TX root invalid: " << ex.what();
1166  peer->charge(Resource::feeBadData);
1167  return -1;
1168  }
1169 
1170  if (san.isUseful())
1171  progress_ = true;
1172 
1173  mStats += san;
1174  return san.getGood();
1175  }
1176 
1177  if ((packet.type() == protocol::liTX_NODE) ||
1178  (packet.type() == protocol::liAS_NODE))
1179  {
1180  if (packet.nodes().size() == 0)
1181  {
1182  JLOG(journal_.info()) << "Got response with no nodes";
1183  peer->charge(Resource::feeInvalidRequest);
1184  return -1;
1185  }
1186 
1187  // Verify node IDs and data are complete
1188  for (auto const& node : packet.nodes())
1189  {
1190  if (!node.has_nodeid() || !node.has_nodedata())
1191  {
1192  JLOG(journal_.warn()) << "Got bad node";
1193  peer->charge(Resource::feeInvalidRequest);
1194  return -1;
1195  }
1196  }
1197 
1198  SHAMapAddNode san;
1199  receiveNode(packet, san);
1200 
1201  if (packet.type() == protocol::liTX_NODE)
1202  {
1203  JLOG(journal_.debug()) << "Ledger TX node stats: " << san.get();
1204  }
1205  else
1206  {
1207  JLOG(journal_.debug()) << "Ledger AS node stats: " << san.get();
1208  }
1209 
1210  if (san.isUseful())
1211  progress_ = true;
1212 
1213  mStats += san;
1214  return san.getGood();
1215  }
1216 
1217  return -1;
1218 }
1219 
1220 /** Process pending TMLedgerData
1221  Query the 'best' peer
1222 */
1223 void
1224 InboundLedger::runData()
1225 {
1226  std::shared_ptr<Peer> chosenPeer;
1227  int chosenPeerCount = -1;
1228 
1229  std::vector<PeerDataPairType> data;
1230 
1231  for (;;)
1232  {
1233  data.clear();
1234  {
1235  std::lock_guard sl(mReceivedDataLock);
1236 
1237  if (mReceivedData.empty())
1238  {
1239  mReceiveDispatched = false;
1240  break;
1241  }
1242 
1243  data.swap(mReceivedData);
1244  }
1245 
1246  // Select the peer that gives us the most nodes that are useful,
1247  // breaking ties in favor of the peer that responded first.
1248  for (auto& entry : data)
1249  {
1250  if (auto peer = entry.first.lock())
1251  {
1252  int count = processData(peer, *(entry.second));
1253  if (count > chosenPeerCount)
1254  {
1255  chosenPeerCount = count;
1256  chosenPeer = std::move(peer);
1257  }
1258  }
1259  }
1260  }
1261 
1262  if (chosenPeer)
1263  trigger(chosenPeer, TriggerReason::reply);
1264 }
1265 
1266 Json::Value
1267 InboundLedger::getJson(int)
1268 {
1269  Json::Value ret(Json::objectValue);
1270 
1271  ScopedLockType sl(mtx_);
1272 
1273  ret[jss::hash] = to_string(hash_);
1274 
1275  if (complete_)
1276  ret[jss::complete] = true;
1277 
1278  if (failed_)
1279  ret[jss::failed] = true;
1280 
1281  if (!complete_ && !failed_)
1282  ret[jss::peers] = static_cast<int>(mPeerSet->getPeerIds().size());
1283 
1284  ret[jss::have_header] = mHaveHeader;
1285 
1286  if (mHaveHeader)
1287  {
1288  ret[jss::have_state] = mHaveState;
1289  ret[jss::have_transactions] = mHaveTransactions;
1290  }
1291 
1292  ret[jss::timeouts] = timeouts_;
1293 
1294  if (mHaveHeader && !mHaveState)
1295  {
1296  Json::Value hv(Json::arrayValue);
1297  for (auto const& h : neededStateHashes(16, nullptr))
1298  {
1299  hv.append(to_string(h));
1300  }
1301  ret[jss::needed_state_hashes] = hv;
1302  }
1303 
1304  if (mHaveHeader && !mHaveTransactions)
1305  {
1306  Json::Value hv(Json::arrayValue);
1307  for (auto const& h : neededTxHashes(16, nullptr))
1308  {
1309  hv.append(to_string(h));
1310  }
1311  ret[jss::needed_transaction_hashes] = hv;
1312  }
1313 
1314  return ret;
1315 }
1316 
1317 } // namespace ripple