rippled
InboundLedger.cpp
1 //------------------------------------------------------------------------------
2 /*
3  This file is part of rippled: https://github.com/ripple/rippled
4  Copyright (c) 2012, 2013 Ripple Labs Inc.
5 
6  Permission to use, copy, modify, and/or distribute this software for any
7  purpose with or without fee is hereby granted, provided that the above
8  copyright notice and this permission notice appear in all copies.
9 
10  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 //==============================================================================
19 
20 #include <ripple/app/ledger/AccountStateSF.h>
21 #include <ripple/app/ledger/InboundLedger.h>
22 #include <ripple/app/ledger/InboundLedgers.h>
23 #include <ripple/app/ledger/LedgerMaster.h>
24 #include <ripple/app/ledger/TransactionStateSF.h>
25 #include <ripple/app/main/Application.h>
26 #include <ripple/app/misc/NetworkOPs.h>
27 #include <ripple/basics/Log.h>
28 #include <ripple/core/JobQueue.h>
29 #include <ripple/nodestore/DatabaseShard.h>
30 #include <ripple/overlay/Overlay.h>
31 #include <ripple/protocol/HashPrefix.h>
32 #include <ripple/protocol/jss.h>
33 #include <ripple/resource/Fees.h>
34 #include <ripple/shamap/SHAMapNodeID.h>
35 
36 #include <boost/iterator/function_output_iterator.hpp>
37 
38 #include <algorithm>
39 #include <random>
40 
41 namespace ripple {
42 
43 using namespace std::chrono_literals;
44 
45 enum {
46  // Number of peers to start with
48 
49  // Number of peers to add on a timeout
50  ,
52 
53  // how many timeouts before we give up
54  ,
56 
57  // how many timeouts before we get aggressive
58  ,
60 
61  // Number of nodes to find initially
62  ,
64 
65  // Number of nodes to request for a reply
66  ,
68 
69  // Number of nodes to request blindly
70  ,
71  reqNodes = 12
72 };
73 
74 // milliseconds for each ledger timeout
75 auto constexpr ledgerAcquireTimeout = 3000ms;
76 
78  Application& app,
79  uint256 const& hash,
80  std::uint32_t seq,
81  Reason reason,
82  clock_type& clock,
85  app,
86  hash,
88  {jtLEDGER_DATA, "InboundLedger", 5},
89  app.journal("InboundLedger"))
90  , m_clock(clock)
91  , mHaveHeader(false)
92  , mHaveState(false)
93  , mHaveTransactions(false)
94  , mSignaled(false)
95  , mByHash(true)
96  , mSeq(seq)
97  , mReason(reason)
98  , mReceiveDispatched(false)
99  , mPeerSet(std::move(peerSet))
100 {
101  JLOG(journal_.trace()) << "Acquiring ledger " << hash_;
102  touch();
103 }
104 
105 void
107 {
108  ScopedLockType sl(mtx_);
109  collectionLock.unlock();
110 
111  tryDB(app_.getNodeFamily().db());
112  if (failed_)
113  return;
114 
115  if (!complete_)
116  {
117  auto shardStore = app_.getShardStore();
118  if (mReason == Reason::SHARD)
119  {
120  if (!shardStore)
121  {
122  JLOG(journal_.error())
123  << "Acquiring shard with no shard store available";
124  failed_ = true;
125  return;
126  }
127 
128  mHaveHeader = false;
129  mHaveTransactions = false;
130  mHaveState = false;
131  mLedger.reset();
132 
133  tryDB(app_.getShardFamily()->db());
134  if (failed_)
135  return;
136  }
137  else if (shardStore && mSeq >= shardStore->earliestLedgerSeq())
138  {
139  if (auto l = shardStore->fetchLedger(hash_, mSeq))
140  {
141  mHaveHeader = true;
142  mHaveTransactions = true;
143  mHaveState = true;
144  complete_ = true;
145  mLedger = std::move(l);
146  }
147  }
148  }
149  if (!complete_)
150  {
151  addPeers();
152  queueJob(sl);
153  return;
154  }
155 
156  JLOG(journal_.debug()) << "Acquiring ledger we already have in "
157  << " local store. " << hash_;
158  assert(
159  mLedger->info().seq < XRP_LEDGER_EARLIEST_FEES ||
160  mLedger->read(keylet::fees()));
161  mLedger->setImmutable();
162 
164  return;
165 
167 
168  // Check if this could be a newer fully-validated ledger
169  if (mReason == Reason::CONSENSUS)
171 }
172 
175 {
176  auto const& peerIds = mPeerSet->getPeerIds();
177  return std::count_if(peerIds.begin(), peerIds.end(), [this](auto id) {
178  return (app_.overlay().findPeerByShortID(id) != nullptr);
179  });
180 }
181 
182 void
184 {
185  ScopedLockType sl(mtx_);
186 
187  // If we didn't know the sequence number, but now do, save it
188  if ((seq != 0) && (mSeq == 0))
189  mSeq = seq;
190 
191  // Prevent this from being swept
192  touch();
193 }
194 
195 bool
197 {
198  ScopedLockType sl(mtx_);
199  if (!isDone())
200  {
201  if (mLedger)
202  tryDB(mLedger->stateMap().family().db());
203  else if (mReason == Reason::SHARD)
204  tryDB(app_.getShardFamily()->db());
205  else
206  tryDB(app_.getNodeFamily().db());
207  if (failed_ || complete_)
208  {
209  done();
210  return true;
211  }
212  }
213  return false;
214 }
215 
217 {
218  // Save any received AS data not processed. It could be useful
219  // for populating a different ledger
220  for (auto& entry : mReceivedData)
221  {
222  if (entry.second->type() == protocol::liAS_NODE)
223  app_.getInboundLedgers().gotStaleData(entry.second);
224  }
225  if (!isDone())
226  {
227  JLOG(journal_.debug())
228  << "Acquire " << hash_ << " abort "
229  << ((timeouts_ == 0) ? std::string()
230  : (std::string("timeouts:") +
231  std::to_string(timeouts_) + " "))
232  << mStats.get();
233  }
234 }
235 
238  uint256 const& root,
239  SHAMap& map,
240  int max,
241  SHAMapSyncFilter* filter)
242 {
244 
245  if (!root.isZero())
246  {
247  if (map.getHash().isZero())
248  ret.push_back(root);
249  else
250  {
251  auto mn = map.getMissingNodes(max, filter);
252  ret.reserve(mn.size());
253  for (auto const& n : mn)
254  ret.push_back(n.second);
255  }
256  }
257 
258  return ret;
259 }
260 
263 {
264  return neededHashes(mLedger->info().txHash, mLedger->txMap(), max, filter);
265 }
266 
269 {
270  return neededHashes(
271  mLedger->info().accountHash, mLedger->stateMap(), max, filter);
272 }
273 
274 // See how much of the ledger data is stored locally
275 // Data found in a fetch pack will be stored
276 void
278 {
279  if (!mHaveHeader)
280  {
281  auto makeLedger = [&, this](Blob const& data) {
282  JLOG(journal_.trace()) << "Ledger header found in fetch pack";
283  mLedger = std::make_shared<Ledger>(
285  app_.config(),
287  : app_.getNodeFamily());
288  if (mLedger->info().hash != hash_ ||
289  (mSeq != 0 && mSeq != mLedger->info().seq))
290  {
291  // We know for a fact the ledger can never be acquired
292  JLOG(journal_.warn())
293  << "hash " << hash_ << " seq " << std::to_string(mSeq)
294  << " cannot be a ledger";
295  mLedger.reset();
296  failed_ = true;
297  }
298  };
299 
300  // Try to fetch the ledger header from the DB
301  if (auto nodeObject = srcDB.fetchNodeObject(hash_, mSeq))
302  {
303  JLOG(journal_.trace()) << "Ledger header found in local store";
304 
305  makeLedger(nodeObject->getData());
306  if (failed_)
307  return;
308 
309  // Store the ledger header if the source and destination differ
310  auto& dstDB{mLedger->stateMap().family().db()};
311  if (std::addressof(dstDB) != std::addressof(srcDB))
312  {
313  Blob blob{nodeObject->getData()};
314  dstDB.store(
315  hotLEDGER, std::move(blob), hash_, mLedger->info().seq);
316  }
317  }
318  else
319  {
320  // Try to fetch the ledger header from a fetch pack
321  auto data = app_.getLedgerMaster().getFetchPack(hash_);
322  if (!data)
323  return;
324 
325  JLOG(journal_.trace()) << "Ledger header found in fetch pack";
326 
327  makeLedger(*data);
328  if (failed_)
329  return;
330 
331  // Store the ledger header in the ledger's database
332  mLedger->stateMap().family().db().store(
333  hotLEDGER, std::move(*data), hash_, mLedger->info().seq);
334  }
335 
336  if (mSeq == 0)
337  mSeq = mLedger->info().seq;
338  mLedger->stateMap().setLedgerSeq(mSeq);
339  mLedger->txMap().setLedgerSeq(mSeq);
340  mHaveHeader = true;
341  }
342 
343  if (!mHaveTransactions)
344  {
345  if (mLedger->info().txHash.isZero())
346  {
347  JLOG(journal_.trace()) << "No TXNs to fetch";
348  mHaveTransactions = true;
349  }
350  else
351  {
352  TransactionStateSF filter(
353  mLedger->txMap().family().db(), app_.getLedgerMaster());
354  if (mLedger->txMap().fetchRoot(
355  SHAMapHash{mLedger->info().txHash}, &filter))
356  {
357  if (neededTxHashes(1, &filter).empty())
358  {
359  JLOG(journal_.trace()) << "Had full txn map locally";
360  mHaveTransactions = true;
361  }
362  }
363  }
364  }
365 
366  if (!mHaveState)
367  {
368  if (mLedger->info().accountHash.isZero())
369  {
370  JLOG(journal_.fatal())
371  << "We are acquiring a ledger with a zero account hash";
372  failed_ = true;
373  return;
374  }
375  AccountStateSF filter(
376  mLedger->stateMap().family().db(), app_.getLedgerMaster());
377  if (mLedger->stateMap().fetchRoot(
378  SHAMapHash{mLedger->info().accountHash}, &filter))
379  {
380  if (neededStateHashes(1, &filter).empty())
381  {
382  JLOG(journal_.trace()) << "Had full AS map locally";
383  mHaveState = true;
384  }
385  }
386  }
387 
389  {
390  JLOG(journal_.debug()) << "Had everything locally";
391  complete_ = true;
392  assert(
393  mLedger->info().seq < XRP_LEDGER_EARLIEST_FEES ||
394  mLedger->read(keylet::fees()));
395  mLedger->setImmutable();
396  }
397 }
398 
401 void
403 {
404  mRecentNodes.clear();
405 
406  if (isDone())
407  {
408  JLOG(journal_.info()) << "Already done " << hash_;
409  return;
410  }
411 
413  {
414  if (mSeq != 0)
415  {
416  JLOG(journal_.warn())
417  << timeouts_ << " timeouts for ledger " << mSeq;
418  }
419  else
420  {
421  JLOG(journal_.warn())
422  << timeouts_ << " timeouts for ledger " << hash_;
423  }
424  failed_ = true;
425  done();
426  return;
427  }
428 
429  if (!wasProgress)
430  {
431  checkLocal();
432 
433  mByHash = true;
434 
435  std::size_t pc = getPeerCount();
436  JLOG(journal_.debug())
437  << "No progress(" << pc << ") for ledger " << hash_;
438 
439  // addPeers triggers if the reason is not HISTORY
440  // So if the reason IS HISTORY, need to trigger after we add
441  // otherwise, we need to trigger before we add
442  // so each peer gets triggered once
443  if (mReason != Reason::HISTORY)
445  addPeers();
446  if (mReason == Reason::HISTORY)
448  }
449 }
450 
452 void
454 {
455  mPeerSet->addPeers(
457  [this](auto peer) { return peer->hasLedger(hash_, mSeq); },
458  [this](auto peer) {
459  // For historical nodes, do not trigger too soon
460  // since a fetch pack is probably coming
461  if (mReason != Reason::HISTORY)
463  });
464 }
465 
468 {
469  return shared_from_this();
470 }
471 
472 void
474 {
475  if (mSignaled)
476  return;
477 
478  mSignaled = true;
479  touch();
480 
481  JLOG(journal_.debug()) << "Acquire " << hash_ << (failed_ ? " fail " : " ")
482  << ((timeouts_ == 0)
483  ? std::string()
484  : (std::string("timeouts:") +
485  std::to_string(timeouts_) + " "))
486  << mStats.get();
487 
488  assert(complete_ || failed_);
489 
490  if (complete_ && !failed_ && mLedger)
491  {
492  assert(
493  mLedger->info().seq < XRP_LEDGER_EARLIEST_FEES ||
494  mLedger->read(keylet::fees()));
495  mLedger->setImmutable();
496  switch (mReason)
497  {
498  case Reason::SHARD:
500  [[fallthrough]];
501  case Reason::HISTORY:
503  break;
504  default:
506  break;
507  }
508  }
509 
510  // We hold the PeerSet lock, so must dispatch
512  jtLEDGER_DATA, "AcquisitionDone", [self = shared_from_this()]() {
513  if (self->complete_ && !self->failed_)
514  {
515  self->app_.getLedgerMaster().checkAccept(self->getLedger());
516  self->app_.getLedgerMaster().tryAdvance();
517  }
518  else
519  self->app_.getInboundLedgers().logFailure(
520  self->hash_, self->mSeq);
521  });
522 }
523 
526 void
528 {
529  ScopedLockType sl(mtx_);
530 
531  if (isDone())
532  {
533  JLOG(journal_.debug())
534  << "Trigger on ledger: " << hash_ << (complete_ ? " completed" : "")
535  << (failed_ ? " failed" : "");
536  return;
537  }
538 
539  if (auto stream = journal_.trace())
540  {
541  stream << "Trigger acquiring ledger " << hash_;
542  if (peer)
543  stream << " from " << peer;
544 
545  if (complete_ || failed_)
546  stream << "complete=" << complete_ << " failed=" << failed_;
547  else
548  stream << "header=" << mHaveHeader << " tx=" << mHaveTransactions
549  << " as=" << mHaveState;
550  }
551 
552  if (!mHaveHeader)
553  {
554  tryDB(
556  : app_.getNodeFamily().db());
557  if (failed_)
558  {
559  JLOG(journal_.warn()) << " failed local for " << hash_;
560  return;
561  }
562  }
563 
564  protocol::TMGetLedger tmGL;
565  tmGL.set_ledgerhash(hash_.begin(), hash_.size());
566 
567  if (timeouts_ != 0)
568  {
569  // Be more aggressive if we've timed out at least once
570  tmGL.set_querytype(protocol::qtINDIRECT);
571 
572  if (!progress_ && !failed_ && mByHash &&
574  {
575  auto need = getNeededHashes();
576 
577  if (!need.empty())
578  {
579  protocol::TMGetObjectByHash tmBH;
580  bool typeSet = false;
581  tmBH.set_query(true);
582  tmBH.set_ledgerhash(hash_.begin(), hash_.size());
583  for (auto const& p : need)
584  {
585  JLOG(journal_.debug()) << "Want: " << p.second;
586 
587  if (!typeSet)
588  {
589  tmBH.set_type(p.first);
590  typeSet = true;
591  }
592 
593  if (p.first == tmBH.type())
594  {
595  protocol::TMIndexedObject* io = tmBH.add_objects();
596  io->set_hash(p.second.begin(), p.second.size());
597  if (mSeq != 0)
598  io->set_ledgerseq(mSeq);
599  }
600  }
601 
602  auto packet =
603  std::make_shared<Message>(tmBH, protocol::mtGET_OBJECTS);
604  auto const& peerIds = mPeerSet->getPeerIds();
606  peerIds.begin(), peerIds.end(), [this, &packet](auto id) {
607  if (auto p = app_.overlay().findPeerByShortID(id))
608  {
609  mByHash = false;
610  p->send(packet);
611  }
612  });
613  }
614  else
615  {
616  JLOG(journal_.info())
617  << "getNeededHashes says acquire is complete";
618  mHaveHeader = true;
619  mHaveTransactions = true;
620  mHaveState = true;
621  complete_ = true;
622  }
623  }
624  }
625 
626  // We can't do much without the header data because we don't know the
627  // state or transaction root hashes.
628  if (!mHaveHeader && !failed_)
629  {
630  tmGL.set_itype(protocol::liBASE);
631  if (mSeq != 0)
632  tmGL.set_ledgerseq(mSeq);
633  JLOG(journal_.trace()) << "Sending header request to "
634  << (peer ? "selected peer" : "all peers");
635  mPeerSet->sendRequest(tmGL, peer);
636  return;
637  }
638 
639  if (mLedger)
640  tmGL.set_ledgerseq(mLedger->info().seq);
641 
642  if (reason != TriggerReason::reply)
643  {
644  // If we're querying blind, don't query deep
645  tmGL.set_querydepth(0);
646  }
647  else if (peer && peer->isHighLatency())
648  {
649  // If the peer has high latency, query extra deep
650  tmGL.set_querydepth(2);
651  }
652  else
653  tmGL.set_querydepth(1);
654 
655  // Get the state data first because it's the most likely to be useful
656  // if we wind up abandoning this fetch.
657  if (mHaveHeader && !mHaveState && !failed_)
658  {
659  assert(mLedger);
660 
661  if (!mLedger->stateMap().isValid())
662  {
663  failed_ = true;
664  }
665  else if (mLedger->stateMap().getHash().isZero())
666  {
667  // we need the root node
668  tmGL.set_itype(protocol::liAS_NODE);
669  *tmGL.add_nodeids() = SHAMapNodeID().getRawString();
670  JLOG(journal_.trace()) << "Sending AS root request to "
671  << (peer ? "selected peer" : "all peers");
672  mPeerSet->sendRequest(tmGL, peer);
673  return;
674  }
675  else
676  {
677  AccountStateSF filter(
678  mLedger->stateMap().family().db(), app_.getLedgerMaster());
679 
680  // Release the lock while we process the large state map
681  sl.unlock();
682  auto nodes =
683  mLedger->stateMap().getMissingNodes(missingNodesFind, &filter);
684  sl.lock();
685 
686  // Make sure nothing happened while we released the lock
687  if (!failed_ && !complete_ && !mHaveState)
688  {
689  if (nodes.empty())
690  {
691  if (!mLedger->stateMap().isValid())
692  failed_ = true;
693  else
694  {
695  mHaveState = true;
696 
697  if (mHaveTransactions)
698  complete_ = true;
699  }
700  }
701  else
702  {
703  filterNodes(nodes, reason);
704 
705  if (!nodes.empty())
706  {
707  tmGL.set_itype(protocol::liAS_NODE);
708  for (auto const& id : nodes)
709  {
710  *(tmGL.add_nodeids()) = id.first.getRawString();
711  }
712 
713  JLOG(journal_.trace())
714  << "Sending AS node request (" << nodes.size()
715  << ") to "
716  << (peer ? "selected peer" : "all peers");
717  mPeerSet->sendRequest(tmGL, peer);
718  return;
719  }
720  else
721  {
722  JLOG(journal_.trace()) << "All AS nodes filtered";
723  }
724  }
725  }
726  }
727  }
728 
729  if (mHaveHeader && !mHaveTransactions && !failed_)
730  {
731  assert(mLedger);
732 
733  if (!mLedger->txMap().isValid())
734  {
735  failed_ = true;
736  }
737  else if (mLedger->txMap().getHash().isZero())
738  {
739  // we need the root node
740  tmGL.set_itype(protocol::liTX_NODE);
741  *(tmGL.add_nodeids()) = SHAMapNodeID().getRawString();
742  JLOG(journal_.trace()) << "Sending TX root request to "
743  << (peer ? "selected peer" : "all peers");
744  mPeerSet->sendRequest(tmGL, peer);
745  return;
746  }
747  else
748  {
749  TransactionStateSF filter(
750  mLedger->txMap().family().db(), app_.getLedgerMaster());
751 
752  auto nodes =
753  mLedger->txMap().getMissingNodes(missingNodesFind, &filter);
754 
755  if (nodes.empty())
756  {
757  if (!mLedger->txMap().isValid())
758  failed_ = true;
759  else
760  {
761  mHaveTransactions = true;
762 
763  if (mHaveState)
764  complete_ = true;
765  }
766  }
767  else
768  {
769  filterNodes(nodes, reason);
770 
771  if (!nodes.empty())
772  {
773  tmGL.set_itype(protocol::liTX_NODE);
774  for (auto const& n : nodes)
775  {
776  *(tmGL.add_nodeids()) = n.first.getRawString();
777  }
778  JLOG(journal_.trace())
779  << "Sending TX node request (" << nodes.size()
780  << ") to " << (peer ? "selected peer" : "all peers");
781  mPeerSet->sendRequest(tmGL, peer);
782  return;
783  }
784  else
785  {
786  JLOG(journal_.trace()) << "All TX nodes filtered";
787  }
788  }
789  }
790  }
791 
792  if (complete_ || failed_)
793  {
794  JLOG(journal_.debug())
795  << "Done:" << (complete_ ? " complete" : "")
796  << (failed_ ? " failed " : " ") << mLedger->info().seq;
797  sl.unlock();
798  done();
799  }
800 }
801 
802 void
803 InboundLedger::filterNodes(
805  TriggerReason reason)
806 {
807  // Sort nodes so that the ones we haven't recently
808  // requested come before the ones we have.
809  auto dup = std::stable_partition(
810  nodes.begin(), nodes.end(), [this](auto const& item) {
811  return mRecentNodes.count(item.second) == 0;
812  });
813 
814  // If everything is a duplicate we don't want to send
815  // any query at all except on a timeout where we need
816  // to query everyone:
817  if (dup == nodes.begin())
818  {
819  JLOG(journal_.trace()) << "filterNodes: all duplicates";
820 
821  if (reason != TriggerReason::timeout)
822  {
823  nodes.clear();
824  return;
825  }
826  }
827  else
828  {
829  JLOG(journal_.trace()) << "filterNodes: pruning duplicates";
830 
831  nodes.erase(dup, nodes.end());
832  }
833 
834  std::size_t const limit =
835  (reason == TriggerReason::reply) ? reqNodesReply : reqNodes;
836 
837  if (nodes.size() > limit)
838  nodes.resize(limit);
839 
840  for (auto const& n : nodes)
841  mRecentNodes.insert(n.second);
842 }
843 
847 // data must not have hash prefix
848 bool
849 InboundLedger::takeHeader(std::string const& data)
850 {
851  // Return value: true=normal, false=bad data
852  JLOG(journal_.trace()) << "got header acquiring ledger " << hash_;
853 
854  if (complete_ || failed_ || mHaveHeader)
855  return true;
856 
857  auto* f = mReason == Reason::SHARD ? app_.getShardFamily()
858  : &app_.getNodeFamily();
859  mLedger = std::make_shared<Ledger>(
860  deserializeHeader(makeSlice(data)), app_.config(), *f);
861  if (mLedger->info().hash != hash_ ||
862  (mSeq != 0 && mSeq != mLedger->info().seq))
863  {
864  JLOG(journal_.warn())
865  << "Acquire hash mismatch: " << mLedger->info().hash
866  << "!=" << hash_;
867  mLedger.reset();
868  return false;
869  }
870  if (mSeq == 0)
871  mSeq = mLedger->info().seq;
872  mLedger->stateMap().setLedgerSeq(mSeq);
873  mLedger->txMap().setLedgerSeq(mSeq);
874  mHaveHeader = true;
875 
876  Serializer s(data.size() + 4);
877  s.add32(HashPrefix::ledgerMaster);
878  s.addRaw(data.data(), data.size());
879  f->db().store(hotLEDGER, std::move(s.modData()), hash_, mSeq);
880 
881  if (mLedger->info().txHash.isZero())
882  mHaveTransactions = true;
883 
884  if (mLedger->info().accountHash.isZero())
885  mHaveState = true;
886 
887  mLedger->txMap().setSynching();
888  mLedger->stateMap().setSynching();
889 
890  return true;
891 }
892 
896 void
897 InboundLedger::receiveNode(protocol::TMLedgerData& packet, SHAMapAddNode& san)
898 {
899  if (!mHaveHeader)
900  {
901  JLOG(journal_.warn()) << "Missing ledger header";
902  san.incInvalid();
903  return;
904  }
905  if (packet.type() == protocol::liTX_NODE)
906  {
907  if (mHaveTransactions || failed_)
908  {
909  san.incDuplicate();
910  return;
911  }
912  }
913  else if (mHaveState || failed_)
914  {
915  san.incDuplicate();
916  return;
917  }
918 
919  auto [map, rootHash, filter] = [&]()
921  if (packet.type() == protocol::liTX_NODE)
922  return {
923  mLedger->txMap(),
924  SHAMapHash{mLedger->info().txHash},
925  std::make_unique<TransactionStateSF>(
926  mLedger->txMap().family().db(), app_.getLedgerMaster())};
927  return {
928  mLedger->stateMap(),
929  SHAMapHash{mLedger->info().accountHash},
930  std::make_unique<AccountStateSF>(
931  mLedger->stateMap().family().db(), app_.getLedgerMaster())};
932  }();
933 
934  try
935  {
936  auto const f = filter.get();
937 
938  for (auto const& node : packet.nodes())
939  {
940  auto const nodeID = deserializeSHAMapNodeID(node.nodeid());
941 
942  if (!nodeID)
943  throw std::runtime_error("data does not properly deserialize");
944 
945  if (nodeID->isRoot())
946  {
947  san += map.addRootNode(rootHash, makeSlice(node.nodedata()), f);
948  }
949  else
950  {
951  san += map.addKnownNode(*nodeID, makeSlice(node.nodedata()), f);
952  }
953 
954  if (!san.isGood())
955  {
956  JLOG(journal_.warn()) << "Received bad node data";
957  return;
958  }
959  }
960  }
961  catch (std::exception const& e)
962  {
963  JLOG(journal_.error()) << "Received bad node data: " << e.what();
964  san.incInvalid();
965  return;
966  }
967 
968  if (!map.isSynching())
969  {
970  if (packet.type() == protocol::liTX_NODE)
971  mHaveTransactions = true;
972  else
973  mHaveState = true;
974 
975  if (mHaveTransactions && mHaveState)
976  {
977  complete_ = true;
978  done();
979  }
980  }
981 }
982 
986 bool
987 InboundLedger::takeAsRootNode(Slice const& data, SHAMapAddNode& san)
988 {
989  if (failed_ || mHaveState)
990  {
991  san.incDuplicate();
992  return true;
993  }
994 
995  if (!mHaveHeader)
996  {
997  assert(false);
998  return false;
999  }
1000 
1001  AccountStateSF filter(
1002  mLedger->stateMap().family().db(), app_.getLedgerMaster());
1003  san += mLedger->stateMap().addRootNode(
1004  SHAMapHash{mLedger->info().accountHash}, data, &filter);
1005  return san.isGood();
1006 }
1007 
1011 bool
1012 InboundLedger::takeTxRootNode(Slice const& data, SHAMapAddNode& san)
1013 {
1014  if (failed_ || mHaveTransactions)
1015  {
1016  san.incDuplicate();
1017  return true;
1018  }
1019 
1020  if (!mHaveHeader)
1021  {
1022  assert(false);
1023  return false;
1024  }
1025 
1026  TransactionStateSF filter(
1027  mLedger->txMap().family().db(), app_.getLedgerMaster());
1028  san += mLedger->txMap().addRootNode(
1029  SHAMapHash{mLedger->info().txHash}, data, &filter);
1030  return san.isGood();
1031 }
1032 
1034 InboundLedger::getNeededHashes()
1035 {
1037 
1038  if (!mHaveHeader)
1039  {
1040  ret.push_back(
1041  std::make_pair(protocol::TMGetObjectByHash::otLEDGER, hash_));
1042  return ret;
1043  }
1044 
1045  if (!mHaveState)
1046  {
1047  AccountStateSF filter(
1048  mLedger->stateMap().family().db(), app_.getLedgerMaster());
1049  for (auto const& h : neededStateHashes(4, &filter))
1050  {
1051  ret.push_back(
1052  std::make_pair(protocol::TMGetObjectByHash::otSTATE_NODE, h));
1053  }
1054  }
1055 
1056  if (!mHaveTransactions)
1057  {
1058  TransactionStateSF filter(
1059  mLedger->txMap().family().db(), app_.getLedgerMaster());
1060  for (auto const& h : neededTxHashes(4, &filter))
1061  {
1063  protocol::TMGetObjectByHash::otTRANSACTION_NODE, h));
1064  }
1065  }
1066 
1067  return ret;
1068 }
1069 
1073 bool
1074 InboundLedger::gotData(
1075  std::weak_ptr<Peer> peer,
1077 {
1078  std::lock_guard sl(mReceivedDataLock);
1079 
1080  if (isDone())
1081  return false;
1082 
1083  mReceivedData.emplace_back(peer, data);
1084 
1085  if (mReceiveDispatched)
1086  return false;
1087 
1088  mReceiveDispatched = true;
1089  return true;
1090 }
1091 
// VFALCO NOTE, it is not necessary to pass the entire Peer,
// we can get away with just a Resource::Consumer endpoint.
//
// TODO Change peer to Consumer
//
// Process one TMLedgerData reply from a peer: either a header (liBASE)
// message — which may piggyback the AS and TX root nodes — or a batch of
// TX/AS map nodes. Returns the number of useful nodes added, or -1 when
// the peer sent bad data (the peer's resource balance is charged).
int
InboundLedger::processData(
    std::shared_ptr<Peer> peer,
    protocol::TMLedgerData& packet)
{
    if (packet.type() == protocol::liBASE)
    {
        // A header reply must carry at least the header blob itself.
        if (packet.nodes().empty())
        {
            JLOG(journal_.warn()) << peer->id() << ": empty header data";
            peer->charge(Resource::feeInvalidRequest);
            return -1;
        }

        SHAMapAddNode san;

        ScopedLockType sl(mtx_);

        try
        {
            if (!mHaveHeader)
            {
                if (!takeHeader(packet.nodes(0).nodedata()))
                {
                    JLOG(journal_.warn()) << "Got invalid header data";
                    peer->charge(Resource::feeInvalidRequest);
                    return -1;
                }

                san.incUseful();
            }

            // Node 1 (if present) is the account-state root; node 2 (if
            // present) is the transaction root. Invalid roots are logged
            // but do not abort the header processing above.
            if (!mHaveState && (packet.nodes().size() > 1) &&
                !takeAsRootNode(makeSlice(packet.nodes(1).nodedata()), san))
            {
                JLOG(journal_.warn()) << "Included AS root invalid";
            }

            if (!mHaveTransactions && (packet.nodes().size() > 2) &&
                !takeTxRootNode(makeSlice(packet.nodes(2).nodedata()), san))
            {
                JLOG(journal_.warn()) << "Included TX root invalid";
            }
        }
        catch (std::exception const& ex)
        {
            JLOG(journal_.warn())
                << "Included AS/TX root invalid: " << ex.what();
            peer->charge(Resource::feeBadData);
            return -1;
        }

        if (san.isUseful())
            progress_ = true;

        mStats += san;
        return san.getGood();
    }

    if ((packet.type() == protocol::liTX_NODE) ||
        (packet.type() == protocol::liAS_NODE))
    {
        std::string type = packet.type() == protocol::liTX_NODE ? "liTX_NODE: "
                                                                : "liAS_NODE: ";

        if (packet.nodes().empty())
        {
            JLOG(journal_.info()) << peer->id() << ": response with no nodes";
            peer->charge(Resource::feeInvalidRequest);
            return -1;
        }

        ScopedLockType sl(mtx_);

        // Verify node IDs and data are complete
        for (auto const& node : packet.nodes())
        {
            if (!node.has_nodeid() || !node.has_nodedata())
            {
                JLOG(journal_.warn()) << "Got bad node";
                peer->charge(Resource::feeInvalidRequest);
                return -1;
            }
        }

        SHAMapAddNode san;
        receiveNode(packet, san);

        JLOG(journal_.debug())
            << "Ledger "
            << ((packet.type() == protocol::liTX_NODE) ? "TX" : "AS")
            << " node stats: " << san.get();

        if (san.isUseful())
            progress_ = true;

        mStats += san;
        return san.getGood();
    }

    // Unknown message type.
    return -1;
}
1202 
1203 namespace detail {
1204 // Track the amount of useful data that each peer returns
1206 {
1207  // Map from peer to amount of useful the peer returned
1209  // The largest amount of useful data that any peer returned
1210  int maxCount = 0;
1211 
1212  // Update the data count for a peer
1213  void
1214  update(std::shared_ptr<Peer>&& peer, int dataCount)
1215  {
1216  if (dataCount <= 0)
1217  return;
1218  maxCount = std::max(maxCount, dataCount);
1219  auto i = counts.find(peer);
1220  if (i == counts.end())
1221  {
1222  counts.emplace(std::move(peer), dataCount);
1223  return;
1224  }
1225  i->second = std::max(i->second, dataCount);
1226  }
1227 
1228  // Prune all the peers that didn't return enough data.
1229  void
1231  {
1232  // Remove all the peers that didn't return at least half as much data as
1233  // the best peer
1234  auto const thresh = maxCount / 2;
1235  auto i = counts.begin();
1236  while (i != counts.end())
1237  {
1238  if (i->second < thresh)
1239  i = counts.erase(i);
1240  else
1241  ++i;
1242  }
1243  }
1244 
1245  // call F with the `peer` parameter with a random sample of at most n values
1246  // of the counts vector.
1247  template <class F>
1248  void
1250  {
1251  if (counts.empty())
1252  return;
1253 
1254  auto outFunc = [&f](auto&& v) { f(v.first); };
1256 #if _MSC_VER
1258  s.reserve(n);
1259  std::sample(
1260  counts.begin(), counts.end(), std::back_inserter(s), n, rng);
1261  for (auto& v : s)
1262  {
1263  outFunc(v);
1264  }
1265 #else
1266  std::sample(
1267  counts.begin(),
1268  counts.end(),
1269  boost::make_function_output_iterator(outFunc),
1270  n,
1271  rng);
1272 #endif
1273  }
1274 };
1275 } // namespace detail
1276 
/** Drain and process all queued TMLedgerData messages.

    Loops until the receive queue is empty (resetting the dispatch flag
    so gotData() can schedule a new job), then re-triggers a random
    sample of the peers that returned the most useful data.
*/
void
InboundLedger::runData()
{
    // Maximum number of peers to request data from
    constexpr std::size_t maxUsefulPeers = 6;

    decltype(mReceivedData) data;

    // Reserve some memory so the first couple iterations don't reallocate
    data.reserve(8);

    detail::PeerDataCounts dataCounts;

    for (;;)
    {
        data.clear();

        {
            std::lock_guard sl(mReceivedDataLock);

            if (mReceivedData.empty())
            {
                // Queue drained: allow gotData to dispatch another job.
                mReceiveDispatched = false;
                break;
            }

            // Grab the whole batch and process it outside the lock.
            data.swap(mReceivedData);
        }

        for (auto& entry : data)
        {
            // The sender may have disconnected; skip expired peers.
            if (auto peer = entry.first.lock())
            {
                int count = processData(peer, *(entry.second));
                dataCounts.update(std::move(peer), count);
            }
        }
    }

    // Select a random sample of the peers that gives us the most nodes that
    // are useful
    dataCounts.prune();
    dataCounts.sampleN(maxUsefulPeers, [&](std::shared_ptr<Peer> const& peer) {
        trigger(peer, TriggerReason::reply);
    });
}
1326 
1328 InboundLedger::getJson(int)
1329 {
1331 
1332  ScopedLockType sl(mtx_);
1333 
1334  ret[jss::hash] = to_string(hash_);
1335 
1336  if (complete_)
1337  ret[jss::complete] = true;
1338 
1339  if (failed_)
1340  ret[jss::failed] = true;
1341 
1342  if (!complete_ && !failed_)
1343  ret[jss::peers] = static_cast<int>(mPeerSet->getPeerIds().size());
1344 
1345  ret[jss::have_header] = mHaveHeader;
1346 
1347  if (mHaveHeader)
1348  {
1349  ret[jss::have_state] = mHaveState;
1350  ret[jss::have_transactions] = mHaveTransactions;
1351  }
1352 
1353  ret[jss::timeouts] = timeouts_;
1354 
1355  if (mHaveHeader && !mHaveState)
1356  {
1358  for (auto const& h : neededStateHashes(16, nullptr))
1359  {
1360  hv.append(to_string(h));
1361  }
1362  ret[jss::needed_state_hashes] = hv;
1363  }
1364 
1365  if (mHaveHeader && !mHaveTransactions)
1366  {
1368  for (auto const& h : neededTxHashes(16, nullptr))
1369  {
1370  hv.append(to_string(h));
1371  }
1372  ret[jss::needed_transaction_hashes] = hv;
1373  }
1374 
1375  return ret;
1376 }
1377 
1378 } // namespace ripple
ripple::InboundLedger::mRecentNodes
std::set< uint256 > mRecentNodes
Definition: InboundLedger.h:187
beast::Journal::fatal
Stream fatal() const
Definition: Journal.h:338
ripple::Application
Definition: Application.h:116
ripple::SHAMapAddNode
Definition: SHAMapAddNode.h:28
ripple::Application::getNodeFamily
virtual Family & getNodeFamily()=0
ripple::SHAMapAddNode::get
std::string get() const
Definition: SHAMapAddNode.h:156
ripple::InboundLedger::Reason::HISTORY
@ HISTORY
ripple::InboundLedger::getNeededHashes
std::vector< neededHash_t > getNeededHashes()
Definition: InboundLedger.cpp:1034
ripple::InboundLedger::mReason
const Reason mReason
Definition: InboundLedger.h:185
std::for_each
T for_each(T... args)
ripple::makeSlice
std::enable_if_t< std::is_same< T, char >::value||std::is_same< T, unsigned char >::value, Slice > makeSlice(std::array< T, N > const &a)
Definition: Slice.h:241
ripple::InboundLedger::TriggerReason
TriggerReason
Definition: InboundLedger.h:123
ripple::reqNodes
@ reqNodes
Definition: InboundLedger.cpp:71
ripple::NodeStore::Database
Persistency layer for NodeObject.
Definition: Database.h:51
std::string
STL class.
ripple::InboundLedger::Reason::CONSENSUS
@ CONSENSUS
std::shared_ptr
STL class.
ripple::InboundLedger::mHaveState
bool mHaveState
Definition: InboundLedger.h:180
ripple::SHAMap::getHash
SHAMapHash getHash() const
Definition: SHAMap.cpp:852
std::exception
STL class.
beast::Journal::trace
Stream trace() const
Severity stream access functions.
Definition: Journal.h:308
ripple::InboundLedger::TriggerReason::added
@ added
ripple::deserializeSHAMapNodeID
std::optional< SHAMapNodeID > deserializeSHAMapNodeID(void const *data, std::size_t size)
Return an object representing a serialized SHAMap Node ID.
Definition: SHAMapNodeID.cpp:101
ripple::Serializer::modData
Blob & modData()
Definition: Serializer.h:179
ripple::Slice
An immutable linear range of bytes.
Definition: Slice.h:44
Json::arrayValue
@ arrayValue
array value (ordered list)
Definition: json_value.h:42
ripple::InboundLedger::mSignaled
bool mSignaled
Definition: InboundLedger.h:182
std::pair
std::vector::reserve
T reserve(T... args)
ripple::ledgerAcquireTimeout
constexpr auto ledgerAcquireTimeout
Definition: InboundLedger.cpp:75
std::vector
STL class.
std::unordered_map::find
T find(T... args)
ripple::InboundLedger::update
void update(std::uint32_t seq)
Definition: InboundLedger.cpp:183
ripple::InboundLedger::touch
void touch()
Definition: InboundLedger.h:111
std::back_inserter
T back_inserter(T... args)
ripple::peerCountStart
@ peerCountStart
Definition: InboundLedger.cpp:47
ripple::InboundLedger::~InboundLedger
~InboundLedger()
Definition: InboundLedger.cpp:216
ripple::TimeoutCounter::queueJob
void queueJob(ScopedLockType &)
Queue a job to call invokeOnTimer().
Definition: TimeoutCounter.cpp:69
ripple::TimeoutCounter::progress_
bool progress_
Whether forward progress has been made.
Definition: TimeoutCounter.h:134
random
ripple::InboundLedger::mByHash
bool mByHash
Definition: InboundLedger.h:183
std::unordered_map::emplace
T emplace(T... args)
ripple::neededHashes
static std::vector< uint256 > neededHashes(uint256 const &root, SHAMap &map, int max, SHAMapSyncFilter *filter)
Definition: InboundLedger.cpp:237
beast::Journal::warn
Stream warn() const
Definition: Journal.h:326
std::lock_guard
STL class.
ripple::SHAMapHash::isZero
bool isZero() const
Definition: SHAMapHash.h:53
ripple::Application::getShardStore
virtual NodeStore::DatabaseShard * getShardStore()=0
ripple::InboundLedger::neededStateHashes
std::vector< uint256 > neededStateHashes(int max, SHAMapSyncFilter *filter) const
Definition: InboundLedger.cpp:268
ripple::detail::PeerDataCounts::prune
void prune()
Definition: InboundLedger.cpp:1230
std::tuple
ripple::AccountStateSF
Definition: AccountStateSF.h:31
ripple::JobQueue::addJob
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition: JobQueue.h:166
ripple::InboundLedger::mPeerSet
std::unique_ptr< PeerSet > mPeerSet
Definition: InboundLedger.h:197
std::minstd_rand
ripple::TimeoutCounter
This class is an "active" object.
Definition: TimeoutCounter.h:66
ripple::Family::db
virtual NodeStore::Database & db()=0
ripple::XRP_LEDGER_EARLIEST_FEES
static constexpr std::uint32_t XRP_LEDGER_EARLIEST_FEES
The XRP Ledger mainnet's earliest ledger with a FeeSettings object.
Definition: SystemParameters.h:73
std::shared_ptr::reset
T reset(T... args)
ripple::SHAMapHash
Definition: SHAMapHash.h:32
algorithm
ripple::jtLEDGER_DATA
@ jtLEDGER_DATA
Definition: Job.h:67
ripple::TimeoutCounter::mtx_
std::recursive_mutex mtx_
Definition: TimeoutCounter.h:125
ripple::Application::getInboundLedgers
virtual InboundLedgers & getInboundLedgers()=0
ripple::base_uint::size
constexpr static std::size_t size()
Definition: base_uint.h:519
std::unique_lock::unlock
T unlock(T... args)
ripple::InboundLedger::neededTxHashes
std::vector< uint256 > neededTxHashes(int max, SHAMapSyncFilter *filter) const
Definition: InboundLedger.cpp:262
std::vector::push_back
T push_back(T... args)
ripple::base_uint< 256 >
std::sample
T sample(T... args)
ripple::reqNodesReply
@ reqNodesReply
Definition: InboundLedger.cpp:67
std::addressof
T addressof(T... args)
Json::Value::append
Value & append(const Value &value)
Append value to array at the end.
Definition: json_value.cpp:882
ripple::InboundLedger::mLedger
std::shared_ptr< Ledger > mLedger
Definition: InboundLedger.h:178
std::enable_shared_from_this< InboundLedger >::shared_from_this
T shared_from_this(T... args)
ripple::TimeoutCounter::app_
Application & app_
Definition: TimeoutCounter.h:123
ripple::SHAMapAddNode::isUseful
bool isUseful() const
Definition: SHAMapAddNode.h:116
ripple::InboundLedger::getPeerCount
std::size_t getPeerCount() const
Definition: InboundLedger.cpp:174
Json::objectValue
@ objectValue
object value (collection of name/value pairs).
Definition: json_value.h:43
std::random_device
ripple::InboundLedger::addPeers
void addPeers()
Add more peers to the set, if possible.
Definition: InboundLedger.cpp:453
ripple::LedgerMaster::getFetchPack
std::optional< Blob > getFetchPack(uint256 const &hash) override
Retrieves partial ledger data of the coresponding hash from peers.
Definition: LedgerMaster.cpp:2151
ripple::peerCountAdd
@ peerCountAdd
Definition: InboundLedger.cpp:51
ripple::Application::getLedgerMaster
virtual LedgerMaster & getLedgerMaster()=0
ripple::TimeoutCounter::failed_
bool failed_
Definition: TimeoutCounter.h:132
ripple::InboundLedgers::gotStaleData
virtual void gotStaleData(std::shared_ptr< protocol::TMLedgerData > packet)=0
ripple::InboundLedger::InboundLedger
InboundLedger(Application &app, uint256 const &hash, std::uint32_t seq, Reason reason, clock_type &, std::unique_ptr< PeerSet > peerSet)
Definition: InboundLedger.cpp:77
ripple::Application::config
virtual Config & config()=0
ripple::InboundLedgers::onLedgerFetched
virtual void onLedgerFetched()=0
Called when a complete ledger is obtained.
ripple::SHAMapAddNode::isGood
bool isGood() const
Definition: SHAMapAddNode.h:132
std::unique_lock< std::recursive_mutex >
ripple::SHAMap
A SHAMap is both a radix tree with a fan-out of 16 and a Merkle tree.
Definition: SHAMap.h:95
ripple::Serializer::addRaw
int addRaw(Blob const &vector)
Definition: Serializer.cpp:100
std::to_string
T to_string(T... args)
ripple::Application::getJobQueue
virtual JobQueue & getJobQueue()=0
ripple::InboundLedger::tryDB
void tryDB(NodeStore::Database &srcDB)
Definition: InboundLedger.cpp:277
beast::Journal::error
Stream error() const
Definition: Journal.h:332
beast::Journal::info
Stream info() const
Definition: Journal.h:320
std::unordered_map::erase
T erase(T... args)
std::runtime_error
STL class.
ripple::detail::PeerDataCounts
Definition: InboundLedger.cpp:1205
std::uint32_t
ripple::missingNodesFind
@ missingNodesFind
Definition: InboundLedger.cpp:63
ripple::TimeoutCounter::isDone
bool isDone() const
Definition: TimeoutCounter.h:116
ripple::InboundLedger::mHaveHeader
bool mHaveHeader
Definition: InboundLedger.h:179
beast::abstract_clock< std::chrono::steady_clock >
ripple::SHAMap::getMissingNodes
std::vector< std::pair< SHAMapNodeID, uint256 > > getMissingNodes(int maxNodes, SHAMapSyncFilter *filter)
Check for nodes in the SHAMap not available.
Definition: SHAMapSync.cpp:317
ripple::LedgerMaster::checkAccept
void checkAccept(std::shared_ptr< Ledger const > const &ledger)
Definition: LedgerMaster.cpp:1047
std::weak_ptr
STL class.
ripple::Serializer
Definition: Serializer.h:40
ripple::InboundLedger::TriggerReason::timeout
@ timeout
ripple::InboundLedger::pmDowncast
std::weak_ptr< TimeoutCounter > pmDowncast() override
Return a weak pointer to this.
Definition: InboundLedger.cpp:467
ripple
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: RCLCensorshipDetector.h:29
ripple::InboundLedger::mStats
SHAMapAddNode mStats
Definition: InboundLedger.h:189
ripple::TimeoutCounter::hash_
const uint256 hash_
The hash of the object (in practice, always a ledger) we are trying to fetch.
Definition: TimeoutCounter.h:129
ripple::detail::PeerDataCounts::update
void update(std::shared_ptr< Peer > &&peer, int dataCount)
Definition: InboundLedger.cpp:1214
ripple::Application::getShardFamily
virtual Family * getShardFamily()=0
ripple::base_uint::begin
iterator begin()
Definition: base_uint.h:133
std::unordered_map::begin
T begin(T... args)
ripple::InboundLedger::mSeq
std::uint32_t mSeq
Definition: InboundLedger.h:184
ripple::detail::PeerDataCounts::sampleN
void sampleN(std::size_t n, F &&f)
Definition: InboundLedger.cpp:1249
ripple::deserializePrefixedHeader
LedgerHeader deserializePrefixedHeader(Slice data, bool hasHash)
Deserialize a ledger header (prefixed with 4 bytes) from a byte array.
Definition: protocol/impl/LedgerHeader.cpp:66
ripple::deserializeHeader
LedgerHeader deserializeHeader(Slice data, bool hasHash)
Deserialize a ledger header from a byte array.
Definition: protocol/impl/LedgerHeader.cpp:42
ripple::NodeStore::DatabaseShard::setStored
virtual void setStored(std::shared_ptr< Ledger const > const &ledger)=0
Notifies the database that the given ledger has been fully acquired and stored.
std::count_if
T count_if(T... args)
ripple::InboundLedger::done
void done()
Definition: InboundLedger.cpp:473
std::unordered_map::empty
T empty(T... args)
ripple::detail::PeerDataCounts::counts
std::unordered_map< std::shared_ptr< Peer >, int > counts
Definition: InboundLedger.cpp:1208
ripple::InboundLedger::trigger
void trigger(std::shared_ptr< Peer > const &, TriggerReason)
Request more nodes, perhaps from a specific peer.
Definition: InboundLedger.cpp:527
beast::Journal::debug
Stream debug() const
Definition: Journal.h:314
ripple::SHAMapAddNode::incInvalid
void incInvalid()
Definition: SHAMapAddNode.h:80
std::size_t
ripple::keylet::fees
Keylet const & fees() noexcept
The (fixed) index of the object containing the ledger fees.
Definition: Indexes.cpp:180
ripple::SHAMapAddNode::incUseful
void incUseful()
Definition: SHAMapAddNode.h:86
ripple::hotLEDGER
@ hotLEDGER
Definition: NodeObject.h:34
std::make_pair
T make_pair(T... args)
ripple::Serializer::add32
int add32(std::uint32_t i)
Definition: Serializer.cpp:38
ripple::SHAMapAddNode::getGood
int getGood() const
Definition: SHAMapAddNode.h:104
ripple::LedgerMaster::storeLedger
bool storeLedger(std::shared_ptr< Ledger const > ledger)
Definition: LedgerMaster.cpp:538
std::unordered_map::end
T end(T... args)
ripple::InboundLedger::Reason
Reason
Definition: InboundLedger.h:43
ripple::NodeStore::Database::fetchNodeObject
std::shared_ptr< NodeObject > fetchNodeObject(uint256 const &hash, std::uint32_t ledgerSeq=0, FetchType fetchType=FetchType::synchronous, bool duplicate=false)
Fetch a node object.
Definition: Database.cpp:252
ripple::SHAMapAddNode::incDuplicate
void incDuplicate()
Definition: SHAMapAddNode.h:92
ripple::TimeoutCounter::complete_
bool complete_
Definition: TimeoutCounter.h:131
std::max
T max(T... args)
ripple::ledgerBecomeAggressiveThreshold
@ ledgerBecomeAggressiveThreshold
Definition: InboundLedger.cpp:59
ripple::SHAMapSyncFilter
Definition: SHAMapSyncFilter.h:30
ripple::TimeoutCounter::timeouts_
int timeouts_
Definition: TimeoutCounter.h:130
std::unique_ptr
STL class.
std::stable_partition
T stable_partition(T... args)
ripple::InboundLedger::Reason::SHARD
@ SHARD
std::unordered_map
STL class.
ripple::InboundLedger::onTimer
void onTimer(bool progress, ScopedLockType &peerSetLock) override
Called with a lock by the PeerSet when the timer expires.
Definition: InboundLedger.cpp:402
ripple::InboundLedger::checkLocal
bool checkLocal()
Definition: InboundLedger.cpp:196
ripple::TransactionStateSF
Definition: TransactionStateSF.h:31
ripple::ledgerTimeoutRetriesMax
@ ledgerTimeoutRetriesMax
Definition: InboundLedger.cpp:55
ripple::InboundLedger::mHaveTransactions
bool mHaveTransactions
Definition: InboundLedger.h:181
ripple::InboundLedger::mReceivedData
std::vector< std::pair< std::weak_ptr< Peer >, std::shared_ptr< protocol::TMLedgerData > > > mReceivedData
Definition: InboundLedger.h:195
ripple::InboundLedger::init
void init(ScopedLockType &collectionLock)
Definition: InboundLedger.cpp:106
ripple::TimeoutCounter::journal_
beast::Journal journal_
Definition: TimeoutCounter.h:124
std::exception::what
T what(T... args)
Json::Value
Represents a JSON value.
Definition: json_value.h:145
ripple::root
Number root(Number f, unsigned d)
Definition: Number.cpp:624