rippled
InboundLedger.cpp
1 //------------------------------------------------------------------------------
2 /*
3  This file is part of rippled: https://github.com/ripple/rippled
4  Copyright (c) 2012, 2013 Ripple Labs Inc.
5 
6  Permission to use, copy, modify, and/or distribute this software for any
7  purpose with or without fee is hereby granted, provided that the above
8  copyright notice and this permission notice appear in all copies.
9 
10  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 //==============================================================================
19 
20 #include <ripple/app/ledger/AccountStateSF.h>
21 #include <ripple/app/ledger/InboundLedger.h>
22 #include <ripple/app/ledger/InboundLedgers.h>
23 #include <ripple/app/ledger/LedgerMaster.h>
24 #include <ripple/app/ledger/TransactionStateSF.h>
25 #include <ripple/app/main/Application.h>
26 #include <ripple/app/misc/NetworkOPs.h>
27 #include <ripple/basics/Log.h>
28 #include <ripple/core/JobQueue.h>
29 #include <ripple/nodestore/DatabaseShard.h>
30 #include <ripple/overlay/Overlay.h>
31 #include <ripple/protocol/HashPrefix.h>
32 #include <ripple/protocol/jss.h>
33 #include <ripple/resource/Fees.h>
34 #include <ripple/shamap/SHAMapNodeID.h>
35 
36 #include <boost/iterator/function_output_iterator.hpp>
37 
38 #include <algorithm>
39 #include <random>
40 
41 namespace ripple {
42 
43 using namespace std::chrono_literals;
44 
// Tuning parameters for ledger acquisition.
// NOTE(review): the enumerator names/values below were lost in extraction
// and have been restored from upstream rippled -- confirm against history.
enum {
    // Number of peers to start with
    peerCountStart = 5

    // Number of peers to add on a timeout
    ,
    peerCountAdd = 3

    // how many timeouts before we give up
    ,
    ledgerTimeoutRetriesMax = 6

    // how many timeouts before we get aggressive
    ,
    ledgerBecomeAggressiveThreshold = 4

    // Number of nodes to find initially
    ,
    missingNodesFind = 256

    // Number of nodes to request for a reply
    ,
    reqNodesReply = 128

    // Number of nodes to request blindly
    ,
    reqNodes = 12
};
73 
// Timeout interval for each ledger acquisition attempt (milliseconds).
auto constexpr ledgerAcquireTimeout = 3000ms;
76 
78  Application& app,
79  uint256 const& hash,
80  std::uint32_t seq,
81  Reason reason,
82  clock_type& clock,
85  app,
86  hash,
88  {jtLEDGER_DATA, "InboundLedger", 5},
89  app.journal("InboundLedger"))
90  , m_clock(clock)
91  , mHaveHeader(false)
92  , mHaveState(false)
93  , mHaveTransactions(false)
94  , mSignaled(false)
95  , mByHash(true)
96  , mSeq(seq)
97  , mReason(reason)
98  , mReceiveDispatched(false)
99  , mPeerSet(std::move(peerSet))
100 {
101  JLOG(journal_.trace()) << "Acquiring ledger " << hash_;
102  touch();
103 }
104 
105 void
107 {
108  ScopedLockType sl(mtx_);
109  collectionLock.unlock();
110 
111  tryDB(app_.getNodeFamily().db());
112  if (failed_)
113  return;
114 
115  if (!complete_)
116  {
117  auto shardStore = app_.getShardStore();
118  if (mReason == Reason::SHARD)
119  {
120  if (!shardStore)
121  {
122  JLOG(journal_.error())
123  << "Acquiring shard with no shard store available";
124  failed_ = true;
125  return;
126  }
127 
128  mHaveHeader = false;
129  mHaveTransactions = false;
130  mHaveState = false;
131  mLedger.reset();
132 
133  tryDB(app_.getShardFamily()->db());
134  if (failed_)
135  return;
136  }
137  else if (shardStore && mSeq >= shardStore->earliestLedgerSeq())
138  {
139  if (auto l = shardStore->fetchLedger(hash_, mSeq))
140  {
141  mHaveHeader = true;
142  mHaveTransactions = true;
143  mHaveState = true;
144  complete_ = true;
145  mLedger = std::move(l);
146  }
147  }
148  }
149  if (!complete_)
150  {
151  addPeers();
152  queueJob(sl);
153  return;
154  }
155 
156  JLOG(journal_.debug()) << "Acquiring ledger we already have in "
157  << " local store. " << hash_;
158  mLedger->setImmutable(app_.config());
159 
161  return;
162 
164 
165  // Check if this could be a newer fully-validated ledger
166  if (mReason == Reason::CONSENSUS)
168 }
169 
172 {
173  auto const& peerIds = mPeerSet->getPeerIds();
174  return std::count_if(peerIds.begin(), peerIds.end(), [this](auto id) {
175  return (app_.overlay().findPeerByShortID(id) != nullptr);
176  });
177 }
178 
179 void
181 {
182  ScopedLockType sl(mtx_);
183 
184  // If we didn't know the sequence number, but now do, save it
185  if ((seq != 0) && (mSeq == 0))
186  mSeq = seq;
187 
188  // Prevent this from being swept
189  touch();
190 }
191 
192 bool
194 {
195  ScopedLockType sl(mtx_);
196  if (!isDone())
197  {
198  if (mLedger)
199  tryDB(mLedger->stateMap().family().db());
200  else if (mReason == Reason::SHARD)
201  tryDB(app_.getShardFamily()->db());
202  else
203  tryDB(app_.getNodeFamily().db());
204  if (failed_ || complete_)
205  {
206  done();
207  return true;
208  }
209  }
210  return false;
211 }
212 
214 {
215  // Save any received AS data not processed. It could be useful
216  // for populating a different ledger
217  for (auto& entry : mReceivedData)
218  {
219  if (entry.second->type() == protocol::liAS_NODE)
220  app_.getInboundLedgers().gotStaleData(entry.second);
221  }
222  if (!isDone())
223  {
224  JLOG(journal_.debug())
225  << "Acquire " << hash_ << " abort "
226  << ((timeouts_ == 0) ? std::string()
227  : (std::string("timeouts:") +
228  std::to_string(timeouts_) + " "))
229  << mStats.get();
230  }
231 }
232 
235  uint256 const& root,
236  SHAMap& map,
237  int max,
238  SHAMapSyncFilter* filter)
239 {
241 
242  if (!root.isZero())
243  {
244  if (map.getHash().isZero())
245  ret.push_back(root);
246  else
247  {
248  auto mn = map.getMissingNodes(max, filter);
249  ret.reserve(mn.size());
250  for (auto const& n : mn)
251  ret.push_back(n.second);
252  }
253  }
254 
255  return ret;
256 }
257 
260 {
261  return neededHashes(mLedger->info().txHash, mLedger->txMap(), max, filter);
262 }
263 
266 {
267  return neededHashes(
268  mLedger->info().accountHash, mLedger->stateMap(), max, filter);
269 }
270 
272 deserializeHeader(Slice data, bool hasHash)
273 {
274  SerialIter sit(data.data(), data.size());
275 
276  LedgerInfo info;
277 
278  info.seq = sit.get32();
279  info.drops = sit.get64();
280  info.parentHash = sit.get256();
281  info.txHash = sit.get256();
282  info.accountHash = sit.get256();
283  info.parentCloseTime =
287  info.closeFlags = sit.get8();
288 
289  if (hasHash)
290  info.hash = sit.get256();
291 
292  return info;
293 }
294 
/** Deserialize a ledger header that carries a 4-byte hash prefix. */
LedgerInfo
deserializePrefixedHeader(Slice data, bool hasHash)
{
    // Skip the 4-byte prefix; parse the remainder as a plain header.
    return deserializeHeader(data + 4, hasHash);
}
300 
301 // See how much of the ledger data is stored locally
302 // Data found in a fetch pack will be stored
303 void
305 {
306  if (!mHaveHeader)
307  {
308  auto makeLedger = [&, this](Blob const& data) {
309  JLOG(journal_.trace()) << "Ledger header found in fetch pack";
310  mLedger = std::make_shared<Ledger>(
312  app_.config(),
314  : app_.getNodeFamily());
315  if (mLedger->info().hash != hash_ ||
316  (mSeq != 0 && mSeq != mLedger->info().seq))
317  {
318  // We know for a fact the ledger can never be acquired
319  JLOG(journal_.warn())
320  << "hash " << hash_ << " seq " << std::to_string(mSeq)
321  << " cannot be a ledger";
322  mLedger.reset();
323  failed_ = true;
324  }
325  };
326 
327  // Try to fetch the ledger header from the DB
328  if (auto nodeObject = srcDB.fetchNodeObject(hash_, mSeq))
329  {
330  JLOG(journal_.trace()) << "Ledger header found in local store";
331 
332  makeLedger(nodeObject->getData());
333  if (failed_)
334  return;
335 
336  // Store the ledger header if the source and destination differ
337  auto& dstDB{mLedger->stateMap().family().db()};
338  if (std::addressof(dstDB) != std::addressof(srcDB))
339  {
340  Blob blob{nodeObject->getData()};
341  dstDB.store(
342  hotLEDGER, std::move(blob), hash_, mLedger->info().seq);
343  }
344  }
345  else
346  {
347  // Try to fetch the ledger header from a fetch pack
348  auto data = app_.getLedgerMaster().getFetchPack(hash_);
349  if (!data)
350  return;
351 
352  JLOG(journal_.trace()) << "Ledger header found in fetch pack";
353 
354  makeLedger(*data);
355  if (failed_)
356  return;
357 
358  // Store the ledger header in the ledger's database
359  mLedger->stateMap().family().db().store(
360  hotLEDGER, std::move(*data), hash_, mLedger->info().seq);
361  }
362 
363  if (mSeq == 0)
364  mSeq = mLedger->info().seq;
365  mLedger->stateMap().setLedgerSeq(mSeq);
366  mLedger->txMap().setLedgerSeq(mSeq);
367  mHaveHeader = true;
368  }
369 
370  if (!mHaveTransactions)
371  {
372  if (mLedger->info().txHash.isZero())
373  {
374  JLOG(journal_.trace()) << "No TXNs to fetch";
375  mHaveTransactions = true;
376  }
377  else
378  {
379  TransactionStateSF filter(
380  mLedger->txMap().family().db(), app_.getLedgerMaster());
381  if (mLedger->txMap().fetchRoot(
382  SHAMapHash{mLedger->info().txHash}, &filter))
383  {
384  if (neededTxHashes(1, &filter).empty())
385  {
386  JLOG(journal_.trace()) << "Had full txn map locally";
387  mHaveTransactions = true;
388  }
389  }
390  }
391  }
392 
393  if (!mHaveState)
394  {
395  if (mLedger->info().accountHash.isZero())
396  {
397  JLOG(journal_.fatal())
398  << "We are acquiring a ledger with a zero account hash";
399  failed_ = true;
400  return;
401  }
402  AccountStateSF filter(
403  mLedger->stateMap().family().db(), app_.getLedgerMaster());
404  if (mLedger->stateMap().fetchRoot(
405  SHAMapHash{mLedger->info().accountHash}, &filter))
406  {
407  if (neededStateHashes(1, &filter).empty())
408  {
409  JLOG(journal_.trace()) << "Had full AS map locally";
410  mHaveState = true;
411  }
412  }
413  }
414 
416  {
417  JLOG(journal_.debug()) << "Had everything locally";
418  complete_ = true;
419  mLedger->setImmutable(app_.config());
420  }
421 }
422 
425 void
427 {
428  mRecentNodes.clear();
429 
430  if (isDone())
431  {
432  JLOG(journal_.info()) << "Already done " << hash_;
433  return;
434  }
435 
437  {
438  if (mSeq != 0)
439  {
440  JLOG(journal_.warn())
441  << timeouts_ << " timeouts for ledger " << mSeq;
442  }
443  else
444  {
445  JLOG(journal_.warn())
446  << timeouts_ << " timeouts for ledger " << hash_;
447  }
448  failed_ = true;
449  done();
450  return;
451  }
452 
453  if (!wasProgress)
454  {
455  checkLocal();
456 
457  mByHash = true;
458 
459  std::size_t pc = getPeerCount();
460  JLOG(journal_.debug())
461  << "No progress(" << pc << ") for ledger " << hash_;
462 
463  // addPeers triggers if the reason is not HISTORY
464  // So if the reason IS HISTORY, need to trigger after we add
465  // otherwise, we need to trigger before we add
466  // so each peer gets triggered once
467  if (mReason != Reason::HISTORY)
469  addPeers();
470  if (mReason == Reason::HISTORY)
472  }
473 }
474 
476 void
478 {
479  mPeerSet->addPeers(
481  [this](auto peer) { return peer->hasLedger(hash_, mSeq); },
482  [this](auto peer) {
483  // For historical nodes, do not trigger too soon
484  // since a fetch pack is probably coming
485  if (mReason != Reason::HISTORY)
487  });
488 }
489 
492 {
493  return shared_from_this();
494 }
495 
496 void
498 {
499  if (mSignaled)
500  return;
501 
502  mSignaled = true;
503  touch();
504 
505  JLOG(journal_.debug()) << "Acquire " << hash_ << (failed_ ? " fail " : " ")
506  << ((timeouts_ == 0)
507  ? std::string()
508  : (std::string("timeouts:") +
509  std::to_string(timeouts_) + " "))
510  << mStats.get();
511 
512  assert(complete_ || failed_);
513 
514  if (complete_ && !failed_ && mLedger)
515  {
516  mLedger->setImmutable(app_.config());
517  switch (mReason)
518  {
519  case Reason::SHARD:
521  [[fallthrough]];
522  case Reason::HISTORY:
524  break;
525  default:
527  break;
528  }
529  }
530 
531  // We hold the PeerSet lock, so must dispatch
533  jtLEDGER_DATA, "AcquisitionDone", [self = shared_from_this()]() {
534  if (self->complete_ && !self->failed_)
535  {
536  self->app_.getLedgerMaster().checkAccept(self->getLedger());
537  self->app_.getLedgerMaster().tryAdvance();
538  }
539  else
540  self->app_.getInboundLedgers().logFailure(
541  self->hash_, self->mSeq);
542  });
543 }
544 
547 void
549 {
550  ScopedLockType sl(mtx_);
551 
552  if (isDone())
553  {
554  JLOG(journal_.debug())
555  << "Trigger on ledger: " << hash_ << (complete_ ? " completed" : "")
556  << (failed_ ? " failed" : "");
557  return;
558  }
559 
560  if (auto stream = journal_.trace())
561  {
562  if (peer)
563  stream << "Trigger acquiring ledger " << hash_ << " from " << peer;
564  else
565  stream << "Trigger acquiring ledger " << hash_;
566 
567  if (complete_ || failed_)
568  stream << "complete=" << complete_ << " failed=" << failed_;
569  else
570  stream << "header=" << mHaveHeader << " tx=" << mHaveTransactions
571  << " as=" << mHaveState;
572  }
573 
574  if (!mHaveHeader)
575  {
576  tryDB(
578  : app_.getNodeFamily().db());
579  if (failed_)
580  {
581  JLOG(journal_.warn()) << " failed local for " << hash_;
582  return;
583  }
584  }
585 
586  protocol::TMGetLedger tmGL;
587  tmGL.set_ledgerhash(hash_.begin(), hash_.size());
588 
589  if (timeouts_ != 0)
590  {
591  // Be more aggressive if we've timed out at least once
592  tmGL.set_querytype(protocol::qtINDIRECT);
593 
594  if (!progress_ && !failed_ && mByHash &&
596  {
597  auto need = getNeededHashes();
598 
599  if (!need.empty())
600  {
601  protocol::TMGetObjectByHash tmBH;
602  bool typeSet = false;
603  tmBH.set_query(true);
604  tmBH.set_ledgerhash(hash_.begin(), hash_.size());
605  for (auto const& p : need)
606  {
607  JLOG(journal_.debug()) << "Want: " << p.second;
608 
609  if (!typeSet)
610  {
611  tmBH.set_type(p.first);
612  typeSet = true;
613  }
614 
615  if (p.first == tmBH.type())
616  {
617  protocol::TMIndexedObject* io = tmBH.add_objects();
618  io->set_hash(p.second.begin(), p.second.size());
619  if (mSeq != 0)
620  io->set_ledgerseq(mSeq);
621  }
622  }
623 
624  auto packet =
625  std::make_shared<Message>(tmBH, protocol::mtGET_OBJECTS);
626  auto const& peerIds = mPeerSet->getPeerIds();
628  peerIds.begin(), peerIds.end(), [this, &packet](auto id) {
629  if (auto p = app_.overlay().findPeerByShortID(id))
630  {
631  mByHash = false;
632  p->send(packet);
633  }
634  });
635  }
636  else
637  {
638  JLOG(journal_.info())
639  << "getNeededHashes says acquire is complete";
640  mHaveHeader = true;
641  mHaveTransactions = true;
642  mHaveState = true;
643  complete_ = true;
644  }
645  }
646  }
647 
648  // We can't do much without the header data because we don't know the
649  // state or transaction root hashes.
650  if (!mHaveHeader && !failed_)
651  {
652  tmGL.set_itype(protocol::liBASE);
653  if (mSeq != 0)
654  tmGL.set_ledgerseq(mSeq);
655  JLOG(journal_.trace()) << "Sending header request to "
656  << (peer ? "selected peer" : "all peers");
657  mPeerSet->sendRequest(tmGL, peer);
658  return;
659  }
660 
661  if (mLedger)
662  tmGL.set_ledgerseq(mLedger->info().seq);
663 
664  if (reason != TriggerReason::reply)
665  {
666  // If we're querying blind, don't query deep
667  tmGL.set_querydepth(0);
668  }
669  else if (peer && peer->isHighLatency())
670  {
671  // If the peer has high latency, query extra deep
672  tmGL.set_querydepth(2);
673  }
674  else
675  tmGL.set_querydepth(1);
676 
677  // Get the state data first because it's the most likely to be useful
678  // if we wind up abandoning this fetch.
679  if (mHaveHeader && !mHaveState && !failed_)
680  {
681  assert(mLedger);
682 
683  if (!mLedger->stateMap().isValid())
684  {
685  failed_ = true;
686  }
687  else if (mLedger->stateMap().getHash().isZero())
688  {
689  // we need the root node
690  tmGL.set_itype(protocol::liAS_NODE);
691  *tmGL.add_nodeids() = SHAMapNodeID().getRawString();
692  JLOG(journal_.trace()) << "Sending AS root request to "
693  << (peer ? "selected peer" : "all peers");
694  mPeerSet->sendRequest(tmGL, peer);
695  return;
696  }
697  else
698  {
699  AccountStateSF filter(
700  mLedger->stateMap().family().db(), app_.getLedgerMaster());
701 
702  // Release the lock while we process the large state map
703  sl.unlock();
704  auto nodes =
705  mLedger->stateMap().getMissingNodes(missingNodesFind, &filter);
706  sl.lock();
707 
708  // Make sure nothing happened while we released the lock
709  if (!failed_ && !complete_ && !mHaveState)
710  {
711  if (nodes.empty())
712  {
713  if (!mLedger->stateMap().isValid())
714  failed_ = true;
715  else
716  {
717  mHaveState = true;
718 
719  if (mHaveTransactions)
720  complete_ = true;
721  }
722  }
723  else
724  {
725  filterNodes(nodes, reason);
726 
727  if (!nodes.empty())
728  {
729  tmGL.set_itype(protocol::liAS_NODE);
730  for (auto const& id : nodes)
731  {
732  *(tmGL.add_nodeids()) = id.first.getRawString();
733  }
734 
735  JLOG(journal_.trace())
736  << "Sending AS node request (" << nodes.size()
737  << ") to "
738  << (peer ? "selected peer" : "all peers");
739  mPeerSet->sendRequest(tmGL, peer);
740  return;
741  }
742  else
743  {
744  JLOG(journal_.trace()) << "All AS nodes filtered";
745  }
746  }
747  }
748  }
749  }
750 
751  if (mHaveHeader && !mHaveTransactions && !failed_)
752  {
753  assert(mLedger);
754 
755  if (!mLedger->txMap().isValid())
756  {
757  failed_ = true;
758  }
759  else if (mLedger->txMap().getHash().isZero())
760  {
761  // we need the root node
762  tmGL.set_itype(protocol::liTX_NODE);
763  *(tmGL.add_nodeids()) = SHAMapNodeID().getRawString();
764  JLOG(journal_.trace()) << "Sending TX root request to "
765  << (peer ? "selected peer" : "all peers");
766  mPeerSet->sendRequest(tmGL, peer);
767  return;
768  }
769  else
770  {
771  TransactionStateSF filter(
772  mLedger->txMap().family().db(), app_.getLedgerMaster());
773 
774  auto nodes =
775  mLedger->txMap().getMissingNodes(missingNodesFind, &filter);
776 
777  if (nodes.empty())
778  {
779  if (!mLedger->txMap().isValid())
780  failed_ = true;
781  else
782  {
783  mHaveTransactions = true;
784 
785  if (mHaveState)
786  complete_ = true;
787  }
788  }
789  else
790  {
791  filterNodes(nodes, reason);
792 
793  if (!nodes.empty())
794  {
795  tmGL.set_itype(protocol::liTX_NODE);
796  for (auto const& n : nodes)
797  {
798  *(tmGL.add_nodeids()) = n.first.getRawString();
799  }
800  JLOG(journal_.trace())
801  << "Sending TX node request (" << nodes.size()
802  << ") to " << (peer ? "selected peer" : "all peers");
803  mPeerSet->sendRequest(tmGL, peer);
804  return;
805  }
806  else
807  {
808  JLOG(journal_.trace()) << "All TX nodes filtered";
809  }
810  }
811  }
812  }
813 
814  if (complete_ || failed_)
815  {
816  JLOG(journal_.debug())
817  << "Done:" << (complete_ ? " complete" : "")
818  << (failed_ ? " failed " : " ") << mLedger->info().seq;
819  sl.unlock();
820  done();
821  }
822 }
823 
824 void
825 InboundLedger::filterNodes(
827  TriggerReason reason)
828 {
829  // Sort nodes so that the ones we haven't recently
830  // requested come before the ones we have.
831  auto dup = std::stable_partition(
832  nodes.begin(), nodes.end(), [this](auto const& item) {
833  return mRecentNodes.count(item.second) == 0;
834  });
835 
836  // If everything is a duplicate we don't want to send
837  // any query at all except on a timeout where we need
838  // to query everyone:
839  if (dup == nodes.begin())
840  {
841  JLOG(journal_.trace()) << "filterNodes: all duplicates";
842 
843  if (reason != TriggerReason::timeout)
844  {
845  nodes.clear();
846  return;
847  }
848  }
849  else
850  {
851  JLOG(journal_.trace()) << "filterNodes: pruning duplicates";
852 
853  nodes.erase(dup, nodes.end());
854  }
855 
856  std::size_t const limit =
857  (reason == TriggerReason::reply) ? reqNodesReply : reqNodes;
858 
859  if (nodes.size() > limit)
860  nodes.resize(limit);
861 
862  for (auto const& n : nodes)
863  mRecentNodes.insert(n.second);
864 }
865 
// data must not have hash prefix
/** Install a ledger header received from a peer.

    @param data serialized header bytes (no hash prefix)
    @return true=normal, false=bad data
*/
bool
InboundLedger::takeHeader(std::string const& data)
{
    // Return value: true=normal, false=bad data
    JLOG(journal_.trace()) << "got header acquiring ledger " << hash_;

    // Already finished, or header already installed: accept silently.
    if (complete_ || failed_ || mHaveHeader)
        return true;

    // Use the shard family when acquiring on behalf of the shard store.
    auto* f = mReason == Reason::SHARD ? app_.getShardFamily()
                                       : &app_.getNodeFamily();
    mLedger = std::make_shared<Ledger>(
        deserializeHeader(makeSlice(data)), app_.config(), *f);
    // Reject a header whose computed hash (or sequence, when known)
    // doesn't match what this acquisition is after.
    if (mLedger->info().hash != hash_ ||
        (mSeq != 0 && mSeq != mLedger->info().seq))
    {
        JLOG(journal_.warn())
            << "Acquire hash mismatch: " << mLedger->info().hash
            << "!=" << hash_;
        mLedger.reset();
        return false;
    }
    if (mSeq == 0)
        mSeq = mLedger->info().seq;
    mLedger->stateMap().setLedgerSeq(mSeq);
    mLedger->txMap().setLedgerSeq(mSeq);
    mHaveHeader = true;

    // Persist the header, prefixed, to the appropriate node store.
    Serializer s(data.size() + 4);
    s.add32(HashPrefix::ledgerMaster);
    s.addRaw(data.data(), data.size());
    f->db().store(hotLEDGER, std::move(s.modData()), hash_, mSeq);

    // A zero map hash means that map is empty -- nothing to fetch.
    if (mLedger->info().txHash.isZero())
        mHaveTransactions = true;

    if (mLedger->info().accountHash.isZero())
        mHaveState = true;

    mLedger->txMap().setSynching();
    mLedger->stateMap().setSynching();

    return true;
}
914 
918 void
919 InboundLedger::receiveNode(protocol::TMLedgerData& packet, SHAMapAddNode& san)
920 {
921  if (!mHaveHeader)
922  {
923  JLOG(journal_.warn()) << "Missing ledger header";
924  san.incInvalid();
925  return;
926  }
927  if (packet.type() == protocol::liTX_NODE)
928  {
929  if (mHaveTransactions || failed_)
930  {
931  san.incDuplicate();
932  return;
933  }
934  }
935  else if (mHaveState || failed_)
936  {
937  san.incDuplicate();
938  return;
939  }
940 
941  auto [map, rootHash, filter] = [&]()
943  if (packet.type() == protocol::liTX_NODE)
944  return {
945  mLedger->txMap(),
946  SHAMapHash{mLedger->info().txHash},
947  std::make_unique<TransactionStateSF>(
948  mLedger->txMap().family().db(), app_.getLedgerMaster())};
949  return {
950  mLedger->stateMap(),
951  SHAMapHash{mLedger->info().accountHash},
952  std::make_unique<AccountStateSF>(
953  mLedger->stateMap().family().db(), app_.getLedgerMaster())};
954  }();
955 
956  try
957  {
958  auto const f = filter.get();
959 
960  for (auto const& node : packet.nodes())
961  {
962  auto const nodeID = deserializeSHAMapNodeID(node.nodeid());
963 
964  if (!nodeID)
965  throw std::runtime_error("data does not properly deserialize");
966 
967  if (nodeID->isRoot())
968  {
969  san += map.addRootNode(rootHash, makeSlice(node.nodedata()), f);
970  }
971  else
972  {
973  san += map.addKnownNode(*nodeID, makeSlice(node.nodedata()), f);
974  }
975 
976  if (!san.isGood())
977  {
978  JLOG(journal_.warn()) << "Received bad node data";
979  return;
980  }
981  }
982  }
983  catch (std::exception const& e)
984  {
985  JLOG(journal_.error()) << "Received bad node data: " << e.what();
986  san.incInvalid();
987  return;
988  }
989 
990  if (!map.isSynching())
991  {
992  if (packet.type() == protocol::liTX_NODE)
993  mHaveTransactions = true;
994  else
995  mHaveState = true;
996 
997  if (mHaveTransactions && mHaveState)
998  {
999  complete_ = true;
1000  done();
1001  }
1002  }
1003 }
1004 
1008 bool
1009 InboundLedger::takeAsRootNode(Slice const& data, SHAMapAddNode& san)
1010 {
1011  if (failed_ || mHaveState)
1012  {
1013  san.incDuplicate();
1014  return true;
1015  }
1016 
1017  if (!mHaveHeader)
1018  {
1019  assert(false);
1020  return false;
1021  }
1022 
1023  AccountStateSF filter(
1024  mLedger->stateMap().family().db(), app_.getLedgerMaster());
1025  san += mLedger->stateMap().addRootNode(
1026  SHAMapHash{mLedger->info().accountHash}, data, &filter);
1027  return san.isGood();
1028 }
1029 
1033 bool
1034 InboundLedger::takeTxRootNode(Slice const& data, SHAMapAddNode& san)
1035 {
1036  if (failed_ || mHaveTransactions)
1037  {
1038  san.incDuplicate();
1039  return true;
1040  }
1041 
1042  if (!mHaveHeader)
1043  {
1044  assert(false);
1045  return false;
1046  }
1047 
1048  TransactionStateSF filter(
1049  mLedger->txMap().family().db(), app_.getLedgerMaster());
1050  san += mLedger->txMap().addRootNode(
1051  SHAMapHash{mLedger->info().txHash}, data, &filter);
1052  return san.isGood();
1053 }
1054 
1056 InboundLedger::getNeededHashes()
1057 {
1059 
1060  if (!mHaveHeader)
1061  {
1062  ret.push_back(
1063  std::make_pair(protocol::TMGetObjectByHash::otLEDGER, hash_));
1064  return ret;
1065  }
1066 
1067  if (!mHaveState)
1068  {
1069  AccountStateSF filter(
1070  mLedger->stateMap().family().db(), app_.getLedgerMaster());
1071  for (auto const& h : neededStateHashes(4, &filter))
1072  {
1073  ret.push_back(
1074  std::make_pair(protocol::TMGetObjectByHash::otSTATE_NODE, h));
1075  }
1076  }
1077 
1078  if (!mHaveTransactions)
1079  {
1080  TransactionStateSF filter(
1081  mLedger->txMap().family().db(), app_.getLedgerMaster());
1082  for (auto const& h : neededTxHashes(4, &filter))
1083  {
1085  protocol::TMGetObjectByHash::otTRANSACTION_NODE, h));
1086  }
1087  }
1088 
1089  return ret;
1090 }
1091 
1095 bool
1096 InboundLedger::gotData(
1097  std::weak_ptr<Peer> peer,
1099 {
1100  std::lock_guard sl(mReceivedDataLock);
1101 
1102  if (isDone())
1103  return false;
1104 
1105  mReceivedData.emplace_back(peer, data);
1106 
1107  if (mReceiveDispatched)
1108  return false;
1109 
1110  mReceiveDispatched = true;
1111  return true;
1112 }
1113 
1117 // VFALCO NOTE, it is not necessary to pass the entire Peer,
1118 // we can get away with just a Resource::Consumer endpoint.
1119 //
1120 // TODO Change peer to Consumer
1121 //
/** Process one TMLedgerData message from a peer.

    Handles header replies (liBASE, which may also carry the AS/TX root
    nodes) and node replies (liTX_NODE / liAS_NODE). Bad data results in
    a resource charge against the peer.

    @param peer the sending peer (charged for invalid data)
    @param packet the message to process
    @return number of good nodes processed, or -1 on invalid data
*/
int
InboundLedger::processData(
    std::shared_ptr<Peer> peer,
    protocol::TMLedgerData& packet)
{
    if (packet.type() == protocol::liBASE)
    {
        if (packet.nodes().empty())
        {
            JLOG(journal_.warn()) << peer->id() << ": empty header data";
            peer->charge(Resource::feeInvalidRequest);
            return -1;
        }

        SHAMapAddNode san;

        ScopedLockType sl(mtx_);

        try
        {
            // Node 0 is the header; nodes 1 and 2 (if present) are the
            // state and transaction root nodes respectively.
            if (!mHaveHeader)
            {
                if (!takeHeader(packet.nodes(0).nodedata()))
                {
                    JLOG(journal_.warn()) << "Got invalid header data";
                    peer->charge(Resource::feeInvalidRequest);
                    return -1;
                }

                san.incUseful();
            }

            if (!mHaveState && (packet.nodes().size() > 1) &&
                !takeAsRootNode(makeSlice(packet.nodes(1).nodedata()), san))
            {
                JLOG(journal_.warn()) << "Included AS root invalid";
            }

            if (!mHaveTransactions && (packet.nodes().size() > 2) &&
                !takeTxRootNode(makeSlice(packet.nodes(2).nodedata()), san))
            {
                JLOG(journal_.warn()) << "Included TX root invalid";
            }
        }
        catch (std::exception const& ex)
        {
            JLOG(journal_.warn())
                << "Included AS/TX root invalid: " << ex.what();
            peer->charge(Resource::feeBadData);
            return -1;
        }

        if (san.isUseful())
            progress_ = true;

        mStats += san;
        return san.getGood();
    }

    if ((packet.type() == protocol::liTX_NODE) ||
        (packet.type() == protocol::liAS_NODE))
    {
        std::string type = packet.type() == protocol::liTX_NODE ? "liTX_NODE: "
                                                                : "liAS_NODE: ";

        if (packet.nodes().empty())
        {
            JLOG(journal_.info()) << peer->id() << ": response with no nodes";
            peer->charge(Resource::feeInvalidRequest);
            return -1;
        }

        ScopedLockType sl(mtx_);

        // Verify node IDs and data are complete
        for (auto const& node : packet.nodes())
        {
            if (!node.has_nodeid() || !node.has_nodedata())
            {
                JLOG(journal_.warn()) << "Got bad node";
                peer->charge(Resource::feeInvalidRequest);
                return -1;
            }
        }

        SHAMapAddNode san;
        receiveNode(packet, san);

        JLOG(journal_.debug())
            << "Ledger "
            << ((packet.type() == protocol::liTX_NODE) ? "TX" : "AS")
            << " node stats: " << san.get();

        if (san.isUseful())
            progress_ = true;

        mStats += san;
        return san.getGood();
    }

    // Unrecognized payload type.
    return -1;
}
1224 
1225 namespace detail {
1226 // Track the amount of useful data that each peer returns
1228 {
1229  // Map from peer to amount of useful the peer returned
1231  // The largest amount of useful data that any peer returned
1232  int maxCount = 0;
1233 
1234  // Update the data count for a peer
1235  void
1236  update(std::shared_ptr<Peer>&& peer, int dataCount)
1237  {
1238  if (dataCount <= 0)
1239  return;
1240  maxCount = std::max(maxCount, dataCount);
1241  auto i = counts.find(peer);
1242  if (i == counts.end())
1243  {
1244  counts.emplace(std::move(peer), dataCount);
1245  return;
1246  }
1247  i->second = std::max(i->second, dataCount);
1248  }
1249 
1250  // Prune all the peers that didn't return enough data.
1251  void
1253  {
1254  // Remove all the peers that didn't return at least half as much data as
1255  // the best peer
1256  auto const thresh = maxCount / 2;
1257  auto i = counts.begin();
1258  while (i != counts.end())
1259  {
1260  if (i->second < thresh)
1261  i = counts.erase(i);
1262  else
1263  ++i;
1264  }
1265  }
1266 
1267  // call F with the `peer` parameter with a random sample of at most n values
1268  // of the counts vector.
1269  template <class F>
1270  void
1272  {
1273  if (counts.empty())
1274  return;
1275 
1276  auto outFunc = [&f](auto&& v) { f(v.first); };
1278 #if _MSC_VER
1280  s.reserve(n);
1281  std::sample(
1282  counts.begin(), counts.end(), std::back_inserter(s), n, rng);
1283  for (auto& v : s)
1284  {
1285  outFunc(v);
1286  }
1287 #else
1288  std::sample(
1289  counts.begin(),
1290  counts.end(),
1291  boost::make_function_output_iterator(outFunc),
1292  n,
1293  rng);
1294 #endif
1295  }
1296 };
1297 } // namespace detail
1298 
/** Drain and process all queued TMLedgerData messages.

    Loops until mReceivedData is empty (clearing mReceiveDispatched under
    the lock before exiting, so gotData knows to schedule a new job), then
    re-triggers a random sample of the peers that returned the most useful
    data. Presumably runs on a dispatched job (see gotData's
    mReceiveDispatched handshake) -- confirm with callers.
*/
void
InboundLedger::runData()
{
    // Maximum number of peers to request data from
    constexpr std::size_t maxUsefulPeers = 6;

    decltype(mReceivedData) data;

    // Reserve some memory so the first couple iterations don't reallocate
    data.reserve(8);

    detail::PeerDataCounts dataCounts;

    for (;;)
    {
        data.clear();

        {
            // Take the whole queue in one swap; hold the lock only briefly.
            std::lock_guard sl(mReceivedDataLock);

            if (mReceivedData.empty())
            {
                mReceiveDispatched = false;
                break;
            }

            data.swap(mReceivedData);
        }

        // Process outside the queue lock; skip peers that disconnected.
        for (auto& entry : data)
        {
            if (auto peer = entry.first.lock())
            {
                int count = processData(peer, *(entry.second));
                dataCounts.update(std::move(peer), count);
            }
        }
    }

    // Select a random sample of the peers that gives us the most nodes that are
    // useful
    dataCounts.prune();
    dataCounts.sampleN(maxUsefulPeers, [&](std::shared_ptr<Peer> const& peer) {
        trigger(peer, TriggerReason::reply);
    });
}
1348 
1350 InboundLedger::getJson(int)
1351 {
1353 
1354  ScopedLockType sl(mtx_);
1355 
1356  ret[jss::hash] = to_string(hash_);
1357 
1358  if (complete_)
1359  ret[jss::complete] = true;
1360 
1361  if (failed_)
1362  ret[jss::failed] = true;
1363 
1364  if (!complete_ && !failed_)
1365  ret[jss::peers] = static_cast<int>(mPeerSet->getPeerIds().size());
1366 
1367  ret[jss::have_header] = mHaveHeader;
1368 
1369  if (mHaveHeader)
1370  {
1371  ret[jss::have_state] = mHaveState;
1372  ret[jss::have_transactions] = mHaveTransactions;
1373  }
1374 
1375  ret[jss::timeouts] = timeouts_;
1376 
1377  if (mHaveHeader && !mHaveState)
1378  {
1380  for (auto const& h : neededStateHashes(16, nullptr))
1381  {
1382  hv.append(to_string(h));
1383  }
1384  ret[jss::needed_state_hashes] = hv;
1385  }
1386 
1387  if (mHaveHeader && !mHaveTransactions)
1388  {
1390  for (auto const& h : neededTxHashes(16, nullptr))
1391  {
1392  hv.append(to_string(h));
1393  }
1394  ret[jss::needed_transaction_hashes] = hv;
1395  }
1396 
1397  return ret;
1398 }
1399 
1400 } // namespace ripple
ripple::InboundLedger::mRecentNodes
std::set< uint256 > mRecentNodes
Definition: InboundLedger.h:187
beast::Journal::fatal
Stream fatal() const
Definition: Journal.h:339
ripple::Application
Definition: Application.h:115
ripple::SHAMapAddNode
Definition: SHAMapAddNode.h:28
ripple::Application::getNodeFamily
virtual Family & getNodeFamily()=0
ripple::SHAMapAddNode::get
std::string get() const
Definition: SHAMapAddNode.h:156
ripple::InboundLedger::Reason::HISTORY
@ HISTORY
ripple::InboundLedger::getNeededHashes
std::vector< neededHash_t > getNeededHashes()
Definition: InboundLedger.cpp:1056
ripple::InboundLedger::mReason
const Reason mReason
Definition: InboundLedger.h:185
std::for_each
T for_each(T... args)
ripple::makeSlice
std::enable_if_t< std::is_same< T, char >::value||std::is_same< T, unsigned char >::value, Slice > makeSlice(std::array< T, N > const &a)
Definition: Slice.h:241
ripple::InboundLedger::TriggerReason
TriggerReason
Definition: InboundLedger.h:123
ripple::reqNodes
@ reqNodes
Definition: InboundLedger.cpp:71
ripple::NodeStore::Database
Persistency layer for NodeObject.
Definition: Database.h:51
std::string
STL class.
ripple::InboundLedger::Reason::CONSENSUS
@ CONSENSUS
std::shared_ptr
STL class.
ripple::InboundLedger::mHaveState
bool mHaveState
Definition: InboundLedger.h:180
ripple::LedgerInfo::parentHash
uint256 parentHash
Definition: ReadView.h:104
ripple::SHAMap::getHash
SHAMapHash getHash() const
Definition: SHAMap.cpp:843
std::exception
STL class.
beast::Journal::trace
Stream trace() const
Severity stream access functions.
Definition: Journal.h:309
ripple::InboundLedger::TriggerReason::added
@ added
ripple::deserializeSHAMapNodeID
std::optional< SHAMapNodeID > deserializeSHAMapNodeID(void const *data, std::size_t size)
Return an object representing a serialized SHAMap Node ID.
Definition: SHAMapNodeID.cpp:101
ripple::Serializer::modData
Blob & modData()
Definition: Serializer.h:178
ripple::Slice
An immutable linear range of bytes.
Definition: Slice.h:44
Json::arrayValue
@ arrayValue
array value (ordered list)
Definition: json_value.h:42
ripple::InboundLedger::mSignaled
bool mSignaled
Definition: InboundLedger.h:182
std::pair
std::vector::reserve
T reserve(T... args)
ripple::LedgerInfo::hash
uint256 hash
Definition: ReadView.h:101
ripple::ledgerAcquireTimeout
constexpr auto ledgerAcquireTimeout
Definition: InboundLedger.cpp:75
std::vector
STL class.
std::unordered_map::find
T find(T... args)
ripple::InboundLedger::update
void update(std::uint32_t seq)
Definition: InboundLedger.cpp:180
ripple::InboundLedger::touch
void touch()
Definition: InboundLedger.h:111
std::back_inserter
T back_inserter(T... args)
std::chrono::duration
ripple::peerCountStart
@ peerCountStart
Definition: InboundLedger.cpp:47
ripple::InboundLedger::~InboundLedger
~InboundLedger()
Definition: InboundLedger.cpp:213
ripple::TimeoutCounter::queueJob
void queueJob(ScopedLockType &)
Queue a job to call invokeOnTimer().
Definition: TimeoutCounter.cpp:69
ripple::TimeoutCounter::progress_
bool progress_
Whether forward progress has been made.
Definition: TimeoutCounter.h:134
random
ripple::InboundLedger::mByHash
bool mByHash
Definition: InboundLedger.h:183
std::unordered_map::emplace
T emplace(T... args)
ripple::neededHashes
static std::vector< uint256 > neededHashes(uint256 const &root, SHAMap &map, int max, SHAMapSyncFilter *filter)
Definition: InboundLedger.cpp:234
beast::Journal::warn
Stream warn() const
Definition: Journal.h:327
std::lock_guard
STL class.
ripple::SHAMapHash::isZero
bool isZero() const
Definition: SHAMapHash.h:53
ripple::Application::getShardStore
virtual NodeStore::DatabaseShard * getShardStore()=0
ripple::InboundLedger::neededStateHashes
std::vector< uint256 > neededStateHashes(int max, SHAMapSyncFilter *filter) const
Definition: InboundLedger.cpp:265
ripple::detail::PeerDataCounts::prune
void prune()
Definition: InboundLedger.cpp:1252
std::tuple
ripple::AccountStateSF
Definition: AccountStateSF.h:31
ripple::JobQueue::addJob
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition: JobQueue.h:166
ripple::InboundLedger::mPeerSet
std::unique_ptr< PeerSet > mPeerSet
Definition: InboundLedger.h:197
ripple::LedgerInfo::seq
LedgerIndex seq
Definition: ReadView.h:93
std::minstd_rand
ripple::TimeoutCounter
This class is an "active" object.
Definition: TimeoutCounter.h:66
ripple::deserializePrefixedHeader
LedgerInfo deserializePrefixedHeader(Slice data, bool hasHash)
Deserialize a ledger header (prefixed with 4 bytes) from a byte array.
Definition: InboundLedger.cpp:296
ripple::Family::db
virtual NodeStore::Database & db()=0
ripple::LedgerInfo::txHash
uint256 txHash
Definition: ReadView.h:102
std::shared_ptr::reset
T reset(T... args)
ripple::SHAMapHash
Definition: SHAMapHash.h:32
algorithm
ripple::jtLEDGER_DATA
@ jtLEDGER_DATA
Definition: Job.h:67
ripple::TimeoutCounter::mtx_
std::recursive_mutex mtx_
Definition: TimeoutCounter.h:125
ripple::Application::getInboundLedgers
virtual InboundLedgers & getInboundLedgers()=0
ripple::base_uint::size
constexpr static std::size_t size()
Definition: base_uint.h:518
std::unique_lock::unlock
T unlock(T... args)
ripple::InboundLedger::neededTxHashes
std::vector< uint256 > neededTxHashes(int max, SHAMapSyncFilter *filter) const
Definition: InboundLedger.cpp:259
std::vector::push_back
T push_back(T... args)
ripple::LedgerInfo::closeTime
NetClock::time_point closeTime
Definition: ReadView.h:124
ripple::base_uint< 256 >
std::sample
T sample(T... args)
ripple::reqNodesReply
@ reqNodesReply
Definition: InboundLedger.cpp:67
std::addressof
T addressof(T... args)
Json::Value::append
Value & append(const Value &value)
Append value to array at the end.
Definition: json_value.cpp:882
ripple::InboundLedger::mLedger
std::shared_ptr< Ledger > mLedger
Definition: InboundLedger.h:178
std::enable_shared_from_this< InboundLedger >::shared_from_this
T shared_from_this(T... args)
ripple::TimeoutCounter::app_
Application & app_
Definition: TimeoutCounter.h:123
ripple::SerialIter::get8
unsigned char get8()
Definition: Serializer.cpp:362
ripple::base_uint::isZero
bool isZero() const
Definition: base_uint.h:531
ripple::SHAMapAddNode::isUseful
bool isUseful() const
Definition: SHAMapAddNode.h:116
ripple::InboundLedger::getPeerCount
std::size_t getPeerCount() const
Definition: InboundLedger.cpp:171
Json::objectValue
@ objectValue
object value (collection of name/value pairs).
Definition: json_value.h:43
ripple::SerialIter::get256
uint256 get256()
Definition: Serializer.h:376
ripple::SerialIter::get64
std::uint64_t get64()
Definition: Serializer.cpp:399
std::random_device
ripple::InboundLedger::addPeers
void addPeers()
Add more peers to the set, if possible.
Definition: InboundLedger.cpp:477
ripple::LedgerMaster::getFetchPack
std::optional< Blob > getFetchPack(uint256 const &hash) override
Retrieves partial ledger data of the coresponding hash from peers.
Definition: LedgerMaster.cpp:2141
ripple::peerCountAdd
@ peerCountAdd
Definition: InboundLedger.cpp:51
ripple::Application::getLedgerMaster
virtual LedgerMaster & getLedgerMaster()=0
ripple::TimeoutCounter::failed_
bool failed_
Definition: TimeoutCounter.h:132
ripple::InboundLedgers::gotStaleData
virtual void gotStaleData(std::shared_ptr< protocol::TMLedgerData > packet)=0
ripple::InboundLedger::InboundLedger
InboundLedger(Application &app, uint256 const &hash, std::uint32_t seq, Reason reason, clock_type &, std::unique_ptr< PeerSet > peerSet)
Definition: InboundLedger.cpp:77
ripple::Application::config
virtual Config & config()=0
ripple::InboundLedgers::onLedgerFetched
virtual void onLedgerFetched()=0
Called when a complete ledger is obtained.
ripple::SHAMapAddNode::isGood
bool isGood() const
Definition: SHAMapAddNode.h:132
std::unique_lock< std::recursive_mutex >
ripple::SHAMap
A SHAMap is both a radix tree with a fan-out of 16 and a Merkle tree.
Definition: SHAMap.h:95
ripple::Serializer::addRaw
int addRaw(Blob const &vector)
Definition: Serializer.cpp:100
ripple::LedgerInfo::closeFlags
int closeFlags
Definition: ReadView.h:115
std::to_string
T to_string(T... args)
ripple::Application::getJobQueue
virtual JobQueue & getJobQueue()=0
ripple::InboundLedger::tryDB
void tryDB(NodeStore::Database &srcDB)
Definition: InboundLedger.cpp:304
beast::Journal::error
Stream error() const
Definition: Journal.h:333
beast::Journal::info
Stream info() const
Definition: Journal.h:321
std::chrono::time_point
std::unordered_map::erase
T erase(T... args)
std::runtime_error
STL class.
ripple::SerialIter
Definition: Serializer.h:310
ripple::detail::PeerDataCounts
Definition: InboundLedger.cpp:1227
std::uint32_t
ripple::missingNodesFind
@ missingNodesFind
Definition: InboundLedger.cpp:63
ripple::TimeoutCounter::isDone
bool isDone() const
Definition: TimeoutCounter.h:116
ripple::InboundLedger::mHaveHeader
bool mHaveHeader
Definition: InboundLedger.h:179
beast::abstract_clock< std::chrono::steady_clock >
ripple::SHAMap::getMissingNodes
std::vector< std::pair< SHAMapNodeID, uint256 > > getMissingNodes(int maxNodes, SHAMapSyncFilter *filter)
Check for nodes in the SHAMap not available.
Definition: SHAMapSync.cpp:317
ripple::LedgerMaster::checkAccept
void checkAccept(std::shared_ptr< Ledger const > const &ledger)
Definition: LedgerMaster.cpp:1040
ripple::LedgerInfo::drops
XRPAmount drops
Definition: ReadView.h:106
std::weak_ptr
STL class.
ripple::Serializer
Definition: Serializer.h:39
ripple::InboundLedger::TriggerReason::timeout
@ timeout
ripple::InboundLedger::pmDowncast
std::weak_ptr< TimeoutCounter > pmDowncast() override
Return a weak pointer to this.
Definition: InboundLedger.cpp:491
ripple
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: RCLCensorshipDetector.h:29
ripple::deserializeHeader
LedgerInfo deserializeHeader(Slice data, bool hasHash)
Deserialize a ledger header from a byte array.
Definition: InboundLedger.cpp:272
ripple::InboundLedger::mStats
SHAMapAddNode mStats
Definition: InboundLedger.h:189
ripple::TimeoutCounter::hash_
const uint256 hash_
The hash of the object (in practice, always a ledger) we are trying to fetch.
Definition: TimeoutCounter.h:129
ripple::detail::PeerDataCounts::update
void update(std::shared_ptr< Peer > &&peer, int dataCount)
Definition: InboundLedger.cpp:1236
ripple::Application::getShardFamily
virtual Family * getShardFamily()=0
ripple::base_uint::begin
iterator begin()
Definition: base_uint.h:132
std::unordered_map::begin
T begin(T... args)
ripple::InboundLedger::mSeq
std::uint32_t mSeq
Definition: InboundLedger.h:184
ripple::LedgerInfo::closeTimeResolution
NetClock::duration closeTimeResolution
Definition: ReadView.h:118
ripple::detail::PeerDataCounts::sampleN
void sampleN(std::size_t n, F &&f)
Definition: InboundLedger.cpp:1271
ripple::NodeStore::DatabaseShard::setStored
virtual void setStored(std::shared_ptr< Ledger const > const &ledger)=0
Notifies the database that the given ledger has been fully acquired and stored.
std::count_if
T count_if(T... args)
ripple::InboundLedger::done
void done()
Definition: InboundLedger.cpp:497
std::unordered_map::empty
T empty(T... args)
ripple::detail::PeerDataCounts::counts
std::unordered_map< std::shared_ptr< Peer >, int > counts
Definition: InboundLedger.cpp:1230
ripple::InboundLedger::trigger
void trigger(std::shared_ptr< Peer > const &, TriggerReason)
Request more nodes, perhaps from a specific peer.
Definition: InboundLedger.cpp:548
beast::Journal::debug
Stream debug() const
Definition: Journal.h:315
ripple::SHAMapAddNode::incInvalid
void incInvalid()
Definition: SHAMapAddNode.h:80
std::size_t
ripple::SHAMapAddNode::incUseful
void incUseful()
Definition: SHAMapAddNode.h:86
ripple::hotLEDGER
@ hotLEDGER
Definition: NodeObject.h:34
std::make_pair
T make_pair(T... args)
ripple::Serializer::add32
int add32(std::uint32_t i)
Definition: Serializer.cpp:38
ripple::LedgerInfo
Information about the notional ledger backing the view.
Definition: ReadView.h:85
ripple::SHAMapAddNode::getGood
int getGood() const
Definition: SHAMapAddNode.h:104
ripple::LedgerMaster::storeLedger
bool storeLedger(std::shared_ptr< Ledger const > ledger)
Definition: LedgerMaster.cpp:535
std::unordered_map::end
T end(T... args)
ripple::InboundLedger::Reason
Reason
Definition: InboundLedger.h:43
ripple::NodeStore::Database::fetchNodeObject
std::shared_ptr< NodeObject > fetchNodeObject(uint256 const &hash, std::uint32_t ledgerSeq=0, FetchType fetchType=FetchType::synchronous, bool duplicate=false)
Fetch a node object.
Definition: Database.cpp:252
ripple::SHAMapAddNode::incDuplicate
void incDuplicate()
Definition: SHAMapAddNode.h:92
ripple::TimeoutCounter::complete_
bool complete_
Definition: TimeoutCounter.h:131
std::max
T max(T... args)
ripple::SerialIter::get32
std::uint32_t get32()
Definition: Serializer.cpp:386
ripple::ledgerBecomeAggressiveThreshold
@ ledgerBecomeAggressiveThreshold
Definition: InboundLedger.cpp:59
ripple::SHAMapSyncFilter
Definition: SHAMapSyncFilter.h:30
ripple::TimeoutCounter::timeouts_
int timeouts_
Definition: TimeoutCounter.h:130
std::unique_ptr
STL class.
std::stable_partition
T stable_partition(T... args)
ripple::InboundLedger::Reason::SHARD
@ SHARD
std::unordered_map
STL class.
ripple::InboundLedger::onTimer
void onTimer(bool progress, ScopedLockType &peerSetLock) override
Called with a lock by the PeerSet when the timer expires.
Definition: InboundLedger.cpp:426
ripple::InboundLedger::checkLocal
bool checkLocal()
Definition: InboundLedger.cpp:193
ripple::TransactionStateSF
Definition: TransactionStateSF.h:31
ripple::ledgerTimeoutRetriesMax
@ ledgerTimeoutRetriesMax
Definition: InboundLedger.cpp:55
ripple::InboundLedger::mHaveTransactions
bool mHaveTransactions
Definition: InboundLedger.h:181
ripple::InboundLedger::mReceivedData
std::vector< std::pair< std::weak_ptr< Peer >, std::shared_ptr< protocol::TMLedgerData > > > mReceivedData
Definition: InboundLedger.h:195
ripple::InboundLedger::init
void init(ScopedLockType &collectionLock)
Definition: InboundLedger.cpp:106
ripple::LedgerInfo::accountHash
uint256 accountHash
Definition: ReadView.h:103
ripple::TimeoutCounter::journal_
beast::Journal journal_
Definition: TimeoutCounter.h:124
std::exception::what
T what(T... args)
Json::Value
Represents a JSON value.
Definition: json_value.h:145
ripple::LedgerInfo::parentCloseTime
NetClock::time_point parentCloseTime
Definition: ReadView.h:94