rippled
InboundLedger.cpp
1 //------------------------------------------------------------------------------
2 /*
3  This file is part of rippled: https://github.com/ripple/rippled
4  Copyright (c) 2012, 2013 Ripple Labs Inc.
5 
6  Permission to use, copy, modify, and/or distribute this software for any
7  purpose with or without fee is hereby granted, provided that the above
8  copyright notice and this permission notice appear in all copies.
9 
10  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 13  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 //==============================================================================
19 
20 #include <ripple/app/ledger/AccountStateSF.h>
21 #include <ripple/app/ledger/InboundLedger.h>
22 #include <ripple/app/ledger/InboundLedgers.h>
23 #include <ripple/app/ledger/LedgerMaster.h>
24 #include <ripple/app/ledger/TransactionStateSF.h>
25 #include <ripple/app/main/Application.h>
26 #include <ripple/app/misc/NetworkOPs.h>
27 #include <ripple/basics/Log.h>
28 #include <ripple/core/JobQueue.h>
29 #include <ripple/nodestore/DatabaseShard.h>
30 #include <ripple/overlay/Overlay.h>
31 #include <ripple/protocol/HashPrefix.h>
32 #include <ripple/protocol/jss.h>
33 #include <ripple/resource/Fees.h>
34 #include <ripple/shamap/SHAMapNodeID.h>
35 
36 #include <algorithm>
37 
38 namespace ripple {
39 
40 using namespace std::chrono_literals;
41 
// Tuning constants for ledger acquisition.
// NOTE(review): the doxygen extraction dropped the enumerator names/values.
// The cross-reference index shows at least `peerCountStart` (line 44) and
// `reqNodes` (line 68) belong here — restore from upstream before compiling.
42 enum {
 43  // Number of peers to start with
 45 
 46  // Number of peers to add on a timeout
 47  ,
 49 
 50  // how many timeouts before we give up
 51  ,
 53 
 54  // how many timeouts before we get aggressive
 55  ,
 57 
 58  // Number of nodes to find initially
 59  ,
 61 
 62  // Number of nodes to request for a reply
 63  ,
 65 
 66  // Number of nodes to request blindly
 67  ,
 69 };
70 
 71 // millisecond for each ledger timeout
// Interval passed to the PeerSet base (see the constructor below); each
// expiry drives one onTimer() pass.
 72 auto constexpr ledgerAcquireTimeout = 2500ms;
73 
// Constructor: records the target hash/sequence and acquisition reason and
// initializes all progress flags to "nothing acquired yet".
// NOTE(review): the `InboundLedger::InboundLedger(` signature line (orig. 74)
// was dropped by the extraction.
 75  Application& app,
 76  uint256 const& hash,
 77  std::uint32_t seq,
 78  Reason reason,
 79  clock_type& clock)
 80  : PeerSet(app, hash, ledgerAcquireTimeout, app.journal("InboundLedger"))
 81  , m_clock(clock)
 82  , mHaveHeader(false)
 83  , mHaveState(false)
 84  , mHaveTransactions(false)
 85  , mSignaled(false)
 86  , mByHash(true)
 87  , mSeq(seq)
 88  , mReason(reason)
 89  , mReceiveDispatched(false)
 90 {
 91  JLOG(m_journal.trace()) << "Acquiring ledger " << mHash;
 92  touch();
 93 }
94 
// Kick off acquisition: first try to satisfy the request entirely from local
// storage (node DB, shard DB, or shard store depending on mReason); if that
// does not complete the ledger, start asking peers and schedule the timer job.
// NOTE(review): signature line (orig. 96, presumably
// `InboundLedger::init(ScopedLockType& collectionLock)`) and orig. lines
// 98, 149-150, 153, 157 were dropped by the extraction.
 95 void
 97 {
 99  collectionLock.unlock();
 100 
 101  tryDB(app_.getNodeFamily().db());
 102  if (mFailed)
 103  return;
 104 
 105  if (!mComplete)
 106  {
 107  auto shardStore = app_.getShardStore();
 108  if (mReason == Reason::SHARD)
 109  {
 110  if (!shardStore)
 111  {
 112  JLOG(m_journal.error())
 113  << "Acquiring shard with no shard store available";
 114  mFailed = true;
 115  return;
 116  }
 117 
// Shard acquisition must re-verify everything against the shard DB,
// so discard any state derived from the node DB probe above.
 118  mHaveHeader = false;
 119  mHaveTransactions = false;
 120  mHaveState = false;
 121  mLedger.reset();
 122 
 123  tryDB(app_.getShardFamily()->db());
 124  if (mFailed)
 125  return;
 126  }
 127  else if (shardStore && mSeq >= shardStore->earliestLedgerSeq())
 128  {
 129  if (auto l = shardStore->fetchLedger(mHash, mSeq))
 130  {
 131  mHaveHeader = true;
 132  mHaveTransactions = true;
 133  mHaveState = true;
 134  mComplete = true;
 135  mLedger = std::move(l);
 136  }
 137  }
 138  }
 139  if (!mComplete)
 140  {
 141  addPeers();
 142  queueJob();
 143  return;
 144  }
 145 
 146  JLOG(m_journal.debug()) << "Acquiring ledger we already have in "
 147  << " local store. " << mHash;
 148  mLedger->setImmutable(app_.config());
 150 
 151  return;
 152 
 154 
 155  // Check if this could be a newer fully-validated ledger
// NOTE(review): the body of this CONSENSUS branch (orig. 157) was dropped.
 156  if (mReason == Reason::CONSENSUS)
 158 }
159 
// Count how many of the peers we are tracking are still connected in the
// overlay. NOTE(review): the `std::size_t InboundLedger::getPeerCount()`
// signature lines (orig. 160-161) were dropped by the extraction.
 162 {
 163  return std::count_if(mPeers.begin(), mPeers.end(), [this](auto id) {
 164  return app_.overlay().findPeerByShortID(id) != nullptr;
 165  });
 166 }
167 
// Queue a job to call invokeOnTimer(). If the server is loaded (the dropped
// condition on orig. line 171 — presumably a job-queue depth check) the timer
// is simply re-armed instead of queuing more work.
// NOTE(review): orig. lines 169 (signature), 171 (load condition) and 178
// (the `app_.getJobQueue().addJob(` call head) were dropped by the extraction.
 168 void
 170 {
 172  {
 173  JLOG(m_journal.debug()) << "Deferring InboundLedger timer due to load";
 174  setTimer();
 175  return;
 176  }
 177 
 179  jtLEDGER_DATA, "InboundLedger", [ptr = shared_from_this()](Job&) {
 180  ptr->invokeOnTimer();
 181  });
 182 }
183 
// Record a newly learned sequence number for this ledger and refresh the
// last-touch time so the acquisition is not swept.
// NOTE(review): signature line (orig. 185, `InboundLedger::update(std::uint32_t seq)`)
// was dropped by the extraction.
 184 void
 186 {
 187  ScopedLockType sl(mLock);
 188 
 189  // If we didn't know the sequence number, but now do, save it
 190  if ((seq != 0) && (mSeq == 0))
 191  mSeq = seq;
 192 
 193  // Prevent this from being swept
 194  touch();
 195 }
196 
// Re-probe local storage for data that may have arrived since the last try
// (choosing the DB that matches our current state/reason). Returns true if
// this finished the acquisition (complete or failed) and done() was invoked.
// NOTE(review): signature line (orig. 198) was dropped by the extraction.
 197 bool
 199 {
 200  ScopedLockType sl(mLock);
 201  if (!isDone())
 202  {
 203  if (mLedger)
 204  tryDB(mLedger->stateMap().family().db());
 205  else if (mReason == Reason::SHARD)
 206  tryDB(app_.getShardFamily()->db());
 207  else
 208  tryDB(app_.getNodeFamily().db());
 209  if (mFailed || mComplete)
 210  {
 211  done();
 212  return true;
 213  }
 214  }
 215  return false;
 216 }
217 
// Destructor: salvage unprocessed account-state packets for reuse on other
// acquisitions, and log an abort line if we never finished.
// NOTE(review): the `InboundLedger::~InboundLedger()` signature line
// (orig. 218) was dropped by the extraction.
 219 {
 220  // Save any received AS data not processed. It could be useful
 221  // for populating a different ledger
 222  for (auto& entry : mReceivedData)
 223  {
 224  if (entry.second->type() == protocol::liAS_NODE)
 225  app_.getInboundLedgers().gotStaleData(entry.second);
 226  }
 227  if (!isDone())
 228  {
 229  JLOG(m_journal.debug())
 230  << "Acquire " << mHash << " abort "
 231  << ((mTimeouts == 0) ? std::string()
 232  : (std::string("timeouts:") +
 233  std::to_string(mTimeouts) + " "))
 234  << mStats.get();
 235  }
 236 }
237 
// Return up to `max` node hashes still missing from the transaction map; if
// even the tx root is absent, the ledger's txHash itself is the needed hash.
// NOTE(review): orig. lines 238-239 (signature, per the index
// `neededTxHashes(int max, SHAMapSyncFilter*) const`) and 241 (the local
// result vector declaration, presumably `std::vector<uint256> ret;`) were
// dropped by the extraction.
 240 {
 242 
 243  if (mLedger->info().txHash.isNonZero())
 244  {
 245  if (mLedger->txMap().getHash().isZero())
 246  ret.push_back(mLedger->info().txHash);
 247  else
 248  ret = mLedger->txMap().getNeededHashes(max, filter);
 249  }
 250 
 251  return ret;
 252 }
253 
// Return up to `max` node hashes still missing from the account-state map;
// mirror image of neededTxHashes() above.
// NOTE(review): orig. lines 254-255 (signature) and 257 (result vector
// declaration) were dropped by the extraction.
 256 {
 258 
 259  if (mLedger->info().accountHash.isNonZero())
 260  {
 261  if (mLedger->stateMap().getHash().isZero())
 262  ret.push_back(mLedger->info().accountHash);
 263  else
 264  ret = mLedger->stateMap().getNeededHashes(max, filter);
 265  }
 266 
 267  return ret;
 268 }
269 
// Deserialize a ledger header (without hash prefix) from a byte slice into a
// LedgerInfo, reading fields in canonical serialization order.
// NOTE(review): orig. lines 270-271 (signature `LedgerInfo
// deserializeHeader(Slice data)` per the index) and 283-285 (the remainder of
// the parentCloseTime expression plus the closeTime/closeTimeResolution
// reads) were dropped by the extraction — the header parse is incomplete as
// shown.
 272 {
 273  SerialIter sit(data.data(), data.size());
 274 
 275  LedgerInfo info;
 276 
 277  info.seq = sit.get32();
 278  info.drops = sit.get64();
 279  info.parentHash = sit.get256();
 280  info.txHash = sit.get256();
 281  info.accountHash = sit.get256();
 282  info.parentCloseTime =
 286  info.closeFlags = sit.get8();
 287 
 288  return info;
 289 }
290 
// Deserialize a ledger header that carries a 4-byte prefix by skipping the
// prefix and delegating to deserializeHeader().
// NOTE(review): the signature line (orig. 292) was dropped by the extraction;
// the parameter is evidently a pointer-like `data`.
 291 LedgerInfo
 293 {
 294  return deserializeHeader(data + 4);
 295 }
296 
 297 // See how much of the ledger data is stored locally
 298 // Data found in a fetch pack will be stored
// Probes `srcDB` (and the LedgerMaster fetch-pack cache) for the header, tx
// map, and state map, updating the mHave* flags; marks mComplete when all
// three are present, or mFailed when data proves the ledger unacquirable.
// NOTE(review): orig. lines 300 (signature, presumably
// `InboundLedger::tryDB(NodeStore::Database& srcDB)`), 307/309 (interior of
// the make_shared<Ledger> call — the deserializeHeader(...) argument and the
// shard/node family ternary) and 411 (the `if (mComplete/...)` condition
// guarding the final branch) were dropped by the extraction.
 299 void
 301 {
 302  if (!mHaveHeader)
 303  {
 304  auto makeLedger = [&, this](Blob const& data) {
 305  JLOG(m_journal.trace()) << "Ledger header found in fetch pack";
 306  mLedger = std::make_shared<Ledger>(
 308  app_.config(),
 310  : app_.getNodeFamily());
 311  if (mLedger->info().hash != mHash ||
 312  (mSeq != 0 && mSeq != mLedger->info().seq))
 313  {
 314  // We know for a fact the ledger can never be acquired
 315  JLOG(m_journal.warn())
 316  << "hash " << mHash << " seq " << std::to_string(mSeq)
 317  << " cannot be a ledger";
 318  mLedger.reset();
 319  mFailed = true;
 320  }
 321  };
 322 
 323  // Try to fetch the ledger header from the DB
 324  if (auto node = srcDB.fetch(mHash, mSeq))
 325  {
 326  JLOG(m_journal.trace()) << "Ledger header found in local store";
 327 
 328  makeLedger(node->getData());
 329  if (mFailed)
 330  return;
 331 
 332  // Store the ledger header if the source and destination differ
 333  auto& dstDB{mLedger->stateMap().family().db()};
 334  if (std::addressof(dstDB) != std::addressof(srcDB))
 335  {
 336  Blob blob{node->getData()};
 337  dstDB.store(
 338  hotLEDGER, std::move(blob), mHash, mLedger->info().seq);
 339  }
 340  }
 341  else
 342  {
 343  // Try to fetch the ledger header from a fetch pack
 344  auto data = app_.getLedgerMaster().getFetchPack(mHash);
 345  if (!data)
 346  return;
 347 
 348  JLOG(m_journal.trace()) << "Ledger header found in fetch pack";
 349 
 350  makeLedger(*data);
 351  if (mFailed)
 352  return;
 353 
 354  // Store the ledger header in the ledger's database
 355  mLedger->stateMap().family().db().store(
 356  hotLEDGER, std::move(*data), mHash, mLedger->info().seq);
 357  }
 358 
 359  if (mSeq == 0)
 360  mSeq = mLedger->info().seq;
 361  mLedger->stateMap().setLedgerSeq(mSeq);
 362  mLedger->txMap().setLedgerSeq(mSeq);
 363  mHaveHeader = true;
 364  }
 365 
 366  if (!mHaveTransactions)
 367  {
 368  if (mLedger->info().txHash.isZero())
 369  {
 370  JLOG(m_journal.trace()) << "No TXNs to fetch";
 371  mHaveTransactions = true;
 372  }
 373  else
 374  {
 375  TransactionStateSF filter(
 376  mLedger->txMap().family().db(), app_.getLedgerMaster());
 377  if (mLedger->txMap().fetchRoot(
 378  SHAMapHash{mLedger->info().txHash}, &filter))
 379  {
 380  if (neededTxHashes(1, &filter).empty())
 381  {
 382  JLOG(m_journal.trace()) << "Had full txn map locally";
 383  mHaveTransactions = true;
 384  }
 385  }
 386  }
 387  }
 388 
 389  if (!mHaveState)
 390  {
 391  if (mLedger->info().accountHash.isZero())
 392  {
 393  JLOG(m_journal.fatal())
 394  << "We are acquiring a ledger with a zero account hash";
 395  mFailed = true;
 396  return;
 397  }
 398  AccountStateSF filter(
 399  mLedger->stateMap().family().db(), app_.getLedgerMaster());
 400  if (mLedger->stateMap().fetchRoot(
 401  SHAMapHash{mLedger->info().accountHash}, &filter))
 402  {
 403  if (neededStateHashes(1, &filter).empty())
 404  {
 405  JLOG(m_journal.trace()) << "Had full AS map locally";
 406  mHaveState = true;
 407  }
 408  }
 409  }
 410 
// NOTE(review): the condition (orig. 411) for this block was dropped;
// presumably it tests that header, transactions, and state are all present.
 412  {
 413  JLOG(m_journal.debug()) << "Had everything locally";
 414  mComplete = true;
 415  mLedger->setImmutable(app_.config());
 416  }
 417 }
418 
// Timeout handler: clear the recently-requested-node cache, give up after
// too many timeouts, and if no progress was made since the last timer, widen
// the peer set and re-trigger requests.
// NOTE(review): orig. lines 422 (signature, with parameters `wasProgress`
// and a job lock per usage below), 432 (the give-up condition guarding the
// failure branch), 464 and 467 (the trigger(...) calls bracketing addPeers(),
// per the comment at orig. 459-462) were dropped by the extraction.
 421 void
 423 {
 424  mRecentNodes.clear();
 425 
 426  if (isDone())
 427  {
 428  JLOG(m_journal.info()) << "Already done " << mHash;
 429  return;
 430  }
 431 
 433  {
 434  if (mSeq != 0)
 435  {
 436  JLOG(m_journal.warn())
 437  << mTimeouts << " timeouts for ledger " << mSeq;
 438  }
 439  else
 440  {
 441  JLOG(m_journal.warn())
 442  << mTimeouts << " timeouts for ledger " << mHash;
 443  }
 444  mFailed = true;
 445  done();
 446  return;
 447  }
 448 
 449  if (!wasProgress)
 450  {
 451  checkLocal();
 452 
 453  mByHash = true;
 454 
 455  std::size_t pc = getPeerCount();
 456  JLOG(m_journal.debug())
 457  << "No progress(" << pc << ") for ledger " << mHash;
 458 
 459  // addPeers triggers if the reason is not HISTORY
 460  // So if the reason IS HISTORY, need to trigger after we add
 461  // otherwise, we need to trigger before we add
 462  // so each peer gets triggered once
 463  if (mReason != Reason::HISTORY)
 465  addPeers();
 466  if (mReason == Reason::HISTORY)
 468  }
 469 }
470 
// Add peers to this set, scored by whether they claim to have this ledger.
// NOTE(review): orig. lines 473 (signature) and 475-476 (the head of the
// `PeerSet::addPeers(limit, ...)` call — see PeerSet::addPeers in the index)
// were dropped by the extraction.
 472 void
 474 {
 477  [this](auto peer) { return peer->hasLedger(mHash, mSeq); });
 478 }
479 
// Return a shared_ptr to this object for the PeerSet machinery.
// NOTE(review): the return-type/signature lines (orig. 480-481) were dropped
// by the extraction.
 482 {
 483  return shared_from_this();
 484 }
485 
// Finish the acquisition exactly once: log the outcome, make the ledger
// immutable, store it per mReason, then dispatch a job (we hold the PeerSet
// lock, so the LedgerMaster work cannot run inline) to accept/advance or log
// the failure.
// NOTE(review): orig. lines 487 (signature), 510/513/516 (the per-Reason
// store statements inside the switch) and 522 (the `app_.getJobQueue().addJob(`
// call head) were dropped by the extraction.
 486 void
 488 {
 489  if (mSignaled)
 490  return;
 491 
 492  mSignaled = true;
 493  touch();
 494 
 495  JLOG(m_journal.debug())
 496  << "Acquire " << mHash << (mFailed ? " fail " : " ")
 497  << ((mTimeouts == 0)
 498  ? std::string()
 499  : (std::string("timeouts:") + std::to_string(mTimeouts) + " "))
 500  << mStats.get();
 501 
 502  assert(mComplete || mFailed);
 503 
 504  if (mComplete && !mFailed && mLedger)
 505  {
 506  mLedger->setImmutable(app_.config());
 507  switch (mReason)
 508  {
 509  case Reason::SHARD:
 511  [[fallthrough]];
 512  case Reason::HISTORY:
 514  break;
 515  default:
 517  break;
 518  }
 519  }
 520 
 521  // We hold the PeerSet lock, so must dispatch
 523  jtLEDGER_DATA, "AcquisitionDone", [self = shared_from_this()](Job&) {
 524  if (self->mComplete && !self->mFailed)
 525  {
 526  self->app_.getLedgerMaster().checkAccept(self->getLedger());
 527  self->app_.getLedgerMaster().tryAdvance();
 528  }
 529  else
 530  self->app_.getInboundLedgers().logFailure(
 531  self->mHash, self->mSeq);
 532  });
 533 }
534 
// Main request driver: depending on what we still lack (header, state map,
// tx map), build and send the appropriate TMGetLedger (or, after repeated
// timeouts, a by-hash TMGetObjectByHash) request to one peer or all peers.
// NOTE(review): orig. lines 538 (signature — per the rest of the body the
// parameters are `peer` and `TriggerReason reason`), 567 (the shard/node DB
// ternary argument to tryDB), 585 (the timeout-count condition for going
// by-hash), and 741 (the `if (mHaveHeader && !mHaveTransactions && ...)`
// style condition guarding the TX-map section — exact text unknown) were
// dropped by the extraction.
 537 void
 539 {
 540  ScopedLockType sl(mLock);
 541 
 542  if (isDone())
 543  {
 544  JLOG(m_journal.debug())
 545  << "Trigger on ledger: " << mHash << (mComplete ? " completed" : "")
 546  << (mFailed ? " failed" : "");
 547  return;
 548  }
 549 
 550  if (auto stream = m_journal.trace())
 551  {
 552  if (peer)
 553  stream << "Trigger acquiring ledger " << mHash << " from " << peer;
 554  else
 555  stream << "Trigger acquiring ledger " << mHash;
 556 
 557  if (mComplete || mFailed)
 558  stream << "complete=" << mComplete << " failed=" << mFailed;
 559  else
 560  stream << "header=" << mHaveHeader << " tx=" << mHaveTransactions
 561  << " as=" << mHaveState;
 562  }
 563 
 564  if (!mHaveHeader)
 565  {
 566  tryDB(
 568  : app_.getNodeFamily().db());
 569  if (mFailed)
 570  {
 571  JLOG(m_journal.warn()) << " failed local for " << mHash;
 572  return;
 573  }
 574  }
 575 
 576  protocol::TMGetLedger tmGL;
 577  tmGL.set_ledgerhash(mHash.begin(), mHash.size());
 578 
 579  if (mTimeouts != 0)
 580  {
 581  // Be more aggressive if we've timed out at least once
 582  tmGL.set_querytype(protocol::qtINDIRECT);
 583 
 584  if (!mProgress && !mFailed && mByHash &&
 586  {
 587  auto need = getNeededHashes();
 588 
 589  if (!need.empty())
 590  {
 591  protocol::TMGetObjectByHash tmBH;
 592  bool typeSet = false;
 593  tmBH.set_query(true);
 594  tmBH.set_ledgerhash(mHash.begin(), mHash.size());
 595  for (auto const& p : need)
 596  {
 597  JLOG(m_journal.warn()) << "Want: " << p.second;
 598 
 599  if (!typeSet)
 600  {
 601  tmBH.set_type(p.first);
 602  typeSet = true;
 603  }
 604 
// Only hashes of the first-seen object type go into one request.
 605  if (p.first == tmBH.type())
 606  {
 607  protocol::TMIndexedObject* io = tmBH.add_objects();
 608  io->set_hash(p.second.begin(), p.second.size());
 609  if (mSeq != 0)
 610  io->set_ledgerseq(mSeq);
 611  }
 612  }
 613 
 614  auto packet =
 615  std::make_shared<Message>(tmBH, protocol::mtGET_OBJECTS);
 616 
 617  for (auto id : mPeers)
 618  {
 619  if (auto p = app_.overlay().findPeerByShortID(id))
 620  {
 621  mByHash = false;
 622  p->send(packet);
 623  }
 624  }
 625  }
 626  else
 627  {
 628  JLOG(m_journal.info())
 629  << "getNeededHashes says acquire is complete";
 630  mHaveHeader = true;
 631  mHaveTransactions = true;
 632  mHaveState = true;
 633  mComplete = true;
 634  }
 635  }
 636  }
 637 
 638  // We can't do much without the header data because we don't know the
 639  // state or transaction root hashes.
 640  if (!mHaveHeader && !mFailed)
 641  {
 642  tmGL.set_itype(protocol::liBASE);
 643  if (mSeq != 0)
 644  tmGL.set_ledgerseq(mSeq);
 645  JLOG(m_journal.trace()) << "Sending header request to "
 646  << (peer ? "selected peer" : "all peers");
 647  sendRequest(tmGL, peer);
 648  return;
 649  }
 650 
 651  if (mLedger)
 652  tmGL.set_ledgerseq(mLedger->info().seq);
 653 
 654  if (reason != TriggerReason::reply)
 655  {
 656  // If we're querying blind, don't query deep
 657  tmGL.set_querydepth(0);
 658  }
 659  else if (peer && peer->isHighLatency())
 660  {
 661  // If the peer has high latency, query extra deep
 662  tmGL.set_querydepth(2);
 663  }
 664  else
 665  tmGL.set_querydepth(1);
 666 
 667  // Get the state data first because it's the most likely to be useful
 668  // if we wind up abandoning this fetch.
 669  if (mHaveHeader && !mHaveState && !mFailed)
 670  {
 671  assert(mLedger);
 672 
 673  if (!mLedger->stateMap().isValid())
 674  {
 675  mFailed = true;
 676  }
 677  else if (mLedger->stateMap().getHash().isZero())
 678  {
 679  // we need the root node
 680  tmGL.set_itype(protocol::liAS_NODE);
 681  *tmGL.add_nodeids() = SHAMapNodeID().getRawString();
 682  JLOG(m_journal.trace()) << "Sending AS root request to "
 683  << (peer ? "selected peer" : "all peers");
 684  sendRequest(tmGL, peer);
 685  return;
 686  }
 687  else
 688  {
 689  AccountStateSF filter(
 690  mLedger->stateMap().family().db(), app_.getLedgerMaster());
 691 
 692  // Release the lock while we process the large state map
 693  sl.unlock();
 694  auto nodes =
 695  mLedger->stateMap().getMissingNodes(missingNodesFind, &filter);
 696  sl.lock();
 697 
 698  // Make sure nothing happened while we released the lock
 699  if (!mFailed && !mComplete && !mHaveState)
 700  {
 701  if (nodes.empty())
 702  {
 703  if (!mLedger->stateMap().isValid())
 704  mFailed = true;
 705  else
 706  {
 707  mHaveState = true;
 708 
 709  if (mHaveTransactions)
 710  mComplete = true;
 711  }
 712  }
 713  else
 714  {
 715  filterNodes(nodes, reason);
 716 
 717  if (!nodes.empty())
 718  {
 719  tmGL.set_itype(protocol::liAS_NODE);
 720  for (auto const& id : nodes)
 721  {
 722  *(tmGL.add_nodeids()) = id.first.getRawString();
 723  }
 724 
 725  JLOG(m_journal.trace())
 726  << "Sending AS node request (" << nodes.size()
 727  << ") to "
 728  << (peer ? "selected peer" : "all peers");
 729  sendRequest(tmGL, peer);
 730  return;
 731  }
 732  else
 733  {
 734  JLOG(m_journal.trace()) << "All AS nodes filtered";
 735  }
 736  }
 737  }
 738  }
 739  }
 740 
// NOTE(review): the condition (orig. 741) for this TX-map section was dropped.
 742  {
 743  assert(mLedger);
 744 
 745  if (!mLedger->txMap().isValid())
 746  {
 747  mFailed = true;
 748  }
 749  else if (mLedger->txMap().getHash().isZero())
 750  {
 751  // we need the root node
 752  tmGL.set_itype(protocol::liTX_NODE);
 753  *(tmGL.add_nodeids()) = SHAMapNodeID().getRawString();
 754  JLOG(m_journal.trace()) << "Sending TX root request to "
 755  << (peer ? "selected peer" : "all peers");
 756  sendRequest(tmGL, peer);
 757  return;
 758  }
 759  else
 760  {
 761  TransactionStateSF filter(
 762  mLedger->txMap().family().db(), app_.getLedgerMaster());
 763 
 764  auto nodes =
 765  mLedger->txMap().getMissingNodes(missingNodesFind, &filter);
 766 
 767  if (nodes.empty())
 768  {
 769  if (!mLedger->txMap().isValid())
 770  mFailed = true;
 771  else
 772  {
 773  mHaveTransactions = true;
 774 
 775  if (mHaveState)
 776  mComplete = true;
 777  }
 778  }
 779  else
 780  {
 781  filterNodes(nodes, reason);
 782 
 783  if (!nodes.empty())
 784  {
 785  tmGL.set_itype(protocol::liTX_NODE);
 786  for (auto const& n : nodes)
 787  {
 788  *(tmGL.add_nodeids()) = n.first.getRawString();
 789  }
 790  JLOG(m_journal.trace())
 791  << "Sending TX node request (" << nodes.size()
 792  << ") to " << (peer ? "selected peer" : "all peers");
 793  sendRequest(tmGL, peer);
 794  return;
 795  }
 796  else
 797  {
 798  JLOG(m_journal.trace()) << "All TX nodes filtered";
 799  }
 800  }
 801  }
 802  }
 803 
 804  if (mComplete || mFailed)
 805  {
 806  JLOG(m_journal.debug())
 807  << "Done:" << (mComplete ? " complete" : "")
 808  << (mFailed ? " failed " : " ") << mLedger->info().seq;
 809  sl.unlock();
 810  done();
 811  }
 812 }
813 
// Prune the list of candidate nodes: drop ones we've requested recently
// (unless we're on a timeout, where everything is fair game), cap the list
// at a request-size limit, and remember what we're about to ask for.
// NOTE(review): orig. lines 815-816 (signature head — per the index the first
// parameter is `std::vector<std::pair<SHAMapNodeID, uint256>>& nodes`) and
// 847 (the limit expression, presumably chosen per `reason` from the tuning
// enum, e.g. reqNodes) were dropped by the extraction.
 814 void
 817  TriggerReason reason)
 818 {
 819  // Sort nodes so that the ones we haven't recently
 820  // requested come before the ones we have.
 821  auto dup = std::stable_partition(
 822  nodes.begin(), nodes.end(), [this](auto const& item) {
 823  return mRecentNodes.count(item.second) == 0;
 824  });
 825 
 826  // If everything is a duplicate we don't want to send
 827  // any query at all except on a timeout where we need
 828  // to query everyone:
 829  if (dup == nodes.begin())
 830  {
 831  JLOG(m_journal.trace()) << "filterNodes: all duplicates";
 832 
 833  if (reason != TriggerReason::timeout)
 834  {
 835  nodes.clear();
 836  return;
 837  }
 838  }
 839  else
 840  {
 841  JLOG(m_journal.trace()) << "filterNodes: pruning duplicates";
 842 
 843  nodes.erase(dup, nodes.end());
 844  }
 845 
 846  std::size_t const limit =
 848 
 849  if (nodes.size() > limit)
 850  nodes.resize(limit);
 851 
 852  for (auto const& n : nodes)
 853  mRecentNodes.insert(n.second);
 854 }
855 
// Process a received ledger header: build the Ledger object, verify its hash
// matches what we're acquiring, persist it, and initialize both SHAMaps for
// synching. Returns false only on provably bad data.
// NOTE(review): orig. lines 861 (signature, presumably
// `InboundLedger::takeHeader(std::string const& data)`) and 889 (the 4-byte
// prefix written into `s` before the raw header — presumably
// `s.add32(HashPrefix::ledger...)`; confirm against upstream) were dropped
// by the extraction.
 859 // data must not have hash prefix
 860 bool
 862 {
 863  // Return value: true=normal, false=bad data
 864  JLOG(m_journal.trace()) << "got header acquiring ledger " << mHash;
 865 
 866  if (mComplete || mFailed || mHaveHeader)
 867  return true;
 868 
 869  auto* f = mReason == Reason::SHARD ? app_.getShardFamily()
 870  : &app_.getNodeFamily();
 871  mLedger = std::make_shared<Ledger>(
 872  deserializeHeader(makeSlice(data)), app_.config(), *f);
 873  if (mLedger->info().hash != mHash ||
 874  (mSeq != 0 && mSeq != mLedger->info().seq))
 875  {
 876  JLOG(m_journal.warn())
 877  << "Acquire hash mismatch: " << mLedger->info().hash
 878  << "!=" << mHash;
 879  mLedger.reset();
 880  return false;
 881  }
 882  if (mSeq == 0)
 883  mSeq = mLedger->info().seq;
 884  mLedger->stateMap().setLedgerSeq(mSeq);
 885  mLedger->txMap().setLedgerSeq(mSeq);
 886  mHaveHeader = true;
 887 
 888  Serializer s(data.size() + 4);
 890  s.addRaw(data.data(), data.size());
 891  f->db().store(hotLEDGER, std::move(s.modData()), mHash, mSeq);
 892 
 893  if (mLedger->info().txHash.isZero())
 894  mHaveTransactions = true;
 895 
 896  if (mLedger->info().accountHash.isZero())
 897  mHaveState = true;
 898 
 899  mLedger->txMap().setSynching();
 900  mLedger->stateMap().setSynching();
 901 
 902  return true;
 903 }
904 
// Process received tx- or state-map nodes from a TMLedgerData packet, adding
// them to the appropriate SHAMap and updating `san` with the outcome; when a
// map finishes synching, flip the corresponding mHave* flag and possibly
// complete the acquisition.
// NOTE(review): orig. line 932 (the lambda's trailing return type and the
// opening `{` — the lambda evidently returns a tuple destructured into
// [map, rootHash, filter]) and orig. 980 (the condition guarding the
// completion block, presumably requiring both maps present) were dropped by
// the extraction.
 908 void
 909 InboundLedger::receiveNode(protocol::TMLedgerData& packet, SHAMapAddNode& san)
 910 {
 911  if (!mHaveHeader)
 912  {
 913  JLOG(m_journal.warn()) << "Missing ledger header";
 914  san.incInvalid();
 915  return;
 916  }
 917  if (packet.type() == protocol::liTX_NODE)
 918  {
 919  if (mHaveTransactions || mFailed)
 920  {
 921  san.incDuplicate();
 922  return;
 923  }
 924  }
 925  else if (mHaveState || mFailed)
 926  {
 927  san.incDuplicate();
 928  return;
 929  }
 930 
// Pick the target map, its expected root hash, and the matching sync filter.
 931  auto [map, rootHash, filter] = [&]()
 933  if (packet.type() == protocol::liTX_NODE)
 934  return {
 935  mLedger->txMap(),
 936  SHAMapHash{mLedger->info().txHash},
 937  std::make_unique<TransactionStateSF>(
 938  mLedger->txMap().family().db(), app_.getLedgerMaster())};
 939  return {
 940  mLedger->stateMap(),
 941  SHAMapHash{mLedger->info().accountHash},
 942  std::make_unique<AccountStateSF>(
 943  mLedger->stateMap().family().db(), app_.getLedgerMaster())};
 944  }();
 945 
 946  try
 947  {
 948  for (auto const& node : packet.nodes())
 949  {
 950  SHAMapNodeID const nodeID(
 951  node.nodeid().data(), node.nodeid().size());
 952  if (nodeID.isRoot())
 953  san += map.addRootNode(
 954  rootHash, makeSlice(node.nodedata()), filter.get());
 955  else
 956  san += map.addKnownNode(
 957  nodeID, makeSlice(node.nodedata()), filter.get());
 958 
 959  if (!san.isGood())
 960  {
 961  JLOG(m_journal.warn()) << "Received bad node data";
 962  return;
 963  }
 964  }
 965  }
 966  catch (std::exception const& e)
 967  {
 968  JLOG(m_journal.error()) << "Received bad node data: " << e.what();
 969  san.incInvalid();
 970  return;
 971  }
 972 
 973  if (!map.isSynching())
 974  {
 975  if (packet.type() == protocol::liTX_NODE)
 976  mHaveTransactions = true;
 977  else
 978  mHaveState = true;
 979 
// NOTE(review): the condition (orig. 980) for this block was dropped.
 981  {
 982  mComplete = true;
 983  done();
 984  }
 985  }
 986 }
987 
// Add the received account-state map root node; returns whether the node was
// accepted cleanly (duplicates after completion count as success).
// NOTE(review): the signature line (orig. 992, presumably
// `InboundLedger::takeAsRootNode(Slice const& data, SHAMapAddNode& san)`)
// was dropped by the extraction.
 991 bool
 993 {
 994  if (mFailed || mHaveState)
 995  {
 996  san.incDuplicate();
 997  return true;
 998  }
 999 
 1000  if (!mHaveHeader)
 1001  {
 1002  assert(false);
 1003  return false;
 1004  }
 1005 
 1006  AccountStateSF filter(
 1007  mLedger->stateMap().family().db(), app_.getLedgerMaster());
 1008  san += mLedger->stateMap().addRootNode(
 1009  SHAMapHash{mLedger->info().accountHash}, data, &filter);
 1010  return san.isGood();
 1011 }
1012 
// Add the received transaction map root node; mirror image of
// takeAsRootNode() above.
// NOTE(review): the signature line (orig. 1017) was dropped by the extraction.
 1016 bool
 1018 {
 1019  if (mFailed || mHaveTransactions)
 1020  {
 1021  san.incDuplicate();
 1022  return true;
 1023  }
 1024 
 1025  if (!mHaveHeader)
 1026  {
 1027  assert(false);
 1028  return false;
 1029  }
 1030 
 1031  TransactionStateSF filter(
 1032  mLedger->txMap().family().db(), app_.getLedgerMaster());
 1033  san += mLedger->txMap().addRootNode(
 1034  SHAMapHash{mLedger->info().txHash}, data, &filter);
 1035  return san.isGood();
 1036 }
1037 
// Build the list of (object type, hash) pairs still needed to complete this
// acquisition: the ledger header itself, then up to 4 state-map hashes and 4
// tx-map hashes. Used by the by-hash fallback in trigger().
// NOTE(review): orig. lines 1038-1039 (signature — per the index it returns
// `std::vector<neededHash_t>`), 1041 (the result vector declaration), and
// 1067 (the `ret.push_back(std::make_pair(` head) were dropped by the
// extraction.
 1040 {
 1042 
 1043  if (!mHaveHeader)
 1044  {
 1045  ret.push_back(
 1046  std::make_pair(protocol::TMGetObjectByHash::otLEDGER, mHash));
 1047  return ret;
 1048  }
 1049 
 1050  if (!mHaveState)
 1051  {
 1052  AccountStateSF filter(
 1053  mLedger->stateMap().family().db(), app_.getLedgerMaster());
 1054  for (auto const& h : neededStateHashes(4, &filter))
 1055  {
 1056  ret.push_back(
 1057  std::make_pair(protocol::TMGetObjectByHash::otSTATE_NODE, h));
 1058  }
 1059  }
 1060 
 1061  if (!mHaveTransactions)
 1062  {
 1063  TransactionStateSF filter(
 1064  mLedger->txMap().family().db(), app_.getLedgerMaster());
 1065  for (auto const& h : neededTxHashes(4, &filter))
 1066  {
 1068  protocol::TMGetObjectByHash::otTRANSACTION_NODE, h));
 1069  }
 1070  }
 1071 
 1072  return ret;
 1073 }
1074 
// Stash a received data packet for later processing by runData(); returns
// true only when the caller should dispatch a processing job (i.e. one is
// not already dispatched and the acquisition is not done).
// NOTE(review): orig. lines 1079 (function name), 1081 (the packet
// parameter, evidently a shared_ptr to TMLedgerData named `data`) and 1083
// (a lock on the received-data list, per the lock-free accesses below) were
// dropped by the extraction.
 1078 bool
 1080  std::weak_ptr<Peer> peer,
 1082 {
 1084 
 1085  if (isDone())
 1086  return false;
 1087 
 1088  mReceivedData.emplace_back(peer, data);
 1089 
 1090  if (mReceiveDispatched)
 1091  return false;
 1092 
 1093  mReceiveDispatched = true;
 1094  return true;
 1095 }
1096 
// Process one TMLedgerData packet from a peer: header packets feed
// takeHeader/takeAsRootNode/takeTxRootNode; node packets feed receiveNode.
// Returns the number of useful nodes, or -1 for bad/uncharged-for data (the
// peer is charged a resource fee in that case).
// NOTE(review): the `InboundLedger::processData(` signature line (orig. 1106)
// was dropped by the extraction.
 1100 // VFALCO NOTE, it is not necessary to pass the entire Peer,
 1101 // we can get away with just a Resource::Consumer endpoint.
 1102 //
 1103 // TODO Change peer to Consumer
 1104 //
 1105 int
 1107  std::shared_ptr<Peer> peer,
 1108  protocol::TMLedgerData& packet)
 1109 {
 1110  ScopedLockType sl(mLock);
 1111 
 1112  if (packet.type() == protocol::liBASE)
 1113  {
 1114  if (packet.nodes_size() < 1)
 1115  {
 1116  JLOG(m_journal.warn()) << "Got empty header data";
 1117  peer->charge(Resource::feeInvalidRequest);
 1118  return -1;
 1119  }
 1120 
 1121  SHAMapAddNode san;
 1122 
 1123  try
 1124  {
 1125  if (!mHaveHeader)
 1126  {
 1127  if (!takeHeader(packet.nodes(0).nodedata()))
 1128  {
 1129  JLOG(m_journal.warn()) << "Got invalid header data";
 1130  peer->charge(Resource::feeInvalidRequest);
 1131  return -1;
 1132  }
 1133 
 1134  san.incUseful();
 1135  }
 1136 
// A header reply may piggyback the AS root (node 1) and TX root (node 2).
 1137  if (!mHaveState && (packet.nodes().size() > 1) &&
 1138  !takeAsRootNode(makeSlice(packet.nodes(1).nodedata()), san))
 1139  {
 1140  JLOG(m_journal.warn()) << "Included AS root invalid";
 1141  }
 1142 
 1143  if (!mHaveTransactions && (packet.nodes().size() > 2) &&
 1144  !takeTxRootNode(makeSlice(packet.nodes(2).nodedata()), san))
 1145  {
 1146  JLOG(m_journal.warn()) << "Included TX root invalid";
 1147  }
 1148  }
 1149  catch (std::exception const& ex)
 1150  {
 1151  JLOG(m_journal.warn())
 1152  << "Included AS/TX root invalid: " << ex.what();
 1153  peer->charge(Resource::feeBadData);
 1154  return -1;
 1155  }
 1156 
 1157  if (san.isUseful())
 1158  mProgress = true;
 1159 
 1160  mStats += san;
 1161  return san.getGood();
 1162  }
 1163 
 1164  if ((packet.type() == protocol::liTX_NODE) ||
 1165  (packet.type() == protocol::liAS_NODE))
 1166  {
 1167  if (packet.nodes().size() == 0)
 1168  {
 1169  JLOG(m_journal.info()) << "Got response with no nodes";
 1170  peer->charge(Resource::feeInvalidRequest);
 1171  return -1;
 1172  }
 1173 
 1174  // Verify node IDs and data are complete
 1175  for (auto const& node : packet.nodes())
 1176  {
 1177  if (!node.has_nodeid() || !node.has_nodedata())
 1178  {
 1179  JLOG(m_journal.warn()) << "Got bad node";
 1180  peer->charge(Resource::feeInvalidRequest);
 1181  return -1;
 1182  }
 1183  }
 1184 
 1185  SHAMapAddNode san;
 1186  receiveNode(packet, san);
 1187 
 1188  if (packet.type() == protocol::liTX_NODE)
 1189  {
 1190  JLOG(m_journal.debug()) << "Ledger TX node stats: " << san.get();
 1191  }
 1192  else
 1193  {
 1194  JLOG(m_journal.debug()) << "Ledger AS node stats: " << san.get();
 1195  }
 1196 
 1197  if (san.isUseful())
 1198  mProgress = true;
 1199 
 1200  mStats += san;
 1201  return san.getGood();
 1202  }
 1203 
 1204  return -1;
 1205 }
1206 
// Drain the queue of received packets, processing each and tracking which
// peer contributed the most useful nodes; finally re-trigger against that
// peer. Loops until the queue is observed empty (clearing
// mReceiveDispatched under the lock before exiting).
// NOTE(review): orig. lines 1211 (signature), 1216 (the local `data`
// container declaration — evidently the same pair type as mReceivedData) and
// 1222 (the lock taken around the queue swap) were dropped by the extraction.
 1210 void
 1212 {
 1213  std::shared_ptr<Peer> chosenPeer;
 1214  int chosenPeerCount = -1;
 1215 
 1217 
 1218  for (;;)
 1219  {
 1220  data.clear();
 1221  {
 1223 
 1224  if (mReceivedData.empty())
 1225  {
 1226  mReceiveDispatched = false;
 1227  break;
 1228  }
 1229 
 1230  data.swap(mReceivedData);
 1231  }
 1232 
 1233  // Select the peer that gives us the most nodes that are useful,
 1234  // breaking ties in favor of the peer that responded first.
 1235  for (auto& entry : data)
 1236  {
 1237  if (auto peer = entry.first.lock())
 1238  {
 1239  int count = processData(peer, *(entry.second));
 1240  if (count > chosenPeerCount)
 1241  {
 1242  chosenPeerCount = count;
 1243  chosenPeer = std::move(peer);
 1244  }
 1245  }
 1246  }
 1247  }
 1248 
 1249  if (chosenPeer)
 1250  trigger(chosenPeer, TriggerReason::reply);
 1251 }
1252 
// Return a Json::objectValue describing acquisition status: hash, progress
// flags, peer count, timeout count, and (while incomplete) a sample of the
// hashes still needed from each map.
// NOTE(review): orig. lines 1253-1254 (signature, `Json::Value
// InboundLedger::getJson(int)` per the index), 1256 (the `ret` object
// declaration), 1283/1293 (the `Json::Value hv(Json::arrayValue);`
// declarations) and 1291 (the `if (mHaveHeader && !mHaveTransactions)` style
// condition) were dropped by the extraction.
 1255 {
 1257 
 1258  ScopedLockType sl(mLock);
 1259 
 1260  ret[jss::hash] = to_string(mHash);
 1261 
 1262  if (mComplete)
 1263  ret[jss::complete] = true;
 1264 
 1265  if (mFailed)
 1266  ret[jss::failed] = true;
 1267 
 1268  if (!mComplete && !mFailed)
 1269  ret[jss::peers] = static_cast<int>(mPeers.size());
 1270 
 1271  ret[jss::have_header] = mHaveHeader;
 1272 
 1273  if (mHaveHeader)
 1274  {
 1275  ret[jss::have_state] = mHaveState;
 1276  ret[jss::have_transactions] = mHaveTransactions;
 1277  }
 1278 
 1279  ret[jss::timeouts] = mTimeouts;
 1280 
 1281  if (mHaveHeader && !mHaveState)
 1282  {
 1284  for (auto const& h : neededStateHashes(16, nullptr))
 1285  {
 1286  hv.append(to_string(h));
 1287  }
 1288  ret[jss::needed_state_hashes] = hv;
 1289  }
 1290 
 1292  {
 1294  for (auto const& h : neededTxHashes(16, nullptr))
 1295  {
 1296  hv.append(to_string(h));
 1297  }
 1298  ret[jss::needed_transaction_hashes] = hv;
 1299  }
 1300 
 1301  return ret;
 1302 }
1303 
1304 } // namespace ripple
ripple::InboundLedger::mRecentNodes
std::set< uint256 > mRecentNodes
Definition: InboundLedger.h:206
beast::Journal::fatal
Stream fatal() const
Definition: Journal.h:339
ripple::Resource::feeInvalidRequest
const Charge feeInvalidRequest
Schedule of fees charged for imposing load on the server.
ripple::Application
Definition: Application.h:97
ripple::SHAMapAddNode
Definition: SHAMapAddNode.h:28
ripple::Application::getNodeFamily
virtual Family & getNodeFamily()=0
ripple::SHAMapAddNode::get
std::string get() const
Definition: SHAMapAddNode.h:156
ripple::InboundLedger::Reason::HISTORY
@ HISTORY
ripple::InboundLedger::getNeededHashes
std::vector< neededHash_t > getNeededHashes()
Definition: InboundLedger.cpp:1039
ripple::HashPrefix::ledgerMaster
@ ledgerMaster
ledger master data for signing
ripple::InboundLedger::mReason
const Reason mReason
Definition: InboundLedger.h:204
std::unique_lock::lock
T lock(T... args)
ripple::InboundLedger::getJson
Json::Value getJson(int)
Return a Json::objectValue.
Definition: InboundLedger.cpp:1254
ripple::makeSlice
std::enable_if_t< std::is_same< T, char >::value||std::is_same< T, unsigned char >::value, Slice > makeSlice(std::array< T, N > const &a)
Definition: Slice.h:240
ripple::InboundLedger::TriggerReason
TriggerReason
Definition: InboundLedger.h:130
ripple::reqNodes
@ reqNodes
Definition: InboundLedger.cpp:68
ripple::NodeStore::Database
Persistency layer for NodeObject.
Definition: Database.h:53
std::string
STL class.
ripple::InboundLedger::Reason::CONSENSUS
@ CONSENSUS
std::shared_ptr
STL class.
ripple::InboundLedger::mHaveState
bool mHaveState
Definition: InboundLedger.h:199
ripple::LedgerInfo::parentHash
uint256 parentHash
Definition: ReadView.h:99
ripple::PeerSet::mProgress
bool mProgress
Whether forward progress has been made.
Definition: PeerSet.h:117
std::exception
STL class.
beast::Journal::trace
Stream trace() const
Severity stream access functions.
Definition: Journal.h:309
ripple::PeerSet::isDone
bool isDone() const
Definition: PeerSet.h:84
ripple::Serializer::modData
Blob & modData()
Definition: Serializer.h:176
ripple::Slice
An immutable linear range of bytes.
Definition: Slice.h:44
Json::arrayValue
@ arrayValue
array value (ordered list)
Definition: json_value.h:42
ripple::InboundLedger::mSignaled
bool mSignaled
Definition: InboundLedger.h:201
std::pair
ripple::ledgerAcquireTimeout
constexpr auto ledgerAcquireTimeout
Definition: InboundLedger.cpp:72
std::vector
STL class.
ripple::InboundLedger::update
void update(std::uint32_t seq)
Definition: InboundLedger.cpp:185
ripple::InboundLedger::touch
void touch()
Definition: InboundLedger.h:118
ripple::InboundLedger::InboundLedger
InboundLedger(Application &app, uint256 const &hash, std::uint32_t seq, Reason reason, clock_type &)
Definition: InboundLedger.cpp:74
ripple::PeerSet::mPeers
std::set< Peer::id_t > mPeers
The identifiers of the peers we are tracking.
Definition: PeerSet.h:120
std::chrono::duration
ripple::peerCountStart
@ peerCountStart
Definition: InboundLedger.cpp:44
ripple::InboundLedger::~InboundLedger
~InboundLedger()
Definition: InboundLedger.cpp:218
ripple::InboundLedger::mByHash
bool mByHash
Definition: InboundLedger.h:202
ripple::InboundLedger::filterNodes
void filterNodes(std::vector< std::pair< SHAMapNodeID, uint256 >> &nodes, TriggerReason reason)
Definition: InboundLedger.cpp:815
beast::Journal::warn
Stream warn() const
Definition: Journal.h:327
ripple::InboundLedger::processData
int processData(std::shared_ptr< Peer > peer, protocol::TMLedgerData &data)
Process one TMLedgerData Returns the number of useful nodes.
Definition: InboundLedger.cpp:1106
std::lock_guard
STL class.
ripple::Application::getShardStore
virtual NodeStore::DatabaseShard * getShardStore()=0
ripple::PeerSet::mComplete
bool mComplete
Definition: PeerSet.h:114
ripple::InboundLedger::queueJob
void queueJob() override
Queue a job to call invokeOnTimer().
Definition: InboundLedger.cpp:169
ripple::InboundLedger::neededStateHashes
std::vector< uint256 > neededStateHashes(int max, SHAMapSyncFilter *filter) const
Definition: InboundLedger.cpp:255
std::tuple
ripple::AccountStateSF
Definition: AccountStateSF.h:31
ripple::JobQueue::addJob
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition: JobQueue.h:166
ripple::LedgerInfo::seq
LedgerIndex seq
Definition: ReadView.h:88
ripple::to_string
std::string to_string(ListDisposition disposition)
Definition: ValidatorList.cpp:42
ripple::SHAMapNodeID
Definition: SHAMapNodeID.h:33
ripple::PeerSet::addPeers
void addPeers(std::size_t limit, std::function< bool(std::shared_ptr< Peer > const &)> score)
Add at most limit peers to this set from the overlay.
Definition: PeerSet.cpp:50
ripple::PeerSet::mHash
const uint256 mHash
The hash of the object (in practice, always a ledger) we are trying to fetch.
Definition: PeerSet.h:112
ripple::deserializeHeader
LedgerInfo deserializeHeader(Slice data)
Deserialize a ledger header from a byte array.
Definition: InboundLedger.cpp:271
ripple::Family::db
virtual NodeStore::Database & db()=0
ripple::LedgerInfo::txHash
uint256 txHash
Definition: ReadView.h:97
std::shared_ptr::reset
T reset(T... args)
ripple::SHAMapHash
Definition: SHAMapTreeNode.h:43
algorithm
ripple::jtLEDGER_DATA
@ jtLEDGER_DATA
Definition: Job.h:47
ripple::Application::getInboundLedgers
virtual InboundLedgers & getInboundLedgers()=0
ripple::base_uint::size
constexpr static std::size_t size()
Definition: base_uint.h:462
ripple::PeerSet::sendRequest
void sendRequest(const protocol::TMGetLedger &message, std::shared_ptr< Peer > const &peer)
Send a GetLedger message to one or all peers.
Definition: PeerSet.cpp:127
std::unique_lock::unlock
T unlock(T... args)
ripple::deserializePrefixedHeader
LedgerInfo deserializePrefixedHeader(Slice data)
Deserialize a ledger header (prefixed with 4 bytes) from a byte array.
Definition: InboundLedger.cpp:292
ripple::InboundLedger::neededTxHashes
std::vector< uint256 > neededTxHashes(int max, SHAMapSyncFilter *filter) const
Definition: InboundLedger.cpp:239
std::vector::push_back
T push_back(T... args)
ripple::LedgerInfo::closeTime
NetClock::time_point closeTime
Definition: ReadView.h:119
ripple::base_uint< 256 >
ripple::InboundLedger::takeHeader
bool takeHeader(std::string const &data)
Take ledger header data Call with a lock.
Definition: InboundLedger.cpp:861
ripple::JobQueue::getJobCountTotal
int getJobCountTotal(JobType t) const
Jobs waiting plus running at this priority.
Definition: JobQueue.cpp:131
ripple::reqNodesReply
@ reqNodesReply
Definition: InboundLedger.cpp:64
ripple::InboundLedger::gotData
bool gotData(std::weak_ptr< Peer >, std::shared_ptr< protocol::TMLedgerData > const &)
Stash a TMLedgerData received from a peer for later processing Returns 'true' if we need to dispatch.
Definition: InboundLedger.cpp:1079
std::addressof
T addressof(T... args)
Json::Value::append
Value & append(const Value &value)
Append value to array at the end.
Definition: json_value.cpp:882
ripple::InboundLedger::mLedger
std::shared_ptr< Ledger > mLedger
Definition: InboundLedger.h:197
std::enable_shared_from_this< InboundLedger >::shared_from_this
T shared_from_this(T... args)
ripple::SerialIter::get8
unsigned char get8()
Definition: Serializer.cpp:354
ripple::SHAMapAddNode::isUseful
bool isUseful() const
Definition: SHAMapAddNode.h:116
ripple::InboundLedger::getPeerCount
std::size_t getPeerCount() const
Definition: InboundLedger.cpp:161
Json::objectValue
@ objectValue
object value (collection of name/value pairs).
Definition: json_value.h:43
ripple::SerialIter::get256
uint256 get256()
Definition: Serializer.h:374
ripple::SerialIter::get64
std::uint64_t get64()
Definition: Serializer.cpp:391
ripple::InboundLedger::addPeers
void addPeers()
Add more peers to the set, if possible.
Definition: InboundLedger.cpp:473
ripple::peerCountAdd
@ peerCountAdd
Definition: InboundLedger.cpp:48
ripple::Application::getLedgerMaster
virtual LedgerMaster & getLedgerMaster()=0
ripple::InboundLedgers::gotStaleData
virtual void gotStaleData(std::shared_ptr< protocol::TMLedgerData > packet)=0
ripple::Resource::feeBadData
const Charge feeBadData
ripple::LedgerMaster::getFetchPack
boost::optional< Blob > getFetchPack(uint256 const &hash) override
Retrieves partial ledger data of the coresponding hash from peers.
Definition: LedgerMaster.cpp:2013
ripple::NodeStore::Database::fetch
virtual std::shared_ptr< NodeObject > fetch(uint256 const &hash, std::uint32_t seq)=0
Fetch an object.
ripple::Application::config
virtual Config & config()=0
ripple::InboundLedgers::onLedgerFetched
virtual void onLedgerFetched()=0
Called when a complete ledger is obtained.
ripple::SHAMapAddNode::isGood
bool isGood() const
Definition: SHAMapAddNode.h:132
std::unique_lock< std::recursive_mutex >
ripple::SHAMap
A SHAMap is both a radix tree with a fan-out of 16 and a Merkle tree.
Definition: SHAMap.h:81
ripple::SHAMapNodeID::getRawString
std::string getRawString() const
Definition: SHAMapNodeID.cpp:92
ripple::Serializer::addRaw
int addRaw(Blob const &vector)
Definition: Serializer.cpp:100
ripple::LedgerInfo::closeFlags
int closeFlags
Definition: ReadView.h:110
std::to_string
T to_string(T... args)
ripple::Application::getJobQueue
virtual JobQueue & getJobQueue()=0
ripple::InboundLedger::tryDB
void tryDB(NodeStore::Database &srcDB)
Definition: InboundLedger.cpp:300
ripple::InboundLedger::takeTxRootNode
bool takeTxRootNode(Slice const &data, SHAMapAddNode &)
Process AS root node received from a peer Call with a lock.
Definition: InboundLedger.cpp:1017
beast::Journal::error
Stream error() const
Definition: Journal.h:333
beast::Journal::info
Stream info() const
Definition: Journal.h:321
std::chrono::time_point
ripple::Job
Definition: Job.h:82
ripple::SerialIter
Definition: Serializer.h:308
ripple::InboundLedger::pmDowncast
std::weak_ptr< PeerSet > pmDowncast() override
Return a weak pointer to this.
Definition: InboundLedger.cpp:481
std::uint32_t
ripple::missingNodesFind
@ missingNodesFind
Definition: InboundLedger.cpp:60
ripple::InboundLedger::mReceiveDispatched
bool mReceiveDispatched
Definition: InboundLedger.h:213
ripple::InboundLedger::mHaveHeader
bool mHaveHeader
Definition: InboundLedger.h:198
beast::abstract_clock< std::chrono::steady_clock >
ripple::LedgerMaster::checkAccept
void checkAccept(std::shared_ptr< Ledger const > const &ledger)
Definition: LedgerMaster.cpp:996
ripple::LedgerInfo::drops
XRPAmount drops
Definition: ReadView.h:101
std::weak_ptr
STL class.
ripple::Serializer
Definition: Serializer.h:39
ripple::InboundLedger::TriggerReason::timeout
@ timeout
ripple::InboundLedger::TriggerReason::reply
@ reply
ripple
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: RCLCensorshipDetector.h:29
ripple::InboundLedger::mStats
SHAMapAddNode mStats
Definition: InboundLedger.h:208
ripple::Application::getShardFamily
virtual Family * getShardFamily()=0
ripple::base_uint::begin
iterator begin()
Definition: base_uint.h:114
ripple::PeerSet::app_
Application & app_
Definition: PeerSet.h:105
ripple::InboundLedger::mSeq
std::uint32_t mSeq
Definition: InboundLedger.h:203
ripple::InboundLedger::takeAsRootNode
bool takeAsRootNode(Slice const &data, SHAMapAddNode &)
Process AS root node received from a peer Call with a lock.
Definition: InboundLedger.cpp:992
ripple::LedgerInfo::closeTimeResolution
NetClock::duration closeTimeResolution
Definition: ReadView.h:113
ripple::PeerSet
Supports data retrieval by managing a set of peers.
Definition: PeerSet.h:48
ripple::PeerSet::mFailed
bool mFailed
Definition: PeerSet.h:115
ripple::NodeStore::DatabaseShard::setStored
virtual void setStored(std::shared_ptr< Ledger const > const &ledger)=0
Notifies the database that the given ledger has been fully acquired and stored.
ripple::Application::overlay
virtual Overlay & overlay()=0
std::count_if
T count_if(T... args)
ripple::InboundLedger::done
void done()
Definition: InboundLedger.cpp:487
ripple::InboundLedger::trigger
void trigger(std::shared_ptr< Peer > const &, TriggerReason)
Request more nodes, perhaps from a specific peer.
Definition: InboundLedger.cpp:538
ripple::PeerSet::m_journal
beast::Journal m_journal
Definition: PeerSet.h:106
beast::Journal::debug
Stream debug() const
Definition: Journal.h:315
ripple::SHAMapAddNode::incInvalid
void incInvalid()
Definition: SHAMapAddNode.h:80
std::size_t
ripple::SHAMapAddNode::incUseful
void incUseful()
Definition: SHAMapAddNode.h:86
ripple::hotLEDGER
@ hotLEDGER
Definition: NodeObject.h:34
std::make_pair
T make_pair(T... args)
ripple::Serializer::add32
int add32(std::uint32_t i)
Definition: Serializer.cpp:38
ripple::LedgerInfo
Information about the notional ledger backing the view.
Definition: ReadView.h:80
ripple::SHAMapAddNode::getGood
int getGood() const
Definition: SHAMapAddNode.h:104
ripple::LedgerMaster::storeLedger
bool storeLedger(std::shared_ptr< Ledger const > ledger)
Definition: LedgerMaster.cpp:510
ripple::InboundLedger::Reason
Reason
Definition: InboundLedger.h:51
ripple::SHAMapAddNode::incDuplicate
void incDuplicate()
Definition: SHAMapAddNode.h:92
ripple::InboundLedger::receiveNode
void receiveNode(protocol::TMLedgerData &packet, SHAMapAddNode &)
Process node data received from a peer Call with a lock.
Definition: InboundLedger.cpp:909
ripple::InboundLedger::runData
void runData()
Process pending TMLedgerData Query the 'best' peer.
Definition: InboundLedger.cpp:1211
ripple::PeerSet::mTimeouts
int mTimeouts
Definition: PeerSet.h:113
ripple::SerialIter::get32
std::uint32_t get32()
Definition: Serializer.cpp:378
ripple::InboundLedger::mReceivedData
std::vector< PeerDataPairType > mReceivedData
Definition: InboundLedger.h:212
ripple::ledgerBecomeAggressiveThreshold
@ ledgerBecomeAggressiveThreshold
Definition: InboundLedger.cpp:56
ripple::SHAMapSyncFilter
Definition: SHAMapSyncFilter.h:30
std::unique_ptr
STL class.
std::stable_partition
T stable_partition(T... args)
ripple::InboundLedger::Reason::SHARD
@ SHARD
ripple::InboundLedger::onTimer
void onTimer(bool progress, ScopedLockType &peerSetLock) override
Called with a lock by the PeerSet when the timer expires.
Definition: InboundLedger.cpp:422
ripple::InboundLedger::mReceivedDataLock
std::mutex mReceivedDataLock
Definition: InboundLedger.h:211
ripple::InboundLedger::checkLocal
bool checkLocal()
Definition: InboundLedger.cpp:198
ripple::TransactionStateSF
Definition: TransactionStateSF.h:31
ripple::ledgerTimeoutRetriesMax
@ ledgerTimeoutRetriesMax
Definition: InboundLedger.cpp:52
ripple::PeerSet::setTimer
void setTimer()
Schedule a call to queueJob() after mTimerInterval.
Definition: PeerSet.cpp:87
ripple::SHAMapNodeID::isRoot
bool isRoot() const
Definition: SHAMapNodeID.h:124
ripple::InboundLedger::mHaveTransactions
bool mHaveTransactions
Definition: InboundLedger.h:200
ripple::Overlay::findPeerByShortID
virtual std::shared_ptr< Peer > findPeerByShortID(Peer::id_t const &id)=0
Returns the peer with the matching short id, or null.
ripple::InboundLedger::init
void init(ScopedLockType &collectionLock)
Definition: InboundLedger.cpp:96
ripple::LedgerInfo::accountHash
uint256 accountHash
Definition: ReadView.h:98
std::exception::what
T what(T... args)
ripple::PeerSet::mLock
std::recursive_mutex mLock
Definition: PeerSet.h:108
Json::Value
Represents a JSON value.
Definition: json_value.h:145
ripple::LedgerInfo::parentCloseTime
NetClock::time_point parentCloseTime
Definition: ReadView.h:89