rippled
InboundLedger.cpp
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2012, 2013 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <ripple/app/ledger/AccountStateSF.h>
#include <ripple/app/ledger/InboundLedger.h>
#include <ripple/app/ledger/InboundLedgers.h>
#include <ripple/app/ledger/LedgerMaster.h>
#include <ripple/app/ledger/TransactionStateSF.h>
#include <ripple/app/main/Application.h>
#include <ripple/app/misc/NetworkOPs.h>
#include <ripple/basics/Log.h>
#include <ripple/core/JobQueue.h>
#include <ripple/nodestore/DatabaseShard.h>
#include <ripple/overlay/Overlay.h>
#include <ripple/protocol/HashPrefix.h>
#include <ripple/protocol/jss.h>
#include <ripple/resource/Fees.h>
#include <ripple/shamap/SHAMapNodeID.h>

#include <algorithm>

namespace ripple {

using namespace std::chrono_literals;

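// An InboundLedger drives the acquisition of a single ledger from the
// network: first the header, then the transaction and account-state
// SHAMap nodes, retrying on a timer and recruiting more peers as needed.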
// Tuning constants (enumerator values restored from the upstream rippled
// sources):
enum {
    // Number of peers to start with
    peerCountStart = 4

    // Number of peers to add on a timeout
    ,
    peerCountAdd = 2

    // how many timeouts before we give up
    ,
    ledgerTimeoutRetriesMax = 10

    // how many timeouts before we get aggressive
    ,
    ledgerBecomeAggressiveThreshold = 6

    // Number of nodes to find initially
    ,
    missingNodesFind = 256

    // Number of nodes to request for a reply
    ,
    reqNodesReply = 128

    // Number of nodes to request blindly
    ,
    reqNodes = 8
};
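
// peerCountStart/peerCountAdd drive addPeers(); the two timeout thresholds
// are consulted in onTimer() and trigger(); missingNodesFind bounds
// getMissingNodes(), and the reqNodes* limits cap filterNodes().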

// milliseconds for each ledger timeout
auto constexpr ledgerAcquireTimeout = 2500ms;

InboundLedger::InboundLedger(
    Application& app,
    uint256 const& hash,
    std::uint32_t seq,
    Reason reason,
    clock_type& clock)
    : PeerSet(app, hash, ledgerAcquireTimeout, app.journal("InboundLedger"))
    , m_clock(clock)
    , mHaveHeader(false)
    , mHaveState(false)
    , mHaveTransactions(false)
    , mSignaled(false)
    , mByHash(true)
    , mSeq(seq)
    , mReason(reason)
    , mReceiveDispatched(false)
{
    JLOG(m_journal.trace()) << "Acquiring ledger " << mHash;
    touch();
}

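// Lifecycle: init() probes local stores first; if the ledger is still
// incomplete it recruits peers and queues a job. After that, progress is
// driven by trigger() (request more nodes), gotData()/runData() (process
// replies), and onTimer() (retry, escalate, or give up).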
void
InboundLedger::init(ScopedLockType& collectionLock)
{
    ScopedLockType sl(mLock);
    collectionLock.unlock();

    tryDB(app_.getNodeFamily().db());
    if (mFailed)
        return;

    if (!mComplete)
    {
        auto shardStore = app_.getShardStore();
        if (mReason == Reason::SHARD)
        {
            if (!shardStore)
            {
                JLOG(m_journal.error())
                    << "Acquiring shard with no shard store available";
                mFailed = true;
                return;
            }

            mHaveHeader = false;
            mHaveTransactions = false;
            mHaveState = false;
            mLedger.reset();

            tryDB(app_.getShardFamily()->db());
            if (mFailed)
                return;
        }
        else if (shardStore && mSeq >= shardStore->earliestLedgerSeq())
        {
            if (auto l = shardStore->fetchLedger(mHash, mSeq))
            {
                mHaveHeader = true;
                mHaveTransactions = true;
                mHaveState = true;
                mComplete = true;
                mLedger = std::move(l);
            }
        }
    }
    if (!mComplete)
    {
        addPeers();
        queueJob();
        return;
    }

    JLOG(m_journal.debug()) << "Acquiring ledger we already have in "
                            << " local store. " << mHash;
    mLedger->setImmutable(app_.config());

    if (mReason == Reason::HISTORY || mReason == Reason::SHARD)
        return;

    app_.getLedgerMaster().storeLedger(mLedger);

    // Check if this could be a newer fully-validated ledger
    if (mReason == Reason::CONSENSUS)
        app_.getLedgerMaster().checkAccept(mLedger);
}

std::size_t
InboundLedger::getPeerCount() const
{
    return std::count_if(mPeers.begin(), mPeers.end(), [this](auto id) {
        return app_.overlay().findPeerByShortID(id) != nullptr;
    });
}

void
InboundLedger::queueJob()
{
    // Defer the timer callback when the job queue is already loaded with
    // ledger-data jobs; the timer is simply re-armed. (The load-limit
    // constant is reconstructed; the original guards on the queued
    // jtLEDGER_DATA job count.)
    if (app_.getJobQueue().getJobCountTotal(jtLEDGER_DATA) > MAX_LEDGER_GETS)
    {
        JLOG(m_journal.debug()) << "Deferring InboundLedger timer due to load";
        setTimer();
        return;
    }

    app_.getJobQueue().addJob(
        jtLEDGER_DATA, "InboundLedger", [ptr = shared_from_this()](Job&) {
            ptr->invokeOnTimer();
        });
}

void
InboundLedger::update(std::uint32_t seq)
{
    ScopedLockType sl(mLock);

    // If we didn't know the sequence number, but now do, save it
    if ((seq != 0) && (mSeq == 0))
        mSeq = seq;

    // Prevent this from being swept
    touch();
}

bool
InboundLedger::checkLocal()
{
    ScopedLockType sl(mLock);
    if (!isDone())
    {
        if (mLedger)
            tryDB(mLedger->stateMap().family().db());
        else if (mReason == Reason::SHARD)
            tryDB(app_.getShardFamily()->db());
        else
            tryDB(app_.getNodeFamily().db());
        if (mFailed || mComplete)
        {
            done();
            return true;
        }
    }
    return false;
}
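
// checkLocal() lets an acquisition finish without further network traffic
// when the missing data has appeared locally in the meantime (for example
// from a fetch pack); onTimer() calls it whenever no progress was made.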

InboundLedger::~InboundLedger()
{
    // Save any received AS data not processed. It could be useful
    // for populating a different ledger
    for (auto& entry : mReceivedData)
    {
        if (entry.second->type() == protocol::liAS_NODE)
            app_.getInboundLedgers().gotStaleData(entry.second);
    }
    if (!isDone())
    {
        JLOG(m_journal.debug())
            << "Acquire " << mHash << " abort "
            << ((mTimeouts == 0) ? std::string()
                                 : (std::string("timeouts:") +
                                    std::to_string(mTimeouts) + " "))
            << mStats.get();
    }
}

static std::vector<uint256>
neededHashes(
    uint256 const& root,
    SHAMap& map,
    int max,
    SHAMapSyncFilter* filter)
{
    std::vector<uint256> ret;

    if (!root.isZero())
    {
        if (map.getHash().isZero())
            ret.push_back(root);
        else
        {
            auto mn = map.getMissingNodes(max, filter);
            ret.reserve(mn.size());
            for (auto const& n : mn)
                ret.push_back(n.second);
        }
    }

    return ret;
}

std::vector<uint256>
InboundLedger::neededTxHashes(int max, SHAMapSyncFilter* filter) const
{
    return neededHashes(mLedger->info().txHash, mLedger->txMap(), max, filter);
}

std::vector<uint256>
InboundLedger::neededStateHashes(int max, SHAMapSyncFilter* filter) const
{
    return neededHashes(
        mLedger->info().accountHash, mLedger->stateMap(), max, filter);
}
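
// Wire layout of a serialized ledger header, as read back by
// deserializeHeader() below:
//
//     seq                  32 bits
//     drops                64 bits
//     parentHash          256 bits
//     txHash              256 bits
//     accountHash         256 bits
//     parentCloseTime      32 bits
//     closeTime            32 bits
//     closeTimeResolution  32 bits
//     closeFlags            8 bits
//     hash                256 bits (present only when hasHash is true)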

LedgerInfo
deserializeHeader(Slice data, bool hasHash)
{
    SerialIter sit(data.data(), data.size());

    LedgerInfo info;

    info.seq = sit.get32();
    info.drops = sit.get64();
    info.parentHash = sit.get256();
    info.txHash = sit.get256();
    info.accountHash = sit.get256();
    info.parentCloseTime =
        NetClock::time_point{std::chrono::seconds{sit.get32()}};
    info.closeTime = NetClock::time_point{std::chrono::seconds{sit.get32()}};
    info.closeTimeResolution = NetClock::duration{sit.get32()};
    info.closeFlags = sit.get8();

    if (hasHash)
        info.hash = sit.get256();

    return info;
}

LedgerInfo
deserializePrefixedHeader(Slice data, bool hasHash)
{
    return deserializeHeader(data + 4, hasHash);
}
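
// The four bytes skipped by deserializePrefixedHeader() are the hash
// prefix (HashPrefix::ledgerMaster) that takeHeader() prepends before
// storing a header in the node store.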

// See how much of the ledger data is stored locally
// Data found in a fetch pack will be stored
void
InboundLedger::tryDB(NodeStore::Database& srcDB)
{
    if (!mHaveHeader)
    {
        auto makeLedger = [&, this](Blob const& data) {
            JLOG(m_journal.trace()) << "Ledger header found in fetch pack";
            mLedger = std::make_shared<Ledger>(
                deserializePrefixedHeader(makeSlice(data)),
                app_.config(),
                mReason == Reason::SHARD ? *app_.getShardFamily()
                                         : app_.getNodeFamily());
            if (mLedger->info().hash != mHash ||
                (mSeq != 0 && mSeq != mLedger->info().seq))
            {
                // We know for a fact the ledger can never be acquired
                JLOG(m_journal.warn())
                    << "hash " << mHash << " seq " << std::to_string(mSeq)
                    << " cannot be a ledger";
                mLedger.reset();
                mFailed = true;
            }
        };

        // Try to fetch the ledger header from the DB
        if (auto nodeObject = srcDB.fetchNodeObject(mHash, mSeq))
        {
            JLOG(m_journal.trace()) << "Ledger header found in local store";

            makeLedger(nodeObject->getData());
            if (mFailed)
                return;

            // Store the ledger header if the source and destination differ
            auto& dstDB{mLedger->stateMap().family().db()};
            if (std::addressof(dstDB) != std::addressof(srcDB))
            {
                Blob blob{nodeObject->getData()};
                dstDB.store(
                    hotLEDGER, std::move(blob), mHash, mLedger->info().seq);
            }
        }
        else
        {
            // Try to fetch the ledger header from a fetch pack
            auto data = app_.getLedgerMaster().getFetchPack(mHash);
            if (!data)
                return;

            JLOG(m_journal.trace()) << "Ledger header found in fetch pack";

            makeLedger(*data);
            if (mFailed)
                return;

            // Store the ledger header in the ledger's database
            mLedger->stateMap().family().db().store(
                hotLEDGER, std::move(*data), mHash, mLedger->info().seq);
        }

        if (mSeq == 0)
            mSeq = mLedger->info().seq;
        mLedger->stateMap().setLedgerSeq(mSeq);
        mLedger->txMap().setLedgerSeq(mSeq);
        mHaveHeader = true;
    }

    if (!mHaveTransactions)
    {
        if (mLedger->info().txHash.isZero())
        {
            JLOG(m_journal.trace()) << "No TXNs to fetch";
            mHaveTransactions = true;
        }
        else
        {
            TransactionStateSF filter(
                mLedger->txMap().family().db(), app_.getLedgerMaster());
            if (mLedger->txMap().fetchRoot(
                    SHAMapHash{mLedger->info().txHash}, &filter))
            {
                if (neededTxHashes(1, &filter).empty())
                {
                    JLOG(m_journal.trace()) << "Had full txn map locally";
                    mHaveTransactions = true;
                }
            }
        }
    }

    if (!mHaveState)
    {
        if (mLedger->info().accountHash.isZero())
        {
            JLOG(m_journal.fatal())
                << "We are acquiring a ledger with a zero account hash";
            mFailed = true;
            return;
        }
        AccountStateSF filter(
            mLedger->stateMap().family().db(), app_.getLedgerMaster());
        if (mLedger->stateMap().fetchRoot(
                SHAMapHash{mLedger->info().accountHash}, &filter))
        {
            if (neededStateHashes(1, &filter).empty())
            {
                JLOG(m_journal.trace()) << "Had full AS map locally";
                mHaveState = true;
            }
        }
    }

    if (mHaveTransactions && mHaveState)
    {
        JLOG(m_journal.debug()) << "Had everything locally";
        mComplete = true;
        mLedger->setImmutable(app_.config());
    }
}

/** Called with a lock by the PeerSet when the timer expires
 */
void
InboundLedger::onTimer(bool wasProgress, ScopedLockType&)
{
    mRecentNodes.clear();

    if (isDone())
    {
        JLOG(m_journal.info()) << "Already done " << mHash;
        return;
    }

    if (mTimeouts > ledgerTimeoutRetriesMax)
    {
        if (mSeq != 0)
        {
            JLOG(m_journal.warn())
                << mTimeouts << " timeouts for ledger " << mSeq;
        }
        else
        {
            JLOG(m_journal.warn())
                << mTimeouts << " timeouts for ledger " << mHash;
        }
        mFailed = true;
        done();
        return;
    }

    if (!wasProgress)
    {
        checkLocal();

        mByHash = true;

        std::size_t pc = getPeerCount();
        JLOG(m_journal.debug())
            << "No progress(" << pc << ") for ledger " << mHash;

        // addPeers triggers if the reason is not HISTORY
        // So if the reason IS HISTORY, need to trigger after we add
        // otherwise, we need to trigger before we add
        // so each peer gets triggered once
        if (mReason != Reason::HISTORY)
            trigger(nullptr, TriggerReason::timeout);
        addPeers();
        if (mReason == Reason::HISTORY)
            trigger(nullptr, TriggerReason::timeout);
    }
}

/** Add more peers to the set, if possible */
void
InboundLedger::addPeers()
{
    PeerSet::addPeers(
        (getPeerCount() == 0) ? peerCountStart : peerCountAdd,
        [this](auto peer) { return peer->hasLedger(mHash, mSeq); });
}

std::weak_ptr<PeerSet>
InboundLedger::pmDowncast()
{
    return shared_from_this();
}

void
InboundLedger::done()
{
    if (mSignaled)
        return;

    mSignaled = true;
    touch();

    JLOG(m_journal.debug())
        << "Acquire " << mHash << (mFailed ? " fail " : " ")
        << ((mTimeouts == 0)
                ? std::string()
                : (std::string("timeouts:") + std::to_string(mTimeouts) + " "))
        << mStats.get();

    assert(mComplete || mFailed);

    if (mComplete && !mFailed && mLedger)
    {
        mLedger->setImmutable(app_.config());
        switch (mReason)
        {
            case Reason::SHARD:
                app_.getShardStore()->setStored(mLedger);
                [[fallthrough]];
            case Reason::HISTORY:
                app_.getInboundLedgers().onLedgerFetched();
                break;
            default:
                app_.getLedgerMaster().storeLedger(mLedger);
                break;
        }
    }

    // We hold the PeerSet lock, so must dispatch
    app_.getJobQueue().addJob(
        jtLEDGER_DATA, "AcquisitionDone", [self = shared_from_this()](Job&) {
            if (self->mComplete && !self->mFailed)
            {
                self->app_.getLedgerMaster().checkAccept(self->getLedger());
                self->app_.getLedgerMaster().tryAdvance();
            }
            else
                self->app_.getInboundLedgers().logFailure(
                    self->mHash, self->mSeq);
        });
}

/** Request more nodes, perhaps from a specific peer
 */
void
InboundLedger::trigger(std::shared_ptr<Peer> const& peer, TriggerReason reason)
{
    ScopedLockType sl(mLock);

    if (isDone())
    {
        JLOG(m_journal.debug())
            << "Trigger on ledger: " << mHash << (mComplete ? " completed" : "")
            << (mFailed ? " failed" : "");
        return;
    }

    if (auto stream = m_journal.trace())
    {
        if (peer)
            stream << "Trigger acquiring ledger " << mHash << " from " << peer;
        else
            stream << "Trigger acquiring ledger " << mHash;

        if (mComplete || mFailed)
            stream << "complete=" << mComplete << " failed=" << mFailed;
        else
            stream << "header=" << mHaveHeader << " tx=" << mHaveTransactions
                   << " as=" << mHaveState;
    }

    if (!mHaveHeader)
    {
        tryDB(
            mReason == Reason::SHARD ? app_.getShardFamily()->db()
                                     : app_.getNodeFamily().db());
        if (mFailed)
        {
            JLOG(m_journal.warn()) << " failed local for " << mHash;
            return;
        }
    }

    protocol::TMGetLedger tmGL;
    tmGL.set_ledgerhash(mHash.begin(), mHash.size());

    if (mTimeouts != 0)
    {
        // Be more aggressive if we've timed out at least once
        tmGL.set_querytype(protocol::qtINDIRECT);

        if (!mProgress && !mFailed && mByHash &&
            (mTimeouts > ledgerBecomeAggressiveThreshold))
        {
            auto need = getNeededHashes();

            if (!need.empty())
            {
                protocol::TMGetObjectByHash tmBH;
                bool typeSet = false;
                tmBH.set_query(true);
                tmBH.set_ledgerhash(mHash.begin(), mHash.size());
                for (auto const& p : need)
                {
                    JLOG(m_journal.warn()) << "Want: " << p.second;

                    if (!typeSet)
                    {
                        tmBH.set_type(p.first);
                        typeSet = true;
                    }

                    if (p.first == tmBH.type())
                    {
                        protocol::TMIndexedObject* io = tmBH.add_objects();
                        io->set_hash(p.second.begin(), p.second.size());
                        if (mSeq != 0)
                            io->set_ledgerseq(mSeq);
                    }
                }

                auto packet =
                    std::make_shared<Message>(tmBH, protocol::mtGET_OBJECTS);

                for (auto id : mPeers)
                {
                    if (auto p = app_.overlay().findPeerByShortID(id))
                    {
                        mByHash = false;
                        p->send(packet);
                    }
                }
            }
            else
            {
                JLOG(m_journal.info())
                    << "getNeededHashes says acquire is complete";
                mHaveHeader = true;
                mHaveTransactions = true;
                mHaveState = true;
                mComplete = true;
            }
        }
    }

    // We can't do much without the header data because we don't know the
    // state or transaction root hashes.
    if (!mHaveHeader && !mFailed)
    {
        tmGL.set_itype(protocol::liBASE);
        if (mSeq != 0)
            tmGL.set_ledgerseq(mSeq);
        JLOG(m_journal.trace()) << "Sending header request to "
                                << (peer ? "selected peer" : "all peers");
        sendRequest(tmGL, peer);
        return;
    }

    if (mLedger)
        tmGL.set_ledgerseq(mLedger->info().seq);

    if (reason != TriggerReason::reply)
    {
        // If we're querying blind, don't query deep
        tmGL.set_querydepth(0);
    }
    else if (peer && peer->isHighLatency())
    {
        // If the peer has high latency, query extra deep
        tmGL.set_querydepth(2);
    }
    else
        tmGL.set_querydepth(1);

    // Get the state data first because it's the most likely to be useful
    // if we wind up abandoning this fetch.
    if (mHaveHeader && !mHaveState && !mFailed)
    {
        assert(mLedger);

        if (!mLedger->stateMap().isValid())
        {
            mFailed = true;
        }
        else if (mLedger->stateMap().getHash().isZero())
        {
            // we need the root node
            tmGL.set_itype(protocol::liAS_NODE);
            *tmGL.add_nodeids() = SHAMapNodeID().getRawString();
            JLOG(m_journal.trace()) << "Sending AS root request to "
                                    << (peer ? "selected peer" : "all peers");
            sendRequest(tmGL, peer);
            return;
        }
        else
        {
            AccountStateSF filter(
                mLedger->stateMap().family().db(), app_.getLedgerMaster());

            // Release the lock while we process the large state map
            sl.unlock();
            auto nodes =
                mLedger->stateMap().getMissingNodes(missingNodesFind, &filter);
            sl.lock();

            // Make sure nothing happened while we released the lock
            if (!mFailed && !mComplete && !mHaveState)
            {
                if (nodes.empty())
                {
                    if (!mLedger->stateMap().isValid())
                        mFailed = true;
                    else
                    {
                        mHaveState = true;

                        if (mHaveTransactions)
                            mComplete = true;
                    }
                }
                else
                {
                    filterNodes(nodes, reason);

                    if (!nodes.empty())
                    {
                        tmGL.set_itype(protocol::liAS_NODE);
                        for (auto const& id : nodes)
                        {
                            *(tmGL.add_nodeids()) = id.first.getRawString();
                        }

                        JLOG(m_journal.trace())
                            << "Sending AS node request (" << nodes.size()
                            << ") to "
                            << (peer ? "selected peer" : "all peers");
                        sendRequest(tmGL, peer);
                        return;
                    }
                    else
                    {
                        JLOG(m_journal.trace()) << "All AS nodes filtered";
                    }
                }
            }
        }
    }

    if (mHaveHeader && !mHaveTransactions && !mFailed)
    {
        assert(mLedger);

        if (!mLedger->txMap().isValid())
        {
            mFailed = true;
        }
        else if (mLedger->txMap().getHash().isZero())
        {
            // we need the root node
            tmGL.set_itype(protocol::liTX_NODE);
            *(tmGL.add_nodeids()) = SHAMapNodeID().getRawString();
            JLOG(m_journal.trace()) << "Sending TX root request to "
                                    << (peer ? "selected peer" : "all peers");
            sendRequest(tmGL, peer);
            return;
        }
        else
        {
            TransactionStateSF filter(
                mLedger->txMap().family().db(), app_.getLedgerMaster());

            auto nodes =
                mLedger->txMap().getMissingNodes(missingNodesFind, &filter);

            if (nodes.empty())
            {
                if (!mLedger->txMap().isValid())
                    mFailed = true;
                else
                {
                    mHaveTransactions = true;

                    if (mHaveState)
                        mComplete = true;
                }
            }
            else
            {
                filterNodes(nodes, reason);

                if (!nodes.empty())
                {
                    tmGL.set_itype(protocol::liTX_NODE);
                    for (auto const& n : nodes)
                    {
                        *(tmGL.add_nodeids()) = n.first.getRawString();
                    }
                    JLOG(m_journal.trace())
                        << "Sending TX node request (" << nodes.size()
                        << ") to " << (peer ? "selected peer" : "all peers");
                    sendRequest(tmGL, peer);
                    return;
                }
                else
                {
                    JLOG(m_journal.trace()) << "All TX nodes filtered";
                }
            }
        }
    }

    if (mComplete || mFailed)
    {
        JLOG(m_journal.debug())
            << "Done:" << (mComplete ? " complete" : "")
            << (mFailed ? " failed " : " ") << mLedger->info().seq;
        sl.unlock();
        done();
    }
}

void
InboundLedger::filterNodes(
    std::vector<std::pair<SHAMapNodeID, uint256>>& nodes,
    TriggerReason reason)
{
    // Sort nodes so that the ones we haven't recently
    // requested come before the ones we have.
    auto dup = std::stable_partition(
        nodes.begin(), nodes.end(), [this](auto const& item) {
            return mRecentNodes.count(item.second) == 0;
        });

    // If everything is a duplicate we don't want to send
    // any query at all except on a timeout where we need
    // to query everyone:
    if (dup == nodes.begin())
    {
        JLOG(m_journal.trace()) << "filterNodes: all duplicates";

        if (reason != TriggerReason::timeout)
        {
            nodes.clear();
            return;
        }
    }
    else
    {
        JLOG(m_journal.trace()) << "filterNodes: pruning duplicates";

        nodes.erase(dup, nodes.end());
    }

    std::size_t const limit =
        (reason == TriggerReason::reply) ? reqNodesReply : reqNodes;

    if (nodes.size() > limit)
        nodes.resize(limit);

    for (auto const& n : nodes)
        mRecentNodes.insert(n.second);
}
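
// mRecentNodes is what keeps successive triggers from re-requesting the
// same nodes; onTimer() clears it, so a timeout always permits a full
// retry.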

/** Take ledger header data
    Call with a lock
*/
// data must not have hash prefix
bool
InboundLedger::takeHeader(std::string const& data)
{
    // Return value: true=normal, false=bad data
    JLOG(m_journal.trace()) << "got header acquiring ledger " << mHash;

    if (mComplete || mFailed || mHaveHeader)
        return true;

    auto* f = mReason == Reason::SHARD ? app_.getShardFamily()
                                       : &app_.getNodeFamily();
    mLedger = std::make_shared<Ledger>(
        deserializeHeader(makeSlice(data)), app_.config(), *f);
    if (mLedger->info().hash != mHash ||
        (mSeq != 0 && mSeq != mLedger->info().seq))
    {
        JLOG(m_journal.warn())
            << "Acquire hash mismatch: " << mLedger->info().hash
            << "!=" << mHash;
        mLedger.reset();
        return false;
    }
    if (mSeq == 0)
        mSeq = mLedger->info().seq;
    mLedger->stateMap().setLedgerSeq(mSeq);
    mLedger->txMap().setLedgerSeq(mSeq);
    mHaveHeader = true;

    Serializer s(data.size() + 4);
    s.add32(HashPrefix::ledgerMaster);
    s.addRaw(data.data(), data.size());
    f->db().store(hotLEDGER, std::move(s.modData()), mHash, mSeq);

    if (mLedger->info().txHash.isZero())
        mHaveTransactions = true;

    if (mLedger->info().accountHash.isZero())
        mHaveState = true;

    mLedger->txMap().setSynching();
    mLedger->stateMap().setSynching();

    return true;
}

/** Process node data received from a peer
    Call with a lock
*/
void
InboundLedger::receiveNode(protocol::TMLedgerData& packet, SHAMapAddNode& san)
{
    if (!mHaveHeader)
    {
        JLOG(m_journal.warn()) << "Missing ledger header";
        san.incInvalid();
        return;
    }
    if (packet.type() == protocol::liTX_NODE)
    {
        if (mHaveTransactions || mFailed)
        {
            san.incDuplicate();
            return;
        }
    }
    else if (mHaveState || mFailed)
    {
        san.incDuplicate();
        return;
    }

    auto [map, rootHash, filter] = [&]()
        -> std::tuple<SHAMap&, SHAMapHash, std::unique_ptr<SHAMapSyncFilter>> {
        if (packet.type() == protocol::liTX_NODE)
            return {
                mLedger->txMap(),
                SHAMapHash{mLedger->info().txHash},
                std::make_unique<TransactionStateSF>(
                    mLedger->txMap().family().db(), app_.getLedgerMaster())};
        return {
            mLedger->stateMap(),
            SHAMapHash{mLedger->info().accountHash},
            std::make_unique<AccountStateSF>(
                mLedger->stateMap().family().db(), app_.getLedgerMaster())};
    }();

    try
    {
        for (auto const& node : packet.nodes())
        {
            auto const nodeID = deserializeSHAMapNodeID(node.nodeid());

            if (!nodeID)
            {
                san.incInvalid();
                return;
            }

            if (nodeID->isRoot())
                san += map.addRootNode(
                    rootHash, makeSlice(node.nodedata()), filter.get());
            else
                san += map.addKnownNode(
                    *nodeID, makeSlice(node.nodedata()), filter.get());

            if (!san.isGood())
            {
                JLOG(m_journal.warn()) << "Received bad node data";
                return;
            }
        }
    }
    catch (std::exception const& e)
    {
        JLOG(m_journal.error()) << "Received bad node data: " << e.what();
        san.incInvalid();
        return;
    }

    if (!map.isSynching())
    {
        if (packet.type() == protocol::liTX_NODE)
            mHaveTransactions = true;
        else
            mHaveState = true;

        if (mHaveTransactions && mHaveState)
        {
            mComplete = true;
            done();
        }
    }
}
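
// The SHAMapAddNode tallies (useful/duplicate/invalid) accumulated here
// are folded into mStats by processData(), and their "good" count is how
// runData() picks the most productive peer to trigger next.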

/** Process AS root node received from a peer
    Call with a lock
*/
bool
InboundLedger::takeAsRootNode(Slice const& data, SHAMapAddNode& san)
{
    if (mFailed || mHaveState)
    {
        san.incDuplicate();
        return true;
    }

    if (!mHaveHeader)
    {
        assert(false);
        return false;
    }

    AccountStateSF filter(
        mLedger->stateMap().family().db(), app_.getLedgerMaster());
    san += mLedger->stateMap().addRootNode(
        SHAMapHash{mLedger->info().accountHash}, data, &filter);
    return san.isGood();
}

/** Process TX root node received from a peer
    Call with a lock
*/
bool
InboundLedger::takeTxRootNode(Slice const& data, SHAMapAddNode& san)
{
    if (mFailed || mHaveTransactions)
    {
        san.incDuplicate();
        return true;
    }

    if (!mHaveHeader)
    {
        assert(false);
        return false;
    }

    TransactionStateSF filter(
        mLedger->txMap().family().db(), app_.getLedgerMaster());
    san += mLedger->txMap().addRootNode(
        SHAMapHash{mLedger->info().txHash}, data, &filter);
    return san.isGood();
}

std::vector<InboundLedger::neededHash_t>
InboundLedger::getNeededHashes()
{
    std::vector<neededHash_t> ret;

    if (!mHaveHeader)
    {
        ret.push_back(
            std::make_pair(protocol::TMGetObjectByHash::otLEDGER, mHash));
        return ret;
    }

    if (!mHaveState)
    {
        AccountStateSF filter(
            mLedger->stateMap().family().db(), app_.getLedgerMaster());
        for (auto const& h : neededStateHashes(4, &filter))
        {
            ret.push_back(
                std::make_pair(protocol::TMGetObjectByHash::otSTATE_NODE, h));
        }
    }

    if (!mHaveTransactions)
    {
        TransactionStateSF filter(
            mLedger->txMap().family().db(), app_.getLedgerMaster());
        for (auto const& h : neededTxHashes(4, &filter))
        {
            ret.push_back(std::make_pair(
                protocol::TMGetObjectByHash::otTRANSACTION_NODE, h));
        }
    }

    return ret;
}
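
// Feeds the aggressive by-hash fallback in trigger(): after enough
// timeouts the acquire switches from SHAMap node requests to
// TMGetObjectByHash queries for these hashes.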

/** Stash a TMLedgerData received from a peer for later processing
    Returns 'true' if we need to dispatch
*/
bool
InboundLedger::gotData(
    std::weak_ptr<Peer> peer,
    std::shared_ptr<protocol::TMLedgerData> const& data)
{
    std::lock_guard sl(mReceivedDataLock);

    if (isDone())
        return false;

    mReceivedData.emplace_back(peer, data);

    if (mReceiveDispatched)
        return false;

    mReceiveDispatched = true;
    return true;
}
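
// gotData() and runData() form a producer/consumer pair: gotData() stashes
// packets under mReceivedDataLock and asks for a dispatch only when no
// drain loop is running; runData() drains in batches until the stash is
// empty, then clears mReceiveDispatched so the next packet redispatches.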

/** Process one TMLedgerData
    Returns the number of useful nodes
*/
// VFALCO NOTE, it is not necessary to pass the entire Peer,
//              we can get away with just a Resource::Consumer endpoint.
//
// TODO Change peer to Consumer
//
int
InboundLedger::processData(
    std::shared_ptr<Peer> peer,
    protocol::TMLedgerData& packet)
{
    ScopedLockType sl(mLock);

    if (packet.type() == protocol::liBASE)
    {
        if (packet.nodes_size() < 1)
        {
            JLOG(m_journal.warn()) << "Got empty header data";
            peer->charge(Resource::feeInvalidRequest);
            return -1;
        }

        SHAMapAddNode san;

        try
        {
            if (!mHaveHeader)
            {
                if (!takeHeader(packet.nodes(0).nodedata()))
                {
                    JLOG(m_journal.warn()) << "Got invalid header data";
                    peer->charge(Resource::feeInvalidRequest);
                    return -1;
                }

                san.incUseful();
            }

            if (!mHaveState && (packet.nodes().size() > 1) &&
                !takeAsRootNode(makeSlice(packet.nodes(1).nodedata()), san))
            {
                JLOG(m_journal.warn()) << "Included AS root invalid";
            }

            if (!mHaveTransactions && (packet.nodes().size() > 2) &&
                !takeTxRootNode(makeSlice(packet.nodes(2).nodedata()), san))
            {
                JLOG(m_journal.warn()) << "Included TX root invalid";
            }
        }
        catch (std::exception const& ex)
        {
            JLOG(m_journal.warn())
                << "Included AS/TX root invalid: " << ex.what();
            peer->charge(Resource::feeBadData);
            return -1;
        }

        if (san.isUseful())
            mProgress = true;

        mStats += san;
        return san.getGood();
    }

    if ((packet.type() == protocol::liTX_NODE) ||
        (packet.type() == protocol::liAS_NODE))
    {
        if (packet.nodes().size() == 0)
        {
            JLOG(m_journal.info()) << "Got response with no nodes";
            peer->charge(Resource::feeInvalidRequest);
            return -1;
        }

        // Verify node IDs and data are complete
        for (auto const& node : packet.nodes())
        {
            if (!node.has_nodeid() || !node.has_nodedata())
            {
                JLOG(m_journal.warn()) << "Got bad node";
                peer->charge(Resource::feeInvalidRequest);
                return -1;
            }
        }

        SHAMapAddNode san;
        receiveNode(packet, san);

        if (packet.type() == protocol::liTX_NODE)
        {
            JLOG(m_journal.debug()) << "Ledger TX node stats: " << san.get();
        }
        else
        {
            JLOG(m_journal.debug()) << "Ledger AS node stats: " << san.get();
        }

        if (san.isUseful())
            mProgress = true;

        mStats += san;
        return san.getGood();
    }

    return -1;
}

/** Process pending TMLedgerData
    Query the 'best' peer
*/
void
InboundLedger::runData()
{
    std::shared_ptr<Peer> chosenPeer;
    int chosenPeerCount = -1;

    std::vector<PeerDataPairType> data;

    for (;;)
    {
        data.clear();
        {
            std::lock_guard sl(mReceivedDataLock);

            if (mReceivedData.empty())
            {
                mReceiveDispatched = false;
                break;
            }

            data.swap(mReceivedData);
        }

        // Select the peer that gives us the most nodes that are useful,
        // breaking ties in favor of the peer that responded first.
        for (auto& entry : data)
        {
            if (auto peer = entry.first.lock())
            {
                int count = processData(peer, *(entry.second));
                if (count > chosenPeerCount)
                {
                    chosenPeerCount = count;
                    chosenPeer = std::move(peer);
                }
            }
        }
    }

    if (chosenPeer)
        trigger(chosenPeer, TriggerReason::reply);
}

Json::Value
InboundLedger::getJson(int)
{
    Json::Value ret(Json::objectValue);

    ScopedLockType sl(mLock);

    ret[jss::hash] = to_string(mHash);

    if (mComplete)
        ret[jss::complete] = true;

    if (mFailed)
        ret[jss::failed] = true;

    if (!mComplete && !mFailed)
        ret[jss::peers] = static_cast<int>(mPeers.size());

    ret[jss::have_header] = mHaveHeader;

    if (mHaveHeader)
    {
        ret[jss::have_state] = mHaveState;
        ret[jss::have_transactions] = mHaveTransactions;
    }

    ret[jss::timeouts] = mTimeouts;

    if (mHaveHeader && !mHaveState)
    {
        Json::Value hv(Json::arrayValue);
        for (auto const& h : neededStateHashes(16, nullptr))
        {
            hv.append(to_string(h));
        }
        ret[jss::needed_state_hashes] = hv;
    }

    if (mHaveHeader && !mHaveTransactions)
    {
        Json::Value hv(Json::arrayValue);
        for (auto const& h : neededTxHashes(16, nullptr))
        {
            hv.append(to_string(h));
        }
        ret[jss::needed_transaction_hashes] = hv;
    }

    return ret;
}

}  // namespace ripple