rippled
InboundLedger.cpp
1 //------------------------------------------------------------------------------
2 /*
3  This file is part of rippled: https://github.com/ripple/rippled
4  Copyright (c) 2012, 2013 Ripple Labs Inc.
5 
6  Permission to use, copy, modify, and/or distribute this software for any
7  purpose with or without fee is hereby granted, provided that the above
8  copyright notice and this permission notice appear in all copies.
9 
10  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 //==============================================================================
19 
20 #include <ripple/app/ledger/InboundLedger.h>
21 #include <ripple/shamap/SHAMapNodeID.h>
22 #include <ripple/app/ledger/AccountStateSF.h>
23 #include <ripple/app/ledger/InboundLedgers.h>
24 #include <ripple/app/ledger/LedgerMaster.h>
25 #include <ripple/app/ledger/TransactionStateSF.h>
26 #include <ripple/app/main/Application.h>
27 #include <ripple/app/misc/NetworkOPs.h>
28 #include <ripple/basics/Log.h>
29 #include <ripple/core/JobQueue.h>
30 #include <ripple/overlay/Overlay.h>
31 #include <ripple/resource/Fees.h>
32 #include <ripple/protocol/HashPrefix.h>
33 #include <ripple/protocol/jss.h>
34 #include <ripple/nodestore/DatabaseShard.h>
35 
36 #include <algorithm>
37 
38 namespace ripple {
39 
40 using namespace std::chrono_literals;
41 
42 enum
43 {
44  // Number of peers to start with
46 
47  // Number of peers to add on a timeout
49 
 50 // how many timeouts before we give up
52 
53  // how many timeouts before we get aggressive
55 
56  // Number of nodes to find initially
58 
59  // Number of nodes to request for a reply
61 
62  // Number of nodes to request blindly
63  ,reqNodes = 8
64 };
65 
 66 // milliseconds for each ledger timeout
// Time allotted to each ledger-acquire attempt before it times out (2.5 seconds).
constexpr std::chrono::milliseconds ledgerAcquireTimeout{2500};
68 
70  std::uint32_t seq, Reason reason, clock_type& clock)
71  : PeerSet (app, hash, ledgerAcquireTimeout, clock,
72  app.journal("InboundLedger"))
73  , mHaveHeader (false)
74  , mHaveState (false)
75  , mHaveTransactions (false)
76  , mSignaled (false)
77  , mByHash (true)
78  , mSeq (seq)
79  , mReason (reason)
80  , mReceiveDispatched (false)
81 {
82  JLOG (m_journal.trace()) << "Acquiring ledger " << mHash;
83 }
84 
85 void
87 {
88  ScopedLockType sl (mLock);
89  collectionLock.unlock();
90  tryDB(app_.family());
91  if (mFailed)
92  return;
93  if (! mComplete)
94  {
95  auto shardStore = app_.getShardStore();
96  if (mReason == Reason::SHARD)
97  {
98  if (! shardStore || ! app_.shardFamily())
99  {
100  JLOG(m_journal.error()) <<
101  "Acquiring shard with no shard store available";
102  mFailed = true;
103  return;
104  }
105  mHaveHeader = false;
106  mHaveTransactions = false;
107  mHaveState = false;
108  mLedger.reset();
109  tryDB(*app_.shardFamily());
110  if (mFailed)
111  return;
112  }
113  else if (shardStore && mSeq >= shardStore->earliestLedgerSeq())
114  {
115  if (auto l = shardStore->fetchLedger(mHash, mSeq))
116  {
117  mHaveHeader = true;
118  mHaveTransactions = true;
119  mHaveState = true;
120  mComplete = true;
121  mLedger = std::move(l);
122  }
123  }
124  }
125  if (! mComplete)
126  {
127  addPeers();
128  execute();
129  return;
130  }
131 
132  JLOG (m_journal.debug()) <<
133  "Acquiring ledger we already have in " <<
134  " local store. " << mHash;
135  mLedger->setImmutable(app_.config());
136 
138  return;
139 
141 
142  // Check if this could be a newer fully-validated ledger
143  if (mReason == Reason::CONSENSUS)
145 }
146 
148 {
150  {
151  JLOG (m_journal.debug()) <<
152  "Deferring InboundLedger timer due to load";
153  setTimer ();
154  return;
155  }
156 
157  app_.getJobQueue ().addJob (
158  jtLEDGER_DATA, "InboundLedger",
159  [ptr = shared_from_this()] (Job&)
160  {
161  ptr->invokeOnTimer ();
162  });
163 }
165 {
166  ScopedLockType sl (mLock);
167 
168  // If we didn't know the sequence number, but now do, save it
169  if ((seq != 0) && (mSeq == 0))
170  mSeq = seq;
171 
172  // Prevent this from being swept
173  touch ();
174 }
175 
177 {
178  ScopedLockType sl (mLock);
179  if (! isDone())
180  {
181  if (mLedger)
182  tryDB(mLedger->stateMap().family());
183  else if(mReason == Reason::SHARD)
184  tryDB(*app_.shardFamily());
185  else
186  tryDB(app_.family());
187  if (mFailed || mComplete)
188  {
189  done();
190  return true;
191  }
192  }
193  return false;
194 }
195 
197 {
198  // Save any received AS data not processed. It could be useful
199  // for populating a different ledger
200  for (auto& entry : mReceivedData)
201  {
202  if (entry.second->type () == protocol::liAS_NODE)
203  app_.getInboundLedgers().gotStaleData(entry.second);
204  }
205  if (! isDone())
206  {
207  JLOG (m_journal.debug()) <<
208  "Acquire " << mHash << " abort " <<
209  ((getTimeouts () == 0) ? std::string() :
210  (std::string ("timeouts:") +
211  std::to_string (getTimeouts ()) + " ")) <<
212  mStats.get ();
213  }
214 }
215 
218  int max, SHAMapSyncFilter* filter) const
219 {
221 
222  if (mLedger->info().txHash.isNonZero ())
223  {
224  if (mLedger->txMap().getHash().isZero ())
225  ret.push_back (mLedger->info().txHash);
226  else
227  ret = mLedger->txMap().getNeededHashes (max, filter);
228  }
229 
230  return ret;
231 }
232 
235  int max, SHAMapSyncFilter* filter) const
236 {
238 
239  if (mLedger->info().accountHash.isNonZero ())
240  {
241  if (mLedger->stateMap().getHash().isZero ())
242  ret.push_back (mLedger->info().accountHash);
243  else
244  ret = mLedger->stateMap().getNeededHashes (max, filter);
245  }
246 
247  return ret;
248 }
249 
252  Slice data,
253  bool hasPrefix)
254 {
255  SerialIter sit (data.data(), data.size());
256 
257  if (hasPrefix)
258  sit.get32 ();
259 
260  LedgerInfo info;
261 
262  info.seq = sit.get32 ();
263  info.drops = sit.get64 ();
264  info.parentHash = sit.get256 ();
265  info.txHash = sit.get256 ();
266  info.accountHash = sit.get256 ();
270  info.closeFlags = sit.get8 ();
271 
272  return info;
273 }
274 
275 // See how much of the ledger data is stored locally
276 // Data found in a fetch pack will be stored
277 void
279 {
280  if (! mHaveHeader)
281  {
282  auto makeLedger = [&, this](Blob const& data)
283  {
284  JLOG(m_journal.trace()) <<
285  "Ledger header found in fetch pack";
286  mLedger = std::make_shared<Ledger>(
287  deserializeHeader(makeSlice(data), true),
288  app_.config(), f);
289  if (mLedger->info().hash != mHash ||
290  (mSeq != 0 && mSeq != mLedger->info().seq))
291  {
292  // We know for a fact the ledger can never be acquired
293  JLOG(m_journal.warn()) <<
294  "hash " << mHash <<
295  " seq " << std::to_string(mSeq) <<
296  " cannot be a ledger";
297  mLedger.reset();
298  mFailed = true;
299  }
300  };
301 
302  // Try to fetch the ledger header from the DB
303  auto node = f.db().fetch(mHash, mSeq);
304  if (! node)
305  {
306  auto data = app_.getLedgerMaster().getFetchPack(mHash);
307  if (! data)
308  return;
309  JLOG (m_journal.trace()) <<
310  "Ledger header found in fetch pack";
311  makeLedger(*data);
312  if (mLedger)
313  f.db().store(hotLEDGER, std::move(*data),
314  mHash, mLedger->info().seq);
315  }
316  else
317  {
318  JLOG (m_journal.trace()) <<
319  "Ledger header found in node store";
320  makeLedger(node->getData());
321  }
322  if (mFailed)
323  return;
324  if (mSeq == 0)
325  mSeq = mLedger->info().seq;
326  mLedger->stateMap().setLedgerSeq(mSeq);
327  mLedger->txMap().setLedgerSeq(mSeq);
328  mHaveHeader = true;
329  }
330 
331  if (! mHaveTransactions)
332  {
333  if (mLedger->info().txHash.isZero())
334  {
335  JLOG (m_journal.trace()) << "No TXNs to fetch";
336  mHaveTransactions = true;
337  }
338  else
339  {
340  TransactionStateSF filter(mLedger->txMap().family().db(),
342  if (mLedger->txMap().fetchRoot(
343  SHAMapHash{mLedger->info().txHash}, &filter))
344  {
345  if (neededTxHashes(1, &filter).empty())
346  {
347  JLOG(m_journal.trace()) <<
348  "Had full txn map locally";
349  mHaveTransactions = true;
350  }
351  }
352  }
353  }
354 
355  if (! mHaveState)
356  {
357  if (mLedger->info().accountHash.isZero())
358  {
359  JLOG (m_journal.fatal()) <<
360  "We are acquiring a ledger with a zero account hash";
361  mFailed = true;
362  return;
363  }
364  AccountStateSF filter(mLedger->stateMap().family().db(),
366  if (mLedger->stateMap().fetchRoot(
367  SHAMapHash{mLedger->info().accountHash}, &filter))
368  {
369  if (neededStateHashes(1, &filter).empty())
370  {
371  JLOG(m_journal.trace()) <<
372  "Had full AS map locally";
373  mHaveState = true;
374  }
375  }
376  }
377 
379  {
380  JLOG(m_journal.debug()) <<
381  "Had everything locally";
382  mComplete = true;
383  mLedger->setImmutable(app_.config());
384  }
385 }
386 
389 void InboundLedger::onTimer (bool wasProgress, ScopedLockType&)
390 {
391  mRecentNodes.clear ();
392 
393  if (isDone())
394  {
395  JLOG (m_journal.info()) <<
396  "Already done " << mHash;
397  return;
398  }
399 
401  {
402  if (mSeq != 0)
403  {
404  JLOG (m_journal.warn()) <<
405  getTimeouts() << " timeouts for ledger " << mSeq;
406  }
407  else
408  {
409  JLOG (m_journal.warn()) <<
410  getTimeouts() << " timeouts for ledger " << mHash;
411  }
412  setFailed ();
413  done ();
414  return;
415  }
416 
417  if (!wasProgress)
418  {
419  checkLocal();
420 
421  mByHash = true;
422 
423  std::size_t pc = getPeerCount ();
424  JLOG (m_journal.debug()) <<
425  "No progress(" << pc <<
426  ") for ledger " << mHash;
427 
428  // addPeers triggers if the reason is not HISTORY
429  // So if the reason IS HISTORY, need to trigger after we add
430  // otherwise, we need to trigger before we add
431  // so each peer gets triggered once
432  if (mReason != Reason::HISTORY)
433  trigger (nullptr, TriggerReason::timeout);
434  addPeers ();
435  if (mReason == Reason::HISTORY)
436  trigger (nullptr, TriggerReason::timeout);
437  }
438 }
439 
442 {
443  app_.overlay().selectPeers (*this,
446 }
447 
449 {
450  return std::dynamic_pointer_cast<PeerSet> (shared_from_this ());
451 }
452 
454 {
455  if (mSignaled)
456  return;
457 
458  mSignaled = true;
459  touch ();
460 
461  JLOG (m_journal.debug()) <<
462  "Acquire " << mHash <<
463  (mFailed ? " fail " : " ") <<
464  ((getTimeouts () == 0) ? std::string() :
465  (std::string ("timeouts:") +
466  std::to_string (getTimeouts ()) + " ")) <<
467  mStats.get ();
468 
469  assert (mComplete || mFailed);
470 
471  if (mComplete && ! mFailed && mLedger)
472  {
473  mLedger->setImmutable (app_.config());
474  switch (mReason)
475  {
476  case Reason::SHARD:
478  [[fallthrough]];
479  case Reason::HISTORY:
481  break;
482  default:
484  break;
485  }
486  }
487 
488  // We hold the PeerSet lock, so must dispatch
489  app_.getJobQueue ().addJob (
490  jtLEDGER_DATA, "AcquisitionDone",
491  [self = shared_from_this()](Job&)
492  {
493  if (self->mComplete && !self->mFailed)
494  {
495  self->app().getLedgerMaster().checkAccept(
496  self->getLedger());
497  self->app().getLedgerMaster().tryAdvance();
498  }
499  else
500  self->app().getInboundLedgers().logFailure (
501  self->getHash(), self->getSeq());
502  });
503 }
504 
508 {
509  ScopedLockType sl (mLock);
510 
511  if (isDone ())
512  {
513  JLOG (m_journal.debug()) <<
514  "Trigger on ledger: " << mHash <<
515  (mComplete ? " completed" : "") <<
516  (mFailed ? " failed" : "");
517  return;
518  }
519 
520  if (auto stream = m_journal.trace())
521  {
522  if (peer)
523  stream <<
524  "Trigger acquiring ledger " << mHash << " from " << peer;
525  else
526  stream <<
527  "Trigger acquiring ledger " << mHash;
528 
529  if (mComplete || mFailed)
530  stream <<
531  "complete=" << mComplete << " failed=" << mFailed;
532  else
533  stream <<
534  "header=" << mHaveHeader << " tx=" << mHaveTransactions <<
535  " as=" << mHaveState;
536  }
537 
538  if (! mHaveHeader)
539  {
541  *app_.shardFamily() : app_.family());
542  if (mFailed)
543  {
544  JLOG (m_journal.warn()) <<
545  " failed local for " << mHash;
546  return;
547  }
548  }
549 
550  protocol::TMGetLedger tmGL;
551  tmGL.set_ledgerhash (mHash.begin (), mHash.size ());
552 
553  if (getTimeouts () != 0)
554  { // Be more aggressive if we've timed out at least once
555  tmGL.set_querytype (protocol::qtINDIRECT);
556 
557  if (! isProgress () && ! mFailed && mByHash &&
559  {
560  auto need = getNeededHashes ();
561 
562  if (!need.empty ())
563  {
564  protocol::TMGetObjectByHash tmBH;
565  bool typeSet = false;
566  tmBH.set_query (true);
567  tmBH.set_ledgerhash (mHash.begin (), mHash.size ());
568  for (auto const& p : need)
569  {
570  JLOG (m_journal.warn()) <<
571  "Want: " << p.second;
572 
573  if (!typeSet)
574  {
575  tmBH.set_type (p.first);
576  typeSet = true;
577  }
578 
579  if (p.first == tmBH.type ())
580  {
581  protocol::TMIndexedObject* io = tmBH.add_objects ();
582  io->set_hash (p.second.begin (), p.second.size ());
583  if (mSeq != 0)
584  io->set_ledgerseq(mSeq);
585  }
586  }
587 
588  auto packet = std::make_shared <Message> (
589  tmBH, protocol::mtGET_OBJECTS);
590 
591  for (auto id : mPeers)
592  {
593  if (auto p = app_.overlay ().findPeerByShortID (id))
594  {
595  mByHash = false;
596  p->send (packet);
597  }
598  }
599  }
600  else
601  {
602  JLOG (m_journal.info()) <<
603  "getNeededHashes says acquire is complete";
604  mHaveHeader = true;
605  mHaveTransactions = true;
606  mHaveState = true;
607  mComplete = true;
608  }
609  }
610  }
611 
612  // We can't do much without the header data because we don't know the
613  // state or transaction root hashes.
614  if (!mHaveHeader && !mFailed)
615  {
616  tmGL.set_itype (protocol::liBASE);
617  if (mSeq != 0)
618  tmGL.set_ledgerseq (mSeq);
619  JLOG (m_journal.trace()) <<
620  "Sending header request to " <<
621  (peer ? "selected peer" : "all peers");
622  sendRequest (tmGL, peer);
623  return;
624  }
625 
626  if (mLedger)
627  tmGL.set_ledgerseq (mLedger->info().seq);
628 
629  if (reason != TriggerReason::reply)
630  {
631  // If we're querying blind, don't query deep
632  tmGL.set_querydepth (0);
633  }
634  else if (peer && peer->isHighLatency ())
635  {
636  // If the peer has high latency, query extra deep
637  tmGL.set_querydepth (2);
638  }
639  else
640  tmGL.set_querydepth (1);
641 
642  // Get the state data first because it's the most likely to be useful
643  // if we wind up abandoning this fetch.
644  if (mHaveHeader && !mHaveState && !mFailed)
645  {
646  assert (mLedger);
647 
648  if (!mLedger->stateMap().isValid ())
649  {
650  mFailed = true;
651  }
652  else if (mLedger->stateMap().getHash ().isZero ())
653  {
654  // we need the root node
655  tmGL.set_itype (protocol::liAS_NODE);
656  *tmGL.add_nodeids () = SHAMapNodeID ().getRawString ();
657  JLOG (m_journal.trace()) <<
658  "Sending AS root request to " <<
659  (peer ? "selected peer" : "all peers");
660  sendRequest (tmGL, peer);
661  return;
662  }
663  else
664  {
665  AccountStateSF filter(mLedger->stateMap().family().db(),
667 
668  // Release the lock while we process the large state map
669  sl.unlock();
670  auto nodes = mLedger->stateMap().getMissingNodes (
671  missingNodesFind, &filter);
672  sl.lock();
673 
674  // Make sure nothing happened while we released the lock
675  if (!mFailed && !mComplete && !mHaveState)
676  {
677  if (nodes.empty ())
678  {
679  if (!mLedger->stateMap().isValid ())
680  mFailed = true;
681  else
682  {
683  mHaveState = true;
684 
685  if (mHaveTransactions)
686  mComplete = true;
687  }
688  }
689  else
690  {
691  filterNodes (nodes, reason);
692 
693  if (!nodes.empty ())
694  {
695  tmGL.set_itype (protocol::liAS_NODE);
696  for (auto const& id : nodes)
697  {
698  * (tmGL.add_nodeids ()) = id.first.getRawString ();
699  }
700 
701  JLOG (m_journal.trace()) <<
702  "Sending AS node request (" <<
703  nodes.size () << ") to " <<
704  (peer ? "selected peer" : "all peers");
705  sendRequest (tmGL, peer);
706  return;
707  }
708  else
709  {
710  JLOG (m_journal.trace()) <<
711  "All AS nodes filtered";
712  }
713  }
714  }
715  }
716  }
717 
719  {
720  assert (mLedger);
721 
722  if (!mLedger->txMap().isValid ())
723  {
724  mFailed = true;
725  }
726  else if (mLedger->txMap().getHash ().isZero ())
727  {
728  // we need the root node
729  tmGL.set_itype (protocol::liTX_NODE);
730  * (tmGL.add_nodeids ()) = SHAMapNodeID ().getRawString ();
731  JLOG (m_journal.trace()) <<
732  "Sending TX root request to " << (
733  peer ? "selected peer" : "all peers");
734  sendRequest (tmGL, peer);
735  return;
736  }
737  else
738  {
739  TransactionStateSF filter(mLedger->txMap().family().db(),
741 
742  auto nodes = mLedger->txMap().getMissingNodes (
743  missingNodesFind, &filter);
744 
745  if (nodes.empty ())
746  {
747  if (!mLedger->txMap().isValid ())
748  mFailed = true;
749  else
750  {
751  mHaveTransactions = true;
752 
753  if (mHaveState)
754  mComplete = true;
755  }
756  }
757  else
758  {
759  filterNodes (nodes, reason);
760 
761  if (!nodes.empty ())
762  {
763  tmGL.set_itype (protocol::liTX_NODE);
764  for (auto const& n : nodes)
765  {
766  * (tmGL.add_nodeids ()) = n.first.getRawString ();
767  }
768  JLOG (m_journal.trace()) <<
769  "Sending TX node request (" <<
770  nodes.size () << ") to " <<
771  (peer ? "selected peer" : "all peers");
772  sendRequest (tmGL, peer);
773  return;
774  }
775  else
776  {
777  JLOG (m_journal.trace()) <<
778  "All TX nodes filtered";
779  }
780  }
781  }
782  }
783 
784  if (mComplete || mFailed)
785  {
786  JLOG (m_journal.debug()) <<
787  "Done:" << (mComplete ? " complete" : "") <<
788  (mFailed ? " failed " : " ") <<
789  mLedger->info().seq;
790  sl.unlock ();
791  done ();
792  }
793 }
794 
797  TriggerReason reason)
798 {
799  // Sort nodes so that the ones we haven't recently
800  // requested come before the ones we have.
801  auto dup = std::stable_partition (
802  nodes.begin(), nodes.end(),
803  [this](auto const& item)
804  {
805  return mRecentNodes.count (item.second) == 0;
806  });
807 
808  // If everything is a duplicate we don't want to send
809  // any query at all except on a timeout where we need
810  // to query everyone:
811  if (dup == nodes.begin ())
812  {
813  JLOG (m_journal.trace()) <<
814  "filterNodes: all duplicates";
815 
816  if (reason != TriggerReason::timeout)
817  {
818  nodes.clear ();
819  return;
820  }
821  }
822  else
823  {
824  JLOG (m_journal.trace()) <<
825  "filterNodes: pruning duplicates";
826 
827  nodes.erase (dup, nodes.end());
828  }
829 
830  std::size_t const limit = (reason == TriggerReason::reply)
831  ? reqNodesReply
832  : reqNodes;
833 
834  if (nodes.size () > limit)
835  nodes.resize (limit);
836 
837  for (auto const& n : nodes)
838  mRecentNodes.insert (n.second);
839 }
840 
 844 // data must not have a hash prefix
846 {
847  // Return value: true=normal, false=bad data
848  JLOG (m_journal.trace()) <<
849  "got header acquiring ledger " << mHash;
850 
851  if (mComplete || mFailed || mHaveHeader)
852  return true;
853 
854  auto* f = mReason == Reason::SHARD ?
855  app_.shardFamily() : &app_.family();
856  mLedger = std::make_shared<Ledger>(deserializeHeader(
857  makeSlice(data), false), app_.config(), *f);
858  if (mLedger->info().hash != mHash ||
859  (mSeq != 0 && mSeq != mLedger->info().seq))
860  {
861  JLOG (m_journal.warn()) <<
862  "Acquire hash mismatch: " << mLedger->info().hash <<
863  "!=" << mHash;
864  mLedger.reset ();
865  return false;
866  }
867  if (mSeq == 0)
868  mSeq = mLedger->info().seq;
869  mLedger->stateMap().setLedgerSeq(mSeq);
870  mLedger->txMap().setLedgerSeq(mSeq);
871  mHaveHeader = true;
872 
873  Serializer s (data.size () + 4);
875  s.addRaw (data.data(), data.size());
876  f->db().store(hotLEDGER, std::move (s.modData ()), mHash, mSeq);
877 
878  if (mLedger->info().txHash.isZero ())
879  mHaveTransactions = true;
880 
881  if (mLedger->info().accountHash.isZero ())
882  mHaveState = true;
883 
884  mLedger->txMap().setSynching ();
885  mLedger->stateMap().setSynching ();
886 
887  return true;
888 }
889 
894  const std::vector< Blob >& data, SHAMapAddNode& san)
895 {
896  if (!mHaveHeader)
897  {
898  JLOG (m_journal.warn()) <<
899  "TX node without header";
900  san.incInvalid();
901  return false;
902  }
903 
904  if (mHaveTransactions || mFailed)
905  {
906  san.incDuplicate();
907  return true;
908  }
909 
910  auto nodeIDit = nodeIDs.cbegin ();
911  auto nodeDatait = data.begin ();
912  TransactionStateSF filter(mLedger->txMap().family().db(),
914 
915  while (nodeIDit != nodeIDs.cend ())
916  {
917  if (nodeIDit->isRoot ())
918  {
919  san += mLedger->txMap().addRootNode (
920  SHAMapHash{mLedger->info().txHash},
921  makeSlice(*nodeDatait), snfWIRE, &filter);
922  if (!san.isGood())
923  return false;
924  }
925  else
926  {
927  san += mLedger->txMap().addKnownNode (
928  *nodeIDit, makeSlice(*nodeDatait), &filter);
929  if (!san.isGood())
930  return false;
931  }
932 
933  ++nodeIDit;
934  ++nodeDatait;
935  }
936 
937  if (!mLedger->txMap().isSynching ())
938  {
939  mHaveTransactions = true;
940 
941  if (mHaveState)
942  {
943  mComplete = true;
944  done ();
945  }
946  }
947 
948  return true;
949 }
950 
955  const std::vector< Blob >& data, SHAMapAddNode& san)
956 {
957  JLOG (m_journal.trace()) <<
958  "got ASdata (" << nodeIDs.size () <<
959  ") acquiring ledger " << mHash;
960  if (nodeIDs.size () == 1)
961  {
962  JLOG(m_journal.trace()) <<
963  "got AS node: " << nodeIDs.front ();
964  }
965 
966  ScopedLockType sl (mLock);
967 
968  if (!mHaveHeader)
969  {
970  JLOG (m_journal.warn()) <<
971  "Don't have ledger header";
972  san.incInvalid();
973  return false;
974  }
975 
976  if (mHaveState || mFailed)
977  {
978  san.incDuplicate();
979  return true;
980  }
981 
982  auto nodeIDit = nodeIDs.cbegin ();
983  auto nodeDatait = data.begin ();
984  AccountStateSF filter(mLedger->stateMap().family().db(),
986 
987  while (nodeIDit != nodeIDs.cend ())
988  {
989  if (nodeIDit->isRoot ())
990  {
991  san += mLedger->stateMap().addRootNode (
992  SHAMapHash{mLedger->info().accountHash},
993  makeSlice(*nodeDatait), snfWIRE, &filter);
994  if (!san.isGood ())
995  {
996  JLOG (m_journal.warn()) <<
997  "Bad ledger header";
998  return false;
999  }
1000  }
1001  else
1002  {
1003  san += mLedger->stateMap().addKnownNode (
1004  *nodeIDit, makeSlice(*nodeDatait), &filter);
1005  if (!san.isGood ())
1006  {
1007  JLOG (m_journal.warn()) <<
1008  "Unable to add AS node";
1009  return false;
1010  }
1011  }
1012 
1013  ++nodeIDit;
1014  ++nodeDatait;
1015  }
1016 
1017  if (!mLedger->stateMap().isSynching ())
1018  {
1019  mHaveState = true;
1020 
1021  if (mHaveTransactions)
1022  {
1023  mComplete = true;
1024  done ();
1025  }
1026  }
1027 
1028  return true;
1029 }
1030 
1035 {
1036  if (mFailed || mHaveState)
1037  {
1038  san.incDuplicate();
1039  return true;
1040  }
1041 
1042  if (!mHaveHeader)
1043  {
1044  assert(false);
1045  return false;
1046  }
1047 
1048  AccountStateSF filter(mLedger->stateMap().family().db(),
1049  app_.getLedgerMaster());
1050  san += mLedger->stateMap().addRootNode (
1051  SHAMapHash{mLedger->info().accountHash}, data, snfWIRE, &filter);
1052  return san.isGood();
1053 }
1054 
1059 {
1060  if (mFailed || mHaveTransactions)
1061  {
1062  san.incDuplicate();
1063  return true;
1064  }
1065 
1066  if (!mHaveHeader)
1067  {
1068  assert(false);
1069  return false;
1070  }
1071 
1072  TransactionStateSF filter(mLedger->txMap().family().db(),
1073  app_.getLedgerMaster());
1074  san += mLedger->txMap().addRootNode (
1075  SHAMapHash{mLedger->info().txHash}, data, snfWIRE, &filter);
1076  return san.isGood();
1077 }
1078 
1081 {
1083 
1084  if (!mHaveHeader)
1085  {
1086  ret.push_back (std::make_pair (
1087  protocol::TMGetObjectByHash::otLEDGER, mHash));
1088  return ret;
1089  }
1090 
1091  if (!mHaveState)
1092  {
1093  AccountStateSF filter(mLedger->stateMap().family().db(),
1094  app_.getLedgerMaster());
1095  for (auto const& h : neededStateHashes (4, &filter))
1096  {
1097  ret.push_back (std::make_pair (
1098  protocol::TMGetObjectByHash::otSTATE_NODE, h));
1099  }
1100  }
1101 
1102  if (!mHaveTransactions)
1103  {
1104  TransactionStateSF filter(mLedger->txMap().family().db(),
1105  app_.getLedgerMaster());
1106  for (auto const& h : neededTxHashes (4, &filter))
1107  {
1108  ret.push_back (std::make_pair (
1109  protocol::TMGetObjectByHash::otTRANSACTION_NODE, h));
1110  }
1111  }
1112 
1113  return ret;
1114 }
1115 
1119 bool
1122 {
1124 
1125  if (isDone ())
1126  return false;
1127 
1128  mReceivedData.emplace_back (peer, data);
1129 
1130  if (mReceiveDispatched)
1131  return false;
1132 
1133  mReceiveDispatched = true;
1134  return true;
1135 }
1136 
1140 // VFALCO NOTE, it is not necessary to pass the entire Peer,
1141 // we can get away with just a Resource::Consumer endpoint.
1142 //
1143 // TODO Change peer to Consumer
1144 //
1146  protocol::TMLedgerData& packet)
1147 {
1148  ScopedLockType sl (mLock);
1149 
1150  if (packet.type () == protocol::liBASE)
1151  {
1152  if (packet.nodes_size () < 1)
1153  {
1154  JLOG (m_journal.warn()) <<
1155  "Got empty header data";
1156  peer->charge (Resource::feeInvalidRequest);
1157  return -1;
1158  }
1159 
1160  SHAMapAddNode san;
1161 
1162  if (!mHaveHeader)
1163  {
1164  if (takeHeader (packet.nodes (0).nodedata ()))
1165  san.incUseful ();
1166  else
1167  {
1168  JLOG (m_journal.warn()) <<
1169  "Got invalid header data";
1170  peer->charge (Resource::feeInvalidRequest);
1171  return -1;
1172  }
1173  }
1174 
1175 
1176  if (!mHaveState && (packet.nodes ().size () > 1) &&
1177  !takeAsRootNode (makeSlice(packet.nodes(1).nodedata ()), san))
1178  {
1179  JLOG (m_journal.warn()) <<
1180  "Included AS root invalid";
1181  }
1182 
1183  if (!mHaveTransactions && (packet.nodes ().size () > 2) &&
1184  !takeTxRootNode (makeSlice(packet.nodes(2).nodedata ()), san))
1185  {
1186  JLOG (m_journal.warn()) <<
1187  "Included TX root invalid";
1188  }
1189 
1190  if (san.isUseful ())
1191  progress ();
1192 
1193  mStats += san;
1194  return san.getGood ();
1195  }
1196 
1197  if ((packet.type () == protocol::liTX_NODE) || (
1198  packet.type () == protocol::liAS_NODE))
1199  {
1200  if (packet.nodes ().size () == 0)
1201  {
1202  JLOG (m_journal.info()) <<
1203  "Got response with no nodes";
1204  peer->charge (Resource::feeInvalidRequest);
1205  return -1;
1206  }
1207 
1208  std::vector<SHAMapNodeID> nodeIDs;
1209  nodeIDs.reserve(packet.nodes().size());
1210  std::vector< Blob > nodeData;
1211  nodeData.reserve(packet.nodes().size());
1212 
1213  for (int i = 0; i < packet.nodes ().size (); ++i)
1214  {
1215  const protocol::TMLedgerNode& node = packet.nodes (i);
1216 
1217  if (!node.has_nodeid () || !node.has_nodedata ())
1218  {
1219  JLOG (m_journal.warn()) <<
1220  "Got bad node";
1221  peer->charge (Resource::feeInvalidRequest);
1222  return -1;
1223  }
1224 
1225  nodeIDs.push_back (SHAMapNodeID (node.nodeid ().data (),
1226  node.nodeid ().size ()));
1227  nodeData.push_back (Blob (node.nodedata ().begin (),
1228  node.nodedata ().end ()));
1229  }
1230 
1231  SHAMapAddNode san;
1232 
1233  if (packet.type () == protocol::liTX_NODE)
1234  {
1235  takeTxNode (nodeIDs, nodeData, san);
1236  JLOG (m_journal.debug()) <<
1237  "Ledger TX node stats: " << san.get();
1238  }
1239  else
1240  {
1241  takeAsNode (nodeIDs, nodeData, san);
1242  JLOG (m_journal.debug()) <<
1243  "Ledger AS node stats: " << san.get();
1244  }
1245 
1246  if (san.isUseful ())
1247  progress ();
1248 
1249  mStats += san;
1250  return san.getGood ();
1251  }
1252 
1253  return -1;
1254 }
1255 
1260 {
1261  std::shared_ptr<Peer> chosenPeer;
1262  int chosenPeerCount = -1;
1263 
1265 
1266  for (;;)
1267  {
1268  data.clear();
1269  {
1271 
1272  if (mReceivedData.empty ())
1273  {
1274  mReceiveDispatched = false;
1275  break;
1276  }
1277 
1278  data.swap(mReceivedData);
1279  }
1280 
1281  // Select the peer that gives us the most nodes that are useful,
1282  // breaking ties in favor of the peer that responded first.
1283  for (auto& entry : data)
1284  {
1285  if (auto peer = entry.first.lock())
1286  {
1287  int count = processData (peer, *(entry.second));
1288  if (count > chosenPeerCount)
1289  {
1290  chosenPeerCount = count;
1291  chosenPeer = std::move (peer);
1292  }
1293  }
1294  }
1295  }
1296 
1297  if (chosenPeer)
1298  trigger (chosenPeer, TriggerReason::reply);
1299 }
1300 
1302 {
1304 
1305  ScopedLockType sl (mLock);
1306 
1307  ret[jss::hash] = to_string (mHash);
1308 
1309  if (mComplete)
1310  ret[jss::complete] = true;
1311 
1312  if (mFailed)
1313  ret[jss::failed] = true;
1314 
1315  if (!mComplete && !mFailed)
1316  ret[jss::peers] = static_cast<int>(mPeers.size());
1317 
1318  ret[jss::have_header] = mHaveHeader;
1319 
1320  if (mHaveHeader)
1321  {
1322  ret[jss::have_state] = mHaveState;
1323  ret[jss::have_transactions] = mHaveTransactions;
1324  }
1325 
1326  ret[jss::timeouts] = getTimeouts ();
1327 
1328  if (mHaveHeader && !mHaveState)
1329  {
1331  for (auto const& h : neededStateHashes (16, nullptr))
1332  {
1333  hv.append (to_string (h));
1334  }
1335  ret[jss::needed_state_hashes] = hv;
1336  }
1337 
1339  {
1341  for (auto const& h : neededTxHashes (16, nullptr))
1342  {
1343  hv.append (to_string (h));
1344  }
1345  ret[jss::needed_transaction_hashes] = hv;
1346  }
1347 
1348  return ret;
1349 }
1350 
1351 } // ripple
ripple::InboundLedger::mRecentNodes
std::set< uint256 > mRecentNodes
Definition: InboundLedger.h:169
beast::Journal::fatal
Stream fatal() const
Definition: Journal.h:312
ripple::Resource::feeInvalidRequest
const Charge feeInvalidRequest
Schedule of fees charged for imposing load on the server.
ripple::Application
Definition: Application.h:85
ripple::SHAMapAddNode
Definition: SHAMapAddNode.h:28
ripple::SHAMapAddNode::get
std::string get() const
Definition: SHAMapAddNode.h:163
ripple::InboundLedger::Reason::HISTORY
@ HISTORY
ripple::InboundLedger::getNeededHashes
std::vector< neededHash_t > getNeededHashes()
Definition: InboundLedger.cpp:1080
ripple::HashPrefix::ledgerMaster
@ ledgerMaster
ledger master data for signing
ripple::InboundLedger::mReason
const Reason mReason
Definition: InboundLedger.h:167
std::unique_lock::lock
T lock(T... args)
ripple::PeerSet::setFailed
void setFailed()
Definition: PeerSet.h:137
ripple::InboundLedger::getJson
Json::Value getJson(int)
Return a Json::objectValue.
Definition: InboundLedger.cpp:1301
ripple::makeSlice
std::enable_if_t< std::is_same< T, char >::value||std::is_same< T, unsigned char >::value, Slice > makeSlice(std::array< T, N > const &a)
Definition: Slice.h:199
ripple::InboundLedger::TriggerReason
TriggerReason
Definition: InboundLedger.h:103
ripple::reqNodes
@ reqNodes
Definition: InboundLedger.cpp:63
std::string
STL class.
ripple::InboundLedger::Reason::CONSENSUS
@ CONSENSUS
std::shared_ptr
STL class.
ripple::InboundLedger::mHaveState
bool mHaveState
Definition: InboundLedger.h:162
ripple::LedgerInfo::parentHash
uint256 parentHash
Definition: ReadView.h:98
ripple::PeerSet::getPeerCount
std::size_t getPeerCount() const
Definition: PeerSet.cpp:137
beast::Journal::trace
Stream trace() const
Severity stream access functions.
Definition: Journal.h:287
ripple::Serializer::modData
Blob & modData()
Definition: Serializer.h:181
ripple::Slice
An immutable linear range of bytes.
Definition: Slice.h:43
Json::arrayValue
@ arrayValue
array value (ordered list)
Definition: json_value.h:44
ripple::InboundLedger::mSignaled
bool mSignaled
Definition: InboundLedger.h:164
std::pair
std::vector::reserve
T reserve(T... args)
ripple::ledgerAcquireTimeout
constexpr auto ledgerAcquireTimeout
Definition: InboundLedger.cpp:67
std::vector
STL class.
std::vector::size
T size(T... args)
ripple::InboundLedger::update
void update(std::uint32_t seq)
Definition: InboundLedger.cpp:164
ripple::InboundLedger::InboundLedger
InboundLedger(Application &app, uint256 const &hash, std::uint32_t seq, Reason reason, clock_type &)
Definition: InboundLedger.cpp:69
ripple::PeerSet::mPeers
std::set< Peer::id_t > mPeers
Definition: PeerSet.h:171
ripple::PeerSet::mHash
uint256 mHash
Definition: PeerSet.h:159
std::chrono::duration
ripple::peerCountStart
@ peerCountStart
Definition: InboundLedger.cpp:45
ripple::InboundLedger::~InboundLedger
~InboundLedger()
Definition: InboundLedger.cpp:196
ripple::InboundLedger::mByHash
bool mByHash
Definition: InboundLedger.h:165
ripple::InboundLedger::filterNodes
void filterNodes(std::vector< std::pair< SHAMapNodeID, uint256 >> &nodes, TriggerReason reason)
Definition: InboundLedger.cpp:795
beast::Journal::warn
Stream warn() const
Definition: Journal.h:302
ripple::InboundLedger::processData
int processData(std::shared_ptr< Peer > peer, protocol::TMLedgerData &data)
Process one TMLedgerData Returns the number of useful nodes.
Definition: InboundLedger.cpp:1145
std::lock_guard
STL class.
ripple::InboundLedger::execute
void execute() override
Definition: InboundLedger.cpp:147
ripple::Application::getShardStore
virtual NodeStore::DatabaseShard * getShardStore()=0
ripple::PeerSet::mComplete
bool mComplete
Definition: PeerSet.h:162
ripple::InboundLedger::neededStateHashes
std::vector< uint256 > neededStateHashes(int max, SHAMapSyncFilter *filter) const
Definition: InboundLedger.cpp:234
ripple::Serializer::add32
int add32(std::uint32_t)
Definition: Serializer.cpp:46
ripple::PeerSet::isDone
virtual bool isDone() const
Definition: PeerSet.h:101
ripple::AccountStateSF
Definition: AccountStateSF.h:31
ripple::JobQueue::addJob
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition: JobQueue.h:156
ripple::LedgerInfo::seq
LedgerIndex seq
Definition: ReadView.h:87
ripple::to_string
std::string to_string(ListDisposition disposition)
Definition: ValidatorList.cpp:41
ripple::SHAMapNodeID
Definition: SHAMapNodeID.h:33
ripple::PeerSet::touch
void touch()
Definition: PeerSet.h:85
std::vector::front
T front(T... args)
ripple::Family::db
virtual NodeStore::Database & db()=0
ripple::LedgerInfo::txHash
uint256 txHash
Definition: ReadView.h:96
ripple::SHAMapHash
Definition: SHAMapTreeNode.h:44
algorithm
ripple::jtLEDGER_DATA
@ jtLEDGER_DATA
Definition: Job.h:48
ripple::Application::getInboundLedgers
virtual InboundLedgers & getInboundLedgers()=0
ripple::base_uint::size
constexpr static std::size_t size()
Definition: base_uint.h:417
std::unique_lock::unlock
T unlock(T... args)
ripple::InboundLedger::neededTxHashes
std::vector< uint256 > neededTxHashes(int max, SHAMapSyncFilter *filter) const
Definition: InboundLedger.cpp:217
std::vector::push_back
T push_back(T... args)
ripple::LedgerInfo::closeTime
NetClock::time_point closeTime
Definition: ReadView.h:118
ripple::base_uint< 256 >
ripple::InboundLedger::takeHeader
bool takeHeader(std::string const &data)
Take ledger header data Call with a lock.
Definition: InboundLedger.cpp:845
ripple::JobQueue::getJobCountTotal
int getJobCountTotal(JobType t) const
Jobs waiting plus running at this priority.
Definition: JobQueue.cpp:129
ripple::reqNodesReply
@ reqNodesReply
Definition: InboundLedger.cpp:60
ripple::InboundLedger::gotData
bool gotData(std::weak_ptr< Peer >, std::shared_ptr< protocol::TMLedgerData > const &)
Stash a TMLedgerData received from a peer for later processing Returns 'true' if we need to dispatch.
Definition: InboundLedger.cpp:1120
ripple::NodeStore::Database::store
virtual void store(NodeObjectType type, Blob &&data, uint256 const &hash, std::uint32_t seq)=0
Store the object.
ripple::InboundLedger::takeTxNode
bool takeTxNode(const std::vector< SHAMapNodeID > &IDs, const std::vector< Blob > &data, SHAMapAddNode &)
Process TX data received from a peer Call with a lock.
Definition: InboundLedger.cpp:893
Json::Value::append
Value & append(const Value &value)
Append value to array at the end.
Definition: json_value.cpp:907
ripple::InboundLedger::mLedger
std::shared_ptr< Ledger > mLedger
Definition: InboundLedger.h:160
ripple::Application::shardFamily
virtual Family * shardFamily()=0
std::enable_shared_from_this< InboundLedger >::shared_from_this
T shared_from_this(T... args)
ripple::InboundLedger::deserializeHeader
static LedgerInfo deserializeHeader(Slice data, bool hasPrefix)
Definition: InboundLedger.cpp:251
ripple::SerialIter::get8
unsigned char get8()
Definition: Serializer.cpp:430
ripple::Application::family
virtual Family & family()=0
ripple::SHAMapAddNode::isUseful
bool isUseful() const
Definition: SHAMapAddNode.h:117
Json::objectValue
@ objectValue
object value (collection of name/value pairs).
Definition: json_value.h:45
ripple::SerialIter::get256
uint256 get256()
Definition: Serializer.h:380
ripple::SerialIter::get64
std::uint64_t get64()
Definition: Serializer.cpp:475
ripple::InboundLedger::addPeers
void addPeers()
Add more peers to the set, if possible.
Definition: InboundLedger.cpp:441
ripple::peerCountAdd
@ peerCountAdd
Definition: InboundLedger.cpp:48
ripple::Application::getLedgerMaster
virtual LedgerMaster & getLedgerMaster()=0
ripple::InboundLedgers::gotStaleData
virtual void gotStaleData(std::shared_ptr< protocol::TMLedgerData > packet)=0
ripple::LedgerMaster::getFetchPack
boost::optional< Blob > getFetchPack(uint256 const &hash) override
Retrieves partial ledger data of the coresponding hash from peers.
Definition: LedgerMaster.cpp:1983
ripple::NodeStore::Database::fetch
virtual std::shared_ptr< NodeObject > fetch(uint256 const &hash, std::uint32_t seq)=0
Fetch an object.
ripple::Application::config
virtual Config & config()=0
ripple::InboundLedgers::onLedgerFetched
virtual void onLedgerFetched()=0
Called when a complete ledger is obtained.
ripple::SHAMapAddNode::isGood
bool isGood() const
Definition: SHAMapAddNode.h:135
std::unique_lock
STL class.
ripple::SHAMapNodeID::getRawString
std::string getRawString() const
Definition: SHAMapNodeID.cpp:93
ripple::Serializer::addRaw
int addRaw(Blob const &vector)
Definition: Serializer.cpp:100
ripple::LedgerInfo::closeFlags
int closeFlags
Definition: ReadView.h:109
std::to_string
T to_string(T... args)
ripple::Application::getJobQueue
virtual JobQueue & getJobQueue()=0
ripple::InboundLedger::takeTxRootNode
bool takeTxRootNode(Slice const &data, SHAMapAddNode &)
Process AS root node received from a peer Call with a lock.
Definition: InboundLedger.cpp:1058
beast::Journal::error
Stream error() const
Definition: Journal.h:307
beast::Journal::info
Stream info() const
Definition: Journal.h:297
std::chrono::time_point
ripple::Family
Definition: Family.h:32
ripple::Job
Definition: Job.h:83
ripple::SerialIter
Definition: Serializer.h:311
ripple::InboundLedger::pmDowncast
std::weak_ptr< PeerSet > pmDowncast() override
Definition: InboundLedger.cpp:448
ripple::Overlay::selectPeers
virtual std::size_t selectPeers(PeerSet &set, std::size_t limit, std::function< bool(std::shared_ptr< Peer > const &)> score)=0
Select from active peers.
std::uint32_t
ripple::missingNodesFind
@ missingNodesFind
Definition: InboundLedger.cpp:57
ripple::InboundLedger::mReceiveDispatched
bool mReceiveDispatched
Definition: InboundLedger.h:176
ripple::InboundLedger::mHaveHeader
bool mHaveHeader
Definition: InboundLedger.h:161
beast::abstract_clock< std::chrono::steady_clock >
ripple::LedgerMaster::checkAccept
void checkAccept(std::shared_ptr< Ledger const > const &ledger)
Definition: LedgerMaster.cpp:1000
ripple::LedgerInfo::drops
XRPAmount drops
Definition: ReadView.h:100
ripple::PeerSet::sendRequest
void sendRequest(const protocol::TMGetLedger &message)
Definition: PeerSet.cpp:121
std::weak_ptr
STL class.
ripple::Serializer
Definition: Serializer.h:43
ripple::InboundLedger::TriggerReason::timeout
@ timeout
ripple::InboundLedger::TriggerReason::reply
@ reply
ripple
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: RCLCensorshipDetector.h:29
ripple::InboundLedger::mStats
SHAMapAddNode mStats
Definition: InboundLedger.h:171
ripple::PeerSet::getTimeouts
int getTimeouts() const
Returns the number of times we timed out.
Definition: PeerSet.h:72
ripple::base_uint::begin
iterator begin()
Definition: base_uint.h:106
ripple::PeerSet::app_
Application & app_
Definition: PeerSet.h:153
std::vector::cbegin
T cbegin(T... args)
ripple::InboundLedger::mSeq
std::uint32_t mSeq
Definition: InboundLedger.h:166
ripple::InboundLedger::takeAsRootNode
bool takeAsRootNode(Slice const &data, SHAMapAddNode &)
Process AS root node received from a peer Call with a lock.
Definition: InboundLedger.cpp:1034
ripple::LedgerInfo::closeTimeResolution
NetClock::duration closeTimeResolution
Definition: ReadView.h:112
ripple::PeerSet
Supports data retrieval by managing a set of peers.
Definition: PeerSet.h:48
ripple::PeerSet::mFailed
bool mFailed
Definition: PeerSet.h:163
ripple::ScoreHasLedger
Definition: Overlay.h:278
ripple::snfWIRE
@ snfWIRE
Definition: SHAMapTreeNode.h:38
ripple::NodeStore::DatabaseShard::setStored
virtual void setStored(std::shared_ptr< Ledger const > const &ledger)=0
Notifies the database that the given ledger has been fully acquired and stored.
ripple::Application::overlay
virtual Overlay & overlay()=0
ripple::InboundLedger::done
void done()
Definition: InboundLedger.cpp:453
ripple::InboundLedger::takeAsNode
bool takeAsNode(const std::vector< SHAMapNodeID > &IDs, const std::vector< Blob > &data, SHAMapAddNode &)
Process AS data received from a peer Call with a lock.
Definition: InboundLedger.cpp:954
ripple::InboundLedger::trigger
void trigger(std::shared_ptr< Peer > const &, TriggerReason)
Request more nodes, perhaps from a specific peer.
Definition: InboundLedger.cpp:507
ripple::PeerSet::m_journal
beast::Journal m_journal
Definition: PeerSet.h:154
beast::Journal::debug
Stream debug() const
Definition: Journal.h:292
ripple::SHAMapAddNode::incInvalid
void incInvalid()
Definition: SHAMapAddNode.h:75
std::size_t
ripple::SHAMapAddNode::incUseful
void incUseful()
Definition: SHAMapAddNode.h:82
ripple::hotLEDGER
@ hotLEDGER
Definition: NodeObject.h:36
std::make_pair
T make_pair(T... args)
ripple::LedgerInfo
Information about the notional ledger backing the view.
Definition: ReadView.h:79
ripple::SHAMapAddNode::getGood
int getGood() const
Definition: SHAMapAddNode.h:103
ripple::LedgerMaster::storeLedger
bool storeLedger(std::shared_ptr< Ledger const > ledger)
Definition: LedgerMaster.cpp:517
std::vector::cend
T cend(T... args)
ripple::InboundLedger::Reason
Reason
Definition: InboundLedger.h:47
ripple::PeerSet::getHash
uint256 const & getHash() const
Returns the hash of the data we want.
Definition: PeerSet.h:54
ripple::SHAMapAddNode::incDuplicate
void incDuplicate()
Definition: SHAMapAddNode.h:89
ripple::InboundLedger::runData
void runData()
Process pending TMLedgerData Query the 'best' peer.
Definition: InboundLedger.cpp:1259
ripple::SerialIter::get32
std::uint32_t get32()
Definition: Serializer.cpp:458
ripple::InboundLedger::mReceivedData
std::vector< PeerDataPairType > mReceivedData
Definition: InboundLedger.h:175
ripple::ledgerBecomeAggressiveThreshold
@ ledgerBecomeAggressiveThreshold
Definition: InboundLedger.cpp:54
ripple::SHAMapSyncFilter
Definition: SHAMapSyncFilter.h:30
std::stable_partition
T stable_partition(T... args)
ripple::InboundLedger::Reason::SHARD
@ SHARD
ripple::PeerSet::isProgress
bool isProgress()
Definition: PeerSet.h:128
ripple::InboundLedger::onTimer
void onTimer(bool progress, ScopedLockType &peerSetLock) override
Called with a lock by the PeerSet when the timer expires.
Definition: InboundLedger.cpp:389
ripple::InboundLedger::mReceivedDataLock
std::mutex mReceivedDataLock
Definition: InboundLedger.h:174
ripple::InboundLedger::checkLocal
bool checkLocal()
Definition: InboundLedger.cpp:176
ripple::TransactionStateSF
Definition: TransactionStateSF.h:31
ripple::ledgerTimeoutRetriesMax
@ ledgerTimeoutRetriesMax
Definition: InboundLedger.cpp:51
ripple::PeerSet::setTimer
void setTimer()
Definition: PeerSet.cpp:69
ripple::InboundLedger::mHaveTransactions
bool mHaveTransactions
Definition: InboundLedger.h:163
ripple::Overlay::findPeerByShortID
virtual std::shared_ptr< Peer > findPeerByShortID(Peer::id_t const &id)=0
Returns the peer with the matching short id, or null.
ripple::InboundLedger::init
void init(ScopedLockType &collectionLock)
Definition: InboundLedger.cpp:86
ripple::LedgerInfo::accountHash
uint256 accountHash
Definition: ReadView.h:97
ripple::PeerSet::progress
void progress()
Called to indicate that forward progress has been made.
Definition: PeerSet.h:80
ripple::InboundLedger::tryDB
void tryDB(Family &f)
Definition: InboundLedger.cpp:278
ripple::PeerSet::mLock
std::recursive_mutex mLock
Definition: PeerSet.h:157
Json::Value
Represents a JSON value.
Definition: json_value.h:141
ripple::LedgerInfo::parentCloseTime
NetClock::time_point parentCloseTime
Definition: ReadView.h:88