rippled
InboundLedger.cpp
1 //------------------------------------------------------------------------------
2 /*
3  This file is part of rippled: https://github.com/ripple/rippled
4  Copyright (c) 2012, 2013 Ripple Labs Inc.
5 
6  Permission to use, copy, modify, and/or distribute this software for any
7  purpose with or without fee is hereby granted, provided that the above
8  copyright notice and this permission notice appear in all copies.
9 
10  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 //==============================================================================
19 
20 #include <ripple/app/ledger/AccountStateSF.h>
21 #include <ripple/app/ledger/InboundLedger.h>
22 #include <ripple/app/ledger/InboundLedgers.h>
23 #include <ripple/app/ledger/LedgerMaster.h>
24 #include <ripple/app/ledger/TransactionStateSF.h>
25 #include <ripple/app/main/Application.h>
26 #include <ripple/app/misc/NetworkOPs.h>
27 #include <ripple/basics/Log.h>
28 #include <ripple/core/JobQueue.h>
29 #include <ripple/nodestore/DatabaseShard.h>
30 #include <ripple/overlay/Overlay.h>
31 #include <ripple/protocol/HashPrefix.h>
32 #include <ripple/protocol/jss.h>
33 #include <ripple/resource/Fees.h>
34 #include <ripple/shamap/SHAMapNodeID.h>
35 
36 #include <algorithm>
37 
38 namespace ripple {
39 
40 using namespace std::chrono_literals;
41 
// Tuning constants for the ledger-acquisition state machine.
// NOTE(review): the enumerator identifiers and values were dropped by the
// text extraction — only the per-entry comments and trailing commas survive.
// The cross-reference index at the bottom of this dump names at least
// peerCountStart (source line 44) and reqNodes (source line 68); confirm the
// full list against the upstream InboundLedger.cpp before editing.
42 enum {
43  // Number of peers to start with
45
46  // Number of peers to add on a timeout
47  ,
49
50  // how many timeouts before we giveup
51  ,
53
54  // how many timeouts before we get aggressive
55  ,
57
58  // Number of nodes to find initially
59  ,
61
62  // Number of nodes to request for a reply
63  ,
65
66  // Number of nodes to request blindly
67  ,
69 };
70 
71 // Timeout interval applied to each ledger acquisition attempt (2.5 s).
72 auto constexpr ledgerAcquireTimeout = 2500ms;
73 
// Constructor: records the target ledger hash/sequence and the reason for
// the acquisition, then marks the set as recently used via touch().
// NOTE(review): the signature line "InboundLedger::InboundLedger(" (source
// line 74) was dropped by the text extraction; the cross-reference index
// below confirms this constructor's parameter list.
75  Application& app,
76  uint256 const& hash,
77  std::uint32_t seq,
78  Reason reason,
79  clock_type& clock)
80  : PeerSet(app, hash, ledgerAcquireTimeout, app.journal("InboundLedger"))
81  , m_clock(clock)
82  , mHaveHeader(false)
83  , mHaveState(false)
84  , mHaveTransactions(false)
85  , mSignaled(false)
86  , mByHash(true)
87  , mSeq(seq)
88  , mReason(reason)
89  , mReceiveDispatched(false)
90 {
91  JLOG(m_journal.trace()) << "Acquiring ledger " << mHash;
92  touch();
93 }
94 
// Acquisition start-up: first tries local stores (node store, fetch packs,
// shard store) via tryDB(); only if the ledger is still incomplete does it
// add peers and queue the timer job to fetch over the network.
// NOTE(review): the extraction dropped several source lines here (the
// function signature at line 96, and lines 146, 149-150, 153 near the end —
// the latter presumably the HISTORY/CONSENSUS follow-up calls). Confirm
// against upstream before modifying.
95 void
97 {
// The caller's collection lock is released before doing store I/O.
99  collectionLock.unlock();
100  tryDB(app_.family());
101  if (mFailed)
102  return;
103  if (!mComplete)
104  {
105  auto shardStore = app_.getShardStore();
106  if (mReason == Reason::SHARD)
107  {
// Shard acquisition is impossible without a shard store/family.
108  if (!shardStore || !app_.shardFamily())
109  {
110  JLOG(m_journal.error())
111  << "Acquiring shard with no shard store available";
112  mFailed = true;
113  return;
114  }
// Reset progress flags and retry against the shard family's store.
115  mHaveHeader = false;
116  mHaveTransactions = false;
117  mHaveState = false;
118  mLedger.reset();
119  tryDB(*app_.shardFamily());
120  if (mFailed)
121  return;
122  }
123  else if (shardStore && mSeq >= shardStore->earliestLedgerSeq())
124  {
// A complete ledger may already be available from the shard store.
125  if (auto l = shardStore->fetchLedger(mHash, mSeq))
126  {
127  mHaveHeader = true;
128  mHaveTransactions = true;
129  mHaveState = true;
130  mComplete = true;
131  mLedger = std::move(l);
132  }
133  }
134  }
135  if (!mComplete)
136  {
// Not available locally: go to the network.
137  addPeers();
138  queueJob();
139  return;
140  }
141
142  JLOG(m_journal.debug()) << "Acquiring ledger we already have in "
143  << " local store. " << mHash;
144  mLedger->setImmutable(app_.config());
145
147  return;
148
150
151  // Check if this could be a newer fully-validated ledger
152  if (mReason == Reason::CONSENSUS)
154 }
155 
// Counts how many of the tracked peer IDs still resolve to a connected
// peer in the overlay. (Signature line dropped by the extraction.)
158 {
159  return std::count_if(mPeers.begin(), mPeers.end(), [this](auto id) {
160  return app_.overlay().findPeerByShortID(id) != nullptr;
161  });
162 }
163 
// Queues a jtLEDGER_DATA job that will call invokeOnTimer(); if the server
// is loaded it instead defers by re-arming the timer.
// NOTE(review): extraction dropped the signature (line 165), the load-check
// condition guarding the early-return branch (line 167), and the
// JobQueue::addJob call head (line 174) — addJob is named in the
// cross-reference index below. Confirm against upstream.
164 void
166 {
168  {
169  JLOG(m_journal.debug()) << "Deferring InboundLedger timer due to load";
170  setTimer();
171  return;
172  }
173
// Keep this object alive for the duration of the queued job.
175  jtLEDGER_DATA, "InboundLedger", [ptr = shared_from_this()](Job&) {
176  ptr->invokeOnTimer();
177  });
178 }
179 
// Records a newly learned sequence number for this ledger (if we did not
// already have one) and refreshes the sweep timestamp. The cross-reference
// index confirms the signature: update(std::uint32_t seq).
180 void
182 {
183  ScopedLockType sl(mLock);
184
185  // If we didn't know the sequence number, but now do, save it
186  if ((seq != 0) && (mSeq == 0))
187  mSeq = seq;
188
189  // Prevent this from being swept
190  touch();
191 }
192 
// Re-checks local stores for missing data; returns true if the acquisition
// reached a terminal state (complete or failed) and done() was invoked.
// (Signature line dropped by the extraction.)
193 bool
195 {
196  ScopedLockType sl(mLock);
197  if (!isDone())
198  {
// Pick the family matching where this ledger's data lives.
199  if (mLedger)
200  tryDB(mLedger->stateMap().family());
201  else if (mReason == Reason::SHARD)
202  tryDB(*app_.shardFamily());
203  else
204  tryDB(app_.family());
205  if (mFailed || mComplete)
206  {
207  done();
208  return true;
209  }
210  }
211  return false;
212 }
213 
// Destructor: forwards any unprocessed account-state node data to the
// InboundLedgers collection (it may help a different acquisition), and logs
// if this acquisition is being abandoned before completion.
// (Signature line dropped by the extraction; confirmed by the index below.)
215 {
216  // Save any received AS data not processed. It could be useful
217  // for populating a different ledger
218  for (auto& entry : mReceivedData)
219  {
220  if (entry.second->type() == protocol::liAS_NODE)
221  app_.getInboundLedgers().gotStaleData(entry.second);
222  }
223  if (!isDone())
224  {
225  JLOG(m_journal.debug())
226  << "Acquire " << mHash << " abort "
227  << ((mTimeouts == 0) ? std::string()
228  : (std::string("timeouts:") +
229  std::to_string(mTimeouts) + " "))
230  << mStats.get();
231  }
232 }
233 
// Returns up to `max` node hashes still missing from the transaction map:
// the tx root hash itself if the map is empty, otherwise the map's own
// needed-hash list filtered through `filter`.
// NOTE(review): the signature and the local result-vector declaration
// (source line 237, presumably std::vector<uint256> ret;) were dropped by
// the extraction.
236 {
238
239  if (mLedger->info().txHash.isNonZero())
240  {
241  if (mLedger->txMap().getHash().isZero())
242  ret.push_back(mLedger->info().txHash);
243  else
244  ret = mLedger->txMap().getNeededHashes(max, filter);
245  }
246
247  return ret;
248 }
249 
// Mirror of neededTxHashes for the account-state map: returns up to `max`
// missing state-node hashes (the account root hash if the map is empty).
// The cross-reference index confirms the signature:
// neededStateHashes(int max, SHAMapSyncFilter* filter) const.
// NOTE(review): the result-vector declaration (source line 253/254) was
// dropped by the extraction.
252 {
254
255  if (mLedger->info().accountHash.isNonZero())
256  {
257  if (mLedger->stateMap().getHash().isZero())
258  ret.push_back(mLedger->info().accountHash);
259  else
260  ret = mLedger->stateMap().getNeededHashes(max, filter);
261  }
262
263  return ret;
264 }
265 
// Deserializes a wire-format ledger header into a LedgerInfo, reading the
// fields in canonical order (seq, drops, parentHash, txHash, accountHash,
// close-time fields, closeFlags).
// NOTE(review): the extraction dropped the signature and source lines
// 279-281 — the remainder of the parentCloseTime expression and, presumably,
// the closeTime/closeTimeResolution assignments. Confirm against upstream.
268 {
269  SerialIter sit(data.data(), data.size());
270
271  LedgerInfo info;
272
273  info.seq = sit.get32();
274  info.drops = sit.get64();
275  info.parentHash = sit.get256();
276  info.txHash = sit.get256();
277  info.accountHash = sit.get256();
278  info.parentCloseTime =
282  info.closeFlags = sit.get8();
283
284  return info;
285 }
286 
// Overload that skips a 4-byte prefix before the header proper and
// delegates to the main deserializer. (Signature line dropped by the
// extraction.)
287 LedgerInfo
289 {
290  return deserializeHeader(data + 4);
291 }
292 
293 // See how much of the ledger data is stored locally
294 // Data found in a fetch pack will be stored
// Attempts to satisfy the acquisition from local sources: the node store
// (via the given family), fetch packs for the header, and locally cached
// SHAMap nodes for the transaction and state trees. Sets mHaveHeader /
// mHaveTransactions / mHaveState / mComplete / mFailed accordingly.
// NOTE(review): the extraction dropped the signature (line 296), part of
// the make_shared<Ledger> argument list (line 303), and the condition at
// line 388 (presumably "if (mHaveHeader && mHaveTransactions && mHaveState)").
295 void
297 {
298  if (!mHaveHeader)
299  {
// Builds mLedger from raw header bytes and validates hash/sequence.
300  auto makeLedger = [&, this](Blob const& data) {
301  JLOG(m_journal.trace()) << "Ledger header found in fetch pack";
302  mLedger = std::make_shared<Ledger>(
304  if (mLedger->info().hash != mHash ||
305  (mSeq != 0 && mSeq != mLedger->info().seq))
306  {
307  // We know for a fact the ledger can never be acquired
308  JLOG(m_journal.warn())
309  << "hash " << mHash << " seq " << std::to_string(mSeq)
310  << " cannot be a ledger";
311  mLedger.reset();
312  mFailed = true;
313  }
314  };
315
316  // Try to fetch the ledger header from the DB
317  auto node = f.db().fetch(mHash, mSeq);
318  if (!node)
319  {
320  auto data = app_.getLedgerMaster().getFetchPack(mHash);
321  if (!data)
322  return;
323  JLOG(m_journal.trace()) << "Ledger header found in fetch pack";
324  makeLedger(*data);
// Persist a header that came from a fetch pack into the node store.
325  if (mLedger)
326  f.db().store(
327  hotLEDGER, std::move(*data), mHash, mLedger->info().seq);
328  }
329  else
330  {
331  JLOG(m_journal.trace()) << "Ledger header found in node store";
332  makeLedger(node->getData());
333  }
334  if (mFailed)
335  return;
336  if (mSeq == 0)
337  mSeq = mLedger->info().seq;
338  mLedger->stateMap().setLedgerSeq(mSeq);
339  mLedger->txMap().setLedgerSeq(mSeq);
340  mHaveHeader = true;
341  }
342
343  if (!mHaveTransactions)
344  {
// A zero txHash means the ledger has no transactions at all.
345  if (mLedger->info().txHash.isZero())
346  {
347  JLOG(m_journal.trace()) << "No TXNs to fetch";
348  mHaveTransactions = true;
349  }
350  else
351  {
352  TransactionStateSF filter(
353  mLedger->txMap().family().db(), app_.getLedgerMaster());
354  if (mLedger->txMap().fetchRoot(
355  SHAMapHash{mLedger->info().txHash}, &filter))
356  {
357  if (neededTxHashes(1, &filter).empty())
358  {
359  JLOG(m_journal.trace()) << "Had full txn map locally";
360  mHaveTransactions = true;
361  }
362  }
363  }
364  }
365
366  if (!mHaveState)
367  {
// A valid ledger always has a non-zero account-state hash.
368  if (mLedger->info().accountHash.isZero())
369  {
370  JLOG(m_journal.fatal())
371  << "We are acquiring a ledger with a zero account hash";
372  mFailed = true;
373  return;
374  }
375  AccountStateSF filter(
376  mLedger->stateMap().family().db(), app_.getLedgerMaster());
377  if (mLedger->stateMap().fetchRoot(
378  SHAMapHash{mLedger->info().accountHash}, &filter))
379  {
380  if (neededStateHashes(1, &filter).empty())
381  {
382  JLOG(m_journal.trace()) << "Had full AS map locally";
383  mHaveState = true;
384  }
385  }
386  }
387
389  {
390  JLOG(m_journal.debug()) << "Had everything locally";
391  mComplete = true;
392  mLedger->setImmutable(app_.config());
393  }
394 }
395 
// Timeout handler: on too many timeouts the acquisition fails; on a timeout
// without progress it re-checks local stores, re-enables by-hash querying,
// and re-triggers/adds peers.
// NOTE(review): the extraction dropped the signature (line 399), the
// timeout-limit condition at line 409, and the trigger(...) calls at lines
// 441 and 444 (the surviving comment at 436-439 explains their ordering
// around addPeers). Confirm against upstream.
398 void
400 {
401  mRecentNodes.clear();
402
403  if (isDone())
404  {
405  JLOG(m_journal.info()) << "Already done " << mHash;
406  return;
407  }
408
410  {
411  if (mSeq != 0)
412  {
413  JLOG(m_journal.warn())
414  << mTimeouts << " timeouts for ledger " << mSeq;
415  }
416  else
417  {
418  JLOG(m_journal.warn())
419  << mTimeouts << " timeouts for ledger " << mHash;
420  }
421  mFailed = true;
422  done();
423  return;
424  }
425
426  if (!wasProgress)
427  {
428  checkLocal();
429
// Allow falling back to by-hash object queries again.
430  mByHash = true;
431
432  std::size_t pc = getPeerCount();
433  JLOG(m_journal.debug())
434  << "No progress(" << pc << ") for ledger " << mHash;
435
436  // addPeers triggers if the reason is not HISTORY
437  // So if the reason IS HISTORY, need to trigger after we add
438  // otherwise, we need to trigger before we add
439  // so each peer gets triggered once
440  if (mReason != Reason::HISTORY)
442  addPeers();
443  if (mReason == Reason::HISTORY)
445  }
446 }
447 
// Adds candidate peers, preferring those that advertise having this
// ledger (hash/seq).
// NOTE(review): the extraction dropped the signature (line 450) and the
// call head (lines 452-453, presumably the base-class peer-selection call
// to which this lambda predicate is passed). Confirm against upstream.
449 void
451 {
454  [this](auto peer) { return peer->hasLedger(mHash, mSeq); });
455 }
456 
// Returns a shared_ptr to this object (PeerSet downcast hook).
// (Signature lines 457-458 dropped by the extraction.)
459 {
460  return shared_from_this();
461 }
462 
// Terminal handler, run once (guarded by mSignaled): logs the outcome,
// hands a completed ledger to the appropriate store depending on mReason,
// then dispatches a job to notify LedgerMaster / log the failure (dispatch
// is required because the PeerSet lock is held here — see comment below).
// NOTE(review): the extraction dropped the signature (line 464), the
// per-Reason store calls inside the switch (lines 487, 490, 493), and the
// JobQueue::addJob call head (line 499). Confirm against upstream.
463 void
465 {
466  if (mSignaled)
467  return;
468
469  mSignaled = true;
470  touch();
471
472  JLOG(m_journal.debug())
473  << "Acquire " << mHash << (mFailed ? " fail " : " ")
474  << ((mTimeouts == 0)
475  ? std::string()
476  : (std::string("timeouts:") + std::to_string(mTimeouts) + " "))
477  << mStats.get();
478
479  assert(mComplete || mFailed);
480
481  if (mComplete && !mFailed && mLedger)
482  {
483  mLedger->setImmutable(app_.config());
484  switch (mReason)
485  {
486  case Reason::SHARD:
488  [[fallthrough]];
489  case Reason::HISTORY:
491  break;
492  default:
494  break;
495  }
496  }
497
498  // We hold the PeerSet lock, so must dispatch
500  jtLEDGER_DATA, "AcquisitionDone", [self = shared_from_this()](Job&) {
501  if (self->mComplete && !self->mFailed)
502  {
503  self->app_.getLedgerMaster().checkAccept(self->getLedger());
504  self->app_.getLedgerMaster().tryAdvance();
505  }
506  else
507  self->app_.getInboundLedgers().logFailure(
508  self->mHash, self->mSeq);
509  });
510 }
511 
// Core request driver: decides what to ask peers for next (header, state
// nodes, or transaction nodes), builds the TMGetLedger / TMGetObjectByHash
// protobuf messages, and sends them. Called with a specific peer after a
// reply, or with no peer to broadcast. Falls through to done() if the
// acquisition became complete/failed during this pass.
// NOTE(review): the extraction dropped the signature (line 515), the body
// of the !mHaveHeader local re-check (line 543), the tail of the aggressive
// by-hash condition (line 560), and the condition guarding the transaction
// branch (line 716, presumably "if (mHaveHeader && !mHaveTransactions &&
// !mFailed)" matching the state branch at line 644). Confirm upstream.
514 void
516 {
517  ScopedLockType sl(mLock);
518
519  if (isDone())
520  {
521  JLOG(m_journal.debug())
522  << "Trigger on ledger: " << mHash << (mComplete ? " completed" : "")
523  << (mFailed ? " failed" : "");
524  return;
525  }
526
527  if (auto stream = m_journal.trace())
528  {
529  if (peer)
530  stream << "Trigger acquiring ledger " << mHash << " from " << peer;
531  else
532  stream << "Trigger acquiring ledger " << mHash;
533
534  if (mComplete || mFailed)
535  stream << "complete=" << mComplete << " failed=" << mFailed;
536  else
537  stream << "header=" << mHaveHeader << " tx=" << mHaveTransactions
538  << " as=" << mHaveState;
539  }
540
541  if (!mHaveHeader)
542  {
544  if (mFailed)
545  {
546  JLOG(m_journal.warn()) << " failed local for " << mHash;
547  return;
548  }
549  }
550
551  protocol::TMGetLedger tmGL;
552  tmGL.set_ledgerhash(mHash.begin(), mHash.size());
553
554  if (mTimeouts != 0)
555  {
556  // Be more aggressive if we've timed out at least once
557  tmGL.set_querytype(protocol::qtINDIRECT);
558
559  if (!mProgress && !mFailed && mByHash &&
561  {
560  auto need = getNeededHashes();
563
564  if (!need.empty())
565  {
// Fall back to fetching specific objects by hash from every peer.
566  protocol::TMGetObjectByHash tmBH;
567  bool typeSet = false;
568  tmBH.set_query(true);
569  tmBH.set_ledgerhash(mHash.begin(), mHash.size());
570  for (auto const& p : need)
571  {
572  JLOG(m_journal.warn()) << "Want: " << p.second;
573
574  if (!typeSet)
575  {
576  tmBH.set_type(p.first);
577  typeSet = true;
578  }
579
// One message carries objects of a single type only.
580  if (p.first == tmBH.type())
581  {
582  protocol::TMIndexedObject* io = tmBH.add_objects();
583  io->set_hash(p.second.begin(), p.second.size());
584  if (mSeq != 0)
585  io->set_ledgerseq(mSeq);
586  }
587  }
588
589  auto packet =
590  std::make_shared<Message>(tmBH, protocol::mtGET_OBJECTS);
591
592  for (auto id : mPeers)
593  {
593  if (auto p = app_.overlay().findPeerByShortID(id))
595  {
// Disable by-hash until the next no-progress timeout.
596  mByHash = false;
597  p->send(packet);
598  }
599  }
600  }
601  else
602  {
603  JLOG(m_journal.info())
604  << "getNeededHashes says acquire is complete";
605  mHaveHeader = true;
606  mHaveTransactions = true;
607  mHaveState = true;
608  mComplete = true;
609  }
610  }
611  }
612
613  // We can't do much without the header data because we don't know the
614  // state or transaction root hashes.
615  if (!mHaveHeader && !mFailed)
616  {
617  tmGL.set_itype(protocol::liBASE);
618  if (mSeq != 0)
619  tmGL.set_ledgerseq(mSeq);
620  JLOG(m_journal.trace()) << "Sending header request to "
621  << (peer ? "selected peer" : "all peers");
622  sendRequest(tmGL, peer);
623  return;
624  }
625
626  if (mLedger)
627  tmGL.set_ledgerseq(mLedger->info().seq);
628
629  if (reason != TriggerReason::reply)
630  {
631  // If we're querying blind, don't query deep
632  tmGL.set_querydepth(0);
633  }
634  else if (peer && peer->isHighLatency())
635  {
636  // If the peer has high latency, query extra deep
637  tmGL.set_querydepth(2);
638  }
639  else
640  tmGL.set_querydepth(1);
641
642  // Get the state data first because it's the most likely to be useful
643  // if we wind up abandoning this fetch.
644  if (mHaveHeader && !mHaveState && !mFailed)
645  {
646  assert(mLedger);
647
648  if (!mLedger->stateMap().isValid())
649  {
650  mFailed = true;
651  }
652  else if (mLedger->stateMap().getHash().isZero())
653  {
654  // we need the root node
655  tmGL.set_itype(protocol::liAS_NODE);
656  *tmGL.add_nodeids() = SHAMapNodeID().getRawString();
657  JLOG(m_journal.trace()) << "Sending AS root request to "
658  << (peer ? "selected peer" : "all peers");
659  sendRequest(tmGL, peer);
660  return;
661  }
662  else
663  {
664  AccountStateSF filter(
665  mLedger->stateMap().family().db(), app_.getLedgerMaster());
666
667  // Release the lock while we process the large state map
668  sl.unlock();
669  auto nodes =
670  mLedger->stateMap().getMissingNodes(missingNodesFind, &filter);
671  sl.lock();
672
673  // Make sure nothing happened while we released the lock
674  if (!mFailed && !mComplete && !mHaveState)
675  {
676  if (nodes.empty())
677  {
678  if (!mLedger->stateMap().isValid())
679  mFailed = true;
680  else
681  {
682  mHaveState = true;
683
684  if (mHaveTransactions)
685  mComplete = true;
686  }
687  }
688  else
689  {
690  filterNodes(nodes, reason);
691
692  if (!nodes.empty())
693  {
694  tmGL.set_itype(protocol::liAS_NODE);
695  for (auto const& id : nodes)
696  {
697  *(tmGL.add_nodeids()) = id.first.getRawString();
698  }
699
700  JLOG(m_journal.trace())
701  << "Sending AS node request (" << nodes.size()
702  << ") to "
703  << (peer ? "selected peer" : "all peers");
704  sendRequest(tmGL, peer);
705  return;
706  }
707  else
708  {
709  JLOG(m_journal.trace()) << "All AS nodes filtered";
710  }
711  }
712  }
713  }
714  }
715
717  {
718  assert(mLedger);
719
720  if (!mLedger->txMap().isValid())
721  {
722  mFailed = true;
723  }
724  else if (mLedger->txMap().getHash().isZero())
725  {
726  // we need the root node
727  tmGL.set_itype(protocol::liTX_NODE);
728  *(tmGL.add_nodeids()) = SHAMapNodeID().getRawString();
729  JLOG(m_journal.trace()) << "Sending TX root request to "
730  << (peer ? "selected peer" : "all peers");
731  sendRequest(tmGL, peer);
732  return;
733  }
734  else
735  {
736  TransactionStateSF filter(
737  mLedger->txMap().family().db(), app_.getLedgerMaster());
738
739  auto nodes =
740  mLedger->txMap().getMissingNodes(missingNodesFind, &filter);
741
742  if (nodes.empty())
743  {
744  if (!mLedger->txMap().isValid())
745  mFailed = true;
746  else
747  {
748  mHaveTransactions = true;
749
750  if (mHaveState)
751  mComplete = true;
752  }
753  }
754  else
755  {
756  filterNodes(nodes, reason);
757
758  if (!nodes.empty())
759  {
760  tmGL.set_itype(protocol::liTX_NODE);
761  for (auto const& n : nodes)
762  {
763  *(tmGL.add_nodeids()) = n.first.getRawString();
764  }
765  JLOG(m_journal.trace())
766  << "Sending TX node request (" << nodes.size()
767  << ") to " << (peer ? "selected peer" : "all peers");
768  sendRequest(tmGL, peer);
769  return;
770  }
771  else
772  {
773  JLOG(m_journal.trace()) << "All TX nodes filtered";
774  }
775  }
776  }
777  }
778
779  if (mComplete || mFailed)
780  {
781  JLOG(m_journal.debug())
782  << "Done:" << (mComplete ? " complete" : "")
783  << (mFailed ? " failed " : " ") << mLedger->info().seq;
784  sl.unlock();
785  done();
786  }
787 }
788 
// Prunes the list of missing nodes before requesting them: recently
// requested duplicates are dropped (unless this is a timeout, when we must
// re-query everything), the list is capped to a limit, and whatever remains
// is remembered in mRecentNodes. The cross-reference index confirms the
// signature: filterNodes(std::vector<std::pair<SHAMapNodeID, uint256>>&,
// TriggerReason).
// NOTE(review): the extraction dropped the first parameter line (791) and
// the limit expression (line 822) — the index names a reqNodes constant
// (source line 68) that this limit presumably derives from; confirm.
789 void
792  TriggerReason reason)
793 {
794  // Sort nodes so that the ones we haven't recently
795  // requested come before the ones we have.
796  auto dup = std::stable_partition(
797  nodes.begin(), nodes.end(), [this](auto const& item) {
798  return mRecentNodes.count(item.second) == 0;
799  });
800
801  // If everything is a duplicate we don't want to send
802  // any query at all except on a timeout where we need
803  // to query everyone:
804  if (dup == nodes.begin())
805  {
806  JLOG(m_journal.trace()) << "filterNodes: all duplicates";
807
808  if (reason != TriggerReason::timeout)
809  {
810  nodes.clear();
811  return;
812  }
813  }
814  else
815  {
816  JLOG(m_journal.trace()) << "filterNodes: pruning duplicates";
817
818  nodes.erase(dup, nodes.end());
819  }
820
821  std::size_t const limit =
823
824  if (nodes.size() > limit)
825  nodes.resize(limit);
826
827  for (auto const& n : nodes)
828  mRecentNodes.insert(n.second);
829 }
830 
834 // data must not have hash prefix
// Accepts raw ledger-header bytes from a peer: builds mLedger, validates
// its hash/sequence against what we are acquiring, stores the header in the
// node store, and primes both SHAMaps for synching. Returns false on bad
// data.
// NOTE(review): the extraction dropped the signature (line 836) and line
// 863 — by the surrounding Serializer code and the HashPrefix include/index
// entry, presumably "s.add32(HashPrefix::ledgerMaster);". Confirm upstream.
835 bool
837 {
838  // Return value: true=normal, false=bad data
839  JLOG(m_journal.trace()) << "got header acquiring ledger " << mHash;
840
841  if (mComplete || mFailed || mHaveHeader)
842  return true;
843
844  auto* f = mReason == Reason::SHARD ? app_.shardFamily() : &app_.family();
845  mLedger = std::make_shared<Ledger>(
846  deserializeHeader(makeSlice(data)), app_.config(), *f);
847  if (mLedger->info().hash != mHash ||
848  (mSeq != 0 && mSeq != mLedger->info().seq))
849  {
850  JLOG(m_journal.warn())
851  << "Acquire hash mismatch: " << mLedger->info().hash
852  << "!=" << mHash;
853  mLedger.reset();
854  return false;
855  }
856  if (mSeq == 0)
857  mSeq = mLedger->info().seq;
858  mLedger->stateMap().setLedgerSeq(mSeq);
859  mLedger->txMap().setLedgerSeq(mSeq);
860  mHaveHeader = true;
861
862  Serializer s(data.size() + 4);
864  s.addRaw(data.data(), data.size());
865  f->db().store(hotLEDGER, std::move(s.modData()), mHash, mSeq);
866
// Empty trees need no fetching at all.
867  if (mLedger->info().txHash.isZero())
868  mHaveTransactions = true;
869
870  if (mLedger->info().accountHash.isZero())
871  mHaveState = true;
872
873  mLedger->txMap().setSynching();
874  mLedger->stateMap().setSynching();
875
876  return true;
877 }
878 
// Feeds received transaction-tree nodes into the tx SHAMap, tracking
// useful/invalid/duplicate counts in `san`. Root nodes and interior nodes
// go through different SHAMap entry points. If the map finishes synching
// and the state map is also complete, the acquisition is done.
// NOTE(review): the function-name line (source 883) was dropped by the
// extraction; the parameter list survives below.
882 bool
884  const std::vector<SHAMapNodeID>& nodeIDs,
885  const std::vector<Blob>& data,
886  SHAMapAddNode& san)
887 {
// Nodes are meaningless until we know the tx root hash from the header.
888  if (!mHaveHeader)
889  {
890  JLOG(m_journal.warn()) << "TX node without header";
891  san.incInvalid();
892  return false;
893  }
894
895  if (mHaveTransactions || mFailed)
896  {
897  san.incDuplicate();
898  return true;
899  }
900
901  auto nodeIDit = nodeIDs.cbegin();
902  auto nodeDatait = data.begin();
903  TransactionStateSF filter(
904  mLedger->txMap().family().db(), app_.getLedgerMaster());
905
// nodeIDs and data are parallel sequences; walk them in lock-step.
906  while (nodeIDit != nodeIDs.cend())
907  {
908  if (nodeIDit->isRoot())
909  {
910  san += mLedger->txMap().addRootNode(
911  SHAMapHash{mLedger->info().txHash},
912  makeSlice(*nodeDatait),
913  snfWIRE,
914  &filter);
915  if (!san.isGood())
916  return false;
917  }
918  else
919  {
920  san += mLedger->txMap().addKnownNode(
921  *nodeIDit, makeSlice(*nodeDatait), &filter);
922  if (!san.isGood())
923  return false;
924  }
925
926  ++nodeIDit;
927  ++nodeDatait;
928  }
929
930  if (!mLedger->txMap().isSynching())
931  {
932  mHaveTransactions = true;
933
934  if (mHaveState)
935  {
936  mComplete = true;
937  done();
938  }
939  }
940
941  return true;
942 }
943 
// Feeds received account-state-tree nodes into the state SHAMap, tracking
// results in `san`. Structure mirrors takeTxNode but locks mLock itself
// and logs failures. Completes the acquisition when both maps are synced.
// NOTE(review): the function-name line (source 948) was dropped by the
// extraction.
947 bool
949  const std::vector<SHAMapNodeID>& nodeIDs,
950  const std::vector<Blob>& data,
951  SHAMapAddNode& san)
952 {
953  JLOG(m_journal.trace())
954  << "got ASdata (" << nodeIDs.size() << ") acquiring ledger " << mHash;
955  if (nodeIDs.size() == 1)
956  {
957  JLOG(m_journal.trace()) << "got AS node: " << nodeIDs.front();
958  }
959
960  ScopedLockType sl(mLock);
961
962  if (!mHaveHeader)
963  {
964  JLOG(m_journal.warn()) << "Don't have ledger header";
965  san.incInvalid();
966  return false;
967  }
968
969  if (mHaveState || mFailed)
970  {
971  san.incDuplicate();
972  return true;
973  }
974
975  auto nodeIDit = nodeIDs.cbegin();
976  auto nodeDatait = data.begin();
977  AccountStateSF filter(
978  mLedger->stateMap().family().db(), app_.getLedgerMaster());
979
// nodeIDs and data are parallel sequences; walk them in lock-step.
980  while (nodeIDit != nodeIDs.cend())
981  {
982  if (nodeIDit->isRoot())
983  {
984  san += mLedger->stateMap().addRootNode(
985  SHAMapHash{mLedger->info().accountHash},
986  makeSlice(*nodeDatait),
987  snfWIRE,
988  &filter);
989  if (!san.isGood())
990  {
991  JLOG(m_journal.warn()) << "Bad ledger header";
992  return false;
993  }
994  }
995  else
996  {
997  san += mLedger->stateMap().addKnownNode(
998  *nodeIDit, makeSlice(*nodeDatait), &filter);
999  if (!san.isGood())
1000  {
1001  JLOG(m_journal.warn()) << "Unable to add AS node";
1002  return false;
1003  }
1004  }
1005
1006  ++nodeIDit;
1007  ++nodeDatait;
1008  }
1009
1010  if (!mLedger->stateMap().isSynching())
1011  {
1012  mHaveState = true;
1013
1014  if (mHaveTransactions)
1015  {
1016  mComplete = true;
1017  done();
1018  }
1019  }
1020
1021  return true;
1022 }
1023 
// Installs the account-state root node (received inline with a header
// reply) into the state map. Returns whether the add was good.
// NOTE(review): the function-name line (source 1028) was dropped by the
// extraction.
1027 bool
1029 {
1030  if (mFailed || mHaveState)
1031  {
1032  san.incDuplicate();
1033  return true;
1034  }
1035
// Callers (processData) only reach here after the header is taken.
1036  if (!mHaveHeader)
1037  {
1038  assert(false);
1039  return false;
1040  }
1041
1042  AccountStateSF filter(
1043  mLedger->stateMap().family().db(), app_.getLedgerMaster());
1044  san += mLedger->stateMap().addRootNode(
1045  SHAMapHash{mLedger->info().accountHash}, data, snfWIRE, &filter);
1046  return san.isGood();
1047 }
1048 
// Installs the transaction-tree root node (received inline with a header
// reply) into the tx map. Mirror of takeAsRootNode.
// NOTE(review): the function-name line (source 1053) was dropped by the
// extraction.
1052 bool
1054 {
1055  if (mFailed || mHaveTransactions)
1056  {
1057  san.incDuplicate();
1058  return true;
1059  }
1060
1061  if (!mHaveHeader)
1062  {
1063  assert(false);
1064  return false;
1065  }
1066
1067  TransactionStateSF filter(
1068  mLedger->txMap().family().db(), app_.getLedgerMaster());
1069  san += mLedger->txMap().addRootNode(
1070  SHAMapHash{mLedger->info().txHash}, data, snfWIRE, &filter);
1071  return san.isGood();
1072 }
1073 
// Builds a typed list of object hashes still needed (for by-hash fetching):
// the ledger itself when the header is missing, otherwise up to 4 state
// node hashes and 4 tx node hashes. The cross-reference index confirms the
// signature: std::vector<neededHash_t> getNeededHashes().
// NOTE(review): the extraction dropped the result-vector declaration
// (source line 1077) and the push_back/make_pair call head at line 1103.
1076 {
1078
1079  if (!mHaveHeader)
1080  {
1081  ret.push_back(
1082  std::make_pair(protocol::TMGetObjectByHash::otLEDGER, mHash));
1083  return ret;
1084  }
1085
1086  if (!mHaveState)
1087  {
1088  AccountStateSF filter(
1089  mLedger->stateMap().family().db(), app_.getLedgerMaster());
1090  for (auto const& h : neededStateHashes(4, &filter))
1091  {
1092  ret.push_back(
1093  std::make_pair(protocol::TMGetObjectByHash::otSTATE_NODE, h));
1094  }
1095  }
1096
1097  if (!mHaveTransactions)
1098  {
1099  TransactionStateSF filter(
1100  mLedger->txMap().family().db(), app_.getLedgerMaster());
1101  for (auto const& h : neededTxHashes(4, &filter))
1102  {
1104  protocol::TMGetObjectByHash::otTRANSACTION_NODE, h));
1105  }
1106  }
1107
1108  return ret;
1109 }
1110 
// Queues a TMLedgerData reply for later processing. Returns true only for
// the caller that should dispatch a processing job (mReceiveDispatched
// de-duplicates concurrent dispatches).
// NOTE(review): the extraction dropped the function-name line (1115), the
// second parameter (line 1117, the shared_ptr to the TMLedgerData packet
// stored into mReceivedData), and the lock acquisition at line 1119.
1114 bool
1116  std::weak_ptr<Peer> peer,
1118 {
1120
1121  if (isDone())
1122  return false;
1123
1124  mReceivedData.emplace_back(peer, data);
1125
1126  if (mReceiveDispatched)
1127  return false;
1128
1129  mReceiveDispatched = true;
1130  return true;
1131 }
1132 
1136 // VFALCO NOTE, it is not necessary to pass the entire Peer,
1137 // we can get away with just a Resource::Consumer endpoint.
1138 //
1139 // TODO Change peer to Consumer
1140 //
// Processes one TMLedgerData packet. For liBASE packets it takes the
// header plus optional inline AS/TX root nodes; for liTX_NODE/liAS_NODE
// packets it decodes the node list and feeds it to takeTxNode/takeAsNode.
// Returns the number of good nodes, or -1 for invalid data (the peer is
// charged a fee). The cross-reference index confirms the signature.
// NOTE(review): the function-name line (source 1142) was dropped by the
// extraction.
1141 int
1143  std::shared_ptr<Peer> peer,
1144  protocol::TMLedgerData& packet)
1145 {
1146  ScopedLockType sl(mLock);
1147
1148  if (packet.type() == protocol::liBASE)
1149  {
1150  if (packet.nodes_size() < 1)
1151  {
1152  JLOG(m_journal.warn()) << "Got empty header data";
1153  peer->charge(Resource::feeInvalidRequest);
1154  return -1;
1155  }
1156
1157  SHAMapAddNode san;
1158
1159  if (!mHaveHeader)
1160  {
1161  if (takeHeader(packet.nodes(0).nodedata()))
1162  san.incUseful();
1163  else
1164  {
1165  JLOG(m_journal.warn()) << "Got invalid header data";
1166  peer->charge(Resource::feeInvalidRequest);
1167  return -1;
1168  }
1169  }
1170
// Header replies may piggy-back the AS root (slot 1) and TX root (slot 2).
1171  if (!mHaveState && (packet.nodes().size() > 1) &&
1172  !takeAsRootNode(makeSlice(packet.nodes(1).nodedata()), san))
1173  {
1174  JLOG(m_journal.warn()) << "Included AS root invalid";
1175  }
1176
1177  if (!mHaveTransactions && (packet.nodes().size() > 2) &&
1178  !takeTxRootNode(makeSlice(packet.nodes(2).nodedata()), san))
1179  {
1180  JLOG(m_journal.warn()) << "Included TX root invalid";
1181  }
1182
1183  if (san.isUseful())
1184  mProgress = true;
1185
1186  mStats += san;
1187  return san.getGood();
1188  }
1189
1190  if ((packet.type() == protocol::liTX_NODE) ||
1191  (packet.type() == protocol::liAS_NODE))
1192  {
1193  if (packet.nodes().size() == 0)
1194  {
1195  JLOG(m_journal.info()) << "Got response with no nodes";
1196  peer->charge(Resource::feeInvalidRequest);
1197  return -1;
1198  }
1199
1200  std::vector<SHAMapNodeID> nodeIDs;
1201  nodeIDs.reserve(packet.nodes().size());
1202  std::vector<Blob> nodeData;
1203  nodeData.reserve(packet.nodes().size());
1204
1205  for (int i = 0; i < packet.nodes().size(); ++i)
1206  {
1207  const protocol::TMLedgerNode& node = packet.nodes(i);
1208
1209  if (!node.has_nodeid() || !node.has_nodedata())
1210  {
1211  JLOG(m_journal.warn()) << "Got bad node";
1212  peer->charge(Resource::feeInvalidRequest);
1213  return -1;
1214  }
1215
1216  nodeIDs.push_back(
1217  SHAMapNodeID(node.nodeid().data(), node.nodeid().size()));
1218  nodeData.push_back(
1219  Blob(node.nodedata().begin(), node.nodedata().end()));
1220  }
1221
1222  SHAMapAddNode san;
1223
1224  if (packet.type() == protocol::liTX_NODE)
1225  {
1226  takeTxNode(nodeIDs, nodeData, san);
1227  JLOG(m_journal.debug()) << "Ledger TX node stats: " << san.get();
1228  }
1229  else
1230  {
1231  takeAsNode(nodeIDs, nodeData, san);
1232  JLOG(m_journal.debug()) << "Ledger AS node stats: " << san.get();
1233  }
1234
1235  if (san.isUseful())
1236  mProgress = true;
1237
1238  mStats += san;
1239  return san.getGood();
1240  }
1241
1242  return -1;
1243 }
1244 
// Drains queued TMLedgerData packets in batches, processing each and
// tracking which peer contributed the most useful nodes; that peer is then
// used to trigger the next targeted request.
// NOTE(review): the extraction dropped the signature (line 1249), the
// declaration of the local `data` batch container (lines 1253-1254), and
// the lock acquisition at line 1260 inside the scoped block. Confirm
// against upstream.
1248 void
1250 {
1251  std::shared_ptr<Peer> chosenPeer;
1252  int chosenPeerCount = -1;
1255
1256  for (;;)
1257  {
1258  data.clear();
1259  {
1261
// An empty queue ends the loop and re-arms gotData's dispatch flag.
1262  if (mReceivedData.empty())
1263  {
1264  mReceiveDispatched = false;
1265  break;
1266  }
1267
1268  data.swap(mReceivedData);
1269  }
1270
1271  // Select the peer that gives us the most nodes that are useful,
1272  // breaking ties in favor of the peer that responded first.
1273  for (auto& entry : data)
1274  {
1275  if (auto peer = entry.first.lock())
1276  {
1277  int count = processData(peer, *(entry.second));
1278  if (count > chosenPeerCount)
1279  {
1280  chosenPeerCount = count;
1281  chosenPeer = std::move(peer);
1282  }
1283  }
1284  }
1285  }
1286
1287  if (chosenPeer)
1288  trigger(chosenPeer, TriggerReason::reply);
1289 }
1290 
// Reports acquisition status as JSON: hash, complete/failed flags, peer
// count, which parts we have, timeout count, and up to 16 still-needed
// state/transaction node hashes. The cross-reference index confirms the
// signature: Json::Value getJson(int).
// NOTE(review): the extraction dropped the signature (lines 1291-1292),
// the `ret` object declaration (line 1294), the `hv` array declarations
// (lines 1321 and 1331 — the index shows Json::arrayValue in use), and the
// condition at line 1329 guarding the transaction-hash section (presumably
// "if (mHaveHeader && !mHaveTransactions)"). Confirm against upstream.
1293 {
1295
1296  ScopedLockType sl(mLock);
1297
1298  ret[jss::hash] = to_string(mHash);
1299
1300  if (mComplete)
1301  ret[jss::complete] = true;
1302
1303  if (mFailed)
1304  ret[jss::failed] = true;
1305
1306  if (!mComplete && !mFailed)
1307  ret[jss::peers] = static_cast<int>(mPeers.size());
1308
1309  ret[jss::have_header] = mHaveHeader;
1310
1311  if (mHaveHeader)
1312  {
1313  ret[jss::have_state] = mHaveState;
1314  ret[jss::have_transactions] = mHaveTransactions;
1315  }
1316
1317  ret[jss::timeouts] = mTimeouts;
1318
1319  if (mHaveHeader && !mHaveState)
1320  {
1322  for (auto const& h : neededStateHashes(16, nullptr))
1323  {
1324  hv.append(to_string(h));
1325  }
1326  ret[jss::needed_state_hashes] = hv;
1327  }
1328
1330  {
1332  for (auto const& h : neededTxHashes(16, nullptr))
1333  {
1334  hv.append(to_string(h));
1335  }
1336  ret[jss::needed_transaction_hashes] = hv;
1337  }
1338
1339  return ret;
1340 }
1341 
1342 } // namespace ripple
ripple::InboundLedger::mRecentNodes
std::set< uint256 > mRecentNodes
Definition: InboundLedger.h:215
beast::Journal::fatal
Stream fatal() const
Definition: Journal.h:339
ripple::Resource::feeInvalidRequest
const Charge feeInvalidRequest
Schedule of fees charged for imposing load on the server.
ripple::Application
Definition: Application.h:94
ripple::SHAMapAddNode
Definition: SHAMapAddNode.h:28
ripple::SHAMapAddNode::get
std::string get() const
Definition: SHAMapAddNode.h:156
ripple::InboundLedger::Reason::HISTORY
@ HISTORY
ripple::InboundLedger::getNeededHashes
std::vector< neededHash_t > getNeededHashes()
Definition: InboundLedger.cpp:1075
ripple::HashPrefix::ledgerMaster
@ ledgerMaster
ledger master data for signing
ripple::InboundLedger::mReason
const Reason mReason
Definition: InboundLedger.h:213
std::unique_lock::lock
T lock(T... args)
ripple::Blob
std::vector< unsigned char > Blob
Storage for linear binary data.
Definition: Blob.h:30
ripple::InboundLedger::getJson
Json::Value getJson(int)
Return a Json::objectValue.
Definition: InboundLedger.cpp:1292
ripple::makeSlice
std::enable_if_t< std::is_same< T, char >::value||std::is_same< T, unsigned char >::value, Slice > makeSlice(std::array< T, N > const &a)
Definition: Slice.h:194
ripple::InboundLedger::TriggerReason
TriggerReason
Definition: InboundLedger.h:130
ripple::reqNodes
@ reqNodes
Definition: InboundLedger.cpp:68
std::string
STL class.
ripple::InboundLedger::Reason::CONSENSUS
@ CONSENSUS
std::shared_ptr
STL class.
ripple::InboundLedger::mHaveState
bool mHaveState
Definition: InboundLedger.h:208
ripple::LedgerInfo::parentHash
uint256 parentHash
Definition: ReadView.h:99
ripple::PeerSet::mProgress
bool mProgress
Whether forward progress has been made.
Definition: PeerSet.h:117
beast::Journal::trace
Stream trace() const
Severity stream access functions.
Definition: Journal.h:309
ripple::PeerSet::isDone
bool isDone() const
Definition: PeerSet.h:84
ripple::Serializer::modData
Blob & modData()
Definition: Serializer.h:176
ripple::Slice
An immutable linear range of bytes.
Definition: Slice.h:43
Json::arrayValue
@ arrayValue
array value (ordered list)
Definition: json_value.h:42
ripple::InboundLedger::mSignaled
bool mSignaled
Definition: InboundLedger.h:210
std::pair
std::vector::reserve
T reserve(T... args)
ripple::ledgerAcquireTimeout
constexpr auto ledgerAcquireTimeout
Definition: InboundLedger.cpp:72
std::vector
STL class.
std::vector::size
T size(T... args)
ripple::InboundLedger::update
void update(std::uint32_t seq)
Definition: InboundLedger.cpp:181
ripple::InboundLedger::touch
void touch()
Definition: InboundLedger.h:118
ripple::InboundLedger::InboundLedger
InboundLedger(Application &app, uint256 const &hash, std::uint32_t seq, Reason reason, clock_type &)
Definition: InboundLedger.cpp:74
ripple::PeerSet::mPeers
std::set< Peer::id_t > mPeers
The identifiers of the peers we are tracking.
Definition: PeerSet.h:120
std::chrono::duration
ripple::peerCountStart
@ peerCountStart
Definition: InboundLedger.cpp:44
ripple::InboundLedger::~InboundLedger
~InboundLedger()
Definition: InboundLedger.cpp:214
ripple::InboundLedger::mByHash
bool mByHash
Definition: InboundLedger.h:211
ripple::InboundLedger::filterNodes
void filterNodes(std::vector< std::pair< SHAMapNodeID, uint256 >> &nodes, TriggerReason reason)
Definition: InboundLedger.cpp:790
beast::Journal::warn
Stream warn() const
Definition: Journal.h:327
ripple::InboundLedger::processData
int processData(std::shared_ptr< Peer > peer, protocol::TMLedgerData &data)
Process one TMLedgerData Returns the number of useful nodes.
Definition: InboundLedger.cpp:1142
std::lock_guard
STL class.
ripple::Application::getShardStore
virtual NodeStore::DatabaseShard * getShardStore()=0
ripple::PeerSet::mComplete
bool mComplete
Definition: PeerSet.h:114
ripple::InboundLedger::queueJob
void queueJob() override
Queue a job to call invokeOnTimer().
Definition: InboundLedger.cpp:165
ripple::InboundLedger::neededStateHashes
std::vector< uint256 > neededStateHashes(int max, SHAMapSyncFilter *filter) const
Definition: InboundLedger.cpp:251
ripple::AccountStateSF
Definition: AccountStateSF.h:31
ripple::JobQueue::addJob
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition: JobQueue.h:166
ripple::LedgerInfo::seq
LedgerIndex seq
Definition: ReadView.h:88
ripple::to_string
std::string to_string(ListDisposition disposition)
Definition: ValidatorList.cpp:41
ripple::SHAMapNodeID
Definition: SHAMapNodeID.h:33
ripple::PeerSet::addPeers
void addPeers(std::size_t limit, std::function< bool(std::shared_ptr< Peer > const &)> score)
Add at most limit peers to this set from the overlay.
Definition: PeerSet.cpp:50
ripple::PeerSet::mHash
const uint256 mHash
The hash of the object (in practice, always a ledger) we are trying to fetch.
Definition: PeerSet.h:112
ripple::deserializeHeader
LedgerInfo deserializeHeader(Slice data)
Deserialize a ledger header from a byte array.
Definition: InboundLedger.cpp:267
std::vector::front
T front(T... args)
ripple::Family::db
virtual NodeStore::Database & db()=0
ripple::LedgerInfo::txHash
uint256 txHash
Definition: ReadView.h:97
ripple::SHAMapHash
Definition: SHAMapTreeNode.h:43
algorithm
ripple::jtLEDGER_DATA
@ jtLEDGER_DATA
Definition: Job.h:47
ripple::Application::getInboundLedgers
virtual InboundLedgers & getInboundLedgers()=0
ripple::base_uint::size
constexpr static std::size_t size()
Definition: base_uint.h:462
ripple::PeerSet::sendRequest
void sendRequest(const protocol::TMGetLedger &message, std::shared_ptr< Peer > const &peer)
Send a GetLedger message to one or all peers.
Definition: PeerSet.cpp:127
std::unique_lock::unlock
T unlock(T... args)
ripple::deserializePrefixedHeader
LedgerInfo deserializePrefixedHeader(Slice data)
Deserialize a ledger header (prefixed with 4 bytes) from a byte array.
Definition: InboundLedger.cpp:288
ripple::InboundLedger::neededTxHashes
std::vector< uint256 > neededTxHashes(int max, SHAMapSyncFilter *filter) const
Definition: InboundLedger.cpp:235
std::vector::push_back
T push_back(T... args)
ripple::LedgerInfo::closeTime
NetClock::time_point closeTime
Definition: ReadView.h:119
ripple::base_uint< 256 >
ripple::InboundLedger::takeHeader
bool takeHeader(std::string const &data)
Take ledger header data Call with a lock.
Definition: InboundLedger.cpp:836
ripple::JobQueue::getJobCountTotal
int getJobCountTotal(JobType t) const
Jobs waiting plus running at this priority.
Definition: JobQueue.cpp:131
ripple::reqNodesReply
@ reqNodesReply
Definition: InboundLedger.cpp:64
ripple::InboundLedger::gotData
bool gotData(std::weak_ptr< Peer >, std::shared_ptr< protocol::TMLedgerData > const &)
Stash a TMLedgerData received from a peer for later processing Returns 'true' if we need to dispatch.
Definition: InboundLedger.cpp:1115
ripple::NodeStore::Database::store
virtual void store(NodeObjectType type, Blob &&data, uint256 const &hash, std::uint32_t seq)=0
Store the object.
ripple::InboundLedger::takeTxNode
bool takeTxNode(const std::vector< SHAMapNodeID > &IDs, const std::vector< Blob > &data, SHAMapAddNode &)
Process TX data received from a peer Call with a lock.
Definition: InboundLedger.cpp:883
Json::Value::append
Value & append(const Value &value)
Append value to array at the end.
Definition: json_value.cpp:882
ripple::InboundLedger::mLedger
std::shared_ptr< Ledger > mLedger
Definition: InboundLedger.h:206
ripple::Application::shardFamily
virtual Family * shardFamily()=0
std::enable_shared_from_this< InboundLedger >::shared_from_this
T shared_from_this(T... args)
ripple::SerialIter::get8
unsigned char get8()
Definition: Serializer.cpp:354
ripple::Application::family
virtual Family & family()=0
ripple::SHAMapAddNode::isUseful
bool isUseful() const
Definition: SHAMapAddNode.h:116
ripple::InboundLedger::getPeerCount
std::size_t getPeerCount() const
Definition: InboundLedger.cpp:157
Json::objectValue
@ objectValue
object value (collection of name/value pairs).
Definition: json_value.h:43
ripple::SerialIter::get256
uint256 get256()
Definition: Serializer.h:374
ripple::SerialIter::get64
std::uint64_t get64()
Definition: Serializer.cpp:391
ripple::InboundLedger::addPeers
void addPeers()
Add more peers to the set, if possible.
Definition: InboundLedger.cpp:450
ripple::peerCountAdd
@ peerCountAdd
Definition: InboundLedger.cpp:48
ripple::Application::getLedgerMaster
virtual LedgerMaster & getLedgerMaster()=0
ripple::InboundLedgers::gotStaleData
virtual void gotStaleData(std::shared_ptr< protocol::TMLedgerData > packet)=0
ripple::LedgerMaster::getFetchPack
boost::optional< Blob > getFetchPack(uint256 const &hash) override
Retrieves partial ledger data of the coresponding hash from peers.
Definition: LedgerMaster.cpp:1976
ripple::NodeStore::Database::fetch
virtual std::shared_ptr< NodeObject > fetch(uint256 const &hash, std::uint32_t seq)=0
Fetch an object.
ripple::Application::config
virtual Config & config()=0
ripple::InboundLedgers::onLedgerFetched
virtual void onLedgerFetched()=0
Called when a complete ledger is obtained.
ripple::SHAMapAddNode::isGood
bool isGood() const
Definition: SHAMapAddNode.h:132
std::unique_lock
STL class.
ripple::SHAMapNodeID::getRawString
std::string getRawString() const
Definition: SHAMapNodeID.cpp:92
ripple::Serializer::addRaw
int addRaw(Blob const &vector)
Definition: Serializer.cpp:100
ripple::LedgerInfo::closeFlags
int closeFlags
Definition: ReadView.h:110
std::to_string
T to_string(T... args)
ripple::Application::getJobQueue
virtual JobQueue & getJobQueue()=0
ripple::InboundLedger::takeTxRootNode
bool takeTxRootNode(Slice const &data, SHAMapAddNode &)
Process AS root node received from a peer Call with a lock.
Definition: InboundLedger.cpp:1053
beast::Journal::error
Stream error() const
Definition: Journal.h:333
beast::Journal::info
Stream info() const
Definition: Journal.h:321
std::chrono::time_point
ripple::Family
Definition: Family.h:32
ripple::Job
Definition: Job.h:82
ripple::SerialIter
Definition: Serializer.h:308
ripple::InboundLedger::pmDowncast
std::weak_ptr< PeerSet > pmDowncast() override
Return a weak pointer to this.
Definition: InboundLedger.cpp:458
std::uint32_t
ripple::missingNodesFind
@ missingNodesFind
Definition: InboundLedger.cpp:60
ripple::InboundLedger::mReceiveDispatched
bool mReceiveDispatched
Definition: InboundLedger.h:222
ripple::InboundLedger::mHaveHeader
bool mHaveHeader
Definition: InboundLedger.h:207
beast::abstract_clock< std::chrono::steady_clock >
ripple::LedgerMaster::checkAccept
void checkAccept(std::shared_ptr< Ledger const > const &ledger)
Definition: LedgerMaster.cpp:998
ripple::LedgerInfo::drops
XRPAmount drops
Definition: ReadView.h:101
std::weak_ptr
STL class.
ripple::Serializer
Definition: Serializer.h:39
ripple::InboundLedger::TriggerReason::timeout
@ timeout
ripple::InboundLedger::TriggerReason::reply
@ reply
ripple
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: RCLCensorshipDetector.h:29
ripple::InboundLedger::mStats
SHAMapAddNode mStats
Definition: InboundLedger.h:217
ripple::base_uint::begin
iterator begin()
Definition: base_uint.h:114
ripple::PeerSet::app_
Application & app_
Definition: PeerSet.h:105
std::vector::cbegin
T cbegin(T... args)
ripple::InboundLedger::mSeq
std::uint32_t mSeq
Definition: InboundLedger.h:212
ripple::InboundLedger::takeAsRootNode
bool takeAsRootNode(Slice const &data, SHAMapAddNode &)
Process AS root node received from a peer Call with a lock.
Definition: InboundLedger.cpp:1028
ripple::LedgerInfo::closeTimeResolution
NetClock::duration closeTimeResolution
Definition: ReadView.h:113
ripple::PeerSet
Supports data retrieval by managing a set of peers.
Definition: PeerSet.h:48
ripple::PeerSet::mFailed
bool mFailed
Definition: PeerSet.h:115
ripple::snfWIRE
@ snfWIRE
Definition: SHAMapTreeNode.h:37
ripple::NodeStore::DatabaseShard::setStored
virtual void setStored(std::shared_ptr< Ledger const > const &ledger)=0
Notifies the database that the given ledger has been fully acquired and stored.
ripple::Application::overlay
virtual Overlay & overlay()=0
std::count_if
T count_if(T... args)
ripple::InboundLedger::done
void done()
Definition: InboundLedger.cpp:464
ripple::InboundLedger::takeAsNode
bool takeAsNode(const std::vector< SHAMapNodeID > &IDs, const std::vector< Blob > &data, SHAMapAddNode &)
Process AS data received from a peer Call with a lock.
Definition: InboundLedger.cpp:948
ripple::InboundLedger::trigger
void trigger(std::shared_ptr< Peer > const &, TriggerReason)
Request more nodes, perhaps from a specific peer.
Definition: InboundLedger.cpp:515
ripple::PeerSet::m_journal
beast::Journal m_journal
Definition: PeerSet.h:106
beast::Journal::debug
Stream debug() const
Definition: Journal.h:315
ripple::SHAMapAddNode::incInvalid
void incInvalid()
Definition: SHAMapAddNode.h:80
std::size_t
ripple::SHAMapAddNode::incUseful
void incUseful()
Definition: SHAMapAddNode.h:86
ripple::hotLEDGER
@ hotLEDGER
Definition: NodeObject.h:34
std::make_pair
T make_pair(T... args)
ripple::Serializer::add32
int add32(std::uint32_t i)
Definition: Serializer.cpp:38
ripple::LedgerInfo
Information about the notional ledger backing the view.
Definition: ReadView.h:80
ripple::SHAMapAddNode::getGood
int getGood() const
Definition: SHAMapAddNode.h:104
ripple::LedgerMaster::storeLedger
bool storeLedger(std::shared_ptr< Ledger const > ledger)
Definition: LedgerMaster.cpp:525
std::vector::cend
T cend(T... args)
ripple::InboundLedger::Reason
Reason
Definition: InboundLedger.h:51
ripple::SHAMapAddNode::incDuplicate
void incDuplicate()
Definition: SHAMapAddNode.h:92
ripple::InboundLedger::runData
void runData()
Process pending TMLedgerData Query the 'best' peer.
Definition: InboundLedger.cpp:1249
ripple::PeerSet::mTimeouts
int mTimeouts
Definition: PeerSet.h:113
ripple::SerialIter::get32
std::uint32_t get32()
Definition: Serializer.cpp:378
ripple::InboundLedger::mReceivedData
std::vector< PeerDataPairType > mReceivedData
Definition: InboundLedger.h:221
ripple::ledgerBecomeAggressiveThreshold
@ ledgerBecomeAggressiveThreshold
Definition: InboundLedger.cpp:56
ripple::SHAMapSyncFilter
Definition: SHAMapSyncFilter.h:30
std::stable_partition
T stable_partition(T... args)
ripple::InboundLedger::Reason::SHARD
@ SHARD
ripple::InboundLedger::onTimer
void onTimer(bool progress, ScopedLockType &peerSetLock) override
Called with a lock by the PeerSet when the timer expires.
Definition: InboundLedger.cpp:399
ripple::InboundLedger::mReceivedDataLock
std::mutex mReceivedDataLock
Definition: InboundLedger.h:220
ripple::InboundLedger::checkLocal
bool checkLocal()
Definition: InboundLedger.cpp:194
ripple::TransactionStateSF
Definition: TransactionStateSF.h:31
ripple::ledgerTimeoutRetriesMax
@ ledgerTimeoutRetriesMax
Definition: InboundLedger.cpp:52
ripple::PeerSet::setTimer
void setTimer()
Schedule a call to queueJob() after mTimerInterval.
Definition: PeerSet.cpp:87
ripple::InboundLedger::mHaveTransactions
bool mHaveTransactions
Definition: InboundLedger.h:209
ripple::Overlay::findPeerByShortID
virtual std::shared_ptr< Peer > findPeerByShortID(Peer::id_t const &id)=0
Returns the peer with the matching short id, or null.
ripple::InboundLedger::init
void init(ScopedLockType &collectionLock)
Definition: InboundLedger.cpp:96
ripple::LedgerInfo::accountHash
uint256 accountHash
Definition: ReadView.h:98
ripple::InboundLedger::tryDB
void tryDB(Family &f)
Definition: InboundLedger.cpp:296
ripple::PeerSet::mLock
std::recursive_mutex mLock
Definition: PeerSet.h:108
Json::Value
Represents a JSON value.
Definition: json_value.h:145
ripple::LedgerInfo::parentCloseTime
NetClock::time_point parentCloseTime
Definition: ReadView.h:89