// LedgerMaster.cpp (web-viewer chrome removed; this is rippled source)
1#include <xrpld/app/consensus/RCLValidations.h>
2#include <xrpld/app/ledger/Ledger.h>
3#include <xrpld/app/ledger/LedgerMaster.h>
4#include <xrpld/app/ledger/LedgerReplayer.h>
5#include <xrpld/app/ledger/OpenLedger.h>
6#include <xrpld/app/ledger/PendingSaves.h>
7#include <xrpld/app/main/Application.h>
8#include <xrpld/app/misc/SHAMapStore.h>
9#include <xrpld/app/misc/Transaction.h>
10#include <xrpld/app/misc/TxQ.h>
11#include <xrpld/app/misc/ValidatorList.h>
12#include <xrpld/app/paths/PathRequests.h>
13#include <xrpld/core/TimeKeeper.h>
14#include <xrpld/overlay/Overlay.h>
15#include <xrpld/overlay/Peer.h>
16
17#include <xrpl/basics/Log.h>
18#include <xrpl/basics/MathUtilities.h>
19#include <xrpl/basics/UptimeClock.h>
20#include <xrpl/basics/contract.h>
21#include <xrpl/basics/safe_cast.h>
22#include <xrpl/basics/scope.h>
23#include <xrpl/beast/utility/instrumentation.h>
24#include <xrpl/ledger/AmendmentTable.h>
25#include <xrpl/ledger/OrderBookDB.h>
26#include <xrpl/protocol/BuildInfo.h>
27#include <xrpl/protocol/HashPrefix.h>
28#include <xrpl/protocol/digest.h>
29#include <xrpl/rdb/RelationalDatabase.h>
30#include <xrpl/resource/Fees.h>
31#include <xrpl/server/LoadFeeTrack.h>
32#include <xrpl/server/NetworkOPs.h>
33
34#include <algorithm>
35#include <chrono>
36#include <cstdlib>
37#include <memory>
38#include <vector>
39
40namespace xrpl {
41
// Don't catch up more than 100 ledgers (cannot exceed 256)
static constexpr int MAX_LEDGER_GAP{100};

// Don't acquire history if ledger is too old
// NOTE(review): the constant that belonged to this comment was lost in
// extraction; restored here as a one-minute cutoff -- confirm against
// upstream before relying on the value.
static constexpr std::chrono::minutes MAX_LEDGER_AGE_ACQUIRE{1};

// Don't acquire history if write load is too high
static constexpr int MAX_WRITE_LOAD_ACQUIRE{8192};
50
51// Helper function for LedgerMaster::doAdvance()
52// Return true if candidateLedger should be fetched from the network.
53static bool
55 std::uint32_t const currentLedger,
56 std::uint32_t const ledgerHistory,
57 std::optional<LedgerIndex> const minimumOnline,
58 std::uint32_t const candidateLedger,
60{
61 bool const ret = [&]() {
62 // Fetch ledger if it may be the current ledger
63 if (candidateLedger >= currentLedger)
64 return true;
65
66 // Or if it is within our configured history range:
67 if (currentLedger - candidateLedger <= ledgerHistory)
68 return true;
69
70 // Or if greater than or equal to a specific minimum ledger.
71 // Do nothing if the minimum ledger to keep online is unknown.
72 return minimumOnline.has_value() && candidateLedger >= *minimumOnline;
73 }();
74
75 JLOG(j.trace()) << "Missing ledger " << candidateLedger << (ret ? " should" : " should NOT")
76 << " be acquired";
77 return ret;
78}
79
// NOTE(review): the constructor's opening line(s) and one argument of the
// fetch-pack cache constructor were lost in extraction; only the visible
// remainder is reproduced below.
    Application& app,
    beast::insight::Collector::ptr const& collector,
    beast::Journal journal)
    : app_(app)
    , m_journal(journal)
    , mLedgerHistory(collector, app)
    , standalone_(app_.config().standalone())
    // Fetch depth is clamped by the SHAMap store so we never advertise more
    // history than the backend is configured to keep.
    , fetch_depth_(app_.getSHAMapStore().clampFetchDepth(app_.config().FETCH_DEPTH))
    , ledger_history_(app_.config().LEDGER_HISTORY)
    , ledger_fetch_size_(app_.config().getValueFor(SizedItem::ledgerFetch))
    , fetch_packs_(
          "FetchPack",
          65536,
          std::chrono::seconds{45},
          app_.journal("TaggedCache"))
    , m_stats(std::bind(&LedgerMaster::collect_metrics, this), collector)
{
}
101
// NOTE(review): the return type and name of this accessor were lost in
// extraction; the body returns the sequence of the current open ledger.
{
    return app_.openLedger().current()->header().seq;
}
107
113
bool
// NOTE(review): the signature line was lost in extraction. The visible body
// checks a proposed view (`view`, logged via stream `s` with `reason`) for
// compatibility with our validated state.
{
    auto validLedger = getValidatedLedger();

    // Incompatible with the fully-validated ledger itself?
    if (validLedger && !areCompatible(*validLedger, view, s, reason))
    {
        return false;
    }

    {
        // NOTE(review): a line (presumably a mutex lock guarding
        // mLastValidLedger) was lost in extraction here.

        // Also check against the best hash/seq pair we have seen enough
        // validations for (second == 0 means "none recorded yet").
        if ((mLastValidLedger.second != 0) &&
            !areCompatible(mLastValidLedger.first, mLastValidLedger.second, view, s, reason))
        {
            return false;
        }
    }

    return true;
}
136
// NOTE(review): the signature (apparently returning std::chrono::seconds)
// and the line defining `pubClose` (presumably loading mPubLedgerClose)
// were lost in extraction.
{
    using namespace std::chrono_literals;
    // A zero close time means no ledger has ever been published; report a
    // huge age so callers treat us as far behind.
    if (pubClose == 0s)
    {
        JLOG(m_journal.debug()) << "No published ledger";
        return weeks{2};
    }

    // Age = current network close time minus the published ledger's close
    // time, clamped at zero.
    std::chrono::seconds ret = app_.timeKeeper().closeTime().time_since_epoch();
    ret -= pubClose;
    ret = (ret > 0s) ? ret : 0s;
    // Function-local static used only to suppress duplicate trace logging.
    static std::chrono::seconds lastRet = -1s;

    if (ret != lastRet)
    {
        JLOG(m_journal.trace()) << "Published ledger age is " << ret.count();
        lastRet = ret;
    }
    return ret;
}
160
// NOTE(review): the signature (apparently returning std::chrono::seconds)
// and the line defining `valClose` (presumably loading mValidLedgerSign)
// were lost in extraction.
{
    using namespace std::chrono_literals;

    // A zero sign time means no ledger has ever been validated; report a
    // huge age so callers treat us as far behind.
    if (valClose == 0s)
    {
        JLOG(m_journal.debug()) << "No validated ledger";
        return weeks{2};
    }

    // Age = current network close time minus the validated ledger's sign
    // time, clamped at zero.
    std::chrono::seconds ret = app_.timeKeeper().closeTime().time_since_epoch();
    ret -= valClose;
    ret = (ret > 0s) ? ret : 0s;
    // Function-local static used only to suppress duplicate trace logging.
    static std::chrono::seconds lastRet = -1s;

    if (ret != lastRet)
    {
        JLOG(m_journal.trace()) << "Validated ledger age is " << ret.count();
        lastRet = ret;
    }
    return ret;
}
185
bool
// NOTE(review): the signature line (taking the out-param `reason`) and the
// line defining `pubClose` (presumably loading mPubLedgerClose) were lost
// in extraction.
{
    using namespace std::chrono_literals;

    // Too long since we last published anything.
    if (getPublishedLedgerAge() > 3min)
    {
        reason = "No recently-published ledger";
        return false;
    }
    std::uint32_t validClose = mValidLedgerSign.load();
    if (!validClose || !pubClose)
    {
        reason = "No published ledger";
        return false;
    }
    // Allow the published ledger to trail the validated ledger by at most
    // 90 seconds before declaring ourselves not caught up.
    if (validClose > (pubClose + 90))
    {
        reason = "Published ledger lags validated ledger";
        return false;
    }
    return true;
}
210
void
// NOTE(review): the signature line (taking the validated ledger `l`) and
// the declaration of `times` (a container of validation sign times) were
// lost in extraction. The function name is confirmed by the assertion
// message below.
{
    std::optional<uint256> consensusHash;

    if (!standalone_)
    {
        // Collect sign times from trusted (negative-UNL filtered)
        // validations of this ledger.
        auto validations = app_.validators().negativeUNLFilter(
            app_.getValidations().getTrustedForLedger(l->header().hash, l->header().seq));
        times.reserve(validations.size());
        for (auto const& val : validations)
            times.push_back(val->getSignTime());

        if (!validations.empty())
            consensusHash = validations.front()->getConsensusHash();
    }

    NetClock::time_point signTime;

    if (!times.empty() && times.size() >= app_.validators().quorum())
    {
        // Calculate the sample median
        std::sort(times.begin(), times.end());
        auto const t0 = times[(times.size() - 1) / 2];
        auto const t1 = times[times.size() / 2];
        signTime = t0 + (t1 - t0) / 2;
    }
    else
    {
        // Without a quorum of sign times, fall back to the ledger's own
        // close time.
        signTime = l->header().closeTime;
    }

    mValidLedger.set(l);
    mValidLedgerSign = signTime.time_since_epoch().count();
    XRPL_ASSERT(
        // NOTE(review): the assertion's condition lines were lost in
        // extraction.
        "xrpl::LedgerMaster::setValidLedger : valid ledger sequence");
    mValidLedgerSeq = l->header().seq;

    // NOTE(review): one or more lines were lost in extraction between the
    // sequence update and the history update below.
    mLedgerHistory.validatedLedger(l, consensusHash);
    if (!app_.getOPs().isBlocked())
    {
        // NOTE(review): the condition (presumably the amendment-blocked
        // check) was lost in extraction.
        {
            JLOG(m_journal.error()) << "One or more unsupported amendments "
                                       "activated: server blocked.";
        }
        else if (!app_.getOPs().isAmendmentWarned() || l->isFlagLedger())
        {
            // Amendments can lose majority, so re-check periodically (every
            // flag ledger), and clear the flag if appropriate. If an unknown
            // amendment gains majority log a warning as soon as it's
            // discovered, then again every flag ledger until the operator
            // upgrades, the amendment loses majority, or the amendment goes
            // live and the node gets blocked. Unlike being amendment blocked,
            // this message may be logged more than once per session, because
            // the node will otherwise function normally, and this gives
            // operators an opportunity to see and resolve the warning.
            if (auto const first = app_.getAmendmentTable().firstUnsupportedExpected())
            {
                JLOG(m_journal.error()) << "One or more unsupported amendments "
                                           "reached majority. Upgrade before "
                                        << to_string(*first)
                                        << " to prevent your server from "
                                           "becoming amendment blocked.";
            }
            else
                // NOTE(review): the else branch's statement was lost in
                // extraction.
        }
    }
}
290
void
// NOTE(review): the signature line (taking the ledger `l` being published)
// was lost in extraction. Visible callers invoke this as setPubLedger().
{
    // Record the newly published ledger plus cached copies of its close
    // time and sequence for cheap access.
    mPubLedger = l;
    mPubLedgerClose = l->header().closeTime.time_since_epoch().count();
    mPubLedgerSeq = l->header().seq;
}
298
void
// NOTE(review): the signature line (taking the held `transaction`) and a
// lock line were lost in extraction.
{
    // Queue the transaction to be re-applied to a later open ledger.
    mHeldTransactions.insert(transaction->getSTransaction());
}
305
306// Validate a ledger's close time and sequence number if we're considering
307// jumping to that ledger. This helps defend against some rare hostile or
308// diverged majority scenarios.
309bool
311{
312 XRPL_ASSERT(ledger, "xrpl::LedgerMaster::canBeCurrent : non-null input");
313
314 // Never jump to a candidate ledger that precedes our
315 // last validated ledger
316
317 auto validLedger = getValidatedLedger();
318 if (validLedger && (ledger->header().seq < validLedger->header().seq))
319 {
320 JLOG(m_journal.trace()) << "Candidate for current ledger has low seq "
321 << ledger->header().seq << " < " << validLedger->header().seq;
322 return false;
323 }
324
325 // Ensure this ledger's parent close time is within five minutes of
326 // our current time. If we already have a known fully-valid ledger
327 // we perform this check. Otherwise, we only do it if we've built a
328 // few ledgers as our clock can be off when we first start up
329
330 auto closeTime = app_.timeKeeper().closeTime();
331 auto ledgerClose = ledger->header().parentCloseTime;
332
333 using namespace std::chrono_literals;
334 if ((validLedger || (ledger->header().seq > 10)) &&
335 ((std::max(closeTime, ledgerClose) - std::min(closeTime, ledgerClose)) > 5min))
336 {
337 JLOG(m_journal.warn()) << "Candidate for current ledger has close time "
338 << to_string(ledgerClose) << " at network time "
339 << to_string(closeTime) << " seq " << ledger->header().seq;
340 return false;
341 }
342
343 if (validLedger)
344 {
345 // Sequence number must not be too high. We allow ten ledgers
346 // for time inaccuracies plus a maximum run rate of one ledger
347 // every two seconds. The goal is to prevent a malicious ledger
348 // from increasing our sequence unreasonably high
349
350 LedgerIndex maxSeq = validLedger->header().seq + 10;
351
352 if (closeTime > validLedger->header().parentCloseTime)
353 maxSeq += std::chrono::duration_cast<std::chrono::seconds>(
354 closeTime - validLedger->header().parentCloseTime)
355 .count() /
356 2;
357
358 if (ledger->header().seq > maxSeq)
359 {
360 JLOG(m_journal.warn()) << "Candidate for current ledger has high seq "
361 << ledger->header().seq << " > " << maxSeq;
362 return false;
363 }
364
365 JLOG(m_journal.trace()) << "Acceptable seq range: " << validLedger->header().seq
366 << " <= " << ledger->header().seq << " <= " << maxSeq;
367 }
368
369 return true;
370}
371
372void
374{
375 XRPL_ASSERT(lastClosed, "xrpl::LedgerMaster::switchLCL : non-null input");
376 if (!lastClosed->isImmutable())
377 LogicError("mutable ledger in switchLCL");
378
379 if (lastClosed->open())
380 LogicError("The new last closed ledger is open!");
381
382 {
384 mClosedLedger.set(lastClosed);
385 }
386
387 if (standalone_)
388 {
389 setFullLedger(lastClosed, true, false);
390 tryAdvance();
391 }
392 else
393 {
394 checkAccept(lastClosed);
395 }
396}
397
398bool
399LedgerMaster::fixIndex(LedgerIndex ledgerIndex, LedgerHash const& ledgerHash)
400{
401 return mLedgerHistory.fixIndex(ledgerIndex, ledgerHash);
402}
403
bool
// NOTE(review): the signature line (taking the ledger to store; it is moved
// below, so presumably by value) was lost in extraction.
{
    bool validated = ledger->header().validated;
    // Returns true if we already had the ledger
    return mLedgerHistory.insert(std::move(ledger), validated);
}
411
void
// NOTE(review): the function header and several interior lines (including
// the code that drains mHeldTransactions into `set` and the statement that
// applies the non-empty batch) were lost in extraction.
{
    CanonicalTXSet const set = [this]() {
        // VFALCO NOTE The hash for an open ledger is undefined so we use
        // something that is a reasonable substitute.
        CanonicalTXSet set(app_.openLedger().current()->header().parentHash);
        return set;
    }();

    // NOTE(review): the statement consuming `set` was lost in extraction.
    if (!set.empty())
}
432
440
441void
446
bool
// NOTE(review): the signature line (taking the sequence `seq`) and a lock
// line were lost in extraction. Visible callers invoke this as
// haveLedger(seq).
{
    // mCompleteLedgers is an interval set of ledger sequences we hold in
    // full.
    return boost::icl::contains(mCompleteLedgers, seq);
}
453
454void
460
bool
// NOTE(review): the signature line (taking the ledger under test, used
// below as `ledger`) was lost in extraction. The function name is
// confirmed by the assertion message below.
{
    // An open ledger can never be validated.
    if (ledger.open())
        return false;

    // Cached result from a previous check.
    if (ledger.header().validated)
        return true;

    auto const seq = ledger.header().seq;
    try
    {
        // Use the skip list in the last validated ledger to see if ledger
        // comes before the last validated ledger (and thus has been
        // validated).
        auto const hash = walkHashBySeq(seq, InboundLedger::Reason::GENERIC);

        if (!hash || ledger.header().hash != *hash)
        {
            // This ledger's hash is not the hash of the validated ledger
            if (hash)
            {
                XRPL_ASSERT(hash->isNonZero(), "xrpl::LedgerMaster::isValidated : nonzero hash");
                // NOTE(review): the line defining `valHash` (presumably a
                // database hash lookup for `seq`) was lost in extraction.
                if (valHash == ledger.header().hash)
                {
                    // SQL database doesn't match ledger chain
                    clearLedger(seq);
                }
            }
            return false;
        }
    }
    catch (SHAMapMissingNode const& mn)
    {
        JLOG(m_journal.warn()) << "Ledger #" << seq << ": " << mn.what();
        return false;
    }

    // Mark ledger as validated to save time if we see it again.
    ledger.header().validated = true;
    return true;
}
504
505// returns Ledgers we have all the nodes for
506bool
508{
509 // Validated ledger is likely not stored in the DB yet so we use the
510 // published ledger which is.
511 maxVal = mPubLedgerSeq.load();
512
513 if (!maxVal)
514 return false;
515
517 {
519 maybeMin = prevMissing(mCompleteLedgers, maxVal);
520 }
521
522 if (maybeMin == std::nullopt)
523 minVal = maxVal;
524 else
525 minVal = 1 + *maybeMin;
526
527 return true;
528}
529
// Returns Ledgers we have all the nodes for and are indexed
bool
// NOTE(review): the signature line (with out-params minVal/maxVal) was lost
// in extraction.
{
    if (!getFullValidatedRange(minVal, maxVal))
        return false;

    // Remove from the validated range any ledger sequences that may not be
    // fully updated in the database yet

    auto const pendingSaves = app_.pendingSaves().getSnapshot();

    if (!pendingSaves.empty() && ((minVal != 0) || (maxVal != 0)))
    {
        // Ensure we shrink the tips as much as possible. If we have 7-9 and
        // 8,9 are invalid, we don't want to see the 8 and shrink to just 9
        // because then we'll have nothing when we could have 7.
        while (pendingSaves.count(maxVal) > 0)
            --maxVal;
        while (pendingSaves.count(minVal) > 0)
            ++minVal;

        // Best effort for remaining exclusions
        for (auto v : pendingSaves)
        {
            if ((v.first >= minVal) && (v.first <= maxVal))
            {
                // Cut from whichever end loses fewer ledgers.
                if (v.first > ((minVal + maxVal) / 2))
                    maxVal = v.first - 1;
                else
                    minVal = v.first + 1;
            }
        }

        // Nothing survived the exclusions: report an empty range.
        if (minVal > maxVal)
            minVal = maxVal = 0;
    }

    return true;
}
570
// Get the earliest ledger we will let peers fetch
// NOTE(review): the return type and name of this function were lost in
// extraction.
{
    // The earliest ledger we will let people fetch is ledger zero,
    // unless that creates a larger range than allowed
    std::uint32_t e = getClosedLedger()->header().seq;

    if (e > fetch_depth_)
        e -= fetch_depth_;
    else
        e = 0;
    return e;
}
585
void
// NOTE(review): the signature line (taking the starting `ledger`), the
// declaration of `ledgerHashes` (a seq -> hash-pair map), and the
// definition of `nodeStore` were lost in extraction.
{
    std::uint32_t seq = ledger->header().seq;
    uint256 prevHash = ledger->header().parentHash;

    // Track the contiguous [minHas, maxHas] range verified so far.
    std::uint32_t minHas = seq;
    std::uint32_t maxHas = seq;

    // Walk backwards from `seq`, verifying the hash chain against the SQL
    // database in batches.
    while (!app_.getJobQueue().isStopping() && seq > 0)
    {
        {
            // NOTE(review): a lock line was lost in extraction here.
            minHas = seq;
            --seq;

            if (haveLedger(seq))
                break;
        }

        auto it(ledgerHashes.find(seq));

        if (it == ledgerHashes.end())
        {
            if (app_.isStopping())
                return;

            {
                // NOTE(review): a lock line was lost in extraction here.
                mCompleteLedgers.insert(range(minHas, maxHas));
            }
            maxHas = minHas;
            // Refill the hash cache with the next (up to) 500 sequences.
            ledgerHashes =
                app_.getRelationalDatabase().getHashesByIndex((seq < 500) ? 0 : (seq - 499), seq);
            it = ledgerHashes.find(seq);

            if (it == ledgerHashes.end())
                break;

            if (!nodeStore.fetchNodeObject(
                    ledgerHashes.begin()->second.ledgerHash, ledgerHashes.begin()->first))
            {
                // The ledger is not backed by the node store
                JLOG(m_journal.warn())
                    << "SQL DB ledger sequence " << seq << " mismatches node store";
                break;
            }
        }

        // Stop at the first break in the hash chain.
        if (it->second.ledgerHash != prevHash)
            break;

        prevHash = it->second.parentHash;
    }

    {
        // NOTE(review): a lock line was lost in extraction here.
        mCompleteLedgers.insert(range(minHas, maxHas));
    }
    {
        // NOTE(review): a lock line was lost in extraction here.
        mFillInProgress = 0;
        tryAdvance();
    }
}
654
void
// NOTE(review): the signature line (taking the `missing` sequence and a
// fetch `reason`) and the declaration of `target` (the peer selected
// below) were lost in extraction.
{
    LedgerIndex const ledgerIndex = missing + 1;

    auto const haveHash{getLedgerHashForHistory(ledgerIndex, reason)};
    if (!haveHash || haveHash->isZero())
    {
        JLOG(m_journal.error()) << "No hash for fetch pack. Missing Index " << missing;
        return;
    }

    // Select target Peer based on highest score. The score is randomized
    // but biased in favor of Peers with low latency.
    {
        int maxScore = 0;
        auto peerList = app_.overlay().getActivePeers();
        for (auto const& peer : peerList)
        {
            // Only consider peers that actually have the missing range.
            if (peer->hasRange(missing, missing + 1))
            {
                int score = peer->getScore(true);
                if (!target || (score > maxScore))
                {
                    target = peer;
                    maxScore = score;
                }
            }
        }
    }

    if (target)
    {
        protocol::TMGetObjectByHash tmBH;
        tmBH.set_query(true);
        tmBH.set_type(protocol::TMGetObjectByHash::otFETCH_PACK);
        tmBH.set_ledgerhash(haveHash->begin(), 32);
        auto packet = std::make_shared<Message>(tmBH, protocol::mtGET_OBJECTS);

        target->send(packet);
        JLOG(m_journal.trace()) << "Requested fetch pack for " << missing;
    }
    else
        JLOG(m_journal.debug()) << "No peer for fetch pack";
}
703
void
// NOTE(review): the signature line (taking the validated `ledger` whose
// chain we reconcile against; visible callers invoke fixMismatch(*ledger))
// and the declaration of `hash` (optional, assigned below) were lost in
// extraction.
{
    int invalidate = 0;

    // Walk backwards from the ledger before `ledger`, clearing any stored
    // ledger whose hash disagrees with the validated chain.
    for (std::uint32_t lSeq = ledger.header().seq - 1; lSeq > 0; --lSeq)
    {
        if (haveLedger(lSeq))
        {
            try
            {
                hash = hashOfSeq(ledger, lSeq, m_journal);
            }
            catch (std::exception const& ex)
            {
                JLOG(m_journal.warn())
                    << "fixMismatch encounters partial ledger. Exception: " << ex.what();
                clearLedger(lSeq);
                return;
            }

            if (hash)
            {
                // try to close the seam
                auto otherLedger = getLedgerBySeq(lSeq);

                if (otherLedger && (otherLedger->header().hash == *hash))
                {
                    // we closed the seam
                    if (invalidate != 0)
                    {
                        JLOG(m_journal.warn()) << "Match at " << lSeq << ", " << invalidate
                                               << " prior ledgers invalidated";
                    }

                    return;
                }
            }

            clearLedger(lSeq);
            ++invalidate;
        }
    }

    // all prior ledgers invalidated
    if (invalidate != 0)
    {
        JLOG(m_journal.warn()) << "All " << invalidate << " prior ledgers invalidated";
    }
}
755
void
// NOTE(review): the line naming this function (LedgerMaster::setFullLedger,
// confirmed by the assertion message below) was lost in extraction.
    std::shared_ptr<Ledger const> const& ledger,
    bool isSynchronous,
    bool isCurrent)
{
    // A new ledger has been accepted as part of the trusted chain
    JLOG(m_journal.debug()) << "Ledger " << ledger->header().seq
                            << " accepted :" << ledger->header().hash;
    XRPL_ASSERT(
        ledger->stateMap().getHash().isNonZero(),
        "xrpl::LedgerMaster::setFullLedger : nonzero ledger state hash");

    ledger->setValidated();
    ledger->setFull();

    if (isCurrent)
        mLedgerHistory.insert(ledger, true);

    {
        // Check the SQL database's entry for the sequence before this
        // ledger, if it's not this ledger's parent, invalidate it
        uint256 prevHash = app_.getRelationalDatabase().getHashByIndex(ledger->header().seq - 1);
        if (prevHash.isNonZero() && prevHash != ledger->header().parentHash)
            clearLedger(ledger->header().seq - 1);
    }

    pendSaveValidated(app_, ledger, isSynchronous, isCurrent);

    {
        // NOTE(review): a lock line was lost in extraction here.
        mCompleteLedgers.insert(ledger->header().seq);
    }

    {
        // NOTE(review): a lock line was lost in extraction here.

        if (ledger->header().seq > mValidLedgerSeq)
            setValidLedger(ledger);
        if (!mPubLedger)
        {
            setPubLedger(ledger);
            app_.getOrderBookDB().setup(ledger);
        }

        if (ledger->header().seq != 0 && haveLedger(ledger->header().seq - 1))
        {
            // we think we have the previous ledger, double check
            auto prevLedger = getLedgerBySeq(ledger->header().seq - 1);

            if (!prevLedger || (prevLedger->header().hash != ledger->header().parentHash))
            {
                JLOG(m_journal.warn()) << "Acquired ledger invalidates previous ledger: "
                                       << (prevLedger ? "hashMismatch" : "missingLedger");
                fixMismatch(*ledger);
            }
        }
    }
}
815
816void
822
// Check if the specified ledger can become the new last fully-validated
// ledger.
void
// NOTE(review): the signature line (taking the candidate's `hash` and
// `seq`) was lost in extraction.
{
    std::size_t valCount = 0;

    if (seq != 0)
    {
        // Ledger is too old
        if (seq < mValidLedgerSeq)
            return;

        auto validations = app_.validators().negativeUNLFilter(
            // NOTE(review): the argument line (fetching trusted validations
            // for this hash/seq) was lost in extraction.
        valCount = validations.size();
        if (valCount >= app_.validators().quorum())
        {
            // NOTE(review): a lock line was lost in extraction here.
            if (seq > mLastValidLedger.second)
                mLastValidLedger = std::make_pair(hash, seq);
        }

        if (seq == mValidLedgerSeq)
            return;

        // Ledger could match the ledger we're already building
        if (seq == mBuildingLedgerSeq)
            return;
    }

    auto ledger = mLedgerHistory.getLedgerByHash(hash);

    if (!ledger)
    {
        if ((seq != 0) && (getValidLedgerIndex() == 0))
        {
            // Set peers converged early if we can
            if (valCount >= app_.validators().quorum())
                // NOTE(review): the statement body was lost in extraction.
        }

        // FIXME: We may not want to fetch a ledger with just one
        // trusted validation
        // NOTE(review): the ledger-acquire statement was lost in
        // extraction.
    }

    if (ledger)
        checkAccept(ledger);
}
873
884
void
// NOTE(review): the signature line (taking the candidate `ledger`) was
// lost in extraction.
{
    // Can we accept this ledger as our new last fully-validated ledger

    if (!canBeCurrent(ledger))
        return;

    // Can we advance the last fully-validated ledger? If so, can we
    // publish?

    if (ledger->header().seq <= mValidLedgerSeq)
        return;

    auto const minVal = getNeededValidations();
    auto validations = app_.validators().negativeUNLFilter(
        app_.getValidations().getTrustedForLedger(ledger->header().hash, ledger->header().seq));
    auto const tvc = validations.size();
    if (tvc < minVal) // nothing we can do
    {
        JLOG(m_journal.trace()) << "Only " << tvc << " validations for " << ledger->header().hash;
        return;
    }

    JLOG(m_journal.info()) << "Advancing accepted ledger to " << ledger->header().seq
                           << " with >= " << minVal << " validations";

    ledger->setValidated();
    ledger->setFull();
    setValidLedger(ledger);
    if (!mPubLedger)
    {
        pendSaveValidated(app_, ledger, true, true);
        setPubLedger(ledger);
        app_.getOrderBookDB().setup(ledger);
    }

    // Combine fee votes from validations of this ledger and its parent.
    std::uint32_t const base = app_.getFeeTrack().getLoadBase();
    auto fees = app_.getValidations().fees(ledger->header().hash, base);
    {
        auto fees2 = app_.getValidations().fees(ledger->header().parentHash, base);
        fees.reserve(fees.size() + fees2.size());
        std::copy(fees2.begin(), fees2.end(), std::back_inserter(fees));
    }
    std::uint32_t fee;
    if (!fees.empty())
    {
        std::sort(fees.begin(), fees.end());
        if (auto stream = m_journal.debug())
        {
            // NOTE(review): the declaration of `s` (a string stream) was
            // lost in extraction.
            s << "Received fees from validations: (" << fees.size() << ") ";
            for (auto const fee1 : fees)
            {
                s << " " << fee1;
            }
            stream << s.str();
        }
        fee = fees[fees.size() / 2]; // median
    }
    else
    {
        fee = base;
    }

    // NOTE(review): the statement applying `fee` was lost in extraction.

    tryAdvance();

    if (ledger->seq() % 256 == 0)
    {
        // Check if the majority of validators run a higher version rippled
        // software. If so print a warning.
        //
        // Validators include their rippled software version in the validation
        // messages of every (flag - 1) ledger. We wait for one ledger time
        // before checking the version information to accumulate more validation
        // messages.

        auto currentTime = app_.timeKeeper().now();
        bool needPrint = false;

        // The variable upgradeWarningPrevTime_ will be set when and only when
        // the warning is printed.
        // NOTE(review): the `if` condition (a first-time check of
        // upgradeWarningPrevTime_) was lost in extraction.
        {
            // Have not printed the warning before, check if need to print.
            auto const vals = app_.getValidations().getTrustedForLedger(
                ledger->header().parentHash, ledger->header().seq - 1);
            std::size_t higherVersionCount = 0;
            std::size_t rippledCount = 0;
            for (auto const& v : vals)
            {
                if (v->isFieldPresent(sfServerVersion))
                {
                    auto version = v->getFieldU64(sfServerVersion);
                    higherVersionCount += BuildInfo::isNewerVersion(version) ? 1 : 0;
                    rippledCount += BuildInfo::isRippledVersion(version) ? 1 : 0;
                }
            }
            // We report only if (1) we have accumulated validation messages
            // from 90% validators from the UNL, (2) 60% of validators
            // running the rippled implementation have higher version numbers,
            // and (3) the calculation won't cause divide-by-zero.
            if (higherVersionCount > 0 && rippledCount > 0)
            {
                constexpr std::size_t reportingPercent = 90;
                constexpr std::size_t cutoffPercent = 60;
                auto const unlSize{app_.validators().getQuorumKeys().second.size()};
                needPrint = unlSize > 0 &&
                    calculatePercent(vals.size(), unlSize) >= reportingPercent &&
                    calculatePercent(higherVersionCount, rippledCount) >= cutoffPercent;
            }
        }
        // To throttle the warning messages, instead of printing a warning
        // every flag ledger, we print every week.
        else if (currentTime - upgradeWarningPrevTime_ >= weeks{1})
        {
            // Printed the warning before, and assuming most validators
            // do not downgrade, we keep printing the warning
            // until the local server is restarted.
            needPrint = true;
        }

        if (needPrint)
        {
            upgradeWarningPrevTime_ = currentTime;
            auto const upgradeMsg =
                "Check for upgrade: "
                "A majority of trusted validators are "
                "running a newer version.";
            std::cerr << upgradeMsg << std::endl;
            JLOG(m_journal.error()) << upgradeMsg;
        }
    }
}
1022
void
// NOTE(review): the line naming this function and its first parameter was
// lost in extraction.
    std::shared_ptr<Ledger const> const& ledger,
    uint256 const& consensusHash,
    Json::Value consensus)
{
    // Because we just built a ledger, we are no longer building one
    // NOTE(review): the statement clearing the building-ledger marker was
    // lost in extraction.

    // No need to process validations in standalone mode
    if (standalone_)
        return;

    mLedgerHistory.builtLedger(ledger, consensusHash, std::move(consensus));

    if (ledger->header().seq <= mValidLedgerSeq)
    {
        auto stream = app_.journal("LedgerConsensus").info();
        JLOG(stream) << "Consensus built old ledger: " << ledger->header().seq
                     << " <= " << mValidLedgerSeq;
        return;
    }

    // See if this ledger can be the new fully-validated ledger
    checkAccept(ledger);

    if (ledger->header().seq <= mValidLedgerSeq)
    {
        auto stream = app_.journal("LedgerConsensus").debug();
        JLOG(stream) << "Consensus ledger fully validated";
        return;
    }

    // This ledger cannot be the new fully-validated ledger, but
    // maybe we saved up validations for some other ledger that can be

    // NOTE(review): the line fetching the current trusted `validations`
    // was lost in extraction.

    // Track validation counts with sequence numbers
    class valSeq
    {
    public:
        valSeq() : valCount_(0), ledgerSeq_(0)
        {
            ;
        }

        void
        mergeValidation(LedgerIndex seq)
        {
            valCount_++;

            // If we didn't already know the sequence, now we do
            if (ledgerSeq_ == 0)
                ledgerSeq_ = seq;
        }

        std::size_t valCount_;
        LedgerIndex ledgerSeq_;
    };

    // Count the number of current, trusted validations
    // NOTE(review): the declaration of `count` (ledger hash -> valSeq map)
    // was lost in extraction.
    for (auto const& v : validations)
    {
        valSeq& vs = count[v->getLedgerHash()];
        vs.mergeValidation(v->getFieldU32(sfLedgerSequence));
    }

    auto const neededValidations = getNeededValidations();
    auto maxSeq = mValidLedgerSeq.load();
    auto maxLedger = ledger->header().hash;

    // Of the ledgers with sufficient validations,
    // find the one with the highest sequence
    for (auto& v : count)
        if (v.second.valCount_ > neededValidations)
        {
            // If we still don't know the sequence, get it
            if (v.second.ledgerSeq_ == 0)
            {
                if (auto l = getLedgerByHash(v.first))
                    v.second.ledgerSeq_ = l->header().seq;
            }

            if (v.second.ledgerSeq_ > maxSeq)
            {
                maxSeq = v.second.ledgerSeq_;
                maxLedger = v.first;
            }
        }

    if (maxSeq > mValidLedgerSeq)
    {
        auto stream = app_.journal("LedgerConsensus").debug();
        JLOG(stream) << "Consensus triggered check of ledger";
        checkAccept(maxLedger, maxSeq);
    }
}
1123
// NOTE(review): the signature (visible callers invoke this as
// getLedgerHashForHistory(index, reason)) and the declaration of `ret`
// (an optional hash) were lost in extraction.
{
    // Try to get the hash of a ledger we need to fetch for history
    auto const& l{mHistLedger};

    // Prefer the in-progress history ledger's skip list if it covers the
    // requested index.
    if (l && l->header().seq >= index)
    {
        ret = hashOfSeq(*l, index, m_journal);
        if (!ret)
            ret = walkHashBySeq(index, l, reason);
    }

    if (!ret)
        ret = walkHashBySeq(index, reason);

    return ret;
}
1143
// NOTE(review): the signature (the function name is confirmed by the
// UNREACHABLE message below; it takes the held lock `sl`, released via
// scope_unlock) and the declaration of the result vector `ret` were lost
// in extraction.
{
    JLOG(m_journal.trace()) << "findNewLedgersToPublish<";

    // No valid ledger, nothing to do
    if (mValidLedger.empty())
    {
        JLOG(m_journal.trace()) << "No valid journal, nothing to publish.";
        return {};
    }

    if (!mPubLedger)
    {
        JLOG(m_journal.info()) << "First published ledger will be " << mValidLedgerSeq;
        return {mValidLedger.get()};
    }

    // NOTE(review): the condition detecting a too-large publish gap was
    // lost in extraction.
    {
        JLOG(m_journal.warn()) << "Gap in validated ledger stream " << mPubLedgerSeq << " - "
                               << mValidLedgerSeq - 1;

        auto valLedger = mValidLedger.get();
        ret.push_back(valLedger);
        setPubLedger(valLedger);
        app_.getOrderBookDB().setup(valLedger);

        return {valLedger};
    }

    // NOTE(review): the condition guarding this early-out was lost in
    // extraction.
    {
        JLOG(m_journal.trace()) << "No valid journal, nothing to publish.";
        return {};
    }

    int acqCount = 0;

    auto pubSeq = mPubLedgerSeq + 1; // Next sequence to publish
    auto valLedger = mValidLedger.get();
    std::uint32_t valSeq = valLedger->header().seq;

    // Release the caller's lock while doing fetch/acquire work.
    scope_unlock sul{sl};
    try
    {
        for (std::uint32_t seq = pubSeq; seq <= valSeq; ++seq)
        {
            JLOG(m_journal.trace()) << "Trying to fetch/publish valid ledger " << seq;

            // NOTE(review): the declaration of `ledger` was lost in
            // extraction.
            // This can throw
            auto hash = hashOfSeq(*valLedger, seq, m_journal);
            // VFALCO TODO Restructure this code so that zero is not
            // used.
            if (!hash)
                hash = beast::zero; // kludge
            if (seq == valSeq)
            {
                // We need to publish the ledger we just fully validated
                ledger = valLedger;
            }
            else if (hash->isZero())
            {
                // LCOV_EXCL_START
                JLOG(m_journal.fatal())
                    << "Ledger: " << valSeq << " does not have hash for " << seq;
                UNREACHABLE(
                    "xrpl::LedgerMaster::findNewLedgersToPublish : ledger "
                    "not found");
                // LCOV_EXCL_STOP
            }
            else
            {
                ledger = mLedgerHistory.getLedgerByHash(*hash);
            }

            if (!app_.config().LEDGER_REPLAY)
            {
                // Can we try to acquire the ledger we need?
                if (!ledger && (++acqCount < ledger_fetch_size_))
                    ledger = app_.getInboundLedgers().acquire(
                        *hash, seq, InboundLedger::Reason::GENERIC);
            }

            // Did we acquire the next ledger we need to publish?
            if (ledger && (ledger->header().seq == pubSeq))
            {
                ledger->setValidated();
                ret.push_back(ledger);
                ++pubSeq;
            }
        }

        JLOG(m_journal.trace()) << "ready to publish " << ret.size() << " ledgers.";
    }
    catch (std::exception const& ex)
    {
        JLOG(m_journal.error()) << "Exception while trying to find ledgers to publish: "
                                << ex.what();
    }

    // NOTE(review): the condition guarding the ledger-replay path was lost
    // in extraction.
    {
        /* Narrow down the gap of ledgers, and try to replay them.
         * When replaying a ledger gap, if the local node has
         * the start ledger, it saves an expensive InboundLedger
         * acquire. If the local node has the finish ledger, it
         * saves a skip list acquire.
         */
        auto const& startLedger = ret.empty() ? mPubLedger : ret.back();
        auto finishLedger = valLedger;
        while (startLedger->seq() + 1 < finishLedger->seq())
        {
            if (auto const parent =
                    mLedgerHistory.getLedgerByHash(finishLedger->header().parentHash);
                parent)
            {
                finishLedger = parent;
            }
            else
            {
                auto numberLedgers = finishLedger->seq() - startLedger->seq() + 1;
                JLOG(m_journal.debug())
                    << "Publish LedgerReplays " << numberLedgers
                    << " ledgers, from seq=" << startLedger->header().seq << ", "
                    << startLedger->header().hash << " to seq=" << finishLedger->header().seq
                    << ", " << finishLedger->header().hash;
                // NOTE(review): the call into the ledger replayer was lost
                // in extraction; only its trailing arguments remain.
                    InboundLedger::Reason::GENERIC, finishLedger->header().hash, numberLedgers);
                break;
            }
        }
    }

    return ret;
}
1283
void
// NOTE(review): the name line (LedgerMaster::tryAdvance, confirmed by the
// assertion message below), a lock line, and the guard condition that
// decides whether to schedule the advance job were lost in extraction.
{
    // Can't advance without at least one fully-valid ledger
    mAdvanceWork = true;
    {
        mAdvanceThread = true;
        app_.getJobQueue().addJob(jtADVANCE, "AdvanceLedger", [this]() {
            // NOTE(review): a lock line (establishing `sl`, passed to
            // doAdvance below) was lost in extraction here.

            XRPL_ASSERT(
                // NOTE(review): the assertion's condition line was lost in
                // extraction.
                "xrpl::LedgerMaster::tryAdvance : has valid ledger");

            JLOG(m_journal.trace()) << "advanceThread<";

            try
            {
                doAdvance(sl);
            }
            catch (std::exception const& ex)
            {
                JLOG(m_journal.fatal()) << "doAdvance throws: " << ex.what();
            }

            mAdvanceThread = false;
            JLOG(m_journal.trace()) << "advanceThread>";
        });
    }
}
1317
// Worker body for the jtUPDATE_PF job: repeatedly service pending path
// requests against the newest usable ledger until none remain, the job
// queue stops, or the available ledger is unusable (too old / missing).
// NOTE(review): this extract dropped the signature (1319), the lock lines
// (1322-1323, 1337, 1377), the declaration/assignment of `lastLedger`
// (1335, 1342), the mPathFindThread decrements (1325, 1351, 1366-1367,
// 1380, 1396), and the acquire() calls inside the catch (1407, 1410,
// 1415, 1418). Code lines are reproduced byte-for-byte, gaps included —
// confirm against the upstream file.
1318void
1320{
1321    {
1324        {
1326            mPathLedger.reset();
1327            JLOG(m_journal.debug()) << "Need network ledger for updating paths";
1328            return;
1329        }
1330    }
1331
1332    while (!app_.getJobQueue().isStopping())
1333    {
1334        JLOG(m_journal.debug()) << "updatePaths running";
1336        {
1338
1339            if (!mValidLedger.empty() &&
1340                (!mPathLedger || (mPathLedger->header().seq != mValidLedgerSeq)))
1341            {  // We have a new valid ledger since the last full pathfinding
1343                lastLedger = mPathLedger;
1344            }
1345            else if (mPathFindNewRequest)
1346            {  // We have a new request but no new ledger
1347                lastLedger = app_.openLedger().current();
1348            }
1349            else
1350            {  // Nothing to do
1352                mPathLedger.reset();
1353                JLOG(m_journal.debug()) << "Nothing to do for updating paths";
1354                return;
1355            }
1356        }
1357
1358        if (!standalone_)
1359        {  // don't pathfind with a ledger that's more than 60 seconds old
1360            using namespace std::chrono;
1361            auto age = time_point_cast<seconds>(app_.timeKeeper().closeTime()) -
1362                lastLedger->header().closeTime;
1363            if (age > 1min)
1364            {
1365                JLOG(m_journal.debug()) << "Published ledger too old for updating paths";
1368                mPathLedger.reset();
1369                return;
1370            }
1371        }
1372
1373        try
1374        {
1375            auto& pathRequests = app_.getPathRequests();
1376            {
1378                if (!pathRequests.requestsPending())
1379                {
1381                    mPathLedger.reset();
1382                    JLOG(m_journal.debug()) << "No path requests found. Nothing to do for updating "
1383                                               "paths. "
1384                                            << mPathFindThread << " jobs remaining";
1385                    return;
1386                }
1387            }
1388            JLOG(m_journal.debug()) << "Updating paths";
1389            pathRequests.updateAll(lastLedger);
1390
1392            if (!pathRequests.requestsPending())
1393            {
1394                JLOG(m_journal.debug()) << "No path requests left. No need for further updating "
1395                                           "paths";
1397                mPathLedger.reset();
1398                return;
1399            }
1400        }
1401        catch (SHAMapMissingNode const& mn)
1402        {
1403            JLOG(m_journal.info()) << "During pathfinding: " << mn.what();
1404            if (lastLedger->open())
1405            {
1406                // our parent is the problem
1408                    lastLedger->header().parentHash,
1409                    lastLedger->header().seq - 1,
1411            }
1412            else
1413            {
1414                // this ledger is the problem
1416                    lastLedger->header().hash,
1417                    lastLedger->header().seq,
1419            }
1420        }
1421    }
1422}
1423
1424bool
1426{
1428 mPathFindNewRequest = newPFWork("PthFindNewReq", ml);
1429 return mPathFindNewRequest;
1430}
1431
// Read-and-clear accessor for the "new path request" flag: reports
// whether a new request arrived since the last call, resetting the flag.
// NOTE(review): lines 1433 (signature) and 1435 (presumably a lock on
// m_mutex guarding mPathFindNewRequest) were dropped by this extract —
// confirm against the upstream file.
1432bool
1434{
1436    bool const ret = mPathFindNewRequest;
1437    mPathFindNewRequest = false;
1438    return ret;
1439}
1440
1441// If the order book is radically updated, we need to reprocess all
1442// pathfinding requests.
1443bool
1445{
1447 mPathLedger.reset();
1448
1449 return newPFWork("PthFindOBDB", ml);
1450}
1451
// A thread needs to be dispatched to handle pathfinding work of some
// kind: dispatch a jtUPDATE_PF job running updatePaths() and report
// whether a pathfinding job is (or will be) active.
// NOTE(review): this extract dropped lines 1452-1453 (doc comment), 1455
// (signature), 1457 (the gate deciding whether another job may be
// spawned) and 1463 (presumably incrementing mPathFindThread on a
// successful addJob) — confirm against the upstream file.
1454bool
1456{
1458    {
1459        JLOG(m_journal.debug()) << "newPFWork: Creating job. path find threads: "
1460                                << mPathFindThread;
1461        if (app_.getJobQueue().addJob(jtUPDATE_PF, name, [this]() { updatePaths(); }))
1462        {
1464        }
1465    }
1466    // If we're stopping don't give callers the expectation that their
1467    // request will be fulfilled, even if it may be serviced.
1468    return mPathFindThread > 0 && !app_.isStopping();
1469}
1470
1473{
1474 return m_mutex;
1475}
1476
1477// The current ledger is the ledger we believe new transactions should go in
1483
1489
1490Rules
1492{
1493 // Once we have a guarantee that there's always a last validated
1494 // ledger then we can dispense with the if.
1495
1496 // Return the Rules from the last validated ledger.
1497 if (auto const ledger = getValidatedLedger())
1498 return ledger->rules();
1499
1500 return Rules(app_.config().features);
1501}
1502
1503// This is the last ledger we published to clients and can lag the validated
1504// ledger.
1511
1518
1521{
1522 uint256 hash = getHashBySeq(ledgerIndex);
1523 return hash.isNonZero() ? getCloseTimeByHash(hash, ledgerIndex) : std::nullopt;
1524}
1525
1528{
1529 auto nodeObject = app_.getNodeStore().fetchNodeObject(ledgerHash, index);
1530 if (nodeObject && (nodeObject->getData().size() >= 120))
1531 {
1532 SerialIter it(nodeObject->getData().data(), nodeObject->getData().size());
1533 if (safe_cast<HashPrefix>(it.get32()) == HashPrefix::ledgerMaster)
1534 {
1535 it.skip(
1536 4 + 8 + 32 + // seq drops parentHash
1537 32 + 32 + 4); // txHash acctHash parentClose
1539 }
1540 }
1541
1542 return std::nullopt;
1543}
1544
1545uint256
1547{
1549
1550 if (hash.isNonZero())
1551 return hash;
1552
1554}
1555
1558{
1559 std::optional<LedgerHash> ledgerHash;
1560
1561 if (auto referenceLedger = mValidLedger.get())
1562 ledgerHash = walkHashBySeq(index, referenceLedger, reason);
1563
1564 return ledgerHash;
1565}
1566
1569 std::uint32_t index,
1570 std::shared_ptr<ReadView const> const& referenceLedger,
1571 InboundLedger::Reason reason)
1572{
1573 if (!referenceLedger || (referenceLedger->header().seq < index))
1574 {
1575 // Nothing we can do. No validated ledger.
1576 return std::nullopt;
1577 }
1578
1579 // See if the hash for the ledger we need is in the reference ledger
1580 auto ledgerHash = hashOfSeq(*referenceLedger, index, m_journal);
1581 if (ledgerHash)
1582 return ledgerHash;
1583
1584 // The hash is not in the reference ledger. Get another ledger which can
1585 // be located easily and should contain the hash.
1586 LedgerIndex refIndex = getCandidateLedger(index);
1587 auto const refHash = hashOfSeq(*referenceLedger, refIndex, m_journal);
1588 XRPL_ASSERT(refHash, "xrpl::LedgerMaster::walkHashBySeq : found ledger");
1589 if (refHash)
1590 {
1591 // Try the hash and sequence of a better reference ledger just found
1592 auto ledger = mLedgerHistory.getLedgerByHash(*refHash);
1593
1594 if (ledger)
1595 {
1596 try
1597 {
1598 ledgerHash = hashOfSeq(*ledger, index, m_journal);
1599 }
1600 catch (SHAMapMissingNode const&)
1601 {
1602 ledger.reset();
1603 }
1604 }
1605
1606 // Try to acquire the complete ledger
1607 if (!ledger)
1608 {
1609 if (auto const l = app_.getInboundLedgers().acquire(*refHash, refIndex, reason))
1610 {
1611 ledgerHash = hashOfSeq(*l, index, m_journal);
1612 XRPL_ASSERT(
1613 ledgerHash,
1614 "xrpl::LedgerMaster::walkHashBySeq : has complete "
1615 "ledger");
1616 }
1617 }
1618 }
1619 return ledgerHash;
1620}
1621
1624{
1625 if (index <= mValidLedgerSeq)
1626 {
1627 // Always prefer a validated ledger
1628 if (auto valid = mValidLedger.get())
1629 {
1630 if (valid->header().seq == index)
1631 return valid;
1632
1633 try
1634 {
1635 auto const hash = hashOfSeq(*valid, index, m_journal);
1636
1637 if (hash)
1639 }
1640 catch (std::exception const&)
1641 {
1642 // Missing nodes are already handled
1643 }
1644 }
1645 }
1646
1647 if (auto ret = mLedgerHistory.getLedgerBySeq(index))
1648 return ret;
1649
1650 auto ret = mClosedLedger.get();
1651 if (ret && (ret->header().seq == index))
1652 return ret;
1653
1654 clearLedger(index);
1655 return {};
1656}
1657
1660{
1661 if (auto ret = mLedgerHistory.getLedgerByHash(hash))
1662 return ret;
1663
1664 auto ret = mClosedLedger.get();
1665 if (ret && (ret->header().hash == hash))
1666 return ret;
1667
1668 return {};
1669}
1670
1671void
1677
1678void
1680{
1682 fetch_packs_.sweep();
1683}
1684
1685float
1690
1691void
1693{
1695 if (seq > 0)
1696 mCompleteLedgers.erase(range(0u, seq - 1));
1697}
1698
1699void
1704
1705void
1707{
1708 replayData = std::move(replay);
1709}
1710
1713{
1714 return std::move(replayData);
1715}
1716
// Acquire a missing historical ledger (by sequence), install it, and
// kick off back-filling / prefetching of its predecessors.
// NOTE(review): this doxygen extract dropped several source lines here —
// 1718 (LedgerMaster::fetchForHistory name line), 1722 (the lock
// parameter), 1731 (gate before acquiring, presumably a failed-acquire
// check), 1755 and 1764 (locks), and 1767 (the addJob call whose
// argument list continues on 1768). Code lines are reproduced
// byte-for-byte, gaps included — confirm against the upstream file.
1717void
1719    std::uint32_t missing,
1720    bool& progress,
1721    InboundLedger::Reason reason,
1723{
1724    // Release the master lock while doing network/disk work.
1725    scope_unlock sul{sl};
1726    if (auto hash = getLedgerHashForHistory(missing, reason))
1726    {
1727        XRPL_ASSERT(hash->isNonZero(), "xrpl::LedgerMaster::fetchForHistory : found ledger");
1728        auto ledger = getLedgerByHash(*hash);
1729        if (!ledger)
1730        {
1732            {
1733                ledger = app_.getInboundLedgers().acquire(*hash, missing, reason);
1734                if (!ledger && missing != fetch_seq_ &&
1735                    missing > app_.getNodeStore().earliestLedgerSeq())
1736                {
1737                    JLOG(m_journal.trace()) << "fetchForHistory want fetch pack " << missing;
1738                    fetch_seq_ = missing;
1739                    getFetchPack(missing, reason);
1740                }
1741                else
1742                    JLOG(m_journal.trace()) << "fetchForHistory no fetch pack for " << missing;
1743            }
1744            else
1745                JLOG(m_journal.debug()) << "fetchForHistory found failed acquire";
1746        }
1747        if (ledger)
1748        {
1749            auto seq = ledger->header().seq;
1750            XRPL_ASSERT(seq == missing, "xrpl::LedgerMaster::fetchForHistory : sequence match");
1751            JLOG(m_journal.trace()) << "fetchForHistory acquired " << seq;
1752            setFullLedger(ledger, false, false);
1753            int fillInProgress;
1754            {
1756                mHistLedger = ledger;
1757                fillInProgress = mFillInProgress;
1758            }
1759            if (fillInProgress == 0 &&
1760                app_.getRelationalDatabase().getHashByIndex(seq - 1) == ledger->header().parentHash)
1761            {
1762                {
1763                    // Previous ledger is in DB
1765                    mFillInProgress = seq;
1766                }
1768                    jtADVANCE, "TryFill", [this, ledger]() { tryFill(ledger); });
1769            }
1770            progress = true;
1771        }
1772        else
1773        {
1774            std::uint32_t fetchSz;
1775            // Do not fetch ledger sequences lower
1776            // than the earliest ledger sequence
1777            fetchSz = app_.getNodeStore().earliestLedgerSeq();
1778            fetchSz =
1779                missing >= fetchSz ? std::min(ledger_fetch_size_, (missing - fetchSz) + 1) : 0;
1780            try
1781            {
1782                for (std::uint32_t i = 0; i < fetchSz; ++i)
1783                {
1784                    std::uint32_t seq = missing - i;
1785                    if (auto h = getLedgerHashForHistory(seq, reason))
1786                    {
1787                        XRPL_ASSERT(
1788                            h->isNonZero(),
1789                            "xrpl::LedgerMaster::fetchForHistory : "
1790                            "prefetched ledger");
1791                        app_.getInboundLedgers().acquire(*h, seq, reason);
1792                    }
1793                }
1794            }
1795            catch (std::exception const& ex)
1796            {
1797                JLOG(m_journal.warn()) << "Threw while prefetching: " << ex.what();
1798            }
1799        }
1800    }
1801    else
1802    {
1803        JLOG(m_journal.fatal()) << "Can't find ledger following prevMissing " << missing;
1804        JLOG(m_journal.fatal()) << "Pub:" << mPubLedgerSeq << " Val:" << mValidLedgerSeq;
1805        JLOG(m_journal.fatal()) << "Ledgers: " << app_.getLedgerMaster().getCompleteLedgers();
1806        JLOG(m_journal.fatal()) << "Acquire reason: "
1807                                << (reason == InboundLedger::Reason::HISTORY ? "HISTORY"
1808                                                                             : "NOT HISTORY");
1809        clearLedger(missing + 1);
1810        progress = true;
1811    }
1812}
1813
// Try to publish ledgers, acquire missing ledgers
// Runs on the jtADVANCE job under the master lock `sl`; loops until a
// full pass makes no progress.
// NOTE(review): this doxygen extract dropped many source lines here —
// 1816 (signature), 1826-1830 (the shouldAcquire/sync gate and `reason`
// setup), 1833-1834 and 1836-1841 (the lock on mCompleteLock and the
// full prevMissing() argument list), 1845-1849 (the rest of the
// shouldAcquire() condition), 1861 (the "last valid changed" check), and
// 1894 (a lock/guard before newPFWork). Code lines are reproduced
// byte-for-byte, gaps included — confirm against the upstream file.
1815void
1817{
1818    do
1819    {
1820        mAdvanceWork = false;  // If there's work to do, we'll make progress
1821        bool progress = false;
1822
1823        auto const pubLedgers = findNewLedgersToPublish(sl);
1824        if (pubLedgers.empty())
1825        {
1831            {
1832                // We are in sync, so can acquire
1835                {
1837                    missing = prevMissing(
1839                        mPubLedger->header().seq,
1841                }
1842                if (missing)
1843                {
1844                    JLOG(m_journal.trace()) << "tryAdvance discovered missing " << *missing;
1845                    if ((mFillInProgress == 0 || *missing > mFillInProgress) &&
1850                            *missing,
1851                            m_journal))
1852                    {
1853                        JLOG(m_journal.trace()) << "advanceThread should acquire";
1854                    }
1855                    else
1856                        missing = std::nullopt;
1857                }
1858                if (missing)
1859                {
1860                    fetchForHistory(*missing, progress, reason, sl);
1862                    {
1863                        JLOG(m_journal.debug()) << "tryAdvance found last valid changed";
1864                        progress = true;
1865                    }
1866                }
1867            }
1868            else
1869            {
1870                mHistLedger.reset();
1871                JLOG(m_journal.trace()) << "tryAdvance not fetching history";
1872            }
1873        }
1874        else
1875        {
1876            JLOG(m_journal.trace())
1877                << "tryAdvance found " << pubLedgers.size() << " ledgers to publish";
1878            for (auto const& ledger : pubLedgers)
1879            {
1880                {
1881                    // Install/store the ledger without holding the lock.
1882                    scope_unlock sul{sl};
1883                    JLOG(m_journal.debug()) << "tryAdvance publishing seq " << ledger->header().seq;
1884                    setFullLedger(ledger, true, true);
1885                }
1886
1887                setPubLedger(ledger);
1888
1889                {
1890                    // Notify subscribers without holding the lock.
1891                    scope_unlock sul{sl};
1892                    app_.getOPs().pubLedger(ledger);
1893                }
1894            }
1895
1896            progress = newPFWork("PthFindNewLed", sl);
1896        }
1897        if (progress)
1898            mAdvanceWork = true;
1899    } while (mAdvanceWork);
1900}
1901
1902void
1904{
1905 fetch_packs_.canonicalize_replace_client(hash, data);
1906}
1907
1910{
1911 Blob data;
1912 if (fetch_packs_.retrieve(hash, data))
1913 {
1914 fetch_packs_.del(hash, false);
1915 if (hash == sha512Half(makeSlice(data)))
1916 return data;
1917 }
1918 return std::nullopt;
1919}
1920
1921void
1932
1958static void
1960 SHAMap const& want,
1961 SHAMap const* have,
1962 std::uint32_t cnt,
1963 protocol::TMGetObjectByHash* into,
1964 std::uint32_t seq,
1965 bool withLeaves = true)
1966{
1967 XRPL_ASSERT(cnt, "xrpl::populateFetchPack : nonzero count input");
1968
1969 Serializer s(1024);
1970
1971 want.visitDifferences(have, [&s, withLeaves, &cnt, into, seq](SHAMapTreeNode const& n) -> bool {
1972 if (!withLeaves && n.isLeaf())
1973 return true;
1974
1975 s.erase();
1977
1978 auto const& hash = n.getHash().as_uint256();
1979
1980 protocol::TMIndexedObject* obj = into->add_objects();
1981 obj->set_ledgerseq(seq);
1982 obj->set_hash(hash.data(), hash.size());
1983 obj->set_data(s.getDataPtr(), s.getLength());
1984
1985 return --cnt != 0;
1986 });
1987}
1988
// Build and send a fetch pack to a peer: the headers and state/tx nodes
// a requester needs to rebuild ledgers ending at `haveLedgerHash`,
// walking backwards parent-by-parent until ~512 objects or ~1s elapsed.
// NOTE(review): this doxygen extract dropped several source lines here —
// 1990 (LedgerMaster::makeFetchPack name line), 1992 (the request
// parameter), 1994 (the uptime parameter), 2003 (the "too busy" gate,
// presumably a job-count check), and 2075-2076 (header serialization
// prefix before addRaw). Code lines are reproduced byte-for-byte, gaps
// included — confirm against the upstream file.
1989void
1991    std::weak_ptr<Peer> const& wPeer,
1993    uint256 haveLedgerHash,
1995{
1996    using namespace std::chrono_literals;
1997    if (UptimeClock::now() > uptime + 1s)
1998    {
1999        JLOG(m_journal.info()) << "Fetch pack request got stale";
2000        return;
2001    }
2002
2004    {
2005        JLOG(m_journal.info()) << "Too busy to make fetch pack";
2006        return;
2007    }
2008
2009    auto peer = wPeer.lock();
2010
2011    if (!peer)
2012        return;
2013
2014    auto have = getLedgerByHash(haveLedgerHash);
2015
2016    if (!have)
2017    {
2018        JLOG(m_journal.info()) << "Peer requests fetch pack for ledger we don't have: " << have;
2019        peer->charge(Resource::feeRequestNoReply, "get_object ledger");
2020        return;
2021    }
2022
2023    if (have->open())
2024    {
2025        JLOG(m_journal.warn()) << "Peer requests fetch pack from open ledger: " << have;
2026        peer->charge(Resource::feeMalformedRequest, "get_object ledger open");
2027        return;
2028    }
2029
2030    if (have->header().seq < getEarliestFetch())
2031    {
2032        JLOG(m_journal.debug()) << "Peer requests fetch pack that is too early";
2033        peer->charge(Resource::feeMalformedRequest, "get_object ledger early");
2034        return;
2035    }
2036
2037    auto want = getLedgerByHash(have->header().parentHash);
2038
2039    if (!want)
2040    {
2041        JLOG(m_journal.info()) << "Peer requests fetch pack for ledger whose predecessor we "
2042                               << "don't have: " << have;
2043        peer->charge(Resource::feeRequestNoReply, "get_object ledger no parent");
2044        return;
2045    }
2046
2047    try
2048    {
2049        Serializer hdr(128);
2050
2051        protocol::TMGetObjectByHash reply;
2052        reply.set_query(false);
2053
2054        if (request->has_seq())
2055            reply.set_seq(request->seq());
2056
2057        reply.set_ledgerhash(request->ledgerhash());
2058        reply.set_type(protocol::TMGetObjectByHash::otFETCH_PACK);
2059
2060        // Building a fetch pack:
2061        //  1. Add the header for the requested ledger.
2062        //  2. Add the nodes for the AccountStateMap of that ledger.
2063        //  3. If there are transactions, add the nodes for the
2064        //     transactions of the ledger.
2065        //  4. If the FetchPack now contains at least 512 entries then stop.
2066        //  5. If not very much time has elapsed, then loop back and repeat
2067        //     the same process adding the previous ledger to the FetchPack.
2068        do
2069        {
2070            std::uint32_t lSeq = want->header().seq;
2071
2072            {
2073                // Serialize the ledger header:
2074                hdr.erase();
2075
2077                addRaw(want->header(), hdr);
2078
2079                // Add the data
2080                protocol::TMIndexedObject* obj = reply.add_objects();
2081                obj->set_hash(want->header().hash.data(), want->header().hash.size());
2082                obj->set_data(hdr.getDataPtr(), hdr.getLength());
2083                obj->set_ledgerseq(lSeq);
2084            }
2085
2086            populateFetchPack(want->stateMap(), &have->stateMap(), 16384, &reply, lSeq);
2087
2088            // We use nullptr here because transaction maps are per ledger
2089            // and so the requestor is unlikely to already have it.
2090            if (want->header().txHash.isNonZero())
2091                populateFetchPack(want->txMap(), nullptr, 512, &reply, lSeq);
2092
2093            if (reply.objects().size() >= 512)
2094                break;
2095
2096            // Step back one generation: what we "want" becomes "have".
2097            have = std::move(want);
2098            want = getLedgerByHash(have->header().parentHash);
2098        } while (want && UptimeClock::now() <= uptime + 1s);
2099
2100        auto msg = std::make_shared<Message>(reply, protocol::mtGET_OBJECTS);
2101
2102        JLOG(m_journal.info()) << "Built fetch pack with " << reply.objects().size() << " nodes ("
2103                               << msg->getBufferSize() << " bytes)";
2104
2105        peer->send(msg);
2106    }
2107    catch (std::exception const& ex)
2108    {
2109        JLOG(m_journal.warn()) << "Exception building fetch pack. Exception: " << ex.what();
2110    }
2111}
2112
2115{
2116 return fetch_packs_.getCacheSize();
2117}
2118
2119// Returns the minimum ledger sequence in SQL database, if any.
2125
2128{
2129 uint32_t first = 0, last = 0;
2130
2131 if (!getValidatedRange(first, last) || last < ledgerSeq)
2132 return {};
2133
2134 auto const lgr = getLedgerBySeq(ledgerSeq);
2135 if (!lgr || lgr->txs.empty())
2136 return {};
2137
2138 for (auto it = lgr->txs.begin(); it != lgr->txs.end(); ++it)
2139 if (it->first && it->second && it->second->isFieldPresent(sfTransactionIndex) &&
2140 it->second->getFieldU32(sfTransactionIndex) == txnIndex)
2141 return it->first->getTransactionID();
2142
2143 return {};
2144}
2145
2146} // namespace xrpl
T back(T... args)
T back_inserter(T... args)
T begin(T... args)
Represents a JSON value.
Definition json_value.h:130
Provide a light-weight way to check active() before string formatting.
Definition Journal.h:180
A generic endpoint for log messages.
Definition Journal.h:40
Stream fatal() const
Definition Journal.h:325
Stream error() const
Definition Journal.h:319
Stream debug() const
Definition Journal.h:301
Stream info() const
Definition Journal.h:307
Stream trace() const
Severity stream access functions.
Definition Journal.h:295
Stream warn() const
Definition Journal.h:313
typename Clock::time_point time_point
virtual bool hasUnsupportedEnabled() const =0
returns true if one or more amendments on the network have been enabled that this server does not sup...
void doValidatedLedger(std::shared_ptr< ReadView const > const &lastValidatedLedger)
Called when a new fully-validated ledger is accepted.
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual Config & config()=0
virtual LedgerIndex getMaxDisallowedLedger()=0
Ensure that a newly-started validator does not sign proposals older than the last ledger it persisted...
Holds transactions which were deferred to the next pass of consensus.
void insert(std::shared_ptr< STTx const > const &txn)
std::shared_ptr< STTx const > popAcctTransaction(std::shared_ptr< STTx const > const &tx)
std::unordered_set< uint256, beast::uhash<> > features
Definition Config.h:257
bool LEDGER_REPLAY
Definition Config.h:204
virtual bool isFailure(uint256 const &h)=0
virtual std::shared_ptr< Ledger const > acquire(uint256 const &hash, std::uint32_t seq, InboundLedger::Reason)=0
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition JobQueue.h:146
int getJobCount(JobType t) const
Jobs waiting at this priority.
Definition JobQueue.cpp:108
bool isStopping() const
Definition JobQueue.h:209
void validatedLedger(std::shared_ptr< Ledger const > const &, std::optional< uint256 > const &consensusHash)
Report that we have validated a particular ledger.
bool insert(std::shared_ptr< Ledger const > const &ledger, bool validated)
Track a ledger.
float getCacheHitRate()
Get the ledgers_by_hash cache hit rate.
LedgerHash getLedgerHash(LedgerIndex ledgerIndex)
Get a ledger's hash given its sequence number.
bool fixIndex(LedgerIndex ledgerIndex, LedgerHash const &ledgerHash)
Repair a hash to index mapping.
std::shared_ptr< Ledger const > getLedgerByHash(LedgerHash const &ledgerHash)
Retrieve a ledger given its hash.
void sweep()
Remove stale cache entries.
void builtLedger(std::shared_ptr< Ledger const > const &, uint256 const &consensusHash, Json::Value)
Report that we have locally built a particular ledger.
std::shared_ptr< Ledger const > getLedgerBySeq(LedgerIndex ledgerIndex)
Get a ledger given its sequence number.
void clearLedgerCachePrior(LedgerIndex seq)
void set(std::shared_ptr< Ledger const > ledger)
std::shared_ptr< Ledger const > get()
std::optional< LedgerIndex > minSqlSeq()
std::atomic< LedgerIndex > mValidLedgerSeq
void consensusBuilt(std::shared_ptr< Ledger const > const &ledger, uint256 const &consensusHash, Json::Value consensus)
Report that the consensus process built a particular ledger.
bool haveLedger(std::uint32_t seq)
TaggedCache< uint256, Blob > fetch_packs_
std::size_t getNeededValidations()
Determines how many validations are needed to fully validate a ledger.
bool isCompatible(ReadView const &, beast::Journal::Stream, char const *reason)
std::shared_ptr< STTx const > popAcctTransaction(std::shared_ptr< STTx const > const &tx)
Get the next transaction held for a particular account if any.
void setValidLedger(std::shared_ptr< Ledger const > const &l)
void switchLCL(std::shared_ptr< Ledger const > const &lastClosed)
std::recursive_mutex & peekMutex()
std::uint32_t fetch_seq_
std::chrono::seconds getValidatedLedgerAge()
TimeKeeper::time_point upgradeWarningPrevTime_
LedgerIndex getCurrentLedgerIndex()
bool fixIndex(LedgerIndex ledgerIndex, LedgerHash const &ledgerHash)
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
void applyHeldTransactions()
Apply held transactions to the open ledger This is normally called as we close the ledger.
bool storeLedger(std::shared_ptr< Ledger const > ledger)
void gotFetchPack(bool progress, std::uint32_t seq)
void tryFill(std::shared_ptr< Ledger const > ledger)
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
void setPubLedger(std::shared_ptr< Ledger const > const &l)
bool newPFWork(char const *name, std::unique_lock< std::recursive_mutex > &)
A thread needs to be dispatched to handle pathfinding work of some kind.
void setFullLedger(std::shared_ptr< Ledger const > const &ledger, bool isSynchronous, bool isCurrent)
void clearPriorLedgers(LedgerIndex seq)
LedgerIndex const max_ledger_difference_
CanonicalTXSet mHeldTransactions
std::uint32_t const ledger_fetch_size_
void setBuildingLedger(LedgerIndex index)
std::pair< uint256, LedgerIndex > mLastValidLedger
bool isCaughtUp(std::string &reason)
std::size_t getFetchPackCacheSize() const
std::optional< Blob > getFetchPack(uint256 const &hash) override
Retrieves partial ledger data of the corresponding hash from peers.
std::vector< std::shared_ptr< Ledger const > > findNewLedgersToPublish(std::unique_lock< std::recursive_mutex > &)
std::atomic< std::uint32_t > mPubLedgerClose
std::optional< NetClock::time_point > getCloseTimeByHash(LedgerHash const &ledgerHash, LedgerIndex ledgerIndex)
std::uint32_t const fetch_depth_
std::atomic< LedgerIndex > mPubLedgerSeq
void clearLedger(std::uint32_t seq)
void clearLedgerCachePrior(LedgerIndex seq)
std::atomic_flag mGotFetchPackThread
uint256 getHashBySeq(std::uint32_t index)
Get a ledger's hash by sequence number using the cache.
std::shared_ptr< Ledger const > getClosedLedger()
void setLedgerRangePresent(std::uint32_t minV, std::uint32_t maxV)
std::optional< NetClock::time_point > getCloseTimeBySeq(LedgerIndex ledgerIndex)
std::string getCompleteLedgers()
std::shared_ptr< Ledger const > getValidatedLedger()
void fetchForHistory(std::uint32_t missing, bool &progress, InboundLedger::Reason reason, std::unique_lock< std::recursive_mutex > &)
std::uint32_t const ledger_history_
bool isValidated(ReadView const &ledger)
void fixMismatch(ReadView const &ledger)
void makeFetchPack(std::weak_ptr< Peer > const &wPeer, std::shared_ptr< protocol::TMGetObjectByHash > const &request, uint256 haveLedgerHash, UptimeClock::time_point uptime)
LedgerIndex getValidLedgerIndex()
std::shared_ptr< Ledger const > mPathLedger
bool const standalone_
std::shared_ptr< ReadView const > getPublishedLedger()
std::shared_ptr< Ledger const > mHistLedger
std::optional< LedgerHash > walkHashBySeq(std::uint32_t index, InboundLedger::Reason reason)
Walk to a ledger's hash using the skip list.
LedgerHistory mLedgerHistory
LedgerMaster(Application &app, Stopwatch &stopwatch, beast::insight::Collector::ptr const &collector, beast::Journal journal)
std::atomic< std::uint32_t > mValidLedgerSign
std::chrono::seconds getPublishedLedgerAge()
std::optional< uint256 > txnIdFromIndex(uint32_t ledgerSeq, uint32_t txnIndex)
bool canBeCurrent(std::shared_ptr< Ledger const > const &ledger)
Check the sequence number and parent close time of a ledger against our clock and last validated ledg...
void addFetchPack(uint256 const &hash, std::shared_ptr< Blob > data)
bool getFullValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::optional< LedgerHash > getLedgerHashForHistory(LedgerIndex index, InboundLedger::Reason reason)
RangeSet< std::uint32_t > mCompleteLedgers
LedgerHolder mValidLedger
void checkAccept(std::shared_ptr< Ledger const > const &ledger)
LedgerHolder mClosedLedger
void doAdvance(std::unique_lock< std::recursive_mutex > &)
void addHeldTransaction(std::shared_ptr< Transaction > const &trans)
std::shared_ptr< ReadView const > getCurrentLedger()
beast::Journal m_journal
void takeReplay(std::unique_ptr< LedgerReplay > replay)
std::unique_ptr< LedgerReplay > replayData
std::unique_ptr< LedgerReplay > releaseReplay()
std::shared_ptr< Ledger const > getLedgerByHash(uint256 const &hash)
std::uint32_t getEarliestFetch()
std::recursive_mutex m_mutex
std::shared_ptr< Ledger const > mPubLedger
void failedSave(std::uint32_t seq, uint256 const &hash)
Application & app_
std::atomic< LedgerIndex > mBuildingLedgerSeq
std::recursive_mutex mCompleteLock
void replay(InboundLedger::Reason r, uint256 const &finishLedgerHash, std::uint32_t totalNumLedgers)
Replay a range of ledgers.
void setRemoteFee(std::uint32_t f)
bool isLoadedLocal() const
std::uint32_t getLoadBase() const
virtual void setAmendmentBlocked()=0
virtual bool isBlocked()=0
virtual void processTransactionSet(CanonicalTXSet const &set)=0
Process a set of transactions synchronously, and ensuring that they are processed in one batch.
virtual void updateLocalTx(ReadView const &newValidLedger)=0
virtual void clearAmendmentWarned()=0
virtual bool isAmendmentWarned()=0
virtual void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted)=0
virtual bool isNeedNetworkLedger()=0
virtual void clearNeedNetworkLedger()=0
virtual void setAmendmentWarned()=0
Persistency layer for NodeObject.
Definition Database.h:31
std::uint32_t earliestLedgerSeq() const noexcept
Definition Database.h:189
std::shared_ptr< NodeObject > fetchNodeObject(uint256 const &hash, std::uint32_t ledgerSeq=0, FetchType fetchType=FetchType::synchronous, bool duplicate=false)
Fetch a node object.
Definition Database.cpp:209
virtual std::int32_t getWriteLoad() const =0
Retrieve the estimated number of pending write operations.
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
virtual void setup(std::shared_ptr< ReadView const > const &ledger)=0
Initialize or update the order book database with a new ledger.
virtual void checkTracking(std::uint32_t index)=0
Calls the checkTracking function on each peer.
virtual PeerSequence getActivePeers() const =0
Returns a sequence representing the current list of peers.
bool requestsPending() const
std::map< LedgerIndex, bool > getSnapshot() const
Get a snapshot of the pending saves.
A view into a ledger.
Definition ReadView.h:31
virtual LedgerHeader const & header() const =0
Returns information about the ledger.
virtual bool open() const =0
Returns true if this reflects an open ledger.
virtual uint256 getHashByIndex(LedgerIndex ledgerIndex)=0
getHashByIndex Returns the hash of the ledger with the given sequence.
virtual std::optional< LedgerIndex > getMinLedgerSeq()=0
getMinLedgerSeq Returns the minimum ledger sequence in the Ledgers table.
virtual std::optional< LedgerHashPair > getHashesByIndex(LedgerIndex ledgerIndex)=0
getHashesByIndex Returns the hashes of the ledger and its parent as specified by the ledgerIndex.
Rules controlling protocol behavior.
Definition Rules.h:18
uint256 const & as_uint256() const
Definition SHAMapHash.h:24
virtual std::optional< LedgerIndex > minimumOnline() const =0
The minimum ledger to try and maintain in our database.
virtual void onLedgerClosed(std::shared_ptr< Ledger const > const &ledger)=0
Called by LedgerMaster every time a ledger validates.
SHAMapHash const & getHash() const
Return the hash of this node.
virtual void serializeWithPrefix(Serializer &) const =0
Serialize the node in a format appropriate for hashing.
virtual bool isLeaf() const =0
Determines if this is a leaf node.
A SHAMap is both a radix tree with a fan-out of 16 and a Merkle tree.
Definition SHAMap.h:77
void visitDifferences(SHAMap const *have, std::function< bool(SHAMapTreeNode const &)> const &) const
Visit every node in this SHAMap that is not present in the specified SHAMap.
void skip(int num)
std::uint32_t get32()
void const * getDataPtr() const
Definition Serializer.h:197
int getLength() const
Definition Serializer.h:207
virtual JobQueue & getJobQueue()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual AmendmentTable & getAmendmentTable()=0
virtual PendingSaves & pendingSaves()=0
virtual ValidatorList & validators()=0
virtual NetworkOPs & getOPs()=0
virtual SHAMapStore & getSHAMapStore()=0
virtual Overlay & overlay()=0
virtual NodeStore::Database & getNodeStore()=0
virtual PathRequests & getPathRequests()=0
virtual RCLValidations & getValidations()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual OpenLedger & openLedger()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual LedgerMaster & getLedgerMaster()=0
virtual bool isStopping() const =0
virtual LedgerReplayer & getLedgerReplayer()=0
virtual TimeKeeper & timeKeeper()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual beast::Journal journal(std::string const &name)=0
time_point now() const override
Returns the current time, using the server's clock.
Definition TimeKeeper.h:44
time_point closeTime() const
Returns the predicted close time, in network time.
Definition TimeKeeper.h:56
static time_point now()
std::vector< WrappedValidationType > getTrustedForLedger(ID const &ledgerID, Seq const &seq)
Get trusted full validations for a specific ledger.
std::vector< WrappedValidationType > currentTrusted()
Get the currently trusted full validations.
std::vector< std::uint32_t > fees(ID const &ledgerID, std::uint32_t baseFee)
Returns fees reported by trusted full validators in the given ledger.
QuorumKeys getQuorumKeys() const
Get the quorum and all of the trusted keys.
std::size_t quorum() const
Get quorum value for current trusted key set.
std::vector< std::shared_ptr< STValidation > > negativeUNLFilter(std::vector< std::shared_ptr< STValidation > > &&validations) const
Remove validations that are from validators on the negative UNL.
bool isNonZero() const
Definition base_uint.h:516
Automatically unlocks and re-locks a unique_lock object.
Definition scope.h:202
T clear(T... args)
T copy(T... args)
T count(T... args)
T empty(T... args)
T end(T... args)
T endl(T... args)
T find(T... args)
T is_same_v
T load(T... args)
T lock(T... args)
T make_pair(T... args)
T max(T... args)
T min(T... args)
STL namespace.
bool isNewerVersion(std::uint64_t version)
Check if the version is newer than the local node's rippled software version.
bool isRippledVersion(std::uint64_t version)
Check if the encoded software version is a rippled software version.
Charge const feeMalformedRequest
Schedule of fees charged for imposing load on the server.
Charge const feeRequestNoReply
TER valid(STTx const &tx, ReadView const &view, AccountID const &src, beast::Journal j)
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition algorithm.h:5
bool set(T &target, std::string const &name, Section const &section)
Set a value from a configuration Section If the named value is not found or doesn't parse as a T,...
bool isCurrent(ValidationParms const &p, NetClock::time_point now, NetClock::time_point signTime, NetClock::time_point seenTime)
Whether a validation is still current.
void LogicError(std::string const &how) noexcept
Called when faulty logic causes a broken invariant.
std::optional< T > prevMissing(RangeSet< T > const &rs, T t, T minVal=0)
Find the largest value not in the set that is less than a given value.
Definition RangeSet.h:163
static constexpr std::chrono::minutes MAX_LEDGER_AGE_ACQUIRE
sha512_half_hasher::result_type sha512Half(Args const &... args)
Returns the SHA512-Half of a series of objects.
Definition digest.h:204
Stopwatch & stopwatch()
Returns an instance of a wall clock.
Definition chrono.h:94
ClosedInterval< T > range(T low, T high)
Create a closed range interval.
Definition RangeSet.h:34
std::string to_string(base_uint< Bits, Tag > const &a)
Definition base_uint.h:600
constexpr std::size_t calculatePercent(std::size_t count, std::size_t total)
Calculate one number divided by another number in percentage.
static void populateFetchPack(SHAMap const &want, SHAMap const *have, std::uint32_t cnt, protocol::TMGetObjectByHash *into, std::uint32_t seq, bool withLeaves=true)
Populate a fetch pack with data from the map the recipient wants.
bool pendSaveValidated(Application &app, std::shared_ptr< Ledger const > const &ledger, bool isSynchronous, bool isCurrent)
Save, or arrange to save, a fully-validated ledger Returns false on error.
Definition Ledger.cpp:936
SizedItem
Definition Config.h:26
bool areCompatible(ReadView const &validLedger, ReadView const &testLedger, beast::Journal::Stream &s, char const *reason)
Return false if the test ledger is provably incompatible with the valid ledger, that is,...
Definition View.cpp:802
LedgerIndex getCandidateLedger(LedgerIndex requested)
Find a ledger index from which we could easily get the requested ledger.
Definition View.h:446
std::optional< uint256 > hashOfSeq(ReadView const &ledger, LedgerIndex seq, beast::Journal journal)
Return the hash of a ledger by sequence.
Definition View.cpp:953
@ jtLEDGER_DATA
Definition Job.h:45
@ jtUPDATE_PF
Definition Job.h:35
@ jtPUBOLDLEDGER
Definition Job.h:23
@ jtADVANCE
Definition Job.h:46
void addRaw(LedgerHeader const &, Serializer &, bool includeHash=false)
static constexpr int MAX_LEDGER_GAP
@ ledgerMaster
ledger master data for signing
std::enable_if_t< std::is_same< T, char >::value||std::is_same< T, unsigned char >::value, Slice > makeSlice(std::array< T, N > const &a)
Definition Slice.h:215
static constexpr int MAX_WRITE_LOAD_ACQUIRE
static bool shouldAcquire(std::uint32_t const currentLedger, std::uint32_t const ledgerHistory, std::optional< LedgerIndex > const minimumOnline, std::uint32_t const candidateLedger, beast::Journal j)
T has_value(T... args)
T push_back(T... args)
T reserve(T... args)
T size(T... args)
T sort(T... args)
T str(T... args)
T swap(T... args)
T test_and_set(T... args)
T time_since_epoch(T... args)
T what(T... args)