rippled
LedgerMaster.cpp
1#include <xrpld/app/consensus/RCLValidations.h>
2#include <xrpld/app/ledger/Ledger.h>
3#include <xrpld/app/ledger/LedgerMaster.h>
4#include <xrpld/app/ledger/LedgerReplayer.h>
5#include <xrpld/app/ledger/OpenLedger.h>
6#include <xrpld/app/ledger/OrderBookDB.h>
7#include <xrpld/app/ledger/PendingSaves.h>
8#include <xrpld/app/main/Application.h>
9#include <xrpld/app/misc/AmendmentTable.h>
10#include <xrpld/app/misc/LoadFeeTrack.h>
11#include <xrpld/app/misc/NetworkOPs.h>
12#include <xrpld/app/misc/SHAMapStore.h>
13#include <xrpld/app/misc/Transaction.h>
14#include <xrpld/app/misc/TxQ.h>
15#include <xrpld/app/misc/ValidatorList.h>
16#include <xrpld/app/paths/PathRequests.h>
17#include <xrpld/app/rdb/RelationalDatabase.h>
18#include <xrpld/core/TimeKeeper.h>
19#include <xrpld/overlay/Overlay.h>
20#include <xrpld/overlay/Peer.h>
21
22#include <xrpl/basics/Log.h>
23#include <xrpl/basics/MathUtilities.h>
24#include <xrpl/basics/UptimeClock.h>
25#include <xrpl/basics/contract.h>
26#include <xrpl/basics/safe_cast.h>
27#include <xrpl/basics/scope.h>
28#include <xrpl/beast/utility/instrumentation.h>
29#include <xrpl/protocol/BuildInfo.h>
30#include <xrpl/protocol/HashPrefix.h>
31#include <xrpl/protocol/digest.h>
32#include <xrpl/resource/Fees.h>
33
34#include <algorithm>
35#include <chrono>
36#include <cstdlib>
37#include <memory>
38#include <vector>
39
40namespace xrpl {
41
42// Don't catch up more than 100 ledgers (cannot exceed 256)
43static constexpr int MAX_LEDGER_GAP{100};
44
45// Don't acquire history if ledger is too old
46static constexpr std::chrono::minutes MAX_LEDGER_AGE_ACQUIRE{1};
47
48// Don't acquire history if write load is too high
49static constexpr int MAX_WRITE_LOAD_ACQUIRE{8192};
50
51// Helper function for LedgerMaster::doAdvance()
52// Return true if candidateLedger should be fetched from the network.
53static bool
54shouldAcquire(
55 std::uint32_t const currentLedger,
56 std::uint32_t const ledgerHistory,
57 std::optional<LedgerIndex> const minimumOnline,
58 std::uint32_t const candidateLedger,
59 beast::Journal& j)
60{
61 bool const ret = [&]() {
62 // Fetch ledger if it may be the current ledger
63 if (candidateLedger >= currentLedger)
64 return true;
65
66 // Or if it is within our configured history range:
67 if (currentLedger - candidateLedger <= ledgerHistory)
68 return true;
69
70 // Or if greater than or equal to a specific minimum ledger.
71 // Do nothing if the minimum ledger to keep online is unknown.
72 return minimumOnline.has_value() && candidateLedger >= *minimumOnline;
73 }();
74
75 JLOG(j.trace()) << "Missing ledger " << candidateLedger << (ret ? " should" : " should NOT") << " be acquired";
76 return ret;
77}
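// Example: with currentLedger = 1000, ledgerHistory = 256 and no
// minimumOnline configured, a missing ledger 800 should be acquired
// (gap of 200 <= 256) while ledger 600 should not (gap of 400 > 256).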
78
79LedgerMaster::LedgerMaster(
80 Application& app,
81 Stopwatch& stopwatch,
82 beast::insight::Collector::ptr const& collector,
83 beast::Journal journal)
84 : app_(app)
85 , m_journal(journal)
86 , mLedgerHistory(collector, app)
87 , standalone_(app_.config().standalone())
88 , fetch_depth_(app_.getSHAMapStore().clampFetchDepth(app_.config().FETCH_DEPTH))
89 , ledger_history_(app_.config().LEDGER_HISTORY)
90 , ledger_fetch_size_(app_.config().getValueFor(SizedItem::ledgerFetch))
91 , fetch_packs_("FetchPack", 65536, std::chrono::seconds{45}, stopwatch, app_.journal("TaggedCache"))
92 , m_stats(std::bind(&LedgerMaster::collect_metrics, this), collector)
93{
94}
95
96LedgerIndex
97LedgerMaster::getCurrentLedgerIndex()
98{
99 return app_.openLedger().current()->header().seq;
100}
101
102LedgerIndex
103LedgerMaster::getValidLedgerIndex()
104{
105 return mValidLedgerSeq;
106}
107
108bool
109LedgerMaster::isCompatible(ReadView const& view, beast::Journal::Stream s, char const* reason)
110{
111 auto validLedger = getValidatedLedger();
112
113 if (validLedger && !areCompatible(*validLedger, view, s, reason))
114 {
115 return false;
116 }
117
118 {
119 std::lock_guard sl(m_mutex);
120
121 if ((mLastValidLedger.second != 0) &&
122 !areCompatible(mLastValidLedger.first, mLastValidLedger.second, view, s, reason))
123 {
124 return false;
125 }
126 }
127
128 return true;
129}
130
131std::chrono::seconds
132LedgerMaster::getPublishedLedgerAge()
133{
134 using namespace std::chrono_literals;
135 std::chrono::seconds pubClose{mPubLedgerClose.load()};
136 if (pubClose == 0s)
137 {
138 JLOG(m_journal.debug()) << "No published ledger";
139 return weeks{2};
140 }
141
142 std::chrono::seconds ret = app_.timeKeeper().closeTime().time_since_epoch();
143 ret -= pubClose;
144 ret = (ret > 0s) ? ret : 0s;
145 static std::chrono::seconds lastRet = -1s;
146
147 if (ret != lastRet)
148 {
149 JLOG(m_journal.trace()) << "Published ledger age is " << ret.count();
150 lastRet = ret;
151 }
152 return ret;
153}
154
155std::chrono::seconds
156LedgerMaster::getValidatedLedgerAge()
157{
158 using namespace std::chrono_literals;
159
160 std::chrono::seconds valClose{mValidLedgerSign.load()};
161 if (valClose == 0s)
162 {
163 JLOG(m_journal.debug()) << "No validated ledger";
164 return weeks{2};
165 }
166
167 std::chrono::seconds ret = app_.timeKeeper().closeTime().time_since_epoch();
168 ret -= valClose;
169 ret = (ret > 0s) ? ret : 0s;
170 static std::chrono::seconds lastRet = -1s;
171
172 if (ret != lastRet)
173 {
174 JLOG(m_journal.trace()) << "Validated ledger age is " << ret.count();
175 lastRet = ret;
176 }
177 return ret;
178}
179
180bool
181LedgerMaster::isCaughtUp(std::string& reason)
182{
183 using namespace std::chrono_literals;
184
185 if (getPublishedLedgerAge() > 3min)
186 {
187 reason = "No recently-published ledger";
188 return false;
189 }
190 std::uint32_t validClose = mValidLedgerSign.load();
191 std::uint32_t pubClose = mPubLedgerClose.load();
192 if (!validClose || !pubClose)
193 {
194 reason = "No published ledger";
195 return false;
196 }
197 if (validClose > (pubClose + 90))
198 {
199 reason = "Published ledger lags validated ledger";
200 return false;
201 }
202 return true;
203}
204
205void
206LedgerMaster::setValidLedger(std::shared_ptr<Ledger const> const& l)
207{
208 std::vector<NetClock::time_point> times;
209 std::optional<uint256> consensusHash;
210
211 if (!standalone_)
212 {
213 auto validations = app_.validators().negativeUNLFilter(
214 app_.getValidations().getTrustedForLedger(l->header().hash, l->header().seq));
215 times.reserve(validations.size());
216 for (auto const& val : validations)
217 times.push_back(val->getSignTime());
218
219 if (!validations.empty())
220 consensusHash = validations.front()->getConsensusHash();
221 }
222
223 NetClock::time_point signTime;
224
225 if (!times.empty() && times.size() >= app_.validators().quorum())
226 {
227 // Calculate the sample median
228 std::sort(times.begin(), times.end());
229 auto const t0 = times[(times.size() - 1) / 2];
230 auto const t1 = times[times.size() / 2];
231 signTime = t0 + (t1 - t0) / 2;
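        // With an odd number of samples t0 == t1, so this picks the middle
        // value; with an even number it is the midpoint of the two middle
        // values.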
232 }
233 else
234 {
235 signTime = l->header().closeTime;
236 }
237
238 mValidLedger.set(l);
239 mValidLedgerSign = signTime.time_since_epoch().count();
240 XRPL_ASSERT(
243 "xrpl::LedgerMaster::setValidLedger : valid ledger sequence");
245 mValidLedgerSeq = l->header().seq;
246
249 mLedgerHistory.validatedLedger(l, consensusHash);
251 if (!app_.getOPs().isBlocked())
252 {
253 if (app_.getAmendmentTable().hasUnsupportedEnabled())
254 {
255 JLOG(m_journal.error()) << "One or more unsupported amendments "
256 "activated: server blocked.";
257 app_.getOPs().setAmendmentBlocked();
258 }
259 else if (!app_.getOPs().isAmendmentWarned() || l->isFlagLedger())
260 {
261 // Amendments can lose majority, so re-check periodically (every
262 // flag ledger), and clear the flag if appropriate. If an unknown
263 // amendment gains majority log a warning as soon as it's
264 // discovered, then again every flag ledger until the operator
265 // upgrades, the amendment loses majority, or the amendment goes
266 // live and the node gets blocked. Unlike being amendment blocked,
267 // this message may be logged more than once per session, because
268 // the node will otherwise function normally, and this gives
269 // operators an opportunity to see and resolve the warning.
270 if (auto const first = app_.getAmendmentTable().firstUnsupportedExpected())
271 {
272 JLOG(m_journal.error()) << "One or more unsupported amendments "
273 "reached majority. Upgrade before "
274 << to_string(*first)
275 << " to prevent your server from "
276 "becoming amendment blocked.";
277 app_.getOPs().setAmendmentWarned();
278 }
279 else
280 app_.getOPs().clearAmendmentWarned();
281 }
282 }
283}
284
285void
287{
288 mPubLedger = l;
289 mPubLedgerClose = l->header().closeTime.time_since_epoch().count();
290 mPubLedgerSeq = l->header().seq;
291}
292
293void
295{
297 mHeldTransactions.insert(transaction->getSTransaction());
298}
299
300// Validate a ledger's close time and sequence number if we're considering
301// jumping to that ledger. This helps defend against some rare hostile or
302// diverged majority scenarios.
303bool
305{
306 XRPL_ASSERT(ledger, "xrpl::LedgerMaster::canBeCurrent : non-null input");
307
308 // Never jump to a candidate ledger that precedes our
309 // last validated ledger
310
311 auto validLedger = getValidatedLedger();
312 if (validLedger && (ledger->header().seq < validLedger->header().seq))
313 {
314 JLOG(m_journal.trace()) << "Candidate for current ledger has low seq " << ledger->header().seq << " < "
315 << validLedger->header().seq;
316 return false;
317 }
318
319 // Ensure this ledger's parent close time is within five minutes of
320 // our current time. If we already have a known fully-valid ledger
321 // we perform this check. Otherwise, we only do it if we've built a
322 // few ledgers as our clock can be off when we first start up
323
324 auto closeTime = app_.timeKeeper().closeTime();
325 auto ledgerClose = ledger->header().parentCloseTime;
326
327 using namespace std::chrono_literals;
328 if ((validLedger || (ledger->header().seq > 10)) &&
329 ((std::max(closeTime, ledgerClose) - std::min(closeTime, ledgerClose)) > 5min))
330 {
331 JLOG(m_journal.warn()) << "Candidate for current ledger has close time " << to_string(ledgerClose)
332 << " at network time " << to_string(closeTime) << " seq " << ledger->header().seq;
333 return false;
334 }
335
336 if (validLedger)
337 {
338 // Sequence number must not be too high. We allow ten ledgers
339 // for time inaccuracies plus a maximum run rate of one ledger
340 // every two seconds. The goal is to prevent a malicious ledger
341 // from increasing our sequence unreasonably high
342
343 LedgerIndex maxSeq = validLedger->header().seq + 10;
344
345 if (closeTime > validLedger->header().parentCloseTime)
346 maxSeq +=
347 std::chrono::duration_cast<std::chrono::seconds>(closeTime - validLedger->header().parentCloseTime)
348 .count() /
349 2;
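        // e.g. if 60 seconds have elapsed since the valid ledger's parent
        // close, maxSeq becomes the valid ledger's seq + 10 + 30.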
350
351 if (ledger->header().seq > maxSeq)
352 {
353 JLOG(m_journal.warn()) << "Candidate for current ledger has high seq " << ledger->header().seq << " > "
354 << maxSeq;
355 return false;
356 }
357
358 JLOG(m_journal.trace()) << "Acceptable seq range: " << validLedger->header().seq
359 << " <= " << ledger->header().seq << " <= " << maxSeq;
360 }
361
362 return true;
363}
364
365void
367{
368 XRPL_ASSERT(lastClosed, "xrpl::LedgerMaster::switchLCL : non-null input");
369 if (!lastClosed->isImmutable())
370 LogicError("mutable ledger in switchLCL");
371
372 if (lastClosed->open())
373 LogicError("The new last closed ledger is open!");
374
375 {
377 mClosedLedger.set(lastClosed);
378 }
379
380 if (standalone_)
381 {
382 setFullLedger(lastClosed, true, false);
383 tryAdvance();
384 }
385 else
386 {
387 checkAccept(lastClosed);
388 }
389}
390
391bool
392LedgerMaster::fixIndex(LedgerIndex ledgerIndex, LedgerHash const& ledgerHash)
393{
394 return mLedgerHistory.fixIndex(ledgerIndex, ledgerHash);
395}
396
397bool
399{
400 bool validated = ledger->header().validated;
401 // Returns true if we already had the ledger
402 return mLedgerHistory.insert(std::move(ledger), validated);
403}
404
410void
412{
413 CanonicalTXSet const set = [this]() {
415 // VFALCO NOTE The hash for an open ledger is undefined so we use
416 // something that is a reasonable substitute.
417 CanonicalTXSet set(app_.openLedger().current()->header().parentHash);
419 return set;
420 }();
421
422 if (!set.empty())
424}
425
433
434void
439
440bool
442{
444 return boost::icl::contains(mCompleteLedgers, seq);
445}
446
447void
453
454bool
456{
457 if (ledger.open())
458 return false;
459
460 if (ledger.header().validated)
461 return true;
462
463 auto const seq = ledger.header().seq;
464 try
465 {
466 // Use the skip list in the last validated ledger to see if ledger
467 // comes before the last validated ledger (and thus has been
468 // validated).
469 auto const hash = walkHashBySeq(seq, InboundLedger::Reason::GENERIC);
470
471 if (!hash || ledger.header().hash != *hash)
472 {
473 // This ledger's hash is not the hash of the validated ledger
474 if (hash)
475 {
476 XRPL_ASSERT(hash->isNonZero(), "xrpl::LedgerMaster::isValidated : nonzero hash");
478 if (valHash == ledger.header().hash)
479 {
480 // SQL database doesn't match ledger chain
481 clearLedger(seq);
482 }
483 }
484 return false;
485 }
486 }
487 catch (SHAMapMissingNode const& mn)
488 {
489 JLOG(m_journal.warn()) << "Ledger #" << seq << ": " << mn.what();
490 return false;
491 }
492
493 // Mark ledger as validated to save time if we see it again.
494 ledger.header().validated = true;
495 return true;
496}
497
498// returns Ledgers we have all the nodes for
499bool
501{
502 // Validated ledger is likely not stored in the DB yet so we use the
503 // published ledger which is.
504 maxVal = mPubLedgerSeq.load();
505
506 if (!maxVal)
507 return false;
508
510 {
512 maybeMin = prevMissing(mCompleteLedgers, maxVal);
513 }
514
515 if (maybeMin == std::nullopt)
516 minVal = maxVal;
517 else
518 minVal = 1 + *maybeMin;
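    // prevMissing() reports the highest sequence below maxVal that we do
    // not have, so the contiguous complete range starts one past it and
    // runs up to maxVal.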
519
520 return true;
521}
522
523// Returns Ledgers we have all the nodes for and are indexed
524bool
526{
527 if (!getFullValidatedRange(minVal, maxVal))
528 return false;
529
530 // Remove from the validated range any ledger sequences that may not be
531 // fully updated in the database yet
532
533 auto const pendingSaves = app_.pendingSaves().getSnapshot();
534
535 if (!pendingSaves.empty() && ((minVal != 0) || (maxVal != 0)))
536 {
537 // Ensure we shrink the tips as much as possible. If we have 7-9 and
538 // 8,9 are invalid, we don't want to see the 8 and shrink to just 9
539 // because then we'll have nothing when we could have 7.
540 while (pendingSaves.count(maxVal) > 0)
541 --maxVal;
542 while (pendingSaves.count(minVal) > 0)
543 ++minVal;
544
545 // Best effort for remaining exclusions
546 for (auto v : pendingSaves)
547 {
548 if ((v.first >= minVal) && (v.first <= maxVal))
549 {
550 if (v.first > ((minVal + maxVal) / 2))
551 maxVal = v.first - 1;
552 else
553 minVal = v.first + 1;
554 }
555 }
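        // e.g. with [minVal, maxVal] = [50, 100], a pending save at 80
        // trims the range to [50, 79], while one at 60 trims it to [61, 100].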
556
557 if (minVal > maxVal)
558 minVal = maxVal = 0;
559 }
560
561 return true;
562}
563
564// Get the earliest ledger we will let peers fetch
567{
568 // The earliest ledger we will let people fetch is ledger zero,
569 // unless that creates a larger range than allowed
570 std::uint32_t e = getClosedLedger()->header().seq;
571
572 if (e > fetch_depth_)
573 e -= fetch_depth_;
574 else
575 e = 0;
576 return e;
577}
578
579void
581{
582 std::uint32_t seq = ledger->header().seq;
583 uint256 prevHash = ledger->header().parentHash;
584
586
587 std::uint32_t minHas = seq;
588 std::uint32_t maxHas = seq;
589
591 while (!app_.getJobQueue().isStopping() && seq > 0)
592 {
593 {
595 minHas = seq;
596 --seq;
597
598 if (haveLedger(seq))
599 break;
600 }
601
602 auto it(ledgerHashes.find(seq));
603
604 if (it == ledgerHashes.end())
605 {
606 if (app_.isStopping())
607 return;
608
609 {
611 mCompleteLedgers.insert(range(minHas, maxHas));
612 }
613 maxHas = minHas;
614 ledgerHashes = app_.getRelationalDatabase().getHashesByIndex((seq < 500) ? 0 : (seq - 499), seq);
615 it = ledgerHashes.find(seq);
616
617 if (it == ledgerHashes.end())
618 break;
619
620 if (!nodeStore.fetchNodeObject(ledgerHashes.begin()->second.ledgerHash, ledgerHashes.begin()->first))
621 {
622 // The ledger is not backed by the node store
623 JLOG(m_journal.warn()) << "SQL DB ledger sequence " << seq << " mismatches node store";
624 break;
625 }
626 }
627
628 if (it->second.ledgerHash != prevHash)
629 break;
630
631 prevHash = it->second.parentHash;
632 }
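    // The walk back stops as soon as the SQL hash chain, the node store and
    // the ledgers we already have stop agreeing; whatever was confirmed is
    // recorded as complete below.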
633
634 {
636 mCompleteLedgers.insert(range(minHas, maxHas));
637 }
638 {
640 mFillInProgress = 0;
641 tryAdvance();
642 }
643}
644
647void
649{
650 LedgerIndex const ledgerIndex = missing + 1;
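    // A fetch pack is requested by the hash of the ledger *after* the
    // missing one; the responding peer walks from that ledger back through
    // its parents and returns the nodes needed to build them (see
    // makeFetchPack below).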
651
652 auto const haveHash{getLedgerHashForHistory(ledgerIndex, reason)};
653 if (!haveHash || haveHash->isZero())
654 {
655 JLOG(m_journal.error()) << "No hash for fetch pack. Missing Index " << missing;
656 return;
657 }
658
659 // Select target Peer based on highest score. The score is randomized
660 // but biased in favor of Peers with low latency.
662 {
663 int maxScore = 0;
664 auto peerList = app_.overlay().getActivePeers();
665 for (auto const& peer : peerList)
666 {
667 if (peer->hasRange(missing, missing + 1))
668 {
669 int score = peer->getScore(true);
670 if (!target || (score > maxScore))
671 {
672 target = peer;
673 maxScore = score;
674 }
675 }
676 }
677 }
678
679 if (target)
680 {
681 protocol::TMGetObjectByHash tmBH;
682 tmBH.set_query(true);
683 tmBH.set_type(protocol::TMGetObjectByHash::otFETCH_PACK);
684 tmBH.set_ledgerhash(haveHash->begin(), 32);
685 auto packet = std::make_shared<Message>(tmBH, protocol::mtGET_OBJECTS);
686
687 target->send(packet);
688 JLOG(m_journal.trace()) << "Requested fetch pack for " << missing;
689 }
690 else
691 JLOG(m_journal.debug()) << "No peer for fetch pack";
692}
693
694void
696{
697 int invalidate = 0;
699
700 for (std::uint32_t lSeq = ledger.header().seq - 1; lSeq > 0; --lSeq)
701 {
702 if (haveLedger(lSeq))
703 {
704 try
705 {
706 hash = hashOfSeq(ledger, lSeq, m_journal);
707 }
708 catch (std::exception const& ex)
709 {
710 JLOG(m_journal.warn()) << "fixMismatch encounters partial ledger. Exception: " << ex.what();
711 clearLedger(lSeq);
712 return;
713 }
714
715 if (hash)
716 {
717 // try to close the seam
718 auto otherLedger = getLedgerBySeq(lSeq);
719
720 if (otherLedger && (otherLedger->header().hash == *hash))
721 {
722 // we closed the seam
723 if (invalidate != 0)
724 {
725 JLOG(m_journal.warn())
726 << "Match at " << lSeq << ", " << invalidate << " prior ledgers invalidated";
727 }
728
729 return;
730 }
731 }
732
733 clearLedger(lSeq);
734 ++invalidate;
735 }
736 }
737
738 // all prior ledgers invalidated
739 if (invalidate != 0)
740 {
741 JLOG(m_journal.warn()) << "All " << invalidate << " prior ledgers invalidated";
742 }
743}
744
745void
747{
748 // A new ledger has been accepted as part of the trusted chain
749 JLOG(m_journal.debug()) << "Ledger " << ledger->header().seq << " accepted :" << ledger->header().hash;
750 XRPL_ASSERT(
751 ledger->stateMap().getHash().isNonZero(), "xrpl::LedgerMaster::setFullLedger : nonzero ledger state hash");
752
753 ledger->setValidated();
754 ledger->setFull();
755
756 if (isCurrent)
757 mLedgerHistory.insert(ledger, true);
758
759 {
760 // Check the SQL database's entry for the sequence before this
761 // ledger, if it's not this ledger's parent, invalidate it
762 uint256 prevHash = app_.getRelationalDatabase().getHashByIndex(ledger->header().seq - 1);
763 if (prevHash.isNonZero() && prevHash != ledger->header().parentHash)
764 clearLedger(ledger->header().seq - 1);
765 }
766
767 pendSaveValidated(app_, ledger, isSynchronous, isCurrent);
768
769 {
771 mCompleteLedgers.insert(ledger->header().seq);
772 }
773
774 {
776
777 if (ledger->header().seq > mValidLedgerSeq)
778 setValidLedger(ledger);
779 if (!mPubLedger)
780 {
781 setPubLedger(ledger);
782 app_.getOrderBookDB().setup(ledger);
783 }
784
785 if (ledger->header().seq != 0 && haveLedger(ledger->header().seq - 1))
786 {
787 // we think we have the previous ledger, double check
788 auto prevLedger = getLedgerBySeq(ledger->header().seq - 1);
789
790 if (!prevLedger || (prevLedger->header().hash != ledger->header().parentHash))
791 {
792 JLOG(m_journal.warn()) << "Acquired ledger invalidates previous ledger: "
793 << (prevLedger ? "hashMismatch" : "missingLedger");
794 fixMismatch(*ledger);
795 }
796 }
797 }
798}
799
800void
806
807// Check if the specified ledger can become the new last fully-validated
808// ledger.
809void
811{
812 std::size_t valCount = 0;
813
814 if (seq != 0)
815 {
816 // Ledger is too old
817 if (seq < mValidLedgerSeq)
818 return;
819
820 auto validations = app_.validators().negativeUNLFilter(app_.getValidations().getTrustedForLedger(hash, seq));
821 valCount = validations.size();
822 if (valCount >= app_.validators().quorum())
823 {
825 if (seq > mLastValidLedger.second)
826 mLastValidLedger = std::make_pair(hash, seq);
827 }
828
829 if (seq == mValidLedgerSeq)
830 return;
831
832 // Ledger could match the ledger we're already building
833 if (seq == mBuildingLedgerSeq)
834 return;
835 }
836
837 auto ledger = mLedgerHistory.getLedgerByHash(hash);
838
839 if (!ledger)
840 {
841 if ((seq != 0) && (getValidLedgerIndex() == 0))
842 {
843 // Set peers converged early if we can
844 if (valCount >= app_.validators().quorum())
846 }
847
848 // FIXME: We may not want to fetch a ledger with just one
849 // trusted validation
851 }
852
853 if (ledger)
854 checkAccept(ledger);
855}
856
867
868void
870{
871 // Can we accept this ledger as our new last fully-validated ledger
872
873 if (!canBeCurrent(ledger))
874 return;
875
876 // Can we advance the last fully-validated ledger? If so, can we
877 // publish?
879
880 if (ledger->header().seq <= mValidLedgerSeq)
881 return;
882
883 auto const minVal = getNeededValidations();
884 auto validations = app_.validators().negativeUNLFilter(
885 app_.getValidations().getTrustedForLedger(ledger->header().hash, ledger->header().seq));
886 auto const tvc = validations.size();
887 if (tvc < minVal) // nothing we can do
888 {
889 JLOG(m_journal.trace()) << "Only " << tvc << " validations for " << ledger->header().hash;
890 return;
891 }
892
893 JLOG(m_journal.info()) << "Advancing accepted ledger to " << ledger->header().seq << " with >= " << minVal
894 << " validations";
895
896 ledger->setValidated();
897 ledger->setFull();
898 setValidLedger(ledger);
899 if (!mPubLedger)
900 {
901 pendSaveValidated(app_, ledger, true, true);
902 setPubLedger(ledger);
903 app_.getOrderBookDB().setup(ledger);
904 }
905
906 std::uint32_t const base = app_.getFeeTrack().getLoadBase();
907 auto fees = app_.getValidations().fees(ledger->header().hash, base);
908 {
909 auto fees2 = app_.getValidations().fees(ledger->header().parentHash, base);
910 fees.reserve(fees.size() + fees2.size());
911 std::copy(fees2.begin(), fees2.end(), std::back_inserter(fees));
912 }
913 std::uint32_t fee;
914 if (!fees.empty())
915 {
916 std::sort(fees.begin(), fees.end());
917 if (auto stream = m_journal.debug())
918 {
920 s << "Received fees from validations: (" << fees.size() << ") ";
921 for (auto const fee1 : fees)
922 {
923 s << " " << fee1;
924 }
925 stream << s.str();
926 }
927 fee = fees[fees.size() / 2]; // median
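        // e.g. sorted fee votes {10, 10, 12, 256, 256} yield fees[2] == 12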
928 }
929 else
930 {
931 fee = base;
932 }
933
934 app_.getFeeTrack().setRemoteFee(fee);
935
936 tryAdvance();
937
938 if (ledger->seq() % 256 == 0)
939 {
940 // Check if the majority of validators run a higher version rippled
941 // software. If so print a warning.
942 //
943 // Validators include their rippled software version in the validation
944 // messages of every (flag - 1) ledger. We wait for one ledger time
945 // before checking the version information to accumulate more validation
946 // messages.
947
948 auto currentTime = app_.timeKeeper().now();
949 bool needPrint = false;
950
951 // The variable upgradeWarningPrevTime_ will be set when and only when
952 // the warning is printed.
954 {
955 // Have not printed the warning before, check if need to print.
956 auto const vals =
957 app_.getValidations().getTrustedForLedger(ledger->header().parentHash, ledger->header().seq - 1);
958 std::size_t higherVersionCount = 0;
959 std::size_t rippledCount = 0;
960 for (auto const& v : vals)
961 {
962 if (v->isFieldPresent(sfServerVersion))
963 {
964 auto version = v->getFieldU64(sfServerVersion);
965 higherVersionCount += BuildInfo::isNewerVersion(version) ? 1 : 0;
966 rippledCount += BuildInfo::isRippledVersion(version) ? 1 : 0;
967 }
968 }
969 // We report only if (1) we have accumulated validation messages
970 // from 90% validators from the UNL, (2) 60% of validators
971 // running the rippled implementation have higher version numbers,
972 // and (3) the calculation won't cause divide-by-zero.
973 if (higherVersionCount > 0 && rippledCount > 0)
974 {
975 constexpr std::size_t reportingPercent = 90;
976 constexpr std::size_t cutoffPercent = 60;
977 auto const unlSize{app_.validators().getQuorumKeys().second.size()};
978 needPrint = unlSize > 0 && calculatePercent(vals.size(), unlSize) >= reportingPercent &&
979 calculatePercent(higherVersionCount, rippledCount) >= cutoffPercent;
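                // e.g. with a 30-entry UNL this requires validations from at
                // least 27 trusted validators (90%), and at least 60% of the
                // validators that identify as rippled must report a newer
                // version.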
980 }
981 }
982 // To throttle the warning messages, instead of printing a warning
983 // every flag ledger, we print every week.
984 else if (currentTime - upgradeWarningPrevTime_ >= weeks{1})
985 {
986 // Printed the warning before, and assuming most validators
987 // do not downgrade, we keep printing the warning
988 // until the local server is restarted.
989 needPrint = true;
990 }
991
992 if (needPrint)
993 {
994 upgradeWarningPrevTime_ = currentTime;
995 auto const upgradeMsg =
996 "Check for upgrade: "
997 "A majority of trusted validators are "
998 "running a newer version.";
999 std::cerr << upgradeMsg << std::endl;
1000 JLOG(m_journal.error()) << upgradeMsg;
1001 }
1002 }
1003}
1004
1006void
1008 std::shared_ptr<Ledger const> const& ledger,
1009 uint256 const& consensusHash,
1010 Json::Value consensus)
1011{
1012 // Because we just built a ledger, we are no longer building one
1014
1015 // No need to process validations in standalone mode
1016 if (standalone_)
1017 return;
1018
1019 mLedgerHistory.builtLedger(ledger, consensusHash, std::move(consensus));
1020
1021 if (ledger->header().seq <= mValidLedgerSeq)
1022 {
1023 auto stream = app_.journal("LedgerConsensus").info();
1024 JLOG(stream) << "Consensus built old ledger: " << ledger->header().seq << " <= " << mValidLedgerSeq;
1025 return;
1026 }
1027
1028 // See if this ledger can be the new fully-validated ledger
1029 checkAccept(ledger);
1030
1031 if (ledger->header().seq <= mValidLedgerSeq)
1032 {
1033 auto stream = app_.journal("LedgerConsensus").debug();
1034 JLOG(stream) << "Consensus ledger fully validated";
1035 return;
1036 }
1037
1038 // This ledger cannot be the new fully-validated ledger, but
1039 // maybe we saved up validations for some other ledger that can be
1040
1042
1043 // Track validation counts with sequence numbers
1044 class valSeq
1045 {
1046 public:
1047 valSeq() : valCount_(0), ledgerSeq_(0)
1048 {
1049 ;
1050 }
1051
1052 void
1053 mergeValidation(LedgerIndex seq)
1054 {
1055 valCount_++;
1056
1057 // If we didn't already know the sequence, now we do
1058 if (ledgerSeq_ == 0)
1059 ledgerSeq_ = seq;
1060 }
1061
1062 std::size_t valCount_;
1063 LedgerIndex ledgerSeq_;
1064 };
1065
1066 // Count the number of current, trusted validations
1068 for (auto const& v : validations)
1069 {
1070 valSeq& vs = count[v->getLedgerHash()];
1071 vs.mergeValidation(v->getFieldU32(sfLedgerSequence));
1072 }
1073
1074 auto const neededValidations = getNeededValidations();
1075 auto maxSeq = mValidLedgerSeq.load();
1076 auto maxLedger = ledger->header().hash;
1077
1078 // Of the ledgers with sufficient validations,
1079 // find the one with the highest sequence
1080 for (auto& v : count)
1081 if (v.second.valCount_ > neededValidations)
1082 {
1083 // If we still don't know the sequence, get it
1084 if (v.second.ledgerSeq_ == 0)
1085 {
1086 if (auto l = getLedgerByHash(v.first))
1087 v.second.ledgerSeq_ = l->header().seq;
1088 }
1089
1090 if (v.second.ledgerSeq_ > maxSeq)
1091 {
1092 maxSeq = v.second.ledgerSeq_;
1093 maxLedger = v.first;
1094 }
1095 }
1096
1097 if (maxSeq > mValidLedgerSeq)
1098 {
1099 auto stream = app_.journal("LedgerConsensus").debug();
1100 JLOG(stream) << "Consensus triggered check of ledger";
1101 checkAccept(maxLedger, maxSeq);
1102 }
1103}
1104
1107{
1108 // Try to get the hash of a ledger we need to fetch for history
1110 auto const& l{mHistLedger};
1111
1112 if (l && l->header().seq >= index)
1113 {
1114 ret = hashOfSeq(*l, index, m_journal);
1115 if (!ret)
1116 ret = walkHashBySeq(index, l, reason);
1117 }
1118
1119 if (!ret)
1120 ret = walkHashBySeq(index, reason);
1121
1122 return ret;
1123}
1124
1127{
1129
1130 JLOG(m_journal.trace()) << "findNewLedgersToPublish<";
1131
1132 // No valid ledger, nothing to do
1133 if (mValidLedger.empty())
1134 {
1135 JLOG(m_journal.trace()) << "No valid journal, nothing to publish.";
1136 return {};
1137 }
1138
1139 if (!mPubLedger)
1140 {
1141 JLOG(m_journal.info()) << "First published ledger will be " << mValidLedgerSeq;
1142 return {mValidLedger.get()};
1143 }
1144
1146 {
1147 JLOG(m_journal.warn()) << "Gap in validated ledger stream " << mPubLedgerSeq << " - " << mValidLedgerSeq - 1;
1148
1149 auto valLedger = mValidLedger.get();
1150 ret.push_back(valLedger);
1151 setPubLedger(valLedger);
1152 app_.getOrderBookDB().setup(valLedger);
1153
1154 return {valLedger};
1155 }
1156
1158 {
1159 JLOG(m_journal.trace()) << "No valid journal, nothing to publish.";
1160 return {};
1161 }
1162
1163 int acqCount = 0;
1164
1165 auto pubSeq = mPubLedgerSeq + 1; // Next sequence to publish
1166 auto valLedger = mValidLedger.get();
1167 std::uint32_t valSeq = valLedger->header().seq;
1168
1169 scope_unlock sul{sl};
1170 try
1171 {
1172 for (std::uint32_t seq = pubSeq; seq <= valSeq; ++seq)
1173 {
1174 JLOG(m_journal.trace()) << "Trying to fetch/publish valid ledger " << seq;
1175
1177 // This can throw
1178 auto hash = hashOfSeq(*valLedger, seq, m_journal);
1179 // VFALCO TODO Restructure this code so that zero is not
1180 // used.
1181 if (!hash)
1182 hash = beast::zero; // kludge
1183 if (seq == valSeq)
1184 {
1185 // We need to publish the ledger we just fully validated
1186 ledger = valLedger;
1187 }
1188 else if (hash->isZero())
1189 {
1190 // LCOV_EXCL_START
1191 JLOG(m_journal.fatal()) << "Ledger: " << valSeq << " does not have hash for " << seq;
1192 UNREACHABLE(
1193 "xrpl::LedgerMaster::findNewLedgersToPublish : ledger "
1194 "not found");
1195 // LCOV_EXCL_STOP
1196 }
1197 else
1198 {
1199 ledger = mLedgerHistory.getLedgerByHash(*hash);
1200 }
1201
1202 if (!app_.config().LEDGER_REPLAY)
1203 {
1204 // Can we try to acquire the ledger we need?
1205 if (!ledger && (++acqCount < ledger_fetch_size_))
1207 }
1208
1209 // Did we acquire the next ledger we need to publish?
1210 if (ledger && (ledger->header().seq == pubSeq))
1211 {
1212 ledger->setValidated();
1213 ret.push_back(ledger);
1214 ++pubSeq;
1215 }
1216 }
1217
1218 JLOG(m_journal.trace()) << "ready to publish " << ret.size() << " ledgers.";
1219 }
1220 catch (std::exception const& ex)
1221 {
1222 JLOG(m_journal.error()) << "Exception while trying to find ledgers to publish: " << ex.what();
1223 }
1224
1226 {
1227 /* Narrow down the gap of ledgers, and try to replay them.
1228 * When replaying a ledger gap, if the local node has
1229 * the start ledger, it saves an expensive InboundLedger
1230 * acquire. If the local node has the finish ledger, it
1231 * saves a skip list acquire.
1232 */
1233 auto const& startLedger = ret.empty() ? mPubLedger : ret.back();
1234 auto finishLedger = valLedger;
1235 while (startLedger->seq() + 1 < finishLedger->seq())
1236 {
1237 if (auto const parent = mLedgerHistory.getLedgerByHash(finishLedger->header().parentHash); parent)
1238 {
1239 finishLedger = parent;
1240 }
1241 else
1242 {
1243 auto numberLedgers = finishLedger->seq() - startLedger->seq() + 1;
1244 JLOG(m_journal.debug()) << "Publish LedgerReplays " << numberLedgers
1245 << " ledgers, from seq=" << startLedger->header().seq << ", "
1246 << startLedger->header().hash << " to seq=" << finishLedger->header().seq
1247 << ", " << finishLedger->header().hash;
1249 InboundLedger::Reason::GENERIC, finishLedger->header().hash, numberLedgers);
1250 break;
1251 }
1252 }
1253 }
1254
1255 return ret;
1256}
1257
1258void
1260{
1262
1263 // Can't advance without at least one fully-valid ledger
1264 mAdvanceWork = true;
1266 {
1267 mAdvanceThread = true;
1268 app_.getJobQueue().addJob(jtADVANCE, "AdvanceLedger", [this]() {
1270
1271 XRPL_ASSERT(!mValidLedger.empty() && mAdvanceThread, "xrpl::LedgerMaster::tryAdvance : has valid ledger");
1272
1273 JLOG(m_journal.trace()) << "advanceThread<";
1274
1275 try
1276 {
1277 doAdvance(sl);
1278 }
1279 catch (std::exception const& ex)
1280 {
1281 JLOG(m_journal.fatal()) << "doAdvance throws: " << ex.what();
1282 }
1283
1284 mAdvanceThread = false;
1285 JLOG(m_journal.trace()) << "advanceThread>";
1286 });
1287 }
1288}
1289
1290void
1292{
1293 {
1296 {
1298 mPathLedger.reset();
1299 JLOG(m_journal.debug()) << "Need network ledger for updating paths";
1300 return;
1301 }
1302 }
1303
1304 while (!app_.getJobQueue().isStopping())
1305 {
1306 JLOG(m_journal.debug()) << "updatePaths running";
1308 {
1310
1311 if (!mValidLedger.empty() && (!mPathLedger || (mPathLedger->header().seq != mValidLedgerSeq)))
1312 { // We have a new valid ledger since the last full pathfinding
1314 lastLedger = mPathLedger;
1315 }
1316 else if (mPathFindNewRequest)
1317 { // We have a new request but no new ledger
1318 lastLedger = app_.openLedger().current();
1319 }
1320 else
1321 { // Nothing to do
1323 mPathLedger.reset();
1324 JLOG(m_journal.debug()) << "Nothing to do for updating paths";
1325 return;
1326 }
1327 }
1328
1329 if (!standalone_)
1330 { // don't pathfind with a ledger that's more than 60 seconds old
1331 using namespace std::chrono;
1332 auto age = time_point_cast<seconds>(app_.timeKeeper().closeTime()) - lastLedger->header().closeTime;
1333 if (age > 1min)
1334 {
1335 JLOG(m_journal.debug()) << "Published ledger too old for updating paths";
1338 mPathLedger.reset();
1339 return;
1340 }
1341 }
1342
1343 try
1344 {
1345 auto& pathRequests = app_.getPathRequests();
1346 {
1348 if (!pathRequests.requestsPending())
1349 {
1351 mPathLedger.reset();
1352 JLOG(m_journal.debug()) << "No path requests found. Nothing to do for updating "
1353 "paths. "
1354 << mPathFindThread << " jobs remaining";
1355 return;
1356 }
1357 }
1358 JLOG(m_journal.debug()) << "Updating paths";
1359 pathRequests.updateAll(lastLedger);
1360
1362 if (!pathRequests.requestsPending())
1363 {
1364 JLOG(m_journal.debug()) << "No path requests left. No need for further updating "
1365 "paths";
1367 mPathLedger.reset();
1368 return;
1369 }
1370 }
1371 catch (SHAMapMissingNode const& mn)
1372 {
1373 JLOG(m_journal.info()) << "During pathfinding: " << mn.what();
1374 if (lastLedger->open())
1375 {
1376 // our parent is the problem
1378 lastLedger->header().parentHash, lastLedger->header().seq - 1, InboundLedger::Reason::GENERIC);
1379 }
1380 else
1381 {
1382 // this ledger is the problem
1384 lastLedger->header().hash, lastLedger->header().seq, InboundLedger::Reason::GENERIC);
1385 }
1386 }
1387 }
1388}
1389
1390bool
1392{
1394 mPathFindNewRequest = newPFWork("PthFindNewReq", ml);
1395 return mPathFindNewRequest;
1396}
1397
1398bool
1400{
1402 bool const ret = mPathFindNewRequest;
1403 mPathFindNewRequest = false;
1404 return ret;
1405}
1406
1407// If the order book is radically updated, we need to reprocess all
1408// pathfinding requests.
1409bool
1411{
1413 mPathLedger.reset();
1414
1415 return newPFWork("PthFindOBDB", ml);
1416}
1417
1420bool
1422{
1424 {
1425 JLOG(m_journal.debug()) << "newPFWork: Creating job. path find threads: " << mPathFindThread;
1426 if (app_.getJobQueue().addJob(jtUPDATE_PF, name, [this]() { updatePaths(); }))
1427 {
1429 }
1430 }
1431 // If we're stopping don't give callers the expectation that their
1432 // request will be fulfilled, even if it may be serviced.
1433 return mPathFindThread > 0 && !app_.isStopping();
1434}
1435
1438{
1439 return m_mutex;
1440}
1441
1442// The current ledger is the ledger we believe new transactions should go in
1448
1454
1455Rules
1457{
1458 // Once we have a guarantee that there's always a last validated
1459 // ledger then we can dispense with the if.
1460
1461 // Return the Rules from the last validated ledger.
1462 if (auto const ledger = getValidatedLedger())
1463 return ledger->rules();
1464
1465 return Rules(app_.config().features);
1466}
1467
1468// This is the last ledger we published to clients and can lag the validated
1469// ledger.
1476
1483
1486{
1487 uint256 hash = getHashBySeq(ledgerIndex);
1488 return hash.isNonZero() ? getCloseTimeByHash(hash, ledgerIndex) : std::nullopt;
1489}
1490
1493{
1494 auto nodeObject = app_.getNodeStore().fetchNodeObject(ledgerHash, index);
1495 if (nodeObject && (nodeObject->getData().size() >= 120))
1496 {
1497 SerialIter it(nodeObject->getData().data(), nodeObject->getData().size());
1498 if (safe_cast<HashPrefix>(it.get32()) == HashPrefix::ledgerMaster)
1499 {
1500 it.skip(
1501 4 + 8 + 32 + // seq drops parentHash
1502 32 + 32 + 4); // txHash acctHash parentClose
1503 return NetClock::time_point{NetClock::duration{it.get32()}};
1504 }
1505 }
1506
1507 return std::nullopt;
1508}
1509
1510uint256
1512{
1514
1515 if (hash.isNonZero())
1516 return hash;
1517
1519}
1520
1523{
1524 std::optional<LedgerHash> ledgerHash;
1525
1526 if (auto referenceLedger = mValidLedger.get())
1527 ledgerHash = walkHashBySeq(index, referenceLedger, reason);
1528
1529 return ledgerHash;
1530}
1531
1534 std::uint32_t index,
1535 std::shared_ptr<ReadView const> const& referenceLedger,
1536 InboundLedger::Reason reason)
1537{
1538 if (!referenceLedger || (referenceLedger->header().seq < index))
1539 {
1540 // Nothing we can do. No validated ledger.
1541 return std::nullopt;
1542 }
1543
1544 // See if the hash for the ledger we need is in the reference ledger
1545 auto ledgerHash = hashOfSeq(*referenceLedger, index, m_journal);
1546 if (ledgerHash)
1547 return ledgerHash;
1548
1549 // The hash is not in the reference ledger. Get another ledger which can
1550 // be located easily and should contain the hash.
1551 LedgerIndex refIndex = getCandidateLedger(index);
1552 auto const refHash = hashOfSeq(*referenceLedger, refIndex, m_journal);
1553 XRPL_ASSERT(refHash, "xrpl::LedgerMaster::walkHashBySeq : found ledger");
1554 if (refHash)
1555 {
1556 // Try the hash and sequence of a better reference ledger just found
1557 auto ledger = mLedgerHistory.getLedgerByHash(*refHash);
1558
1559 if (ledger)
1560 {
1561 try
1562 {
1563 ledgerHash = hashOfSeq(*ledger, index, m_journal);
1564 }
1565 catch (SHAMapMissingNode const&)
1566 {
1567 ledger.reset();
1568 }
1569 }
1570
1571 // Try to acquire the complete ledger
1572 if (!ledger)
1573 {
1574 if (auto const l = app_.getInboundLedgers().acquire(*refHash, refIndex, reason))
1575 {
1576 ledgerHash = hashOfSeq(*l, index, m_journal);
1577 XRPL_ASSERT(
1578 ledgerHash,
1579 "xrpl::LedgerMaster::walkHashBySeq : has complete "
1580 "ledger");
1581 }
1582 }
1583 }
1584 return ledgerHash;
1585}
1586
1589{
1590 if (index <= mValidLedgerSeq)
1591 {
1592 // Always prefer a validated ledger
1593 if (auto valid = mValidLedger.get())
1594 {
1595 if (valid->header().seq == index)
1596 return valid;
1597
1598 try
1599 {
1600 auto const hash = hashOfSeq(*valid, index, m_journal);
1601
1602 if (hash)
1604 }
1605 catch (std::exception const&)
1606 {
1607 // Missing nodes are already handled
1608 }
1609 }
1610 }
1611
1612 if (auto ret = mLedgerHistory.getLedgerBySeq(index))
1613 return ret;
1614
1615 auto ret = mClosedLedger.get();
1616 if (ret && (ret->header().seq == index))
1617 return ret;
1618
1619 clearLedger(index);
1620 return {};
1621}
1622
1625{
1626 if (auto ret = mLedgerHistory.getLedgerByHash(hash))
1627 return ret;
1628
1629 auto ret = mClosedLedger.get();
1630 if (ret && (ret->header().hash == hash))
1631 return ret;
1632
1633 return {};
1634}
1635
1636void
1642
1643void
1645{
1647 fetch_packs_.sweep();
1648}
1649
1650float
1655
1656void
1658{
1660 if (seq > 0)
1661 mCompleteLedgers.erase(range(0u, seq - 1));
1662}
1663
1664void
1669
1670void
1672{
1673 replayData = std::move(replay);
1674}
1675
1678{
1679 return std::move(replayData);
1680}
1681
1682void
1684 std::uint32_t missing,
1685 bool& progress,
1686 InboundLedger::Reason reason,
1688{
1689 scope_unlock sul{sl};
1690 if (auto hash = getLedgerHashForHistory(missing, reason))
1691 {
1692 XRPL_ASSERT(hash->isNonZero(), "xrpl::LedgerMaster::fetchForHistory : found ledger");
1693 auto ledger = getLedgerByHash(*hash);
1694 if (!ledger)
1695 {
1697 {
1698 ledger = app_.getInboundLedgers().acquire(*hash, missing, reason);
1699 if (!ledger && missing != fetch_seq_ && missing > app_.getNodeStore().earliestLedgerSeq())
1700 {
1701 JLOG(m_journal.trace()) << "fetchForHistory want fetch pack " << missing;
1702 fetch_seq_ = missing;
1703 getFetchPack(missing, reason);
1704 }
1705 else
1706 JLOG(m_journal.trace()) << "fetchForHistory no fetch pack for " << missing;
1707 }
1708 else
1709 JLOG(m_journal.debug()) << "fetchForHistory found failed acquire";
1710 }
1711 if (ledger)
1712 {
1713 auto seq = ledger->header().seq;
1714 XRPL_ASSERT(seq == missing, "xrpl::LedgerMaster::fetchForHistory : sequence match");
1715 JLOG(m_journal.trace()) << "fetchForHistory acquired " << seq;
1716 setFullLedger(ledger, false, false);
1717 int fillInProgress;
1718 {
1720 mHistLedger = ledger;
1721 fillInProgress = mFillInProgress;
1722 }
1723 if (fillInProgress == 0 &&
1724 app_.getRelationalDatabase().getHashByIndex(seq - 1) == ledger->header().parentHash)
1725 {
1726 {
1727 // Previous ledger is in DB
1729 mFillInProgress = seq;
1730 }
1731 app_.getJobQueue().addJob(jtADVANCE, "TryFill", [this, ledger]() { tryFill(ledger); });
1732 }
1733 progress = true;
1734 }
1735 else
1736 {
1737 std::uint32_t fetchSz;
1738 // Do not fetch ledger sequences lower
1739 // than the earliest ledger sequence
1740 fetchSz = app_.getNodeStore().earliestLedgerSeq();
1741 fetchSz = missing >= fetchSz ? std::min(ledger_fetch_size_, (missing - fetchSz) + 1) : 0;
1742 try
1743 {
1744 for (std::uint32_t i = 0; i < fetchSz; ++i)
1745 {
1746 std::uint32_t seq = missing - i;
1747 if (auto h = getLedgerHashForHistory(seq, reason))
1748 {
1749 XRPL_ASSERT(
1750 h->isNonZero(),
1751 "xrpl::LedgerMaster::fetchForHistory : "
1752 "prefetched ledger");
1753 app_.getInboundLedgers().acquire(*h, seq, reason);
1754 }
1755 }
1756 }
1757 catch (std::exception const& ex)
1758 {
1759 JLOG(m_journal.warn()) << "Threw while prefetching: " << ex.what();
1760 }
1761 }
1762 }
1763 else
1764 {
1765 JLOG(m_journal.fatal()) << "Can't find ledger following prevMissing " << missing;
1766 JLOG(m_journal.fatal()) << "Pub:" << mPubLedgerSeq << " Val:" << mValidLedgerSeq;
1767 JLOG(m_journal.fatal()) << "Ledgers: " << app_.getLedgerMaster().getCompleteLedgers();
1768 JLOG(m_journal.fatal()) << "Acquire reason: "
1769 << (reason == InboundLedger::Reason::HISTORY ? "HISTORY" : "NOT HISTORY");
1770 clearLedger(missing + 1);
1771 progress = true;
1772 }
1773}
1774
1775// Try to publish ledgers, acquire missing ledgers
1776void
1778{
1779 do
1780 {
1781 mAdvanceWork = false; // If there's work to do, we'll make progress
1782 bool progress = false;
1783
1784 auto const pubLedgers = findNewLedgersToPublish(sl);
1785 if (pubLedgers.empty())
1786 {
1791 {
1792 // We are in sync, so can acquire
1795 {
1797 missing = prevMissing(
1799 }
1800 if (missing)
1801 {
1802 JLOG(m_journal.trace()) << "tryAdvance discovered missing " << *missing;
1803 if ((mFillInProgress == 0 || *missing > mFillInProgress) &&
1808 *missing,
1809 m_journal))
1810 {
1811 JLOG(m_journal.trace()) << "advanceThread should acquire";
1812 }
1813 else
1814 missing = std::nullopt;
1815 }
1816 if (missing)
1817 {
1818 fetchForHistory(*missing, progress, reason, sl);
1820 {
1821 JLOG(m_journal.debug()) << "tryAdvance found last valid changed";
1822 progress = true;
1823 }
1824 }
1825 }
1826 else
1827 {
1828 mHistLedger.reset();
1829 JLOG(m_journal.trace()) << "tryAdvance not fetching history";
1830 }
1831 }
1832 else
1833 {
1834 JLOG(m_journal.trace()) << "tryAdvance found " << pubLedgers.size() << " ledgers to publish";
1835 for (auto const& ledger : pubLedgers)
1836 {
1837 {
1838 scope_unlock sul{sl};
1839 JLOG(m_journal.debug()) << "tryAdvance publishing seq " << ledger->header().seq;
1840 setFullLedger(ledger, true, true);
1841 }
1842
1843 setPubLedger(ledger);
1844
1845 {
1846 scope_unlock sul{sl};
1847 app_.getOPs().pubLedger(ledger);
1848 }
1849 }
1850
1852 progress = newPFWork("PthFindNewLed", sl);
1853 }
1854 if (progress)
1855 mAdvanceWork = true;
1856 } while (mAdvanceWork);
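    // Loop again whenever publishing or fetching made progress, since more
    // work may have arrived while the lock was released above.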
1857}
1858
1859void
1861{
1862 fetch_packs_.canonicalize_replace_client(hash, data);
1863}
1864
1867{
1868 Blob data;
1869 if (fetch_packs_.retrieve(hash, data))
1870 {
1871 fetch_packs_.del(hash, false);
1872 if (hash == sha512Half(makeSlice(data)))
1873 return data;
1874 }
1875 return std::nullopt;
1876}
1877
1878void
1889
1915static void
1917 SHAMap const& want,
1918 SHAMap const* have,
1919 std::uint32_t cnt,
1920 protocol::TMGetObjectByHash* into,
1921 std::uint32_t seq,
1922 bool withLeaves = true)
1923{
1924 XRPL_ASSERT(cnt, "xrpl::populateFetchPack : nonzero count input");
1925
1926 Serializer s(1024);
1927
1928 want.visitDifferences(have, [&s, withLeaves, &cnt, into, seq](SHAMapTreeNode const& n) -> bool {
1929 if (!withLeaves && n.isLeaf())
1930 return true;
1931
1932 s.erase();
1934
1935 auto const& hash = n.getHash().as_uint256();
1936
1937 protocol::TMIndexedObject* obj = into->add_objects();
1938 obj->set_ledgerseq(seq);
1939 obj->set_hash(hash.data(), hash.size());
1940 obj->set_data(s.getDataPtr(), s.getLength());
1941
1942 return --cnt != 0;
1943 });
1944}
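// Note: visitDifferences() visits only nodes present in `want` but absent
// from `have` (all of `want` when `have` is nullptr), and the traversal
// stops once the callback returns false, i.e. after `cnt` nodes have been
// added.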
1945
1946void
1948 std::weak_ptr<Peer> const& wPeer,
1950 uint256 haveLedgerHash,
1952{
1953 using namespace std::chrono_literals;
1954 if (UptimeClock::now() > uptime + 1s)
1955 {
1956 JLOG(m_journal.info()) << "Fetch pack request got stale";
1957 return;
1958 }
1959
1961 {
1962 JLOG(m_journal.info()) << "Too busy to make fetch pack";
1963 return;
1964 }
1965
1966 auto peer = wPeer.lock();
1967
1968 if (!peer)
1969 return;
1970
1971 auto have = getLedgerByHash(haveLedgerHash);
1972
1973 if (!have)
1974 {
1975 JLOG(m_journal.info()) << "Peer requests fetch pack for ledger we don't have: " << have;
1976 peer->charge(Resource::feeRequestNoReply, "get_object ledger");
1977 return;
1978 }
1979
1980 if (have->open())
1981 {
1982 JLOG(m_journal.warn()) << "Peer requests fetch pack from open ledger: " << have;
1983 peer->charge(Resource::feeMalformedRequest, "get_object ledger open");
1984 return;
1985 }
1986
1987 if (have->header().seq < getEarliestFetch())
1988 {
1989 JLOG(m_journal.debug()) << "Peer requests fetch pack that is too early";
1990 peer->charge(Resource::feeMalformedRequest, "get_object ledger early");
1991 return;
1992 }
1993
1994 auto want = getLedgerByHash(have->header().parentHash);
1995
1996 if (!want)
1997 {
1998 JLOG(m_journal.info()) << "Peer requests fetch pack for ledger whose predecessor we "
1999 << "don't have: " << have;
2000 peer->charge(Resource::feeRequestNoReply, "get_object ledger no parent");
2001 return;
2002 }
2003
2004 try
2005 {
2006 Serializer hdr(128);
2007
2008 protocol::TMGetObjectByHash reply;
2009 reply.set_query(false);
2010
2011 if (request->has_seq())
2012 reply.set_seq(request->seq());
2013
2014 reply.set_ledgerhash(request->ledgerhash());
2015 reply.set_type(protocol::TMGetObjectByHash::otFETCH_PACK);
2016
2017 // Building a fetch pack:
2018 // 1. Add the header for the requested ledger.
2019 // 2. Add the nodes for the AccountStateMap of that ledger.
2020 // 3. If there are transactions, add the nodes for the
2021 // transactions of the ledger.
2022 // 4. If the FetchPack now contains at least 512 entries then stop.
2023 // 5. If not very much time has elapsed, then loop back and repeat
2024 // the same process adding the previous ledger to the FetchPack.
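        // In the loop below "very much time" is one second of wall clock,
        // the state map contributes at most 16384 nodes per ledger, and the
        // transaction map at most 512.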
2025 do
2026 {
2027 std::uint32_t lSeq = want->header().seq;
2028
2029 {
2030 // Serialize the ledger header:
2031 hdr.erase();
2032
2034 addRaw(want->header(), hdr);
2035
2036 // Add the data
2037 protocol::TMIndexedObject* obj = reply.add_objects();
2038 obj->set_hash(want->header().hash.data(), want->header().hash.size());
2039 obj->set_data(hdr.getDataPtr(), hdr.getLength());
2040 obj->set_ledgerseq(lSeq);
2041 }
2042
2043 populateFetchPack(want->stateMap(), &have->stateMap(), 16384, &reply, lSeq);
2044
2045 // We use nullptr here because transaction maps are per ledger
2046 // and so the requestor is unlikely to already have it.
2047 if (want->header().txHash.isNonZero())
2048 populateFetchPack(want->txMap(), nullptr, 512, &reply, lSeq);
2049
2050 if (reply.objects().size() >= 512)
2051 break;
2052
2053 have = std::move(want);
2054 want = getLedgerByHash(have->header().parentHash);
2055 } while (want && UptimeClock::now() <= uptime + 1s);
2056
2057 auto msg = std::make_shared<Message>(reply, protocol::mtGET_OBJECTS);
2058
2059 JLOG(m_journal.info()) << "Built fetch pack with " << reply.objects().size() << " nodes ("
2060 << msg->getBufferSize() << " bytes)";
2061
2062 peer->send(msg);
2063 }
2064 catch (std::exception const& ex)
2065 {
2066 JLOG(m_journal.warn()) << "Exception building fetch pack. Exception: " << ex.what();
2067 }
2068}
2069
2072{
2073 return fetch_packs_.getCacheSize();
2074}
2075
2076// Returns the minimum ledger sequence in SQL database, if any.
2082
2085{
2086 uint32_t first = 0, last = 0;
2087
2088 if (!getValidatedRange(first, last) || last < ledgerSeq)
2089 return {};
2090
2091 auto const lgr = getLedgerBySeq(ledgerSeq);
2092 if (!lgr || lgr->txs.empty())
2093 return {};
2094
2095 for (auto it = lgr->txs.begin(); it != lgr->txs.end(); ++it)
2096 if (it->first && it->second && it->second->isFieldPresent(sfTransactionIndex) &&
2097 it->second->getFieldU32(sfTransactionIndex) == txnIndex)
2098 return it->first->getTransactionID();
2099
2100 return {};
2101}
2102
2103} // namespace xrpl
T back(T... args)
T back_inserter(T... args)
T begin(T... args)
Represents a JSON value.
Definition json_value.h:131
Provide a light-weight way to check active() before string formatting.
Definition Journal.h:181
A generic endpoint for log messages.
Definition Journal.h:41
Stream fatal() const
Definition Journal.h:325
Stream error() const
Definition Journal.h:319
Stream debug() const
Definition Journal.h:301
Stream info() const
Definition Journal.h:307
Stream trace() const
Severity stream access functions.
Definition Journal.h:295
Stream warn() const
Definition Journal.h:313
typename Clock::time_point time_point
virtual bool hasUnsupportedEnabled() const =0
returns true if one or more amendments on the network have been enabled that this server does not sup...
void doValidatedLedger(std::shared_ptr< ReadView const > const &lastValidatedLedger)
Called when a new fully-validated ledger is accepted.
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual Config & config()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual PendingSaves & pendingSaves()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual LedgerMaster & getLedgerMaster()=0
virtual LedgerIndex getMaxDisallowedLedger()=0
Ensure that a newly-started validator does not sign proposals older than the last ledger it persisted...
virtual LedgerReplayer & getLedgerReplayer()=0
virtual beast::Journal journal(std::string const &name)=0
virtual NodeStore::Database & getNodeStore()=0
virtual TimeKeeper & timeKeeper()=0
virtual PathRequests & getPathRequests()=0
virtual OpenLedger & openLedger()=0
virtual RCLValidations & getValidations()=0
virtual AmendmentTable & getAmendmentTable()=0
virtual SHAMapStore & getSHAMapStore()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual Overlay & overlay()=0
virtual JobQueue & getJobQueue()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual bool isStopping() const =0
virtual ValidatorList & validators()=0
virtual NetworkOPs & getOPs()=0
Holds transactions which were deferred to the next pass of consensus.
void insert(std::shared_ptr< STTx const > const &txn)
std::shared_ptr< STTx const > popAcctTransaction(std::shared_ptr< STTx const > const &tx)
std::unordered_set< uint256, beast::uhash<> > features
Definition Config.h:257
bool LEDGER_REPLAY
Definition Config.h:204
virtual bool isFailure(uint256 const &h)=0
virtual std::shared_ptr< Ledger const > acquire(uint256 const &hash, std::uint32_t seq, InboundLedger::Reason)=0
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition JobQueue.h:146
int getJobCount(JobType t) const
Jobs waiting at this priority.
Definition JobQueue.cpp:106
bool isStopping() const
Definition JobQueue.h:209
void validatedLedger(std::shared_ptr< Ledger const > const &, std::optional< uint256 > const &consensusHash)
Report that we have validated a particular ledger.
bool insert(std::shared_ptr< Ledger const > const &ledger, bool validated)
Track a ledger.
float getCacheHitRate()
Get the ledgers_by_hash cache hit rate.
LedgerHash getLedgerHash(LedgerIndex ledgerIndex)
Get a ledger's hash given its sequence number.
bool fixIndex(LedgerIndex ledgerIndex, LedgerHash const &ledgerHash)
Repair a hash to index mapping.
std::shared_ptr< Ledger const > getLedgerByHash(LedgerHash const &ledgerHash)
Retrieve a ledger given its hash.
void sweep()
Remove stale cache entries.
void builtLedger(std::shared_ptr< Ledger const > const &, uint256 const &consensusHash, Json::Value)
Report that we have locally built a particular ledger.
std::shared_ptr< Ledger const > getLedgerBySeq(LedgerIndex ledgerIndex)
Get a ledger given its sequence number.
void clearLedgerCachePrior(LedgerIndex seq)
void set(std::shared_ptr< Ledger const > ledger)
std::shared_ptr< Ledger const > get()
std::optional< LedgerIndex > minSqlSeq()
std::atomic< LedgerIndex > mValidLedgerSeq
void consensusBuilt(std::shared_ptr< Ledger const > const &ledger, uint256 const &consensusHash, Json::Value consensus)
Report that the consensus process built a particular ledger.
bool haveLedger(std::uint32_t seq)
TaggedCache< uint256, Blob > fetch_packs_
std::size_t getNeededValidations()
Determines how many validations are needed to fully validate a ledger.
bool isCompatible(ReadView const &, beast::Journal::Stream, char const *reason)
std::shared_ptr< STTx const > popAcctTransaction(std::shared_ptr< STTx const > const &tx)
Get the next transaction held for a particular account if any.
void setValidLedger(std::shared_ptr< Ledger const > const &l)
void switchLCL(std::shared_ptr< Ledger const > const &lastClosed)
std::recursive_mutex & peekMutex()
std::uint32_t fetch_seq_
std::chrono::seconds getValidatedLedgerAge()
TimeKeeper::time_point upgradeWarningPrevTime_
LedgerIndex getCurrentLedgerIndex()
bool fixIndex(LedgerIndex ledgerIndex, LedgerHash const &ledgerHash)
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
void applyHeldTransactions()
Apply held transactions to the open ledger This is normally called as we close the ledger.
bool storeLedger(std::shared_ptr< Ledger const > ledger)
void gotFetchPack(bool progress, std::uint32_t seq)
void tryFill(std::shared_ptr< Ledger const > ledger)
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
void setPubLedger(std::shared_ptr< Ledger const > const &l)
bool newPFWork(char const *name, std::unique_lock< std::recursive_mutex > &)
A thread needs to be dispatched to handle pathfinding work of some kind.
void setFullLedger(std::shared_ptr< Ledger const > const &ledger, bool isSynchronous, bool isCurrent)
void clearPriorLedgers(LedgerIndex seq)
LedgerIndex const max_ledger_difference_
CanonicalTXSet mHeldTransactions
std::uint32_t const ledger_fetch_size_
void setBuildingLedger(LedgerIndex index)
std::pair< uint256, LedgerIndex > mLastValidLedger
bool isCaughtUp(std::string &reason)
std::size_t getFetchPackCacheSize() const
std::optional< Blob > getFetchPack(uint256 const &hash) override
Retrieves partial ledger data of the corresponding hash from peers.
std::vector< std::shared_ptr< Ledger const > > findNewLedgersToPublish(std::unique_lock< std::recursive_mutex > &)
std::atomic< std::uint32_t > mPubLedgerClose
std::optional< NetClock::time_point > getCloseTimeByHash(LedgerHash const &ledgerHash, LedgerIndex ledgerIndex)
std::uint32_t const fetch_depth_
std::atomic< LedgerIndex > mPubLedgerSeq
void clearLedger(std::uint32_t seq)
void clearLedgerCachePrior(LedgerIndex seq)
std::atomic_flag mGotFetchPackThread
uint256 getHashBySeq(std::uint32_t index)
Get a ledger's hash by sequence number using the cache.
std::shared_ptr< Ledger const > getClosedLedger()
void setLedgerRangePresent(std::uint32_t minV, std::uint32_t maxV)
std::optional< NetClock::time_point > getCloseTimeBySeq(LedgerIndex ledgerIndex)
std::string getCompleteLedgers()
std::shared_ptr< Ledger const > getValidatedLedger()
void fetchForHistory(std::uint32_t missing, bool &progress, InboundLedger::Reason reason, std::unique_lock< std::recursive_mutex > &)
std::uint32_t const ledger_history_
bool isValidated(ReadView const &ledger)
void fixMismatch(ReadView const &ledger)
void makeFetchPack(std::weak_ptr< Peer > const &wPeer, std::shared_ptr< protocol::TMGetObjectByHash > const &request, uint256 haveLedgerHash, UptimeClock::time_point uptime)
LedgerIndex getValidLedgerIndex()
std::shared_ptr< Ledger const > mPathLedger
bool const standalone_
std::shared_ptr< ReadView const > getPublishedLedger()
std::shared_ptr< Ledger const > mHistLedger
std::optional< LedgerHash > walkHashBySeq(std::uint32_t index, InboundLedger::Reason reason)
Walk to a ledger's hash using the skip list.
LedgerHistory mLedgerHistory
LedgerMaster(Application &app, Stopwatch &stopwatch, beast::insight::Collector::ptr const &collector, beast::Journal journal)
std::atomic< std::uint32_t > mValidLedgerSign
std::chrono::seconds getPublishedLedgerAge()
std::optional< uint256 > txnIdFromIndex(uint32_t ledgerSeq, uint32_t txnIndex)
bool canBeCurrent(std::shared_ptr< Ledger const > const &ledger)
Check the sequence number and parent close time of a ledger against our clock and last validated ledg...
void addFetchPack(uint256 const &hash, std::shared_ptr< Blob > data)
bool getFullValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::optional< LedgerHash > getLedgerHashForHistory(LedgerIndex index, InboundLedger::Reason reason)
RangeSet< std::uint32_t > mCompleteLedgers
LedgerHolder mValidLedger
void checkAccept(std::shared_ptr< Ledger const > const &ledger)
LedgerHolder mClosedLedger
void doAdvance(std::unique_lock< std::recursive_mutex > &)
void addHeldTransaction(std::shared_ptr< Transaction > const &trans)
std::shared_ptr< ReadView const > getCurrentLedger()
beast::Journal m_journal
void takeReplay(std::unique_ptr< LedgerReplay > replay)
std::unique_ptr< LedgerReplay > replayData
std::unique_ptr< LedgerReplay > releaseReplay()
std::shared_ptr< Ledger const > getLedgerByHash(uint256 const &hash)
std::uint32_t getEarliestFetch()
std::recursive_mutex m_mutex
std::shared_ptr< Ledger const > mPubLedger
void failedSave(std::uint32_t seq, uint256 const &hash)
Application & app_
std::atomic< LedgerIndex > mBuildingLedgerSeq
std::recursive_mutex mCompleteLock
void replay(InboundLedger::Reason r, uint256 const &finishLedgerHash, std::uint32_t totalNumLedgers)
Replay a range of ledgers.
void setRemoteFee(std::uint32_t f)
bool isLoadedLocal() const
std::uint32_t getLoadBase() const
virtual void setAmendmentBlocked()=0
virtual bool isBlocked()=0
virtual void processTransactionSet(CanonicalTXSet const &set)=0
Process a set of transactions synchronously, ensuring that they are processed in one batch.
virtual void updateLocalTx(ReadView const &newValidLedger)=0
virtual void clearAmendmentWarned()=0
virtual bool isAmendmentWarned()=0
virtual void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted)=0
virtual bool isNeedNetworkLedger()=0
virtual void clearNeedNetworkLedger()=0
virtual void setAmendmentWarned()=0
Persistency layer for NodeObject.
Definition Database.h:32
std::uint32_t earliestLedgerSeq() const noexcept
Definition Database.h:194
std::shared_ptr< NodeObject > fetchNodeObject(uint256 const &hash, std::uint32_t ledgerSeq=0, FetchType fetchType=FetchType::synchronous, bool duplicate=false)
Fetch a node object.
Definition Database.cpp:202
virtual std::int32_t getWriteLoad() const =0
Retrieve the estimated number of pending write operations.
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
void setup(std::shared_ptr< ReadView const > const &ledger)
virtual void checkTracking(std::uint32_t index)=0
Calls the checkTracking function on each peer.
virtual PeerSequence getActivePeers() const =0
Returns a sequence representing the current list of peers.
bool requestsPending() const
std::map< LedgerIndex, bool > getSnapshot() const
Get a snapshot of the pending saves.
A view into a ledger.
Definition ReadView.h:32
virtual LedgerHeader const & header() const =0
Returns information about the ledger.
virtual bool open() const =0
Returns true if this reflects an open ledger.
virtual uint256 getHashByIndex(LedgerIndex ledgerIndex)=0
Returns the hash of the ledger with the given sequence.
virtual std::optional< LedgerIndex > getMinLedgerSeq()=0
Returns the minimum ledger sequence in the Ledgers table.
virtual std::optional< LedgerHashPair > getHashesByIndex(LedgerIndex ledgerIndex)=0
Returns the hashes of the ledger and its parent as specified by the ledgerIndex.
Rules controlling protocol behavior.
Definition Rules.h:19
uint256 const & as_uint256() const
Definition SHAMapHash.h:25
virtual std::optional< LedgerIndex > minimumOnline() const =0
The minimum ledger to try to maintain in our database.
virtual void onLedgerClosed(std::shared_ptr< Ledger const > const &ledger)=0
Called by LedgerMaster every time a ledger validates.
SHAMapHash const & getHash() const
Return the hash of this node.
virtual void serializeWithPrefix(Serializer &) const =0
Serialize the node in a format appropriate for hashing.
virtual bool isLeaf() const =0
Determines if this is a leaf node.
A SHAMap is both a radix tree with a fan-out of 16 and a Merkle tree.
Definition SHAMap.h:78
void visitDifferences(SHAMap const *have, std::function< bool(SHAMapTreeNode const &)> const &) const
Visit every node in this SHAMap that is not present in the specified SHAMap.
void skip(int num)
std::uint32_t get32()
void const * getDataPtr() const
Definition Serializer.h:198
int getLength() const
Definition Serializer.h:208
time_point now() const override
Returns the current time, using the server's clock.
Definition TimeKeeper.h:44
time_point closeTime() const
Returns the predicted close time, in network time.
Definition TimeKeeper.h:56
static time_point now()
std::vector< WrappedValidationType > getTrustedForLedger(ID const &ledgerID, Seq const &seq)
Get trusted full validations for a specific ledger.
std::vector< WrappedValidationType > currentTrusted()
Get the currently trusted full validations.
std::vector< std::uint32_t > fees(ID const &ledgerID, std::uint32_t baseFee)
Returns fees reported by trusted full validators in the given ledger.
QuorumKeys getQuorumKeys() const
Get the quorum and all of the trusted keys.
std::size_t quorum() const
Get quorum value for current trusted key set.
std::vector< std::shared_ptr< STValidation > > negativeUNLFilter(std::vector< std::shared_ptr< STValidation > > &&validations) const
Remove validations that are from validators on the negative UNL.
bool isNonZero() const
Definition base_uint.h:514
Automatically unlocks and re-locks a unique_lock object.
Definition scope.h:198
T clear(T... args)
T copy(T... args)
T count(T... args)
T empty(T... args)
T end(T... args)
T endl(T... args)
T find(T... args)
T is_same_v
T load(T... args)
T lock(T... args)
T make_pair(T... args)
T max(T... args)
T min(T... args)
STL namespace.
bool isNewerVersion(std::uint64_t version)
Check if the version is newer than the local node's rippled software version.
bool isRippledVersion(std::uint64_t version)
Check if the encoded software version is a rippled software version.
Charge const feeMalformedRequest
Schedule of fees charged for imposing load on the server.
Charge const feeRequestNoReply
TER valid(STTx const &tx, ReadView const &view, AccountID const &src, beast::Journal j)
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition algorithm.h:6
bool set(T &target, std::string const &name, Section const &section)
Set a value from a configuration Section. If the named value is not found or doesn't parse as a T,...
bool isCurrent(ValidationParms const &p, NetClock::time_point now, NetClock::time_point signTime, NetClock::time_point seenTime)
Whether a validation is still current.
void LogicError(std::string const &how) noexcept
Called when faulty logic causes a broken invariant.
std::optional< T > prevMissing(RangeSet< T > const &rs, T t, T minVal=0)
Find the largest value not in the set that is less than a given value.
Definition RangeSet.h:164
static constexpr std::chrono::minutes MAX_LEDGER_AGE_ACQUIRE
sha512_half_hasher::result_type sha512Half(Args const &... args)
Returns the SHA512-Half of a series of objects.
Definition digest.h:205
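sha512Half() hash-appends each argument in order and returns the first 256 bits of the SHA-512 digest; it is commonly paired with a HashPrefix value such as the ledgerMaster entry further down to domain-separate the input. A minimal sketch, assuming the protocol headers named in the listing; the helper name and payload are illustrative:

#include <xrpl/basics/Slice.h>          // Slice / makeSlice (see the makeSlice entry below) -- path assumed
#include <xrpl/protocol/HashPrefix.h>   // HashPrefix::ledgerMaster
#include <xrpl/protocol/digest.h>       // sha512Half

namespace xrpl {

// Hypothetical helper: prefix a blob for domain separation and take the
// SHA512-Half of the combined stream.
uint256
hashWithPrefix(Slice payload)
{
    return sha512Half(HashPrefix::ledgerMaster, payload);
}

} // namespace xrpl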
Stopwatch & stopwatch()
Returns an instance of a wall clock.
Definition chrono.h:94
ClosedInterval< T > range(T low, T high)
Create a closed range interval.
Definition RangeSet.h:35
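The mCompleteLedgers range set above, together with range() and prevMissing(), is how gaps in held history are located. A minimal sketch, assuming the RangeSet helpers come from xrpl/basics/RangeSet.h and using illustrative sequence numbers:

#include <xrpl/basics/RangeSet.h>   // RangeSet, range(), prevMissing() -- path assumed

namespace xrpl {

// Hypothetical helper: given the ledgers we hold and the validated sequence,
// report the largest missing sequence below it (if any), which is roughly
// the question doAdvance() asks before backfilling history.
std::optional<std::uint32_t>
nextToBackfill(std::uint32_t validatedSeq)
{
    RangeSet<std::uint32_t> complete;
    complete.insert(range(32570u, 32600u));         // illustrative ranges only
    complete.insert(range(32650u, validatedSeq));
    return prevMissing(complete, validatedSeq);     // largest value below validatedSeq not in the set
}

} // namespace xrpl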
std::string to_string(base_uint< Bits, Tag > const &a)
Definition base_uint.h:598
constexpr std::size_t calculatePercent(std::size_t count, std::size_t total)
Calculate one number divided by another, expressed as a percentage.
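A worked example of the helper, assuming it is the one pulled in through xrpl/basics/MathUtilities.h in the listing's includes; the figures are illustrative and chosen so no rounding is involved:

#include <xrpl/basics/MathUtilities.h>   // calculatePercent
#include <iostream>

int main()
{
    using namespace xrpl;                // namespace per the listing above
    // 192 of a 256-ledger window already on hand: exactly 75%.
    std::size_t const have = 192;
    std::size_t const want = 256;
    std::cout << "coverage: " << calculatePercent(have, want) << "%\n";
    return 0;
}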
static void populateFetchPack(SHAMap const &want, SHAMap const *have, std::uint32_t cnt, protocol::TMGetObjectByHash *into, std::uint32_t seq, bool withLeaves=true)
Populate a fetch pack with data from the map the recipient wants.
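The fetch-pack entries above (addFetchPack, getFetchPack, makeFetchPack, and this helper) cooperate to ship a peer the ledger nodes it lacks. A deliberately simplified sketch of the core idea, built only from visitDifferences() and serializeWithPrefix() as declared above, collecting blobs into a plain vector rather than the protocol::TMGetObjectByHash message the real helper fills; names and includes are illustrative:

#include <xrpl/protocol/Serializer.h>   // Serializer -- path assumed
#include <vector>
// SHAMap / SHAMapTreeNode headers omitted for brevity

namespace xrpl {

// Hypothetical, simplified stand-in for populateFetchPack(): serialize every
// node of `want` that `have` lacks, stopping once the budget is reached.
std::vector<Blob>
collectMissingNodes(SHAMap const& want, SHAMap const* have, std::size_t maxNodes)
{
    std::vector<Blob> blobs;
    blobs.reserve(maxNodes);
    want.visitDifferences(have, [&](SHAMapTreeNode const& node) {
        Serializer s;
        node.serializeWithPrefix(s);
        auto const* p = static_cast<unsigned char const*>(s.getDataPtr());
        blobs.emplace_back(p, p + s.getLength());
        return blobs.size() < maxNodes;   // keep visiting while under budget
    });
    return blobs;
}

} // namespace xrpl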
bool pendSaveValidated(Application &app, std::shared_ptr< Ledger const > const &ledger, bool isSynchronous, bool isCurrent)
Save, or arrange to save, a fully-validated ledger. Returns false on error.
Definition Ledger.cpp:924
SizedItem
Definition Config.h:25
bool areCompatible(ReadView const &validLedger, ReadView const &testLedger, beast::Journal::Stream &s, char const *reason)
Return false if the test ledger is provably incompatible with the valid ledger, that is,...
Definition View.cpp:735
LedgerIndex getCandidateLedger(LedgerIndex requested)
Find a ledger index from which we could easily get the requested ledger.
Definition View.h:400
std::optional< uint256 > hashOfSeq(ReadView const &ledger, LedgerIndex seq, beast::Journal journal)
Return the hash of a ledger by sequence.
Definition View.cpp:878
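getCandidateLedger() and hashOfSeq() above are the building blocks for turning "I need ledger N" into a hash that can actually be requested: pick a nearby candidate index, then recover its hash from a ledger already held by reading its skip list. A minimal sketch with an assumed helper name; includes are omitted:

namespace xrpl {

// Hypothetical helper: the hash (if recoverable) of a candidate ledger near
// `seq`, read out of a held ledger's skip list.
std::optional<uint256>
hashForHistory(ReadView const& held, LedgerIndex seq, beast::Journal j)
{
    LedgerIndex const candidate = getCandidateLedger(seq);
    return hashOfSeq(held, candidate, j);
}

} // namespace xrpl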
@ jtLEDGER_DATA
Definition Job.h:46
@ jtUPDATE_PF
Definition Job.h:36
@ jtPUBOLDLEDGER
Definition Job.h:24
@ jtADVANCE
Definition Job.h:47
void addRaw(LedgerHeader const &, Serializer &, bool includeHash=false)
static constexpr int MAX_LEDGER_GAP
@ ledgerMaster
ledger master data for signing
std::enable_if_t< std::is_same< T, char >::value||std::is_same< T, unsigned char >::value, Slice > makeSlice(std::array< T, N > const &a)
Definition Slice.h:214
static constexpr int MAX_WRITE_LOAD_ACQUIRE
static bool shouldAcquire(std::uint32_t const currentLedger, std::uint32_t const ledgerHistory, std::optional< LedgerIndex > const minimumOnline, std::uint32_t const candidateLedger, beast::Journal j)
T has_value(T... args)
T push_back(T... args)
T reserve(T... args)
T size(T... args)
T sort(T... args)
T str(T... args)
T swap(T... args)
T test_and_set(T... args)
T time_since_epoch(T... args)
T what(T... args)