//------------------------------------------------------------------------------
//  rippled — LedgerMaster.cpp
//  (Scrape-viewer chrome "Loading…/Searching…/No Matches" removed.)
//------------------------------------------------------------------------------
1#include <xrpld/app/consensus/RCLValidations.h>
2#include <xrpld/app/ledger/Ledger.h>
3#include <xrpld/app/ledger/LedgerMaster.h>
4#include <xrpld/app/ledger/LedgerReplayer.h>
5#include <xrpld/app/ledger/OpenLedger.h>
6#include <xrpld/app/ledger/OrderBookDB.h>
7#include <xrpld/app/ledger/PendingSaves.h>
8#include <xrpld/app/main/Application.h>
9#include <xrpld/app/misc/AmendmentTable.h>
10#include <xrpld/app/misc/LoadFeeTrack.h>
11#include <xrpld/app/misc/NetworkOPs.h>
12#include <xrpld/app/misc/SHAMapStore.h>
13#include <xrpld/app/misc/Transaction.h>
14#include <xrpld/app/misc/TxQ.h>
15#include <xrpld/app/misc/ValidatorList.h>
16#include <xrpld/app/paths/PathRequests.h>
17#include <xrpld/app/rdb/RelationalDatabase.h>
18#include <xrpld/core/TimeKeeper.h>
19#include <xrpld/overlay/Overlay.h>
20#include <xrpld/overlay/Peer.h>
21
22#include <xrpl/basics/Log.h>
23#include <xrpl/basics/MathUtilities.h>
24#include <xrpl/basics/UptimeClock.h>
25#include <xrpl/basics/contract.h>
26#include <xrpl/basics/safe_cast.h>
27#include <xrpl/basics/scope.h>
28#include <xrpl/beast/utility/instrumentation.h>
29#include <xrpl/protocol/BuildInfo.h>
30#include <xrpl/protocol/HashPrefix.h>
31#include <xrpl/protocol/digest.h>
32#include <xrpl/resource/Fees.h>
33
34#include <algorithm>
35#include <chrono>
36#include <cstdlib>
37#include <memory>
38#include <vector>
39
40namespace ripple {
41
// Don't catch up more than 100 ledgers (cannot exceed 256)
static constexpr int MAX_LEDGER_GAP{100};

// Don't acquire history if ledger is too old
// NOTE(review): this definition was dropped by the extraction; restored from
// upstream rippled — confirm the value against the repository.
static constexpr std::chrono::minutes MAX_LEDGER_AGE_ACQUIRE{1};

// Don't acquire history if write load is too high
static constexpr int MAX_WRITE_LOAD_ACQUIRE{8192};
50
51// Helper function for LedgerMaster::doAdvance()
52// Return true if candidateLedger should be fetched from the network.
53static bool
55 std::uint32_t const currentLedger,
56 std::uint32_t const ledgerHistory,
57 std::optional<LedgerIndex> const minimumOnline,
58 std::uint32_t const candidateLedger,
60{
61 bool const ret = [&]() {
62 // Fetch ledger if it may be the current ledger
63 if (candidateLedger >= currentLedger)
64 return true;
65
66 // Or if it is within our configured history range:
67 if (currentLedger - candidateLedger <= ledgerHistory)
68 return true;
69
70 // Or if greater than or equal to a specific minimum ledger.
71 // Do nothing if the minimum ledger to keep online is unknown.
72 return minimumOnline.has_value() && candidateLedger >= *minimumOnline;
73 }();
74
75 JLOG(j.trace()) << "Missing ledger " << candidateLedger
76 << (ret ? " should" : " should NOT") << " be acquired";
77 return ret;
78}
79
81 Application& app,
83 beast::insight::Collector::ptr const& collector,
84 beast::Journal journal)
85 : app_(app)
86 , m_journal(journal)
87 , mLedgerHistory(collector, app)
88 , standalone_(app_.config().standalone())
89 , fetch_depth_(
90 app_.getSHAMapStore().clampFetchDepth(app_.config().FETCH_DEPTH))
91 , ledger_history_(app_.config().LEDGER_HISTORY)
92 , ledger_fetch_size_(app_.config().getValueFor(SizedItem::ledgerFetch))
93 , fetch_packs_(
94 "FetchPack",
95 65536,
96 std::chrono::seconds{45},
98 app_.journal("TaggedCache"))
99 , m_stats(std::bind(&LedgerMaster::collect_metrics, this), collector)
100{
101}
102
105{
106 return app_.openLedger().current()->info().seq;
107}
108
114
115bool
117 ReadView const& view,
119 char const* reason)
120{
121 auto validLedger = getValidatedLedger();
122
123 if (validLedger && !areCompatible(*validLedger, view, s, reason))
124 {
125 return false;
126 }
127
128 {
130
131 if ((mLastValidLedger.second != 0) &&
133 mLastValidLedger.first,
134 mLastValidLedger.second,
135 view,
136 s,
137 reason))
138 {
139 return false;
140 }
141 }
142
143 return true;
144}
145
148{
149 using namespace std::chrono_literals;
151 if (pubClose == 0s)
152 {
153 JLOG(m_journal.debug()) << "No published ledger";
154 return weeks{2};
155 }
156
157 std::chrono::seconds ret = app_.timeKeeper().closeTime().time_since_epoch();
158 ret -= pubClose;
159 ret = (ret > 0s) ? ret : 0s;
160 static std::chrono::seconds lastRet = -1s;
161
162 if (ret != lastRet)
163 {
164 JLOG(m_journal.trace()) << "Published ledger age is " << ret.count();
165 lastRet = ret;
166 }
167 return ret;
168}
169
172{
173 using namespace std::chrono_literals;
174
176 if (valClose == 0s)
177 {
178 JLOG(m_journal.debug()) << "No validated ledger";
179 return weeks{2};
180 }
181
182 std::chrono::seconds ret = app_.timeKeeper().closeTime().time_since_epoch();
183 ret -= valClose;
184 ret = (ret > 0s) ? ret : 0s;
185 static std::chrono::seconds lastRet = -1s;
186
187 if (ret != lastRet)
188 {
189 JLOG(m_journal.trace()) << "Validated ledger age is " << ret.count();
190 lastRet = ret;
191 }
192 return ret;
193}
194
195bool
197{
198 using namespace std::chrono_literals;
199
200 if (getPublishedLedgerAge() > 3min)
201 {
202 reason = "No recently-published ledger";
203 return false;
204 }
205 std::uint32_t validClose = mValidLedgerSign.load();
207 if (!validClose || !pubClose)
208 {
209 reason = "No published ledger";
210 return false;
211 }
212 if (validClose > (pubClose + 90))
213 {
214 reason = "Published ledger lags validated ledger";
215 return false;
216 }
217 return true;
218}
219
220void
222{
224 std::optional<uint256> consensusHash;
225
226 if (!standalone_)
227 {
228 auto validations = app_.validators().negativeUNLFilter(
230 l->info().hash, l->info().seq));
231 times.reserve(validations.size());
232 for (auto const& val : validations)
233 times.push_back(val->getSignTime());
234
235 if (!validations.empty())
236 consensusHash = validations.front()->getConsensusHash();
237 }
238
239 NetClock::time_point signTime;
240
241 if (!times.empty() && times.size() >= app_.validators().quorum())
242 {
243 // Calculate the sample median
244 std::sort(times.begin(), times.end());
245 auto const t0 = times[(times.size() - 1) / 2];
246 auto const t1 = times[times.size() / 2];
247 signTime = t0 + (t1 - t0) / 2;
248 }
249 else
250 {
251 signTime = l->info().closeTime;
252 }
253
254 mValidLedger.set(l);
255 mValidLedgerSign = signTime.time_since_epoch().count();
256 XRPL_ASSERT(
258 l->info().seq + max_ledger_difference_ >
260 "ripple::LedgerMaster::setValidLedger : valid ledger sequence");
262 mValidLedgerSeq = l->info().seq;
263
266 mLedgerHistory.validatedLedger(l, consensusHash);
268 if (!app_.getOPs().isBlocked())
269 {
271 {
272 JLOG(m_journal.error()) << "One or more unsupported amendments "
273 "activated: server blocked.";
275 }
276 else if (!app_.getOPs().isAmendmentWarned() || l->isFlagLedger())
277 {
278 // Amendments can lose majority, so re-check periodically (every
279 // flag ledger), and clear the flag if appropriate. If an unknown
280 // amendment gains majority log a warning as soon as it's
281 // discovered, then again every flag ledger until the operator
282 // upgrades, the amendment loses majority, or the amendment goes
283 // live and the node gets blocked. Unlike being amendment blocked,
284 // this message may be logged more than once per session, because
285 // the node will otherwise function normally, and this gives
286 // operators an opportunity to see and resolve the warning.
287 if (auto const first =
289 {
290 JLOG(m_journal.error()) << "One or more unsupported amendments "
291 "reached majority. Upgrade before "
292 << to_string(*first)
293 << " to prevent your server from "
294 "becoming amendment blocked.";
296 }
297 else
299 }
300 }
301}
302
303void
305{
306 mPubLedger = l;
307 mPubLedgerClose = l->info().closeTime.time_since_epoch().count();
308 mPubLedgerSeq = l->info().seq;
309}
310
311void
313 std::shared_ptr<Transaction> const& transaction)
314{
316 mHeldTransactions.insert(transaction->getSTransaction());
317}
318
319// Validate a ledger's close time and sequence number if we're considering
320// jumping to that ledger. This helps defend against some rare hostile or
321// diverged majority scenarios.
322bool
324{
325 XRPL_ASSERT(ledger, "ripple::LedgerMaster::canBeCurrent : non-null input");
326
327 // Never jump to a candidate ledger that precedes our
328 // last validated ledger
329
330 auto validLedger = getValidatedLedger();
331 if (validLedger && (ledger->info().seq < validLedger->info().seq))
332 {
333 JLOG(m_journal.trace())
334 << "Candidate for current ledger has low seq " << ledger->info().seq
335 << " < " << validLedger->info().seq;
336 return false;
337 }
338
339 // Ensure this ledger's parent close time is within five minutes of
340 // our current time. If we already have a known fully-valid ledger
341 // we perform this check. Otherwise, we only do it if we've built a
342 // few ledgers as our clock can be off when we first start up
343
344 auto closeTime = app_.timeKeeper().closeTime();
345 auto ledgerClose = ledger->info().parentCloseTime;
346
347 using namespace std::chrono_literals;
348 if ((validLedger || (ledger->info().seq > 10)) &&
349 ((std::max(closeTime, ledgerClose) - std::min(closeTime, ledgerClose)) >
350 5min))
351 {
352 JLOG(m_journal.warn())
353 << "Candidate for current ledger has close time "
354 << to_string(ledgerClose) << " at network time "
355 << to_string(closeTime) << " seq " << ledger->info().seq;
356 return false;
357 }
358
359 if (validLedger)
360 {
361 // Sequence number must not be too high. We allow ten ledgers
362 // for time inaccuracies plus a maximum run rate of one ledger
363 // every two seconds. The goal is to prevent a malicious ledger
364 // from increasing our sequence unreasonably high
365
366 LedgerIndex maxSeq = validLedger->info().seq + 10;
367
368 if (closeTime > validLedger->info().parentCloseTime)
369 maxSeq += std::chrono::duration_cast<std::chrono::seconds>(
370 closeTime - validLedger->info().parentCloseTime)
371 .count() /
372 2;
373
374 if (ledger->info().seq > maxSeq)
375 {
376 JLOG(m_journal.warn())
377 << "Candidate for current ledger has high seq "
378 << ledger->info().seq << " > " << maxSeq;
379 return false;
380 }
381
382 JLOG(m_journal.trace())
383 << "Acceptable seq range: " << validLedger->info().seq
384 << " <= " << ledger->info().seq << " <= " << maxSeq;
385 }
386
387 return true;
388}
389
390void
392{
393 XRPL_ASSERT(lastClosed, "ripple::LedgerMaster::switchLCL : non-null input");
394 if (!lastClosed->isImmutable())
395 LogicError("mutable ledger in switchLCL");
396
397 if (lastClosed->open())
398 LogicError("The new last closed ledger is open!");
399
400 {
402 mClosedLedger.set(lastClosed);
403 }
404
405 if (standalone_)
406 {
407 setFullLedger(lastClosed, true, false);
408 tryAdvance();
409 }
410 else
411 {
412 checkAccept(lastClosed);
413 }
414}
415
416bool
417LedgerMaster::fixIndex(LedgerIndex ledgerIndex, LedgerHash const& ledgerHash)
418{
419 return mLedgerHistory.fixIndex(ledgerIndex, ledgerHash);
420}
421
422bool
424{
425 bool validated = ledger->info().validated;
426 // Returns true if we already had the ledger
427 return mLedgerHistory.insert(std::move(ledger), validated);
428}
429
435void
437{
438 CanonicalTXSet const set = [this]() {
440 // VFALCO NOTE The hash for an open ledger is undefined so we use
441 // something that is a reasonable substitute.
442 CanonicalTXSet set(app_.openLedger().current()->info().parentHash);
444 return set;
445 }();
446
447 if (!set.empty())
449}
450
458
459void
464
465bool
467{
469 return boost::icl::contains(mCompleteLedgers, seq);
470}
471
472void
478
479bool
481{
482 if (ledger.open())
483 return false;
484
485 if (ledger.info().validated)
486 return true;
487
488 auto const seq = ledger.info().seq;
489 try
490 {
491 // Use the skip list in the last validated ledger to see if ledger
492 // comes before the last validated ledger (and thus has been
493 // validated).
494 auto const hash = walkHashBySeq(seq, InboundLedger::Reason::GENERIC);
495
496 if (!hash || ledger.info().hash != *hash)
497 {
498 // This ledger's hash is not the hash of the validated ledger
499 if (hash)
500 {
501 XRPL_ASSERT(
502 hash->isNonZero(),
503 "ripple::LedgerMaster::isValidated : nonzero hash");
504 uint256 valHash =
506 if (valHash == ledger.info().hash)
507 {
508 // SQL database doesn't match ledger chain
509 clearLedger(seq);
510 }
511 }
512 return false;
513 }
514 }
515 catch (SHAMapMissingNode const& mn)
516 {
517 JLOG(m_journal.warn()) << "Ledger #" << seq << ": " << mn.what();
518 return false;
519 }
520
521 // Mark ledger as validated to save time if we see it again.
522 ledger.info().validated = true;
523 return true;
524}
525
526// returns Ledgers we have all the nodes for
527bool
529 std::uint32_t& minVal,
530 std::uint32_t& maxVal)
531{
532 // Validated ledger is likely not stored in the DB yet so we use the
533 // published ledger which is.
534 maxVal = mPubLedgerSeq.load();
535
536 if (!maxVal)
537 return false;
538
540 {
542 maybeMin = prevMissing(mCompleteLedgers, maxVal);
543 }
544
545 if (maybeMin == std::nullopt)
546 minVal = maxVal;
547 else
548 minVal = 1 + *maybeMin;
549
550 return true;
551}
552
553// Returns Ledgers we have all the nodes for and are indexed
554bool
556{
557 if (!getFullValidatedRange(minVal, maxVal))
558 return false;
559
560 // Remove from the validated range any ledger sequences that may not be
561 // fully updated in the database yet
562
563 auto const pendingSaves = app_.pendingSaves().getSnapshot();
564
565 if (!pendingSaves.empty() && ((minVal != 0) || (maxVal != 0)))
566 {
567 // Ensure we shrink the tips as much as possible. If we have 7-9 and
568 // 8,9 are invalid, we don't want to see the 8 and shrink to just 9
569 // because then we'll have nothing when we could have 7.
570 while (pendingSaves.count(maxVal) > 0)
571 --maxVal;
572 while (pendingSaves.count(minVal) > 0)
573 ++minVal;
574
575 // Best effort for remaining exclusions
576 for (auto v : pendingSaves)
577 {
578 if ((v.first >= minVal) && (v.first <= maxVal))
579 {
580 if (v.first > ((minVal + maxVal) / 2))
581 maxVal = v.first - 1;
582 else
583 minVal = v.first + 1;
584 }
585 }
586
587 if (minVal > maxVal)
588 minVal = maxVal = 0;
589 }
590
591 return true;
592}
593
594// Get the earliest ledger we will let peers fetch
597{
598 // The earliest ledger we will let people fetch is ledger zero,
599 // unless that creates a larger range than allowed
600 std::uint32_t e = getClosedLedger()->info().seq;
601
602 if (e > fetch_depth_)
603 e -= fetch_depth_;
604 else
605 e = 0;
606 return e;
607}
608
609void
611{
612 std::uint32_t seq = ledger->info().seq;
613 uint256 prevHash = ledger->info().parentHash;
614
616
617 std::uint32_t minHas = seq;
618 std::uint32_t maxHas = seq;
619
621 while (!app_.getJobQueue().isStopping() && seq > 0)
622 {
623 {
625 minHas = seq;
626 --seq;
627
628 if (haveLedger(seq))
629 break;
630 }
631
632 auto it(ledgerHashes.find(seq));
633
634 if (it == ledgerHashes.end())
635 {
636 if (app_.isStopping())
637 return;
638
639 {
641 mCompleteLedgers.insert(range(minHas, maxHas));
642 }
643 maxHas = minHas;
645 (seq < 500) ? 0 : (seq - 499), seq);
646 it = ledgerHashes.find(seq);
647
648 if (it == ledgerHashes.end())
649 break;
650
651 if (!nodeStore.fetchNodeObject(
652 ledgerHashes.begin()->second.ledgerHash,
653 ledgerHashes.begin()->first))
654 {
655 // The ledger is not backed by the node store
656 JLOG(m_journal.warn()) << "SQL DB ledger sequence " << seq
657 << " mismatches node store";
658 break;
659 }
660 }
661
662 if (it->second.ledgerHash != prevHash)
663 break;
664
665 prevHash = it->second.parentHash;
666 }
667
668 {
670 mCompleteLedgers.insert(range(minHas, maxHas));
671 }
672 {
674 mFillInProgress = 0;
675 tryAdvance();
676 }
677}
678
681void
683{
684 LedgerIndex const ledgerIndex = missing + 1;
685
686 auto const haveHash{getLedgerHashForHistory(ledgerIndex, reason)};
687 if (!haveHash || haveHash->isZero())
688 {
689 JLOG(m_journal.error())
690 << "No hash for fetch pack. Missing Index " << missing;
691 return;
692 }
693
694 // Select target Peer based on highest score. The score is randomized
695 // but biased in favor of Peers with low latency.
697 {
698 int maxScore = 0;
699 auto peerList = app_.overlay().getActivePeers();
700 for (auto const& peer : peerList)
701 {
702 if (peer->hasRange(missing, missing + 1))
703 {
704 int score = peer->getScore(true);
705 if (!target || (score > maxScore))
706 {
707 target = peer;
708 maxScore = score;
709 }
710 }
711 }
712 }
713
714 if (target)
715 {
716 protocol::TMGetObjectByHash tmBH;
717 tmBH.set_query(true);
718 tmBH.set_type(protocol::TMGetObjectByHash::otFETCH_PACK);
719 tmBH.set_ledgerhash(haveHash->begin(), 32);
720 auto packet = std::make_shared<Message>(tmBH, protocol::mtGET_OBJECTS);
721
722 target->send(packet);
723 JLOG(m_journal.trace()) << "Requested fetch pack for " << missing;
724 }
725 else
726 JLOG(m_journal.debug()) << "No peer for fetch pack";
727}
728
729void
731{
732 int invalidate = 0;
734
735 for (std::uint32_t lSeq = ledger.info().seq - 1; lSeq > 0; --lSeq)
736 {
737 if (haveLedger(lSeq))
738 {
739 try
740 {
741 hash = hashOfSeq(ledger, lSeq, m_journal);
742 }
743 catch (std::exception const& ex)
744 {
745 JLOG(m_journal.warn())
746 << "fixMismatch encounters partial ledger. Exception: "
747 << ex.what();
748 clearLedger(lSeq);
749 return;
750 }
751
752 if (hash)
753 {
754 // try to close the seam
755 auto otherLedger = getLedgerBySeq(lSeq);
756
757 if (otherLedger && (otherLedger->info().hash == *hash))
758 {
759 // we closed the seam
760 if (invalidate != 0)
761 {
762 JLOG(m_journal.warn())
763 << "Match at " << lSeq << ", " << invalidate
764 << " prior ledgers invalidated";
765 }
766
767 return;
768 }
769 }
770
771 clearLedger(lSeq);
772 ++invalidate;
773 }
774 }
775
776 // all prior ledgers invalidated
777 if (invalidate != 0)
778 {
779 JLOG(m_journal.warn())
780 << "All " << invalidate << " prior ledgers invalidated";
781 }
782}
783
784void
786 std::shared_ptr<Ledger const> const& ledger,
787 bool isSynchronous,
788 bool isCurrent)
789{
790 // A new ledger has been accepted as part of the trusted chain
791 JLOG(m_journal.debug()) << "Ledger " << ledger->info().seq
792 << " accepted :" << ledger->info().hash;
793 XRPL_ASSERT(
794 ledger->stateMap().getHash().isNonZero(),
795 "ripple::LedgerMaster::setFullLedger : nonzero ledger state hash");
796
797 ledger->setValidated();
798 ledger->setFull();
799
800 if (isCurrent)
801 mLedgerHistory.insert(ledger, true);
802
803 {
804 // Check the SQL database's entry for the sequence before this
805 // ledger, if it's not this ledger's parent, invalidate it
806 uint256 prevHash =
807 app_.getRelationalDatabase().getHashByIndex(ledger->info().seq - 1);
808 if (prevHash.isNonZero() && prevHash != ledger->info().parentHash)
809 clearLedger(ledger->info().seq - 1);
810 }
811
812 pendSaveValidated(app_, ledger, isSynchronous, isCurrent);
813
814 {
816 mCompleteLedgers.insert(ledger->info().seq);
817 }
818
819 {
821
822 if (ledger->info().seq > mValidLedgerSeq)
823 setValidLedger(ledger);
824 if (!mPubLedger)
825 {
826 setPubLedger(ledger);
827 app_.getOrderBookDB().setup(ledger);
828 }
829
830 if (ledger->info().seq != 0 && haveLedger(ledger->info().seq - 1))
831 {
832 // we think we have the previous ledger, double check
833 auto prevLedger = getLedgerBySeq(ledger->info().seq - 1);
834
835 if (!prevLedger ||
836 (prevLedger->info().hash != ledger->info().parentHash))
837 {
838 JLOG(m_journal.warn())
839 << "Acquired ledger invalidates previous ledger: "
840 << (prevLedger ? "hashMismatch" : "missingLedger");
841 fixMismatch(*ledger);
842 }
843 }
844 }
845}
846
847void
853
854// Check if the specified ledger can become the new last fully-validated
855// ledger.
856void
858{
859 std::size_t valCount = 0;
860
861 if (seq != 0)
862 {
863 // Ledger is too old
864 if (seq < mValidLedgerSeq)
865 return;
866
867 auto validations = app_.validators().negativeUNLFilter(
869 valCount = validations.size();
870 if (valCount >= app_.validators().quorum())
871 {
873 if (seq > mLastValidLedger.second)
874 mLastValidLedger = std::make_pair(hash, seq);
875 }
876
877 if (seq == mValidLedgerSeq)
878 return;
879
880 // Ledger could match the ledger we're already building
881 if (seq == mBuildingLedgerSeq)
882 return;
883 }
884
885 auto ledger = mLedgerHistory.getLedgerByHash(hash);
886
887 if (!ledger)
888 {
889 if ((seq != 0) && (getValidLedgerIndex() == 0))
890 {
891 // Set peers converged early if we can
892 if (valCount >= app_.validators().quorum())
894 }
895
896 // FIXME: We may not want to fetch a ledger with just one
897 // trusted validation
898 ledger = app_.getInboundLedgers().acquire(
900 }
901
902 if (ledger)
903 checkAccept(ledger);
904}
905
916
917void
919{
920 // Can we accept this ledger as our new last fully-validated ledger
921
922 if (!canBeCurrent(ledger))
923 return;
924
925 // Can we advance the last fully-validated ledger? If so, can we
926 // publish?
928
929 if (ledger->info().seq <= mValidLedgerSeq)
930 return;
931
932 auto const minVal = getNeededValidations();
933 auto validations = app_.validators().negativeUNLFilter(
935 ledger->info().hash, ledger->info().seq));
936 auto const tvc = validations.size();
937 if (tvc < minVal) // nothing we can do
938 {
939 JLOG(m_journal.trace())
940 << "Only " << tvc << " validations for " << ledger->info().hash;
941 return;
942 }
943
944 JLOG(m_journal.info()) << "Advancing accepted ledger to "
945 << ledger->info().seq << " with >= " << minVal
946 << " validations";
947
948 ledger->setValidated();
949 ledger->setFull();
950 setValidLedger(ledger);
951 if (!mPubLedger)
952 {
953 pendSaveValidated(app_, ledger, true, true);
954 setPubLedger(ledger);
955 app_.getOrderBookDB().setup(ledger);
956 }
957
958 std::uint32_t const base = app_.getFeeTrack().getLoadBase();
959 auto fees = app_.getValidations().fees(ledger->info().hash, base);
960 {
961 auto fees2 =
962 app_.getValidations().fees(ledger->info().parentHash, base);
963 fees.reserve(fees.size() + fees2.size());
964 std::copy(fees2.begin(), fees2.end(), std::back_inserter(fees));
965 }
966 std::uint32_t fee;
967 if (!fees.empty())
968 {
969 std::sort(fees.begin(), fees.end());
970 if (auto stream = m_journal.debug())
971 {
973 s << "Received fees from validations: (" << fees.size() << ") ";
974 for (auto const fee1 : fees)
975 {
976 s << " " << fee1;
977 }
978 stream << s.str();
979 }
980 fee = fees[fees.size() / 2]; // median
981 }
982 else
983 {
984 fee = base;
985 }
986
988
989 tryAdvance();
990
991 if (ledger->seq() % 256 == 0)
992 {
993 // Check if the majority of validators run a higher version rippled
994 // software. If so print a warning.
995 //
996 // Validators include their rippled software version in the validation
997 // messages of every (flag - 1) ledger. We wait for one ledger time
998 // before checking the version information to accumulate more validation
999 // messages.
1000
1001 auto currentTime = app_.timeKeeper().now();
1002 bool needPrint = false;
1003
1004 // The variable upgradeWarningPrevTime_ will be set when and only when
1005 // the warning is printed.
1007 {
1008 // Have not printed the warning before, check if need to print.
1009 auto const vals = app_.getValidations().getTrustedForLedger(
1010 ledger->info().parentHash, ledger->info().seq - 1);
1011 std::size_t higherVersionCount = 0;
1012 std::size_t rippledCount = 0;
1013 for (auto const& v : vals)
1014 {
1015 if (v->isFieldPresent(sfServerVersion))
1016 {
1017 auto version = v->getFieldU64(sfServerVersion);
1018 higherVersionCount +=
1019 BuildInfo::isNewerVersion(version) ? 1 : 0;
1020 rippledCount +=
1021 BuildInfo::isRippledVersion(version) ? 1 : 0;
1022 }
1023 }
1024 // We report only if (1) we have accumulated validation messages
1025 // from 90% validators from the UNL, (2) 60% of validators
1026 // running the rippled implementation have higher version numbers,
1027 // and (3) the calculation won't cause divide-by-zero.
1028 if (higherVersionCount > 0 && rippledCount > 0)
1029 {
1030 constexpr std::size_t reportingPercent = 90;
1031 constexpr std::size_t cutoffPercent = 60;
1032 auto const unlSize{
1033 app_.validators().getQuorumKeys().second.size()};
1034 needPrint = unlSize > 0 &&
1035 calculatePercent(vals.size(), unlSize) >=
1036 reportingPercent &&
1037 calculatePercent(higherVersionCount, rippledCount) >=
1038 cutoffPercent;
1039 }
1040 }
1041 // To throttle the warning messages, instead of printing a warning
1042 // every flag ledger, we print every week.
1043 else if (currentTime - upgradeWarningPrevTime_ >= weeks{1})
1044 {
1045 // Printed the warning before, and assuming most validators
1046 // do not downgrade, we keep printing the warning
1047 // until the local server is restarted.
1048 needPrint = true;
1049 }
1050
1051 if (needPrint)
1052 {
1053 upgradeWarningPrevTime_ = currentTime;
1054 auto const upgradeMsg =
1055 "Check for upgrade: "
1056 "A majority of trusted validators are "
1057 "running a newer version.";
1058 std::cerr << upgradeMsg << std::endl;
1059 JLOG(m_journal.error()) << upgradeMsg;
1060 }
1061 }
1062}
1063
1065void
1067 std::shared_ptr<Ledger const> const& ledger,
1068 uint256 const& consensusHash,
1069 Json::Value consensus)
1070{
1071 // Because we just built a ledger, we are no longer building one
1073
1074 // No need to process validations in standalone mode
1075 if (standalone_)
1076 return;
1077
1078 mLedgerHistory.builtLedger(ledger, consensusHash, std::move(consensus));
1079
1080 if (ledger->info().seq <= mValidLedgerSeq)
1081 {
1082 auto stream = app_.journal("LedgerConsensus").info();
1083 JLOG(stream) << "Consensus built old ledger: " << ledger->info().seq
1084 << " <= " << mValidLedgerSeq;
1085 return;
1086 }
1087
1088 // See if this ledger can be the new fully-validated ledger
1089 checkAccept(ledger);
1090
1091 if (ledger->info().seq <= mValidLedgerSeq)
1092 {
1093 auto stream = app_.journal("LedgerConsensus").debug();
1094 JLOG(stream) << "Consensus ledger fully validated";
1095 return;
1096 }
1097
1098 // This ledger cannot be the new fully-validated ledger, but
1099 // maybe we saved up validations for some other ledger that can be
1100
1101 auto validations = app_.validators().negativeUNLFilter(
1103
1104 // Track validation counts with sequence numbers
1105 class valSeq
1106 {
1107 public:
1108 valSeq() : valCount_(0), ledgerSeq_(0)
1109 {
1110 ;
1111 }
1112
1113 void
1114 mergeValidation(LedgerIndex seq)
1115 {
1116 valCount_++;
1117
1118 // If we didn't already know the sequence, now we do
1119 if (ledgerSeq_ == 0)
1120 ledgerSeq_ = seq;
1121 }
1122
1123 std::size_t valCount_;
1124 LedgerIndex ledgerSeq_;
1125 };
1126
1127 // Count the number of current, trusted validations
1129 for (auto const& v : validations)
1130 {
1131 valSeq& vs = count[v->getLedgerHash()];
1132 vs.mergeValidation(v->getFieldU32(sfLedgerSequence));
1133 }
1134
1135 auto const neededValidations = getNeededValidations();
1136 auto maxSeq = mValidLedgerSeq.load();
1137 auto maxLedger = ledger->info().hash;
1138
1139 // Of the ledgers with sufficient validations,
1140 // find the one with the highest sequence
1141 for (auto& v : count)
1142 if (v.second.valCount_ > neededValidations)
1143 {
1144 // If we still don't know the sequence, get it
1145 if (v.second.ledgerSeq_ == 0)
1146 {
1147 if (auto l = getLedgerByHash(v.first))
1148 v.second.ledgerSeq_ = l->info().seq;
1149 }
1150
1151 if (v.second.ledgerSeq_ > maxSeq)
1152 {
1153 maxSeq = v.second.ledgerSeq_;
1154 maxLedger = v.first;
1155 }
1156 }
1157
1158 if (maxSeq > mValidLedgerSeq)
1159 {
1160 auto stream = app_.journal("LedgerConsensus").debug();
1161 JLOG(stream) << "Consensus triggered check of ledger";
1162 checkAccept(maxLedger, maxSeq);
1163 }
1164}
1165
1168 LedgerIndex index,
1169 InboundLedger::Reason reason)
1170{
1171 // Try to get the hash of a ledger we need to fetch for history
1173 auto const& l{mHistLedger};
1174
1175 if (l && l->info().seq >= index)
1176 {
1177 ret = hashOfSeq(*l, index, m_journal);
1178 if (!ret)
1179 ret = walkHashBySeq(index, l, reason);
1180 }
1181
1182 if (!ret)
1183 ret = walkHashBySeq(index, reason);
1184
1185 return ret;
1186}
1187
1191{
1193
1194 JLOG(m_journal.trace()) << "findNewLedgersToPublish<";
1195
1196 // No valid ledger, nothing to do
1197 if (mValidLedger.empty())
1198 {
1199 JLOG(m_journal.trace()) << "No valid journal, nothing to publish.";
1200 return {};
1201 }
1202
1203 if (!mPubLedger)
1204 {
1205 JLOG(m_journal.info())
1206 << "First published ledger will be " << mValidLedgerSeq;
1207 return {mValidLedger.get()};
1208 }
1209
1211 {
1212 JLOG(m_journal.warn()) << "Gap in validated ledger stream "
1213 << mPubLedgerSeq << " - " << mValidLedgerSeq - 1;
1214
1215 auto valLedger = mValidLedger.get();
1216 ret.push_back(valLedger);
1217 setPubLedger(valLedger);
1218 app_.getOrderBookDB().setup(valLedger);
1219
1220 return {valLedger};
1221 }
1222
1224 {
1225 JLOG(m_journal.trace()) << "No valid journal, nothing to publish.";
1226 return {};
1227 }
1228
1229 int acqCount = 0;
1230
1231 auto pubSeq = mPubLedgerSeq + 1; // Next sequence to publish
1232 auto valLedger = mValidLedger.get();
1233 std::uint32_t valSeq = valLedger->info().seq;
1234
1235 scope_unlock sul{sl};
1236 try
1237 {
1238 for (std::uint32_t seq = pubSeq; seq <= valSeq; ++seq)
1239 {
1240 JLOG(m_journal.trace())
1241 << "Trying to fetch/publish valid ledger " << seq;
1242
1244 // This can throw
1245 auto hash = hashOfSeq(*valLedger, seq, m_journal);
1246 // VFALCO TODO Restructure this code so that zero is not
1247 // used.
1248 if (!hash)
1249 hash = beast::zero; // kludge
1250 if (seq == valSeq)
1251 {
1252 // We need to publish the ledger we just fully validated
1253 ledger = valLedger;
1254 }
1255 else if (hash->isZero())
1256 {
1257 // LCOV_EXCL_START
1258 JLOG(m_journal.fatal()) << "Ledger: " << valSeq
1259 << " does not have hash for " << seq;
1260 UNREACHABLE(
1261 "ripple::LedgerMaster::findNewLedgersToPublish : ledger "
1262 "not found");
1263 // LCOV_EXCL_STOP
1264 }
1265 else
1266 {
1267 ledger = mLedgerHistory.getLedgerByHash(*hash);
1268 }
1269
1270 if (!app_.config().LEDGER_REPLAY)
1271 {
1272 // Can we try to acquire the ledger we need?
1273 if (!ledger && (++acqCount < ledger_fetch_size_))
1274 ledger = app_.getInboundLedgers().acquire(
1275 *hash, seq, InboundLedger::Reason::GENERIC);
1276 }
1277
1278 // Did we acquire the next ledger we need to publish?
1279 if (ledger && (ledger->info().seq == pubSeq))
1280 {
1281 ledger->setValidated();
1282 ret.push_back(ledger);
1283 ++pubSeq;
1284 }
1285 }
1286
1287 JLOG(m_journal.trace())
1288 << "ready to publish " << ret.size() << " ledgers.";
1289 }
1290 catch (std::exception const& ex)
1291 {
1292 JLOG(m_journal.error())
1293 << "Exception while trying to find ledgers to publish: "
1294 << ex.what();
1295 }
1296
1298 {
1299 /* Narrow down the gap of ledgers, and try to replay them.
1300 * When replaying a ledger gap, if the local node has
1301 * the start ledger, it saves an expensive InboundLedger
1302 * acquire. If the local node has the finish ledger, it
1303 * saves a skip list acquire.
1304 */
1305 auto const& startLedger = ret.empty() ? mPubLedger : ret.back();
1306 auto finishLedger = valLedger;
1307 while (startLedger->seq() + 1 < finishLedger->seq())
1308 {
1309 if (auto const parent = mLedgerHistory.getLedgerByHash(
1310 finishLedger->info().parentHash);
1311 parent)
1312 {
1313 finishLedger = parent;
1314 }
1315 else
1316 {
1317 auto numberLedgers =
1318 finishLedger->seq() - startLedger->seq() + 1;
1319 JLOG(m_journal.debug())
1320 << "Publish LedgerReplays " << numberLedgers
1321 << " ledgers, from seq=" << startLedger->info().seq << ", "
1322 << startLedger->info().hash
1323 << " to seq=" << finishLedger->info().seq << ", "
1324 << finishLedger->info().hash;
1327 finishLedger->info().hash,
1328 numberLedgers);
1329 break;
1330 }
1331 }
1332 }
1333
1334 return ret;
1335}
1336
// Schedule an asynchronous "advance" pass on the job queue: publish newly
// validated ledgers and acquire missing history (the work is in doAdvance()).
// NOTE(review): doxygen listing — the hyperlinked signature, lock acquisition,
// and the guard around mAdvanceThread are missing from this view (numbering gaps).
1337void
1339{
1341
1342    // Can't advance without at least one fully-valid ledger
1343    mAdvanceWork = true;
1345    {
        // Mark that an advance job has been dispatched before queueing it.
1346        mAdvanceThread = true;
1347        app_.getJobQueue().addJob(jtADVANCE, "advanceLedger", [this]() {
1349
1350            XRPL_ASSERT(
1352                "ripple::LedgerMaster::tryAdvance : has valid ledger");
1353
1354            JLOG(m_journal.trace()) << "advanceThread<";
1355
1356            try
1357            {
1358                doAdvance(sl);
1359            }
1360            catch (std::exception const& ex)
1361            {
                // doAdvance is not expected to throw; log loudly if it does.
1362                JLOG(m_journal.fatal()) << "doAdvance throws: " << ex.what();
1363            }
1364
            // Allow a future tryAdvance() call to dispatch another job.
1365            mAdvanceThread = false;
1366            JLOG(m_journal.trace()) << "advanceThread>";
1367        });
1368    }
1369}
1370
// Job-queue worker that services pathfinding requests: pick the ledger to
// pathfind against, skip stale ledgers, and call PathRequests::updateAll()
// until no requests remain or the server is stopping.
// NOTE(review): doxygen listing — hyperlinked lines (locks, lastLedger
// declaration/assignments, acquire calls in the catch block) are missing.
1371void
1373{
1374    {
1377        {
            // Without a network ledger there is nothing valid to path against.
1379            mPathLedger.reset();
1380            JLOG(m_journal.debug()) << "Need network ledger for updating paths";
1381            return;
1382        }
1383    }
1384
1385    while (!app_.getJobQueue().isStopping())
1386    {
1387        JLOG(m_journal.debug()) << "updatePaths running";
1389        {
1391
            // Prefer a freshly validated ledger; fall back to the open
            // ledger when only a new request (not a new ledger) arrived.
1392            if (!mValidLedger.empty() &&
1393                (!mPathLedger || (mPathLedger->info().seq != mValidLedgerSeq)))
1394            { // We have a new valid ledger since the last full pathfinding
1396                lastLedger = mPathLedger;
1397            }
1398            else if (mPathFindNewRequest)
1399            { // We have a new request but no new ledger
1400                lastLedger = app_.openLedger().current();
1401            }
1402            else
1403            { // Nothing to do
1405                mPathLedger.reset();
1406                JLOG(m_journal.debug()) << "Nothing to do for updating paths";
1407                return;
1408            }
1409        }
1410
1411        if (!standalone_)
1412        { // don't pathfind with a ledger that's more than 60 seconds old
1413            using namespace std::chrono;
1414            auto age = time_point_cast<seconds>(app_.timeKeeper().closeTime()) -
1415                lastLedger->info().closeTime;
1416            if (age > 1min)
1417            {
1418                JLOG(m_journal.debug())
1419                    << "Published ledger too old for updating paths";
1422                mPathLedger.reset();
1423                return;
1424            }
1425        }
1426
1427        try
1428        {
1429            auto& pathRequests = app_.getPathRequests();
1430            {
1432                if (!pathRequests.requestsPending())
1433                {
1435                    mPathLedger.reset();
1436                    JLOG(m_journal.debug())
1437                        << "No path requests found. Nothing to do for updating "
1438                           "paths. "
1439                        << mPathFindThread << " jobs remaining";
1440                    return;
1441                }
1442            }
1443            JLOG(m_journal.debug()) << "Updating paths";
            // The expensive part: recompute every pending path request
            // against the chosen ledger.
1444            pathRequests.updateAll(lastLedger);
1445
1447            if (!pathRequests.requestsPending())
1448            {
1449                JLOG(m_journal.debug())
1450                    << "No path requests left. No need for further updating "
1451                       "paths";
1453                mPathLedger.reset();
1454                return;
1455            }
1456        }
1457        catch (SHAMapMissingNode const& mn)
1458        {
            // A missing SHAMap node means we lack ledger data locally;
            // kick off an acquire for the problem ledger and retry.
1459            JLOG(m_journal.info()) << "During pathfinding: " << mn.what();
1460            if (lastLedger->open())
1461            {
1462                // our parent is the problem
1464                    lastLedger->info().parentHash,
1465                    lastLedger->info().seq - 1,
1467            }
1468            else
1469            {
1470                // this ledger is the problem
1472                    lastLedger->info().hash,
1473                    lastLedger->info().seq,
1475            }
1476        }
1477    }
1478}
1479
// Record that a new path request arrived and try to dispatch a pathfinding
// job. Returns true if a worker is (or will be) servicing the request.
// NOTE(review): signature and lock lines are missing from this rendered view.
1480bool
1482{
1484    mPathFindNewRequest = newPFWork("pf:newRequest", ml);
1485    return mPathFindNewRequest;
1486}
1487
// Atomically test-and-clear the "new path request" flag; returns the value
// the flag had before clearing.
// NOTE(review): signature and lock lines are missing from this rendered view.
1488bool
1490{
1492    bool const ret = mPathFindNewRequest;
1493    mPathFindNewRequest = false;
1494    return ret;
1495}
1496
1497// If the order book is radically updated, we need to reprocess all
1498// pathfinding requests.
// Dropping mPathLedger forces updatePaths() to treat the next pass as a
// full (fresh-ledger) recomputation.
1499bool
1501{
1503    mPathLedger.reset();
1504
1505    return newPFWork("pf:newOBDB", ml);
1506}
1507
// Dispatch a pathfinding job (updatePaths) if fewer than two pathfinding
// threads are running and the server is not stopping. Returns true while at
// least one pathfinding job remains and the server is running.
// NOTE(review): the hyperlinked signature line, part of the condition, and
// the mPathFindThread increment are missing from this rendered view.
1510bool
1512    char const* name,
1514{
    // Cap concurrent pathfinding work at two jobs.
1515    if (!app_.isStopping() && mPathFindThread < 2 &&
1517    {
1518        JLOG(m_journal.debug())
1519            << "newPFWork: Creating job. path find threads: "
1520            << mPathFindThread;
1521        if (app_.getJobQueue().addJob(
1522                jtUPDATE_PF, name, [this]() { updatePaths(); }))
1523        {
1525        }
1526    }
1527    // If we're stopping don't give callers the expectation that their
1528    // request will be fulfilled, even if it may be serviced.
1529    return mPathFindThread > 0 && !app_.isStopping();
1530}
1531
// Expose the master recursive mutex so callers can lock LedgerMaster state
// externally (signature is on a dropped hyperlinked line in this view).
1534{
1535    return m_mutex;
1536}
1537
1538// The current ledger is the ledger we believe new transactions should go in
1544
1550
// Return the amendment Rules in force: those of the last validated ledger,
// or, before any ledger has validated, the configured feature set.
1551Rules
1553{
1554    // Once we have a guarantee that there's always a last validated
1555    // ledger then we can dispense with the if.
1556
1557    // Return the Rules from the last validated ledger.
1558    if (auto const ledger = getValidatedLedger())
1559        return ledger->rules();
1560
    // Fallback: rules derived solely from the locally configured features.
1561    return Rules(app_.config().features);
1562}
1563
1564// This is the last ledger we published to clients and can lag the validated
1565// ledger.
1572
1579
// Look up a ledger's close time by sequence: resolve the hash first, then
// delegate to getCloseTimeByHash(); nullopt when the hash is unknown.
1582{
1583    uint256 hash = getHashBySeq(ledgerIndex);
1584    return hash.isNonZero() ? getCloseTimeByHash(hash, ledgerIndex)
1585                            : std::nullopt;
1586}
1587
// Read a ledger header straight from the node store and extract its close
// time without deserializing the full ledger.
// NOTE(review): the hyperlinked line that actually reads and returns the
// close time (after the skip) is missing from this rendered view.
1590    LedgerHash const& ledgerHash,
1591    std::uint32_t index)
1592{
1593    auto nodeObject = app_.getNodeStore().fetchNodeObject(ledgerHash, index);
    // 120 bytes is the minimum size of a serialized ledger header here.
1594    if (nodeObject && (nodeObject->getData().size() >= 120))
1595    {
1596        SerialIter it(
1597            nodeObject->getData().data(), nodeObject->getData().size());
1598        if (safe_cast<HashPrefix>(it.get32()) == HashPrefix::ledgerMaster)
1599        {
            // Skip the fixed-size fields preceding the close time.
1600            it.skip(
1601                4 + 8 + 32 +    // seq drops parentHash
1602                32 + 32 + 4);   // txHash acctHash parentClose
1604        }
1605    }
1606
1607    return std::nullopt;
1608}
1609
// Get a ledger hash for a sequence number, preferring the in-memory ledger
// history cache.
// NOTE(review): the hyperlinked lines that obtain the initial hash and the
// fallback return (presumably the SQL database path) are missing from view.
1610uint256
1612{
1614
1615    if (hash.isNonZero())
1616        return hash;
1617
1619}
1620
// Convenience overload: walk the skip list starting from the last validated
// ledger; nullopt when no validated ledger exists yet.
1623{
1624    std::optional<LedgerHash> ledgerHash;
1625
1626    if (auto referenceLedger = mValidLedger.get())
1627        ledgerHash = walkHashBySeq(index, referenceLedger, reason);
1628
1629    return ledgerHash;
1630}
1631
// Resolve the hash of ledger `index` by walking skip lists starting from a
// reference ledger; may trigger a network acquire of an intermediate ledger.
// Returns nullopt if the hash cannot be determined.
1634    std::uint32_t index,
1635    std::shared_ptr<ReadView const> const& referenceLedger,
1636    InboundLedger::Reason reason)
1637{
1638    if (!referenceLedger || (referenceLedger->info().seq < index))
1639    {
1640        // Nothing we can do. No validated ledger.
1641        return std::nullopt;
1642    }
1643
1644    // See if the hash for the ledger we need is in the reference ledger
1645    auto ledgerHash = hashOfSeq(*referenceLedger, index, m_journal);
1646    if (ledgerHash)
1647        return ledgerHash;
1648
1649    // The hash is not in the reference ledger. Get another ledger which can
1650    // be located easily and should contain the hash.
1651    LedgerIndex refIndex = getCandidateLedger(index);
1652    auto const refHash = hashOfSeq(*referenceLedger, refIndex, m_journal);
1653    XRPL_ASSERT(refHash, "ripple::LedgerMaster::walkHashBySeq : found ledger");
1654    if (refHash)
1655    {
1656        // Try the hash and sequence of a better reference ledger just found
1657        auto ledger = mLedgerHistory.getLedgerByHash(*refHash);
1658
1659        if (ledger)
1660        {
1661            try
1662            {
1663                ledgerHash = hashOfSeq(*ledger, index, m_journal);
1664            }
1665            catch (SHAMapMissingNode const&)
1666            {
                // Local copy is incomplete; fall through to acquire it.
1667                ledger.reset();
1668            }
1669        }
1670
1671        // Try to acquire the complete ledger
1672        if (!ledger)
1673        {
1674            if (auto const l = app_.getInboundLedgers().acquire(
1675                    *refHash, refIndex, reason))
1676            {
1677                ledgerHash = hashOfSeq(*l, index, m_journal);
1678                XRPL_ASSERT(
1679                    ledgerHash,
1680                    "ripple::LedgerMaster::walkHashBySeq : has complete "
1681                    "ledger");
1682            }
1683        }
1684    }
1685    return ledgerHash;
1686}
1687
// Fetch a ledger by sequence, preferring data reachable from the validated
// chain, then the history cache, then the last closed ledger. Returns an
// empty pointer (after clearing the "complete" marker) when unavailable.
// NOTE(review): the hyperlinked return inside the hash branch (presumably
// `return getLedgerByHash(*hash);`) is missing from this rendered view.
1690{
1691    if (index <= mValidLedgerSeq)
1692    {
1693        // Always prefer a validated ledger
1694        if (auto valid = mValidLedger.get())
1695        {
1696            if (valid->info().seq == index)
1697                return valid;
1698
1699            try
1700            {
1701                auto const hash = hashOfSeq(*valid, index, m_journal);
1702
1703                if (hash)
1705            }
1706            catch (std::exception const&)
1707            {
1708                // Missing nodes are already handled
1709            }
1710        }
1711    }
1712
1713    if (auto ret = mLedgerHistory.getLedgerBySeq(index))
1714        return ret;
1715
1716    auto ret = mClosedLedger.get();
1717    if (ret && (ret->info().seq == index))
1718        return ret;
1719
    // We claimed to have it but don't: un-mark it as complete.
1720    clearLedger(index);
1721    return {};
1722}
1723
// Fetch a ledger by hash from the history cache, falling back to the last
// closed ledger; empty pointer when neither matches.
1726{
1727    if (auto ret = mLedgerHistory.getLedgerByHash(hash))
1728        return ret;
1729
1730    auto ret = mClosedLedger.get();
1731    if (ret && (ret->info().hash == hash))
1732        return ret;
1733
1734    return {};
1735}
1736
1737void
1743
// Periodic cache maintenance: expire stale fetch-pack entries (the ledger
// history sweep is on a dropped hyperlinked line in this view).
1744void
1746{
1748    fetch_packs_.sweep();
1749}
1750
1751float
1756
// Drop all ledgers before `seq` from the set of complete ledgers (e.g.
// after online deletion); the lock line is dropped in this rendered view.
1757void
1759{
1761    if (seq > 0)
1762        mCompleteLedgers.erase(range(0u, seq - 1));
1763}
1764
1765void
1770
// Take ownership of ledger-replay data to be consumed later by
// releaseReplay().
1771void
1773{
1774    replayData = std::move(replay);
1775}
1776
// Hand back (and clear) the stored replay data; replayData is left null.
1779{
1780    return std::move(replayData);
1781}
1782
// Try to obtain the historical ledger `missing`: resolve its hash, acquire
// it (or request a fetch pack), store it, and kick off back-filling of the
// SQL tables. Prefetches a window of earlier ledgers when the acquire did
// not complete synchronously. Sets `progress` when any headway was made.
// NOTE(review): doxygen listing — hyperlinked lines (signature, failure
// check, locks, the fillInProgress condition, addJob call) are missing.
1783void
1785    std::uint32_t missing,
1786    bool& progress,
1787    InboundLedger::Reason reason,
1789{
    // Drop the master lock while doing slow network/disk work.
1790    scope_unlock sul{sl};
1791    if (auto hash = getLedgerHashForHistory(missing, reason))
1792    {
1793        XRPL_ASSERT(
1794            hash->isNonZero(),
1795            "ripple::LedgerMaster::fetchForHistory : found ledger");
1796        auto ledger = getLedgerByHash(*hash);
1797        if (!ledger)
1798        {
1800            {
1801                ledger =
1802                    app_.getInboundLedgers().acquire(*hash, missing, reason);
                // If a direct acquire failed, fall back to requesting a
                // fetch pack from peers (once per distinct sequence).
1803                if (!ledger && missing != fetch_seq_ &&
1804                    missing > app_.getNodeStore().earliestLedgerSeq())
1805                {
1806                    JLOG(m_journal.trace())
1807                        << "fetchForHistory want fetch pack " << missing;
1808                    fetch_seq_ = missing;
1809                    getFetchPack(missing, reason);
1810                }
1811                else
1812                    JLOG(m_journal.trace())
1813                        << "fetchForHistory no fetch pack for " << missing;
1814            }
1815            else
1816                JLOG(m_journal.debug())
1817                    << "fetchForHistory found failed acquire";
1818        }
1819        if (ledger)
1820        {
1821            auto seq = ledger->info().seq;
1822            XRPL_ASSERT(
1823                seq == missing,
1824                "ripple::LedgerMaster::fetchForHistory : sequence match");
1825            JLOG(m_journal.trace()) << "fetchForHistory acquired " << seq;
1826            setFullLedger(ledger, false, false);
1827            int fillInProgress;
1828            {
1830                mHistLedger = ledger;
1831                fillInProgress = mFillInProgress;
1832            }
            // Start back-filling the ledger index only if no fill is
            // already running and the parent is locally available.
1833            if (fillInProgress == 0 &&
1835                    ledger->info().parentHash)
1836            {
1837                {
1838                    // Previous ledger is in DB
1840                    mFillInProgress = seq;
1841                }
1843                    jtADVANCE, "tryFill", [this, ledger]() {
1844                        tryFill(ledger);
1845                    });
1846            }
1847            progress = true;
1848        }
1849        else
1850        {
            // Acquire is pending: prefetch a batch of earlier ledgers so
            // they are in flight by the time we need them.
1851            std::uint32_t fetchSz;
1852            // Do not fetch ledger sequences lower
1853            // than the earliest ledger sequence
1854            fetchSz = app_.getNodeStore().earliestLedgerSeq();
1855            fetchSz = missing >= fetchSz
1856                ? std::min(ledger_fetch_size_, (missing - fetchSz) + 1)
1857                : 0;
1858            try
1859            {
1860                for (std::uint32_t i = 0; i < fetchSz; ++i)
1861                {
1862                    std::uint32_t seq = missing - i;
1863                    if (auto h = getLedgerHashForHistory(seq, reason))
1864                    {
1865                        XRPL_ASSERT(
1866                            h->isNonZero(),
1867                            "ripple::LedgerMaster::fetchForHistory : "
1868                            "prefetched ledger");
1869                        app_.getInboundLedgers().acquire(*h, seq, reason);
1870                    }
1871                }
1872            }
1873            catch (std::exception const& ex)
1874            {
1875                JLOG(m_journal.warn())
1876                    << "Threw while prefetching: " << ex.what();
1877            }
1878        }
1879    }
1880    else
1881    {
        // Could not even resolve the hash; log state and skip past the gap.
1882        JLOG(m_journal.fatal())
1883            << "Can't find ledger following prevMissing " << missing;
1884        JLOG(m_journal.fatal())
1885            << "Pub:" << mPubLedgerSeq << " Val:" << mValidLedgerSeq;
1886        JLOG(m_journal.fatal())
1887            << "Ledgers: " << app_.getLedgerMaster().getCompleteLedgers();
1888        JLOG(m_journal.fatal())
1889            << "Acquire reason: "
1890            << (reason == InboundLedger::Reason::HISTORY ? "HISTORY"
1891                                                         : "NOT HISTORY");
1892        clearLedger(missing + 1);
1893        progress = true;
1894    }
1895}
1896
1897// Try to publish ledgers, acquire missing ledgers
// Main loop of the advance job dispatched by tryAdvance(): publish any new
// validated ledgers; when none are pending, look backward for missing
// history and fetch it. Repeats while any pass reports progress.
// NOTE(review): doxygen listing — hyperlinked lines (signature, the in-sync
// condition, lock scopes, shouldAcquire call arguments) are missing here.
1898void
1900{
1901    do
1902    {
1903        mAdvanceWork = false;  // If there's work to do, we'll make progress
1904        bool progress = false;
1905
1906        auto const pubLedgers = findNewLedgersToPublish(sl);
1907        if (pubLedgers.empty())
1908        {
1914            {
1915                // We are in sync, so can acquire
1918                {
                    // Find the newest gap below the published ledger.
1920                    missing = prevMissing(
1922                        mPubLedger->info().seq,
1924                }
1925                if (missing)
1926                {
1927                    JLOG(m_journal.trace())
1928                        << "tryAdvance discovered missing " << *missing;
                    // Don't re-acquire a range the filler is working on.
1929                    if ((mFillInProgress == 0 || *missing > mFillInProgress) &&
1934                            *missing,
1935                            m_journal))
1936                    {
1937                        JLOG(m_journal.trace())
1938                            << "advanceThread should acquire";
1939                    }
1940                    else
1941                        missing = std::nullopt;
1942                }
1943                if (missing)
1944                {
1945                    fetchForHistory(*missing, progress, reason, sl);
1947                    {
1948                        JLOG(m_journal.debug())
1949                            << "tryAdvance found last valid changed";
1950                        progress = true;
1951                    }
1952                }
1953            }
1954            else
1955            {
1956                mHistLedger.reset();
1957                JLOG(m_journal.trace()) << "tryAdvance not fetching history";
1958            }
1959        }
1960        else
1961        {
1962            JLOG(m_journal.trace()) << "tryAdvance found " << pubLedgers.size()
1963                                    << " ledgers to publish";
1964            for (auto const& ledger : pubLedgers)
1965            {
1966                {
                    // Release the master lock around slow publishing work.
1967                    scope_unlock sul{sl};
1968                    JLOG(m_journal.debug())
1969                        << "tryAdvance publishing seq " << ledger->info().seq;
1970                    setFullLedger(ledger, true, true);
1971                }
1972
1973                setPubLedger(ledger);
1974
1975                {
1976                    scope_unlock sul{sl};
1977                    app_.getOPs().pubLedger(ledger);
1978                }
1979            }
1980
            // A new published ledger means pathfinding should run again.
1982            progress = newPFWork("pf:newLedger", sl);
1983        }
1984        if (progress)
1985            mAdvanceWork = true;
1986    } while (mAdvanceWork);
1987}
1988
// Store a received fetch-pack object in the fetch-pack cache, replacing any
// existing entry for the same hash.
1989void
1991{
1992    fetch_packs_.canonicalize_replace_client(hash, data);
1993}
1994
// Remove and return a cached fetch-pack blob for `hash`, verifying its
// contents against the hash; nullopt if absent or the checksum fails.
1997{
1999    Blob data;
1999    if (fetch_packs_.retrieve(hash, data))
2000    {
2001        fetch_packs_.del(hash, false);
        // Integrity check: the blob must hash back to the requested key.
2002        if (hash == sha512Half(makeSlice(data)))
2003            return data;
2004    }
2005    return std::nullopt;
2006}
2007
2008void
2019
2045static void
// Serialize up to `cnt` nodes of `want` that are absent from `have` into the
// TMGetObjectByHash reply, tagging each object with `seq`. When `withLeaves`
// is false, leaf nodes are skipped (inner nodes only).
// NOTE(review): the hyperlinked serialization call (presumably
// n.serializeWithPrefix(s)) is missing from this rendered view.
2047    SHAMap const& want,
2048    SHAMap const* have,
2049    std::uint32_t cnt,
2050    protocol::TMGetObjectByHash* into,
2051    std::uint32_t seq,
2052    bool withLeaves = true)
2053{
2054    XRPL_ASSERT(cnt, "ripple::populateFetchPack : nonzero count input");
2055
2056    Serializer s(1024);
2057
2058    want.visitDifferences(
2059        have,
2060        [&s, withLeaves, &cnt, into, seq](SHAMapTreeNode const& n) -> bool {
2061            if (!withLeaves && n.isLeaf())
2062                return true;
2063
            // Reuse the serializer buffer for each node.
2064            s.erase();
2066
2067            auto const& hash = n.getHash().as_uint256();
2068
2069            protocol::TMIndexedObject* obj = into->add_objects();
2070            obj->set_ledgerseq(seq);
2071            obj->set_hash(hash.data(), hash.size());
2072            obj->set_data(s.getDataPtr(), s.getLength());
2073
            // Stop visiting once the node budget is exhausted.
2074            return --cnt != 0;
2075        });
2076}
2077
// Build and send a fetch pack in response to a peer request: starting from
// the ledger the peer already has, package headers, state-map differences,
// and transaction maps of preceding ledgers until ~512 objects are gathered
// or roughly one second has elapsed.
// NOTE(review): doxygen listing — hyperlinked lines (signature, request
// parameter, load check, hash-prefix add, populateFetchPack call) missing.
2078void
2080    std::weak_ptr<Peer> const& wPeer,
2082    uint256 haveLedgerHash,
2084{
2085    using namespace std::chrono_literals;
    // Stale requests (queued > 1s ago) are not worth answering.
2086    if (UptimeClock::now() > uptime + 1s)
2087    {
2088        JLOG(m_journal.info()) << "Fetch pack request got stale";
2089        return;
2090    }
2091
2093    {
2094        JLOG(m_journal.info()) << "Too busy to make fetch pack";
2095        return;
2096    }
2097
2098    auto peer = wPeer.lock();
2099
2100    if (!peer)
2101        return;
2102
2103    auto have = getLedgerByHash(haveLedgerHash);
2104
2105    if (!have)
2106    {
2107        JLOG(m_journal.info())
2108            << "Peer requests fetch pack for ledger we don't have: " << have;
2109        peer->charge(Resource::feeRequestNoReply, "get_object ledger");
2110        return;
2111    }
2112
2113    if (have->open())
2114    {
2115        JLOG(m_journal.warn())
2116            << "Peer requests fetch pack from open ledger: " << have;
2117        peer->charge(Resource::feeMalformedRequest, "get_object ledger open");
2118        return;
2119    }
2120
2121    if (have->info().seq < getEarliestFetch())
2122    {
2123        JLOG(m_journal.debug()) << "Peer requests fetch pack that is too early";
2124        peer->charge(Resource::feeMalformedRequest, "get_object ledger early");
2125        return;
2126    }
2127
    // The pack describes the parent of what the peer has.
2128    auto want = getLedgerByHash(have->info().parentHash);
2129
2130    if (!want)
2131    {
2132        JLOG(m_journal.info())
2133            << "Peer requests fetch pack for ledger whose predecessor we "
2134            << "don't have: " << have;
2135        peer->charge(
2136            Resource::feeRequestNoReply, "get_object ledger no parent");
2137        return;
2138    }
2139
2140    try
2141    {
2142        Serializer hdr(128);
2143
2144        protocol::TMGetObjectByHash reply;
2145        reply.set_query(false);
2146
2147        if (request->has_seq())
2148            reply.set_seq(request->seq());
2149
2150        reply.set_ledgerhash(request->ledgerhash());
2151        reply.set_type(protocol::TMGetObjectByHash::otFETCH_PACK);
2152
2153        // Building a fetch pack:
2154        //  1. Add the header for the requested ledger.
2155        //  2. Add the nodes for the AccountStateMap of that ledger.
2156        //  3. If there are transactions, add the nodes for the
2157        //     transactions of the ledger.
2158        //  4. If the FetchPack now contains at least 512 entries then stop.
2159        //  5. If not very much time has elapsed, then loop back and repeat
2160        //     the same process adding the previous ledger to the FetchPack.
2161        do
2162        {
2163            std::uint32_t lSeq = want->info().seq;
2164
2165            {
2166                // Serialize the ledger header:
2167                hdr.erase();
2168
2170                addRaw(want->info(), hdr);
2171
2172                // Add the data
2173                protocol::TMIndexedObject* obj = reply.add_objects();
2174                obj->set_hash(
2175                    want->info().hash.data(), want->info().hash.size());
2176                obj->set_data(hdr.getDataPtr(), hdr.getLength());
2177                obj->set_ledgerseq(lSeq);
2178            }
2179
2181                want->stateMap(), &have->stateMap(), 16384, &reply, lSeq);
2182
2183            // We use nullptr here because transaction maps are per ledger
2184            // and so the requestor is unlikely to already have it.
2185            if (want->info().txHash.isNonZero())
2186                populateFetchPack(want->txMap(), nullptr, 512, &reply, lSeq);
2187
2188            if (reply.objects().size() >= 512)
2189                break;
2190
            // Walk back one ledger and continue packing.
2191            have = std::move(want);
2192            want = getLedgerByHash(have->info().parentHash);
2193        } while (want && UptimeClock::now() <= uptime + 1s);
2194
2195        auto msg = std::make_shared<Message>(reply, protocol::mtGET_OBJECTS);
2196
2197        JLOG(m_journal.info())
2198            << "Built fetch pack with " << reply.objects().size() << " nodes ("
2199            << msg->getBufferSize() << " bytes)";
2200
2201        peer->send(msg);
2202    }
2203    catch (std::exception const& ex)
2204    {
        // NOTE(review): typo in log text ("pach" -> "pack"); a runtime
        // string, so left unchanged in this documentation-only pass.
2205        JLOG(m_journal.warn())
2206            << "Exception building fetch pach. Exception: " << ex.what();
2207    }
2208}
2209
// Number of entries currently held in the fetch-pack cache (signature is on
// a dropped hyperlinked line in this view).
2212{
2213    return fetch_packs_.getCacheSize();
2214}
2215
2216// Returns the minimum ledger sequence in SQL database, if any.
2222
// Find the transaction ID at position `txnIndex` within validated ledger
// `ledgerSeq` by scanning that ledger's transactions; nullopt when the
// ledger is not in the validated range, is unavailable, or has no match.
2225{
2226    uint32_t first = 0, last = 0;
2227
    // The ledger must fall inside the locally validated range.
2228    if (!getValidatedRange(first, last) || last < ledgerSeq)
2229        return {};
2230
2231    auto const lgr = getLedgerBySeq(ledgerSeq);
2232    if (!lgr || lgr->txs.empty())
2233        return {};
2234
    // Linear scan for the matching TransactionIndex metadata field.
2235    for (auto it = lgr->txs.begin(); it != lgr->txs.end(); ++it)
2236        if (it->first && it->second &&
2237            it->second->isFieldPresent(sfTransactionIndex) &&
2238            it->second->getFieldU32(sfTransactionIndex) == txnIndex)
2239            return it->first->getTransactionID();
2240
2241    return {};
2242}
2243
2244} // namespace ripple
T back(T... args)
T back_inserter(T... args)
T begin(T... args)
Represents a JSON value.
Definition json_value.h:131
Provide a light-weight way to check active() before string formatting.
Definition Journal.h:186
A generic endpoint for log messages.
Definition Journal.h:41
Stream fatal() const
Definition Journal.h:333
Stream error() const
Definition Journal.h:327
Stream debug() const
Definition Journal.h:309
Stream info() const
Definition Journal.h:315
Stream trace() const
Severity stream access functions.
Definition Journal.h:303
Stream warn() const
Definition Journal.h:321
typename Clock::time_point time_point
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual bool hasUnsupportedEnabled() const =0
returns true if one or more amendments on the network have been enabled that this server does not sup...
void doValidatedLedger(std::shared_ptr< ReadView const > const &lastValidatedLedger)
Called when a new fully-validated ledger is accepted.
virtual Config & config()=0
virtual Overlay & overlay()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual OpenLedger & openLedger()=0
virtual beast::Journal journal(std::string const &name)=0
virtual SHAMapStore & getSHAMapStore()=0
virtual bool isStopping() const =0
virtual NodeStore::Database & getNodeStore()=0
virtual RCLValidations & getValidations()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual LedgerReplayer & getLedgerReplayer()=0
virtual TimeKeeper & timeKeeper()=0
virtual JobQueue & getJobQueue()=0
virtual NetworkOPs & getOPs()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual ValidatorList & validators()=0
virtual LedgerMaster & getLedgerMaster()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual PathRequests & getPathRequests()=0
virtual LedgerIndex getMaxDisallowedLedger()=0
Ensure that a newly-started validator does not sign proposals older than the last ledger it persisted...
virtual AmendmentTable & getAmendmentTable()=0
virtual PendingSaves & pendingSaves()=0
Holds transactions which were deferred to the next pass of consensus.
void insert(std::shared_ptr< STTx const > const &txn)
std::shared_ptr< STTx const > popAcctTransaction(std::shared_ptr< STTx const > const &tx)
bool LEDGER_REPLAY
Definition Config.h:204
std::unordered_set< uint256, beast::uhash<> > features
Definition Config.h:257
virtual std::shared_ptr< Ledger const > acquire(uint256 const &hash, std::uint32_t seq, InboundLedger::Reason)=0
virtual bool isFailure(uint256 const &h)=0
bool isStopping() const
Definition JobQueue.h:213
int getJobCount(JobType t) const
Jobs waiting at this priority.
Definition JobQueue.cpp:123
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition JobQueue.h:149
float getCacheHitRate()
Get the ledgers_by_hash cache hit rate.
void builtLedger(std::shared_ptr< Ledger const > const &, uint256 const &consensusHash, Json::Value)
Report that we have locally built a particular ledger.
void sweep()
Remove stale cache entries.
LedgerHash getLedgerHash(LedgerIndex ledgerIndex)
Get a ledger's hash given its sequence number.
void clearLedgerCachePrior(LedgerIndex seq)
std::shared_ptr< Ledger const > getLedgerBySeq(LedgerIndex ledgerIndex)
Get a ledger given its sequence number.
bool insert(std::shared_ptr< Ledger const > const &ledger, bool validated)
Track a ledger.
bool fixIndex(LedgerIndex ledgerIndex, LedgerHash const &ledgerHash)
Repair a hash to index mapping.
void validatedLedger(std::shared_ptr< Ledger const > const &, std::optional< uint256 > const &consensusHash)
Report that we have validated a particular ledger.
std::shared_ptr< Ledger const > getLedgerByHash(LedgerHash const &ledgerHash)
Retrieve a ledger given its hash.
std::shared_ptr< Ledger const > get()
void set(std::shared_ptr< Ledger const > ledger)
bool haveLedger(std::uint32_t seq)
std::shared_ptr< Ledger const > getValidatedLedger()
void clearLedgerCachePrior(LedgerIndex seq)
RangeSet< std::uint32_t > mCompleteLedgers
void setBuildingLedger(LedgerIndex index)
std::unique_ptr< LedgerReplay > releaseReplay()
void failedSave(std::uint32_t seq, uint256 const &hash)
void takeReplay(std::unique_ptr< LedgerReplay > replay)
std::uint32_t const ledger_history_
void addHeldTransaction(std::shared_ptr< Transaction > const &trans)
void checkAccept(std::shared_ptr< Ledger const > const &ledger)
std::optional< NetClock::time_point > getCloseTimeByHash(LedgerHash const &ledgerHash, LedgerIndex ledgerIndex)
std::size_t getNeededValidations()
Determines how many validations are needed to fully validate a ledger.
std::unique_ptr< LedgerReplay > replayData
void setLedgerRangePresent(std::uint32_t minV, std::uint32_t maxV)
TimeKeeper::time_point upgradeWarningPrevTime_
LedgerHistory mLedgerHistory
std::optional< NetClock::time_point > getCloseTimeBySeq(LedgerIndex ledgerIndex)
void fixMismatch(ReadView const &ledger)
std::atomic< LedgerIndex > mPubLedgerSeq
void clearPriorLedgers(LedgerIndex seq)
std::shared_ptr< Ledger const > mPubLedger
void makeFetchPack(std::weak_ptr< Peer > const &wPeer, std::shared_ptr< protocol::TMGetObjectByHash > const &request, uint256 haveLedgerHash, UptimeClock::time_point uptime)
std::atomic< LedgerIndex > mBuildingLedgerSeq
std::shared_ptr< ReadView const > getCurrentLedger()
void tryFill(std::shared_ptr< Ledger const > ledger)
std::uint32_t const fetch_depth_
bool canBeCurrent(std::shared_ptr< Ledger const > const &ledger)
Check the sequence number and parent close time of a ledger against our clock and last validated ledg...
bool isValidated(ReadView const &ledger)
std::uint32_t getEarliestFetch()
std::recursive_mutex m_mutex
std::optional< LedgerHash > walkHashBySeq(std::uint32_t index, InboundLedger::Reason reason)
Walk to a ledger's hash using the skip list.
uint256 getHashBySeq(std::uint32_t index)
Get a ledger's hash by sequence number using the cache.
std::shared_ptr< STTx const > popAcctTransaction(std::shared_ptr< STTx const > const &tx)
Get the next transaction held for a particular account if any.
LedgerIndex const max_ledger_difference_
bool fixIndex(LedgerIndex ledgerIndex, LedgerHash const &ledgerHash)
TaggedCache< uint256, Blob > fetch_packs_
bool newPFWork(char const *name, std::unique_lock< std::recursive_mutex > &)
A thread needs to be dispatched to handle pathfinding work of some kind.
bool isCaughtUp(std::string &reason)
void setPubLedger(std::shared_ptr< Ledger const > const &l)
std::optional< uint256 > txnIdFromIndex(uint32_t ledgerSeq, uint32_t txnIndex)
beast::Journal m_journal
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
void clearLedger(std::uint32_t seq)
std::pair< uint256, LedgerIndex > mLastValidLedger
std::shared_ptr< Ledger const > getClosedLedger()
std::optional< LedgerIndex > minSqlSeq()
void setFullLedger(std::shared_ptr< Ledger const > const &ledger, bool isSynchronous, bool isCurrent)
LedgerMaster(Application &app, Stopwatch &stopwatch, beast::insight::Collector::ptr const &collector, beast::Journal journal)
std::atomic< std::uint32_t > mValidLedgerSign
CanonicalTXSet mHeldTransactions
std::uint32_t const ledger_fetch_size_
void applyHeldTransactions()
Apply held transactions to the open ledger This is normally called as we close the ledger.
std::chrono::seconds getPublishedLedgerAge()
std::shared_ptr< Ledger const > mHistLedger
std::recursive_mutex mCompleteLock
std::string getCompleteLedgers()
std::atomic< LedgerIndex > mValidLedgerSeq
std::size_t getFetchPackCacheSize() const
bool getFullValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::optional< Blob > getFetchPack(uint256 const &hash) override
Retrieves partial ledger data of the coresponding hash from peers.
void gotFetchPack(bool progress, std::uint32_t seq)
std::recursive_mutex & peekMutex()
void consensusBuilt(std::shared_ptr< Ledger const > const &ledger, uint256 const &consensusHash, Json::Value consensus)
Report that the consensus process built a particular ledger.
std::shared_ptr< Ledger const > mPathLedger
void setValidLedger(std::shared_ptr< Ledger const > const &l)
std::optional< LedgerHash > getLedgerHashForHistory(LedgerIndex index, InboundLedger::Reason reason)
void addFetchPack(uint256 const &hash, std::shared_ptr< Blob > data)
std::atomic< std::uint32_t > mPubLedgerClose
void switchLCL(std::shared_ptr< Ledger const > const &lastClosed)
LedgerHolder mValidLedger
std::shared_ptr< ReadView const > getPublishedLedger()
std::atomic_flag mGotFetchPackThread
void doAdvance(std::unique_lock< std::recursive_mutex > &)
LedgerHolder mClosedLedger
bool storeLedger(std::shared_ptr< Ledger const > ledger)
std::vector< std::shared_ptr< Ledger const > > findNewLedgersToPublish(std::unique_lock< std::recursive_mutex > &)
LedgerIndex getCurrentLedgerIndex()
bool isCompatible(ReadView const &, beast::Journal::Stream, char const *reason)
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
void fetchForHistory(std::uint32_t missing, bool &progress, InboundLedger::Reason reason, std::unique_lock< std::recursive_mutex > &)
std::shared_ptr< Ledger const > getLedgerByHash(uint256 const &hash)
std::uint32_t fetch_seq_
LedgerIndex getValidLedgerIndex()
std::chrono::seconds getValidatedLedgerAge()
void replay(InboundLedger::Reason r, uint256 const &finishLedgerHash, std::uint32_t totalNumLedgers)
Replay a range of ledgers.
void setRemoteFee(std::uint32_t f)
bool isLoadedLocal() const
std::uint32_t getLoadBase() const
virtual bool isBlocked()=0
virtual void setAmendmentWarned()=0
virtual void setAmendmentBlocked()=0
virtual void clearNeedNetworkLedger()=0
virtual bool isAmendmentWarned()=0
virtual bool isNeedNetworkLedger()=0
virtual void updateLocalTx(ReadView const &newValidLedger)=0
virtual void clearAmendmentWarned()=0
virtual void processTransactionSet(CanonicalTXSet const &set)=0
Process a set of transactions synchronously, and ensuring that they are processed in one batch.
virtual void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted)=0
Persistency layer for NodeObject.
Definition Database.h:32
std::shared_ptr< NodeObject > fetchNodeObject(uint256 const &hash, std::uint32_t ledgerSeq=0, FetchType fetchType=FetchType::synchronous, bool duplicate=false)
Fetch a node object.
Definition Database.cpp:221
virtual std::int32_t getWriteLoad() const =0
Retrieve the estimated number of pending write operations.
std::uint32_t earliestLedgerSeq() const noexcept
Definition Database.h:202
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
void setup(std::shared_ptr< ReadView const > const &ledger)
virtual PeerSequence getActivePeers() const =0
Returns a sequence representing the current list of peers.
virtual void checkTracking(std::uint32_t index)=0
Calls the checkTracking function on each peer.
bool requestsPending() const
std::map< LedgerIndex, bool > getSnapshot() const
Get a snapshot of the pending saves.
A view into a ledger.
Definition ReadView.h:32
virtual bool open() const =0
Returns true if this reflects an open ledger.
virtual LedgerInfo const & info() const =0
Returns information about the ledger.
virtual std::optional< LedgerHashPair > getHashesByIndex(LedgerIndex ledgerIndex)=0
getHashesByIndex Returns the hashes of the ledger and its parent as specified by the ledgerIndex.
virtual std::optional< LedgerIndex > getMinLedgerSeq()=0
getMinLedgerSeq Returns the minimum ledger sequence in the Ledgers table.
virtual uint256 getHashByIndex(LedgerIndex ledgerIndex)=0
getHashByIndex Returns the hash of the ledger with the given sequence.
Rules controlling protocol behavior.
Definition Rules.h:19
uint256 const & as_uint256() const
Definition SHAMapHash.h:25
virtual void onLedgerClosed(std::shared_ptr< Ledger const > const &ledger)=0
Called by LedgerMaster every time a ledger validates.
virtual std::optional< LedgerIndex > minimumOnline() const =0
The minimum ledger to try and maintain in our database.
virtual bool isLeaf() const =0
Determines if this is a leaf node.
SHAMapHash const & getHash() const
Return the hash of this node.
virtual void serializeWithPrefix(Serializer &) const =0
Serialize the node in a format appropriate for hashing.
A SHAMap is both a radix tree with a fan-out of 16 and a Merkle tree.
Definition SHAMap.h:78
void visitDifferences(SHAMap const *have, std::function< bool(SHAMapTreeNode const &)> const &) const
Visit every node in this SHAMap that is not present in the specified SHAMap.
void skip(int num)
std::uint32_t get32()
int getLength() const
Definition Serializer.h:214
void const * getDataPtr() const
Definition Serializer.h:204
time_point now() const override
Returns the current time, using the server's clock.
Definition TimeKeeper.h:45
time_point closeTime() const
Returns the predicted close time, in network time.
Definition TimeKeeper.h:57
static time_point now()
std::vector< WrappedValidationType > getTrustedForLedger(ID const &ledgerID, Seq const &seq)
Get trusted full validations for a specific ledger.
std::vector< WrappedValidationType > currentTrusted()
Get the currently trusted full validations.
std::vector< std::uint32_t > fees(ID const &ledgerID, std::uint32_t baseFee)
Returns fees reported by trusted full validators in the given ledger.
std::vector< std::shared_ptr< STValidation > > negativeUNLFilter(std::vector< std::shared_ptr< STValidation > > &&validations) const
Remove validations that are from validators on the negative UNL.
std::size_t quorum() const
Get quorum value for current trusted key set.
QuorumKeys getQuorumKeys() const
Get the quorum and all of the trusted keys.
bool isNonZero() const
Definition base_uint.h:526
Automatically unlocks and re-locks a unique_lock object.
Definition scope.h:212
T clear(T... args)
T copy(T... args)
T count(T... args)
T empty(T... args)
T end(T... args)
T endl(T... args)
T find(T... args)
T is_same_v
T load(T... args)
T lock(T... args)
T make_pair(T... args)
T max(T... args)
T min(T... args)
bool isNewerVersion(std::uint64_t version)
Check if the version is newer than the local node's rippled software version.
bool isRippledVersion(std::uint64_t version)
Check if the encoded software version is a rippled software version.
Charge const feeMalformedRequest
Schedule of fees charged for imposing load on the server.
Charge const feeRequestNoReply
TER valid(STTx const &tx, ReadView const &view, AccountID const &src, beast::Journal j)
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition algorithm.h:6
SizedItem
Definition Config.h:25
bool areCompatible(ReadView const &validLedger, ReadView const &testLedger, beast::Journal::Stream &s, char const *reason)
Return false if the test ledger is provably incompatible with the valid ledger, that is,...
Definition View.cpp:780
LedgerIndex getCandidateLedger(LedgerIndex requested)
Find a ledger index from which we could easily get the requested ledger.
Definition View.h:410
static bool shouldAcquire(std::uint32_t const currentLedger, std::uint32_t const ledgerHistory, std::optional< LedgerIndex > const minimumOnline, std::uint32_t const candidateLedger, beast::Journal j)
std::optional< T > prevMissing(RangeSet< T > const &rs, T t, T minVal=0)
Find the largest value not in the set that is less than a given value.
Definition RangeSet.h:164
bool isCurrent(ValidationParms const &p, NetClock::time_point now, NetClock::time_point signTime, NetClock::time_point seenTime)
Whether a validation is still current.
std::optional< uint256 > hashOfSeq(ReadView const &ledger, LedgerIndex seq, beast::Journal journal)
Return the hash of a ledger by sequence.
Definition View.cpp:942
bool set(T &target, std::string const &name, Section const &section)
Set a value from a configuration Section If the named value is not found or doesn't parse as a T,...
static constexpr int MAX_LEDGER_GAP
constexpr std::size_t calculatePercent(std::size_t count, std::size_t total)
Calculate one number divided by another number in percentage.
std::enable_if_t< std::is_same< T, char >::value||std::is_same< T, unsigned char >::value, Slice > makeSlice(std::array< T, N > const &a)
Definition Slice.h:225
Stopwatch & stopwatch()
Returns an instance of a wall clock.
Definition chrono.h:100
std::string to_string(base_uint< Bits, Tag > const &a)
Definition base_uint.h:611
ClosedInterval< T > range(T low, T high)
Create a closed range interval.
Definition RangeSet.h:35
static void populateFetchPack(SHAMap const &want, SHAMap const *have, std::uint32_t cnt, protocol::TMGetObjectByHash *into, std::uint32_t seq, bool withLeaves=true)
Populate a fetch pack with data from the map the recipient wants.
static constexpr std::chrono::minutes MAX_LEDGER_AGE_ACQUIRE
@ ledgerMaster
ledger master data for signing
static constexpr int MAX_WRITE_LOAD_ACQUIRE
void addRaw(LedgerHeader const &, Serializer &, bool includeHash=false)
@ jtLEDGER_DATA
Definition Job.h:47
@ jtUPDATE_PF
Definition Job.h:37
@ jtPUBOLDLEDGER
Definition Job.h:25
@ jtADVANCE
Definition Job.h:48
sha512_half_hasher::result_type sha512Half(Args const &... args)
Returns the SHA512-Half of a series of objects.
Definition digest.h:205
void LogicError(std::string const &how) noexcept
Called when faulty logic causes a broken invariant.
bool pendSaveValidated(Application &app, std::shared_ptr< Ledger const > const &ledger, bool isSynchronous, bool isCurrent)
Save, or arrange to save, a fully-validated ledger Returns false on error.
Definition Ledger.cpp:978
STL namespace.
T has_value(T... args)
T push_back(T... args)
T reserve(T... args)
T size(T... args)
T sort(T... args)
T str(T... args)
T swap(T... args)
T test_and_set(T... args)
T time_since_epoch(T... args)
T what(T... args)