LedgerMaster.cpp
1//------------------------------------------------------------------------------
2/*
3 This file is part of rippled: https://github.com/ripple/rippled
4 Copyright (c) 2012, 2013 Ripple Labs Inc.
5
6 Permission to use, copy, modify, and/or distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19
20#include <xrpld/app/consensus/RCLValidations.h>
21#include <xrpld/app/ledger/Ledger.h>
22#include <xrpld/app/ledger/LedgerMaster.h>
23#include <xrpld/app/ledger/LedgerReplayer.h>
24#include <xrpld/app/ledger/OpenLedger.h>
25#include <xrpld/app/ledger/OrderBookDB.h>
26#include <xrpld/app/ledger/PendingSaves.h>
27#include <xrpld/app/main/Application.h>
28#include <xrpld/app/misc/AmendmentTable.h>
29#include <xrpld/app/misc/HashRouter.h>
30#include <xrpld/app/misc/LoadFeeTrack.h>
31#include <xrpld/app/misc/NetworkOPs.h>
32#include <xrpld/app/misc/SHAMapStore.h>
33#include <xrpld/app/misc/Transaction.h>
34#include <xrpld/app/misc/TxQ.h>
35#include <xrpld/app/misc/ValidatorList.h>
36#include <xrpld/app/paths/PathRequests.h>
37#include <xrpld/app/rdb/RelationalDatabase.h>
38#include <xrpld/app/tx/apply.h>
39#include <xrpld/core/DatabaseCon.h>
40#include <xrpld/core/TimeKeeper.h>
41#include <xrpld/overlay/Overlay.h>
42#include <xrpld/overlay/Peer.h>
43#include <xrpl/basics/Log.h>
44#include <xrpl/basics/MathUtilities.h>
45#include <xrpl/basics/TaggedCache.h>
46#include <xrpl/basics/UptimeClock.h>
47#include <xrpl/basics/contract.h>
48#include <xrpl/basics/safe_cast.h>
49#include <xrpl/basics/scope.h>
50#include <xrpl/beast/utility/instrumentation.h>
51#include <xrpl/protocol/BuildInfo.h>
52#include <xrpl/protocol/HashPrefix.h>
53#include <xrpl/protocol/digest.h>
54#include <xrpl/resource/Fees.h>
55
56#include <algorithm>
57#include <chrono>
58#include <cstdlib>
59#include <limits>
60#include <memory>
61#include <vector>
62
63namespace ripple {
64
65// Don't catch up more than 100 ledgers (cannot exceed 256)
66static constexpr int MAX_LEDGER_GAP{100};
67
68// Don't acquire history if ledger is too old
70
71// Don't acquire history if write load is too high
72static constexpr int MAX_WRITE_LOAD_ACQUIRE{8192};
73
74// Helper function for LedgerMaster::doAdvance()
75// Return true if candidateLedger should be fetched from the network.
76static bool
78 std::uint32_t const currentLedger,
79 std::uint32_t const ledgerHistory,
80 std::optional<LedgerIndex> const minimumOnline,
81 std::uint32_t const candidateLedger,
83{
84 bool const ret = [&]() {
85 // Fetch ledger if it may be the current ledger
86 if (candidateLedger >= currentLedger)
87 return true;
88
89 // Or if it is within our configured history range:
90 if (currentLedger - candidateLedger <= ledgerHistory)
91 return true;
92
93 // Or if greater than or equal to a specific minimum ledger.
94 // Do nothing if the minimum ledger to keep online is unknown.
95 return minimumOnline.has_value() && candidateLedger >= *minimumOnline;
96 }();
97
98 JLOG(j.trace()) << "Missing ledger " << candidateLedger
99 << (ret ? " should" : " should NOT") << " be acquired";
100 return ret;
101}
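// Illustrative example (hypothetical values): with currentLedger = 5000,
// ledgerHistory = 256 and minimumOnline unset, candidate 4800 is acquired
// because 5000 - 4800 <= 256, candidate 4000 is not, and any candidate at
// or above 5000 is always acquired since it may be the current ledger.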
102
104 Application& app,
106 beast::insight::Collector::ptr const& collector,
107 beast::Journal journal)
108 : app_(app)
109 , m_journal(journal)
110 , mLedgerHistory(collector, app)
111 , standalone_(app_.config().standalone())
112 , fetch_depth_(
113 app_.getSHAMapStore().clampFetchDepth(app_.config().FETCH_DEPTH))
114 , ledger_history_(app_.config().LEDGER_HISTORY)
115 , ledger_fetch_size_(app_.config().getValueFor(SizedItem::ledgerFetch))
116 , fetch_packs_(
117 "FetchPack",
118 65536,
119 std::chrono::seconds{45},
120 stopwatch,
121 app_.journal("TaggedCache"))
122 , m_stats(std::bind(&LedgerMaster::collect_metrics, this), collector)
123{
124}
125
128{
129 return app_.openLedger().current()->info().seq;
130}
131
134{
135 return mValidLedgerSeq;
136}
137
138bool
140 ReadView const& view,
142 char const* reason)
143{
144 auto validLedger = getValidatedLedger();
145
146 if (validLedger && !areCompatible(*validLedger, view, s, reason))
147 {
148 return false;
149 }
150
151 {
153
154 if ((mLastValidLedger.second != 0) &&
156 mLastValidLedger.first,
157 mLastValidLedger.second,
158 view,
159 s,
160 reason))
161 {
162 return false;
163 }
164 }
165
166 return true;
167}
168
171{
172 using namespace std::chrono_literals;
174 if (pubClose == 0s)
175 {
176 JLOG(m_journal.debug()) << "No published ledger";
177 return weeks{2};
178 }
179
180 std::chrono::seconds ret = app_.timeKeeper().closeTime().time_since_epoch();
181 ret -= pubClose;
182 ret = (ret > 0s) ? ret : 0s;
183 static std::chrono::seconds lastRet = -1s;
184
185 if (ret != lastRet)
186 {
187 JLOG(m_journal.trace()) << "Published ledger age is " << ret.count();
188 lastRet = ret;
189 }
190 return ret;
191}
192
195{
196 using namespace std::chrono_literals;
197
199 if (valClose == 0s)
200 {
201 JLOG(m_journal.debug()) << "No validated ledger";
202 return weeks{2};
203 }
204
205 std::chrono::seconds ret = app_.timeKeeper().closeTime().time_since_epoch();
206 ret -= valClose;
207 ret = (ret > 0s) ? ret : 0s;
208 static std::chrono::seconds lastRet = -1s;
209
210 if (ret != lastRet)
211 {
212 JLOG(m_journal.trace()) << "Validated ledger age is " << ret.count();
213 lastRet = ret;
214 }
215 return ret;
216}
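// Note: like getPublishedLedgerAge() above, this reports a two-week age as
// a "no ledger" sentinel and clamps negative ages (possible when the local
// clock runs ahead of the ledger close time) to zero.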
217
218bool
220{
221 using namespace std::chrono_literals;
222
223 if (getPublishedLedgerAge() > 3min)
224 {
225 reason = "No recently-published ledger";
226 return false;
227 }
228 std::uint32_t validClose = mValidLedgerSign.load();
230 if (!validClose || !pubClose)
231 {
232 reason = "No published ledger";
233 return false;
234 }
235 if (validClose > (pubClose + 90))
236 {
237 reason = "Published ledger lags validated ledger";
238 return false;
239 }
240 return true;
241}
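// In other words, the server reports itself caught up only when a ledger
// has been published within the last three minutes and the validated
// ledger's sign time is not more than 90 seconds ahead of the published
// ledger's close time.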
242
243void
245{
247 std::optional<uint256> consensusHash;
248
249 if (!standalone_)
250 {
251 auto validations = app_.validators().negativeUNLFilter(
253 l->info().hash, l->info().seq));
254 times.reserve(validations.size());
255 for (auto const& val : validations)
256 times.push_back(val->getSignTime());
257
258 if (!validations.empty())
259 consensusHash = validations.front()->getConsensusHash();
260 }
261
262 NetClock::time_point signTime;
263
264 if (!times.empty() && times.size() >= app_.validators().quorum())
265 {
266 // Calculate the sample median
267 std::sort(times.begin(), times.end());
268 auto const t0 = times[(times.size() - 1) / 2];
269 auto const t1 = times[times.size() / 2];
270 signTime = t0 + (t1 - t0) / 2;
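// The two-index form handles both odd and even sample sizes: for an
// odd count t0 == t1, and for an even count signTime lands midway
// between the two middle values.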
271 }
272 else
273 {
274 signTime = l->info().closeTime;
275 }
276
277 mValidLedger.set(l);
278 mValidLedgerSign = signTime.time_since_epoch().count();
279 XRPL_ASSERT(
281 l->info().seq + max_ledger_difference_ >
283 "ripple::LedgerMaster::setValidLedger : valid ledger sequence");
285 mValidLedgerSeq = l->info().seq;
286
289 mLedgerHistory.validatedLedger(l, consensusHash);
291 if (!app_.getOPs().isBlocked())
292 {
294 {
295 JLOG(m_journal.error()) << "One or more unsupported amendments "
296 "activated: server blocked.";
298 }
299 else if (!app_.getOPs().isAmendmentWarned() || l->isFlagLedger())
300 {
301 // Amendments can lose majority, so re-check periodically (every
302 // flag ledger), and clear the flag if appropriate. If an unknown
303 // amendment gains majority log a warning as soon as it's
304 // discovered, then again every flag ledger until the operator
305 // upgrades, the amendment loses majority, or the amendment goes
306 // live and the node gets blocked. Unlike being amendment blocked,
307 // this message may be logged more than once per session, because
308 // the node will otherwise function normally, and this gives
309 // operators an opportunity to see and resolve the warning.
310 if (auto const first =
312 {
313 JLOG(m_journal.error()) << "One or more unsupported amendments "
314 "reached majority. Upgrade before "
315 << to_string(*first)
316 << " to prevent your server from "
317 "becoming amendment blocked.";
319 }
320 else
322 }
323 }
324}
325
326void
328{
329 mPubLedger = l;
330 mPubLedgerClose = l->info().closeTime.time_since_epoch().count();
331 mPubLedgerSeq = l->info().seq;
332}
333
334void
336 std::shared_ptr<Transaction> const& transaction)
337{
339 mHeldTransactions.insert(transaction->getSTransaction());
340}
341
342// Validate a ledger's close time and sequence number if we're considering
343// jumping to that ledger. This helps defend against some rare hostile or
344// diverged majority scenarios.
345bool
347{
348 XRPL_ASSERT(ledger, "ripple::LedgerMaster::canBeCurrent : non-null input");
349
350 // Never jump to a candidate ledger that precedes our
351 // last validated ledger
352
353 auto validLedger = getValidatedLedger();
354 if (validLedger && (ledger->info().seq < validLedger->info().seq))
355 {
356 JLOG(m_journal.trace())
357 << "Candidate for current ledger has low seq " << ledger->info().seq
358 << " < " << validLedger->info().seq;
359 return false;
360 }
361
362 // Ensure this ledger's parent close time is within five minutes of
363 // our current time. If we already have a known fully-valid ledger
364 // we perform this check. Otherwise, we only do it if we've built a
365 // few ledgers as our clock can be off when we first start up
366
367 auto closeTime = app_.timeKeeper().closeTime();
368 auto ledgerClose = ledger->info().parentCloseTime;
369
370 using namespace std::chrono_literals;
371 if ((validLedger || (ledger->info().seq > 10)) &&
372 ((std::max(closeTime, ledgerClose) - std::min(closeTime, ledgerClose)) >
373 5min))
374 {
375 JLOG(m_journal.warn())
376 << "Candidate for current ledger has close time "
377 << to_string(ledgerClose) << " at network time "
378 << to_string(closeTime) << " seq " << ledger->info().seq;
379 return false;
380 }
381
382 if (validLedger)
383 {
384 // Sequence number must not be too high. We allow ten ledgers
385 // for time inaccuracies plus a maximum run rate of one ledger
386 // every two seconds. The goal is to prevent a malicious ledger
387 // from increasing our sequence unreasonably high
388
389 LedgerIndex maxSeq = validLedger->info().seq + 10;
390
391 if (closeTime > validLedger->info().parentCloseTime)
392 maxSeq += std::chrono::duration_cast<std::chrono::seconds>(
393 closeTime - validLedger->info().parentCloseTime)
394 .count() /
395 2;
396
397 if (ledger->info().seq > maxSeq)
398 {
399 JLOG(m_journal.warn())
400 << "Candidate for current ledger has high seq "
401 << ledger->info().seq << " > " << maxSeq;
402 return false;
403 }
404
405 JLOG(m_journal.trace())
406 << "Acceptable seq range: " << validLedger->info().seq
407 << " <= " << ledger->info().seq << " <= " << maxSeq;
408 }
409
410 return true;
411}
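// Illustrative example (hypothetical values): if the last validated sequence
// is 5000 and our close time is 60 seconds past that ledger's parent close
// time, then maxSeq = 5000 + 10 + 60 / 2 = 5040, so a candidate claiming
// sequence 6000 would be rejected as implausibly far ahead.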
412
413void
415{
416 XRPL_ASSERT(lastClosed, "ripple::LedgerMaster::switchLCL : non-null input");
417 if (!lastClosed->isImmutable())
418 LogicError("mutable ledger in switchLCL");
419
420 if (lastClosed->open())
421 LogicError("The new last closed ledger is open!");
422
423 {
425 mClosedLedger.set(lastClosed);
426 }
427
428 if (standalone_)
429 {
430 setFullLedger(lastClosed, true, false);
431 tryAdvance();
432 }
433 else
434 {
435 checkAccept(lastClosed);
436 }
437}
438
439bool
440LedgerMaster::fixIndex(LedgerIndex ledgerIndex, LedgerHash const& ledgerHash)
441{
442 return mLedgerHistory.fixIndex(ledgerIndex, ledgerHash);
443}
444
445bool
447{
448 bool validated = ledger->info().validated;
449 // Returns true if we already had the ledger
450 return mLedgerHistory.insert(std::move(ledger), validated);
451}
452
458void
460{
462
464 bool any = false;
465 for (auto const& it : mHeldTransactions)
466 {
467 ApplyFlags flags = tapNONE;
468 auto const result =
469 app_.getTxQ().apply(app_, view, it.second, flags, j);
470 any |= result.applied;
471 }
472 return any;
473 });
474
475 // VFALCO TODO recreate the CanonicalTxSet object instead of resetting
476 // it.
477 // VFALCO NOTE The hash for an open ledger is undefined so we use
478 // something that is a reasonable substitute.
479 mHeldTransactions.reset(app_.openLedger().current()->info().parentHash);
480}
481
484{
486
488}
489
490void
492{
493 mBuildingLedgerSeq.store(i);
494}
495
496bool
498{
500 return boost::icl::contains(mCompleteLedgers, seq);
501}
502
503void
505{
507 mCompleteLedgers.erase(seq);
508}
509
510bool
512{
513 if (ledger.open())
514 return false;
515
516 if (ledger.info().validated)
517 return true;
518
519 auto const seq = ledger.info().seq;
520 try
521 {
522 // Use the skip list in the last validated ledger to see if ledger
523 // comes before the last validated ledger (and thus has been
524 // validated).
525 auto const hash = walkHashBySeq(seq, InboundLedger::Reason::GENERIC);
526
527 if (!hash || ledger.info().hash != *hash)
528 {
529 // This ledger's hash is not the hash of the validated ledger
530 if (hash)
531 {
532 XRPL_ASSERT(
533 hash->isNonZero(),
534 "ripple::LedgerMaster::isValidated : nonzero hash");
535 uint256 valHash =
537 if (valHash == ledger.info().hash)
538 {
539 // SQL database doesn't match ledger chain
540 clearLedger(seq);
541 }
542 }
543 return false;
544 }
545 }
546 catch (SHAMapMissingNode const& mn)
547 {
548 JLOG(m_journal.warn()) << "Ledger #" << seq << ": " << mn.what();
549 return false;
550 }
551
552 // Mark ledger as validated to save time if we see it again.
553 ledger.info().validated = true;
554 return true;
555}
556
557// returns Ledgers we have all the nodes for
558bool
560 std::uint32_t& minVal,
561 std::uint32_t& maxVal)
562{
563 // Validated ledger is likely not stored in the DB yet so we use the
564 // published ledger which is.
565 maxVal = mPubLedgerSeq.load();
566
567 if (!maxVal)
568 return false;
569
571 {
573 maybeMin = prevMissing(mCompleteLedgers, maxVal);
574 }
575
576 if (maybeMin == std::nullopt)
577 minVal = maxVal;
578 else
579 minVal = 1 + *maybeMin;
580
581 return true;
582}
583
584// Returns Ledgers we have all the nodes for and are indexed
585bool
587{
588 if (!getFullValidatedRange(minVal, maxVal))
589 return false;
590
591 // Remove from the validated range any ledger sequences that may not be
592 // fully updated in the database yet
593
594 auto const pendingSaves = app_.pendingSaves().getSnapshot();
595
596 if (!pendingSaves.empty() && ((minVal != 0) || (maxVal != 0)))
597 {
598 // Ensure we shrink the tips as much as possible. If we have 7-9 and
599 // 8,9 are invalid, we don't want to see the 8 and shrink to just 9
600 // because then we'll have nothing when we could have 7.
601 while (pendingSaves.count(maxVal) > 0)
602 --maxVal;
603 while (pendingSaves.count(minVal) > 0)
604 ++minVal;
605
606 // Best effort for remaining exclusions
607 for (auto v : pendingSaves)
608 {
609 if ((v.first >= minVal) && (v.first <= maxVal))
610 {
611 if (v.first > ((minVal + maxVal) / 2))
612 maxVal = v.first - 1;
613 else
614 minVal = v.first + 1;
615 }
616 }
617
618 if (minVal > maxVal)
619 minVal = maxVal = 0;
620 }
621
622 return true;
623}
624
625// Get the earliest ledger we will let peers fetch
628{
629 // The earliest ledger we will let people fetch is ledger zero,
630 // unless that creates a larger range than allowed
631 std::uint32_t e = getClosedLedger()->info().seq;
632
633 if (e > fetch_depth_)
634 e -= fetch_depth_;
635 else
636 e = 0;
637 return e;
638}
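// Illustrative example (hypothetical values): with a closed-ledger sequence
// of 90000 and fetch_depth_ of 1000, peers may fetch ledgers back to
// sequence 89000; if fetch_depth_ exceeds the closed sequence, the floor is
// ledger zero.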
639
640void
642{
643 std::uint32_t seq = ledger->info().seq;
644 uint256 prevHash = ledger->info().parentHash;
645
647
648 std::uint32_t minHas = seq;
649 std::uint32_t maxHas = seq;
650
652 while (!app_.getJobQueue().isStopping() && seq > 0)
653 {
654 {
656 minHas = seq;
657 --seq;
658
659 if (haveLedger(seq))
660 break;
661 }
662
663 auto it(ledgerHashes.find(seq));
664
665 if (it == ledgerHashes.end())
666 {
667 if (app_.isStopping())
668 return;
669
670 {
672 mCompleteLedgers.insert(range(minHas, maxHas));
673 }
674 maxHas = minHas;
676 (seq < 500) ? 0 : (seq - 499), seq);
677 it = ledgerHashes.find(seq);
678
679 if (it == ledgerHashes.end())
680 break;
681
682 if (!nodeStore.fetchNodeObject(
683 ledgerHashes.begin()->second.ledgerHash,
684 ledgerHashes.begin()->first))
685 {
686 // The ledger is not backed by the node store
687 JLOG(m_journal.warn()) << "SQL DB ledger sequence " << seq
688 << " mismatches node store";
689 break;
690 }
691 }
692
693 if (it->second.ledgerHash != prevHash)
694 break;
695
696 prevHash = it->second.parentHash;
697 }
698
699 {
701 mCompleteLedgers.insert(range(minHas, maxHas));
702 }
703 {
705 mFillInProgress = 0;
706 tryAdvance();
707 }
708}
709
712void
714{
715 LedgerIndex const ledgerIndex = missing + 1;
716
717 auto const haveHash{getLedgerHashForHistory(ledgerIndex, reason)};
718 if (!haveHash || haveHash->isZero())
719 {
720 JLOG(m_journal.error())
721 << "No hash for fetch pack. Missing Index " << missing;
722 return;
723 }
724
725 // Select target Peer based on highest score. The score is randomized
726 // but biased in favor of Peers with low latency.
728 {
729 int maxScore = 0;
730 auto peerList = app_.overlay().getActivePeers();
731 for (auto const& peer : peerList)
732 {
733 if (peer->hasRange(missing, missing + 1))
734 {
735 int score = peer->getScore(true);
736 if (!target || (score > maxScore))
737 {
738 target = peer;
739 maxScore = score;
740 }
741 }
742 }
743 }
744
745 if (target)
746 {
747 protocol::TMGetObjectByHash tmBH;
748 tmBH.set_query(true);
749 tmBH.set_type(protocol::TMGetObjectByHash::otFETCH_PACK);
750 tmBH.set_ledgerhash(haveHash->begin(), 32);
751 auto packet = std::make_shared<Message>(tmBH, protocol::mtGET_OBJECTS);
752
753 target->send(packet);
754 JLOG(m_journal.trace()) << "Requested fetch pack for " << missing;
755 }
756 else
757 JLOG(m_journal.debug()) << "No peer for fetch pack";
758}
759
760void
762{
763 int invalidate = 0;
765
766 for (std::uint32_t lSeq = ledger.info().seq - 1; lSeq > 0; --lSeq)
767 {
768 if (haveLedger(lSeq))
769 {
770 try
771 {
772 hash = hashOfSeq(ledger, lSeq, m_journal);
773 }
774 catch (std::exception const& ex)
775 {
776 JLOG(m_journal.warn())
 777 << "fixMismatch encountered a partial ledger. Exception: "
778 << ex.what();
779 clearLedger(lSeq);
780 return;
781 }
782
783 if (hash)
784 {
785 // try to close the seam
786 auto otherLedger = getLedgerBySeq(lSeq);
787
788 if (otherLedger && (otherLedger->info().hash == *hash))
789 {
790 // we closed the seam
791 if (invalidate != 0)
792 {
793 JLOG(m_journal.warn())
794 << "Match at " << lSeq << ", " << invalidate
795 << " prior ledgers invalidated";
796 }
797
798 return;
799 }
800 }
801
802 clearLedger(lSeq);
803 ++invalidate;
804 }
805 }
806
807 // all prior ledgers invalidated
808 if (invalidate != 0)
809 {
810 JLOG(m_journal.warn())
811 << "All " << invalidate << " prior ledgers invalidated";
812 }
813}
814
815void
817 std::shared_ptr<Ledger const> const& ledger,
818 bool isSynchronous,
819 bool isCurrent)
820{
821 // A new ledger has been accepted as part of the trusted chain
822 JLOG(m_journal.debug()) << "Ledger " << ledger->info().seq
 823 << " accepted: " << ledger->info().hash;
824 XRPL_ASSERT(
825 ledger->stateMap().getHash().isNonZero(),
826 "ripple::LedgerMaster::setFullLedger : nonzero ledger state hash");
827
828 ledger->setValidated();
829 ledger->setFull();
830
831 if (isCurrent)
832 mLedgerHistory.insert(ledger, true);
833
834 {
835 // Check the SQL database's entry for the sequence before this
836 // ledger, if it's not this ledger's parent, invalidate it
837 uint256 prevHash =
838 app_.getRelationalDatabase().getHashByIndex(ledger->info().seq - 1);
839 if (prevHash.isNonZero() && prevHash != ledger->info().parentHash)
840 clearLedger(ledger->info().seq - 1);
841 }
842
843 pendSaveValidated(app_, ledger, isSynchronous, isCurrent);
844
845 {
847 mCompleteLedgers.insert(ledger->info().seq);
848 }
849
850 {
852
853 if (ledger->info().seq > mValidLedgerSeq)
854 setValidLedger(ledger);
855 if (!mPubLedger)
856 {
857 setPubLedger(ledger);
858 app_.getOrderBookDB().setup(ledger);
859 }
860
861 if (ledger->info().seq != 0 && haveLedger(ledger->info().seq - 1))
862 {
863 // we think we have the previous ledger, double check
864 auto prevLedger = getLedgerBySeq(ledger->info().seq - 1);
865
866 if (!prevLedger ||
867 (prevLedger->info().hash != ledger->info().parentHash))
868 {
869 JLOG(m_journal.warn())
870 << "Acquired ledger invalidates previous ledger: "
871 << (prevLedger ? "hashMismatch" : "missingLedger");
872 fixMismatch(*ledger);
873 }
874 }
875 }
876}
877
878void
880{
881 clearLedger(seq);
883}
884
885// Check if the specified ledger can become the new last fully-validated
886// ledger.
887void
889{
890 std::size_t valCount = 0;
891
892 if (seq != 0)
893 {
894 // Ledger is too old
895 if (seq < mValidLedgerSeq)
896 return;
897
898 auto validations = app_.validators().negativeUNLFilter(
900 valCount = validations.size();
901 if (valCount >= app_.validators().quorum())
902 {
904 if (seq > mLastValidLedger.second)
905 mLastValidLedger = std::make_pair(hash, seq);
906 }
907
908 if (seq == mValidLedgerSeq)
909 return;
910
911 // Ledger could match the ledger we're already building
912 if (seq == mBuildingLedgerSeq)
913 return;
914 }
915
916 auto ledger = mLedgerHistory.getLedgerByHash(hash);
917
918 if (!ledger)
919 {
920 if ((seq != 0) && (getValidLedgerIndex() == 0))
921 {
922 // Set peers converged early if we can
923 if (valCount >= app_.validators().quorum())
925 }
926
927 // FIXME: We may not want to fetch a ledger with just one
928 // trusted validation
929 ledger = app_.getInboundLedgers().acquire(
931 }
932
933 if (ledger)
934 checkAccept(ledger);
935}
936
944{
945 return standalone_ ? 0 : app_.validators().quorum();
946}
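// In standalone mode there are no trusted validators, so zero validations
// are required and a locally built ledger can be accepted without waiting
// for the network.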
947
948void
950{
951 // Can we accept this ledger as our new last fully-validated ledger
952
953 if (!canBeCurrent(ledger))
954 return;
955
956 // Can we advance the last fully-validated ledger? If so, can we
957 // publish?
959
960 if (ledger->info().seq <= mValidLedgerSeq)
961 return;
962
963 auto const minVal = getNeededValidations();
964 auto validations = app_.validators().negativeUNLFilter(
966 ledger->info().hash, ledger->info().seq));
967 auto const tvc = validations.size();
968 if (tvc < minVal) // nothing we can do
969 {
970 JLOG(m_journal.trace())
971 << "Only " << tvc << " validations for " << ledger->info().hash;
972 return;
973 }
974
975 JLOG(m_journal.info()) << "Advancing accepted ledger to "
976 << ledger->info().seq << " with >= " << minVal
977 << " validations";
978
979 ledger->setValidated();
980 ledger->setFull();
981 setValidLedger(ledger);
982 if (!mPubLedger)
983 {
984 pendSaveValidated(app_, ledger, true, true);
985 setPubLedger(ledger);
986 app_.getOrderBookDB().setup(ledger);
987 }
988
989 std::uint32_t const base = app_.getFeeTrack().getLoadBase();
990 auto fees = app_.getValidations().fees(ledger->info().hash, base);
991 {
992 auto fees2 =
993 app_.getValidations().fees(ledger->info().parentHash, base);
994 fees.reserve(fees.size() + fees2.size());
995 std::copy(fees2.begin(), fees2.end(), std::back_inserter(fees));
996 }
997 std::uint32_t fee;
998 if (!fees.empty())
999 {
1000 std::sort(fees.begin(), fees.end());
1001 if (auto stream = m_journal.debug())
1002 {
1004 s << "Received fees from validations: (" << fees.size() << ") ";
1005 for (auto const fee1 : fees)
1006 {
1007 s << " " << fee1;
1008 }
1009 stream << s.str();
1010 }
 1011 fee = fees[fees.size() / 2]; // median (upper-middle value when the count is even)
1012 }
1013 else
1014 {
1015 fee = base;
1016 }
1017
1019
1020 tryAdvance();
1021
1022 if (ledger->seq() % 256 == 0)
1023 {
1024 // Check if the majority of validators run a higher version rippled
1025 // software. If so print a warning.
1026 //
1027 // Once the HardenedValidations amendment is enabled, validators include
1028 // their rippled software version in the validation messages of every
1029 // (flag - 1) ledger. We wait for one ledger time before checking the
1030 // version information to accumulate more validation messages.
1031
1032 auto currentTime = app_.timeKeeper().now();
1033 bool needPrint = false;
1034
1035 // The variable upgradeWarningPrevTime_ will be set when and only when
1036 // the warning is printed.
1038 {
1039 // Have not printed the warning before, check if need to print.
1040 auto const vals = app_.getValidations().getTrustedForLedger(
1041 ledger->info().parentHash, ledger->info().seq - 1);
1042 std::size_t higherVersionCount = 0;
1043 std::size_t rippledCount = 0;
1044 for (auto const& v : vals)
1045 {
1046 if (v->isFieldPresent(sfServerVersion))
1047 {
1048 auto version = v->getFieldU64(sfServerVersion);
1049 higherVersionCount +=
1050 BuildInfo::isNewerVersion(version) ? 1 : 0;
1051 rippledCount +=
1052 BuildInfo::isRippledVersion(version) ? 1 : 0;
1053 }
1054 }
1055 // We report only if (1) we have accumulated validation messages
1056 // from 90% validators from the UNL, (2) 60% of validators
1057 // running the rippled implementation have higher version numbers,
1058 // and (3) the calculation won't cause divide-by-zero.
1059 if (higherVersionCount > 0 && rippledCount > 0)
1060 {
1061 constexpr std::size_t reportingPercent = 90;
1062 constexpr std::size_t cutoffPercent = 60;
1063 auto const unlSize{
1064 app_.validators().getQuorumKeys().second.size()};
1065 needPrint = unlSize > 0 &&
1066 calculatePercent(vals.size(), unlSize) >=
1067 reportingPercent &&
1068 calculatePercent(higherVersionCount, rippledCount) >=
1069 cutoffPercent;
1070 }
1071 }
1072 // To throttle the warning messages, instead of printing a warning
1073 // every flag ledger, we print every week.
1074 else if (currentTime - upgradeWarningPrevTime_ >= weeks{1})
1075 {
1076 // Printed the warning before, and assuming most validators
1077 // do not downgrade, we keep printing the warning
1078 // until the local server is restarted.
1079 needPrint = true;
1080 }
1081
1082 if (needPrint)
1083 {
1084 upgradeWarningPrevTime_ = currentTime;
1085 auto const upgradeMsg =
1086 "Check for upgrade: "
1087 "A majority of trusted validators are "
1088 "running a newer version.";
1089 std::cerr << upgradeMsg << std::endl;
1090 JLOG(m_journal.error()) << upgradeMsg;
1091 }
1092 }
1093}
1094
1096void
1098 std::shared_ptr<Ledger const> const& ledger,
1099 uint256 const& consensusHash,
1100 Json::Value consensus)
1101{
1102 // Because we just built a ledger, we are no longer building one
1104
1105 // No need to process validations in standalone mode
1106 if (standalone_)
1107 return;
1108
1109 mLedgerHistory.builtLedger(ledger, consensusHash, std::move(consensus));
1110
1111 if (ledger->info().seq <= mValidLedgerSeq)
1112 {
1113 auto stream = app_.journal("LedgerConsensus").info();
1114 JLOG(stream) << "Consensus built old ledger: " << ledger->info().seq
1115 << " <= " << mValidLedgerSeq;
1116 return;
1117 }
1118
1119 // See if this ledger can be the new fully-validated ledger
1120 checkAccept(ledger);
1121
1122 if (ledger->info().seq <= mValidLedgerSeq)
1123 {
1124 auto stream = app_.journal("LedgerConsensus").debug();
1125 JLOG(stream) << "Consensus ledger fully validated";
1126 return;
1127 }
1128
1129 // This ledger cannot be the new fully-validated ledger, but
1130 // maybe we saved up validations for some other ledger that can be
1131
1132 auto validations = app_.validators().negativeUNLFilter(
1134
1135 // Track validation counts with sequence numbers
1136 class valSeq
1137 {
1138 public:
1139 valSeq() : valCount_(0), ledgerSeq_(0)
1140 {
1141 ;
1142 }
1143
1144 void
1145 mergeValidation(LedgerIndex seq)
1146 {
1147 valCount_++;
1148
1149 // If we didn't already know the sequence, now we do
1150 if (ledgerSeq_ == 0)
1151 ledgerSeq_ = seq;
1152 }
1153
1154 std::size_t valCount_;
1155 LedgerIndex ledgerSeq_;
1156 };
1157
1158 // Count the number of current, trusted validations
1160 for (auto const& v : validations)
1161 {
1162 valSeq& vs = count[v->getLedgerHash()];
1163 vs.mergeValidation(v->getFieldU32(sfLedgerSequence));
1164 }
1165
1166 auto const neededValidations = getNeededValidations();
1167 auto maxSeq = mValidLedgerSeq.load();
1168 auto maxLedger = ledger->info().hash;
1169
1170 // Of the ledgers with sufficient validations,
1171 // find the one with the highest sequence
1172 for (auto& v : count)
1173 if (v.second.valCount_ > neededValidations)
1174 {
1175 // If we still don't know the sequence, get it
1176 if (v.second.ledgerSeq_ == 0)
1177 {
1178 if (auto l = getLedgerByHash(v.first))
1179 v.second.ledgerSeq_ = l->info().seq;
1180 }
1181
1182 if (v.second.ledgerSeq_ > maxSeq)
1183 {
1184 maxSeq = v.second.ledgerSeq_;
1185 maxLedger = v.first;
1186 }
1187 }
1188
1189 if (maxSeq > mValidLedgerSeq)
1190 {
1191 auto stream = app_.journal("LedgerConsensus").debug();
1192 JLOG(stream) << "Consensus triggered check of ledger";
1193 checkAccept(maxLedger, maxSeq);
1194 }
1195}
1196
1199 LedgerIndex index,
1200 InboundLedger::Reason reason)
1201{
1202 // Try to get the hash of a ledger we need to fetch for history
1204 auto const& l{mHistLedger};
1205
1206 if (l && l->info().seq >= index)
1207 {
1208 ret = hashOfSeq(*l, index, m_journal);
1209 if (!ret)
1210 ret = walkHashBySeq(index, l, reason);
1211 }
1212
1213 if (!ret)
1214 ret = walkHashBySeq(index, reason);
1215
1216 return ret;
1217}
1218
1222{
1224
1225 JLOG(m_journal.trace()) << "findNewLedgersToPublish<";
1226
1227 // No valid ledger, nothing to do
1228 if (mValidLedger.empty())
1229 {
 1230 JLOG(m_journal.trace()) << "No valid ledger, nothing to publish.";
1231 return {};
1232 }
1233
1234 if (!mPubLedger)
1235 {
1236 JLOG(m_journal.info())
1237 << "First published ledger will be " << mValidLedgerSeq;
1238 return {mValidLedger.get()};
1239 }
1240
1242 {
1243 JLOG(m_journal.warn()) << "Gap in validated ledger stream "
1244 << mPubLedgerSeq << " - " << mValidLedgerSeq - 1;
1245
1246 auto valLedger = mValidLedger.get();
1247 ret.push_back(valLedger);
1248 setPubLedger(valLedger);
1249 app_.getOrderBookDB().setup(valLedger);
1250
1251 return {valLedger};
1252 }
1253
1255 {
 1256 JLOG(m_journal.trace()) << "No valid ledger, nothing to publish.";
1257 return {};
1258 }
1259
1260 int acqCount = 0;
1261
1262 auto pubSeq = mPubLedgerSeq + 1; // Next sequence to publish
1263 auto valLedger = mValidLedger.get();
1264 std::uint32_t valSeq = valLedger->info().seq;
1265
1266 scope_unlock sul{sl};
1267 try
1268 {
1269 for (std::uint32_t seq = pubSeq; seq <= valSeq; ++seq)
1270 {
1271 JLOG(m_journal.trace())
1272 << "Trying to fetch/publish valid ledger " << seq;
1273
1275 // This can throw
1276 auto hash = hashOfSeq(*valLedger, seq, m_journal);
1277 // VFALCO TODO Restructure this code so that zero is not
1278 // used.
1279 if (!hash)
1280 hash = beast::zero; // kludge
1281 if (seq == valSeq)
1282 {
1283 // We need to publish the ledger we just fully validated
1284 ledger = valLedger;
1285 }
1286 else if (hash->isZero())
1287 {
1288 JLOG(m_journal.fatal()) << "Ledger: " << valSeq
1289 << " does not have hash for " << seq;
1290 UNREACHABLE(
1291 "ripple::LedgerMaster::findNewLedgersToPublish : ledger "
1292 "not found");
1293 }
1294 else
1295 {
1296 ledger = mLedgerHistory.getLedgerByHash(*hash);
1297 }
1298
1299 if (!app_.config().LEDGER_REPLAY)
1300 {
1301 // Can we try to acquire the ledger we need?
1302 if (!ledger && (++acqCount < ledger_fetch_size_))
1303 ledger = app_.getInboundLedgers().acquire(
1304 *hash, seq, InboundLedger::Reason::GENERIC);
1305 }
1306
1307 // Did we acquire the next ledger we need to publish?
1308 if (ledger && (ledger->info().seq == pubSeq))
1309 {
1310 ledger->setValidated();
1311 ret.push_back(ledger);
1312 ++pubSeq;
1313 }
1314 }
1315
1316 JLOG(m_journal.trace())
1317 << "ready to publish " << ret.size() << " ledgers.";
1318 }
1319 catch (std::exception const& ex)
1320 {
1321 JLOG(m_journal.error())
1322 << "Exception while trying to find ledgers to publish: "
1323 << ex.what();
1324 }
1325
1327 {
1328 /* Narrow down the gap of ledgers, and try to replay them.
1329 * When replaying a ledger gap, if the local node has
1330 * the start ledger, it saves an expensive InboundLedger
1331 * acquire. If the local node has the finish ledger, it
1332 * saves a skip list acquire.
1333 */
1334 auto const& startLedger = ret.empty() ? mPubLedger : ret.back();
1335 auto finishLedger = valLedger;
1336 while (startLedger->seq() + 1 < finishLedger->seq())
1337 {
1338 if (auto const parent = mLedgerHistory.getLedgerByHash(
1339 finishLedger->info().parentHash);
1340 parent)
1341 {
1342 finishLedger = parent;
1343 }
1344 else
1345 {
1346 auto numberLedgers =
1347 finishLedger->seq() - startLedger->seq() + 1;
1348 JLOG(m_journal.debug())
1349 << "Publish LedgerReplays " << numberLedgers
1350 << " ledgers, from seq=" << startLedger->info().seq << ", "
1351 << startLedger->info().hash
1352 << " to seq=" << finishLedger->info().seq << ", "
1353 << finishLedger->info().hash;
1356 finishLedger->info().hash,
1357 numberLedgers);
1358 break;
1359 }
1360 }
1361 }
1362
1363 return ret;
1364}
1365
1366void
1368{
1370
1371 // Can't advance without at least one fully-valid ledger
1372 mAdvanceWork = true;
1374 {
1375 mAdvanceThread = true;
1376 app_.getJobQueue().addJob(jtADVANCE, "advanceLedger", [this]() {
1378
1379 XRPL_ASSERT(
1381 "ripple::LedgerMaster::tryAdvance : has valid ledger");
1382
1383 JLOG(m_journal.trace()) << "advanceThread<";
1384
1385 try
1386 {
1387 doAdvance(sl);
1388 }
1389 catch (std::exception const& ex)
1390 {
1391 JLOG(m_journal.fatal()) << "doAdvance throws: " << ex.what();
1392 }
1393
1394 mAdvanceThread = false;
1395 JLOG(m_journal.trace()) << "advanceThread>";
1396 });
1397 }
1398}
1399
1400void
1402{
1403 {
1406 {
1408 mPathLedger.reset();
1409 JLOG(m_journal.debug()) << "Need network ledger for updating paths";
1410 return;
1411 }
1412 }
1413
1414 while (!app_.getJobQueue().isStopping())
1415 {
1416 JLOG(m_journal.debug()) << "updatePaths running";
1418 {
1420
1421 if (!mValidLedger.empty() &&
1422 (!mPathLedger || (mPathLedger->info().seq != mValidLedgerSeq)))
1423 { // We have a new valid ledger since the last full pathfinding
1425 lastLedger = mPathLedger;
1426 }
1427 else if (mPathFindNewRequest)
1428 { // We have a new request but no new ledger
1429 lastLedger = app_.openLedger().current();
1430 }
1431 else
1432 { // Nothing to do
1434 mPathLedger.reset();
1435 JLOG(m_journal.debug()) << "Nothing to do for updating paths";
1436 return;
1437 }
1438 }
1439
1440 if (!standalone_)
1441 { // don't pathfind with a ledger that's more than 60 seconds old
1442 using namespace std::chrono;
1443 auto age = time_point_cast<seconds>(app_.timeKeeper().closeTime()) -
1444 lastLedger->info().closeTime;
1445 if (age > 1min)
1446 {
1447 JLOG(m_journal.debug())
1448 << "Published ledger too old for updating paths";
1451 mPathLedger.reset();
1452 return;
1453 }
1454 }
1455
1456 try
1457 {
1458 auto& pathRequests = app_.getPathRequests();
1459 {
1461 if (!pathRequests.requestsPending())
1462 {
1464 mPathLedger.reset();
1465 JLOG(m_journal.debug())
1466 << "No path requests found. Nothing to do for updating "
1467 "paths. "
1468 << mPathFindThread << " jobs remaining";
1469 return;
1470 }
1471 }
1472 JLOG(m_journal.debug()) << "Updating paths";
1473 pathRequests.updateAll(lastLedger);
1474
1476 if (!pathRequests.requestsPending())
1477 {
1478 JLOG(m_journal.debug())
1479 << "No path requests left. No need for further updating "
1480 "paths";
1482 mPathLedger.reset();
1483 return;
1484 }
1485 }
1486 catch (SHAMapMissingNode const& mn)
1487 {
1488 JLOG(m_journal.info()) << "During pathfinding: " << mn.what();
1489 if (lastLedger->open())
1490 {
1491 // our parent is the problem
1493 lastLedger->info().parentHash,
1494 lastLedger->info().seq - 1,
1496 }
1497 else
1498 {
1499 // this ledger is the problem
1501 lastLedger->info().hash,
1502 lastLedger->info().seq,
1504 }
1505 }
1506 }
1507}
1508
1509bool
1511{
1513 mPathFindNewRequest = newPFWork("pf:newRequest", ml);
1514 return mPathFindNewRequest;
1515}
1516
1517bool
1519{
1521 bool const ret = mPathFindNewRequest;
1522 mPathFindNewRequest = false;
1523 return ret;
1524}
1525
1526// If the order book is radically updated, we need to reprocess all
1527// pathfinding requests.
1528bool
1530{
1532 mPathLedger.reset();
1533
1534 return newPFWork("pf:newOBDB", ml);
1535}
1536
1539bool
1541 const char* name,
1543{
1544 if (!app_.isStopping() && mPathFindThread < 2 &&
1546 {
1547 JLOG(m_journal.debug())
1548 << "newPFWork: Creating job. path find threads: "
1549 << mPathFindThread;
1550 if (app_.getJobQueue().addJob(
1551 jtUPDATE_PF, name, [this]() { updatePaths(); }))
1552 {
1554 }
1555 }
1556 // If we're stopping don't give callers the expectation that their
1557 // request will be fulfilled, even if it may be serviced.
1558 return mPathFindThread > 0 && !app_.isStopping();
1559}
1560
1563{
1564 return m_mutex;
1565}
1566
1567// The current ledger is the ledger we believe new transactions should go in
1570{
1571 return app_.openLedger().current();
1572}
1573
1576{
1577 return mValidLedger.get();
1578}
1579
1580Rules
1582{
1583 // Once we have a guarantee that there's always a last validated
1584 // ledger then we can dispense with the if.
1585
1586 // Return the Rules from the last validated ledger.
1587 if (auto const ledger = getValidatedLedger())
1588 return ledger->rules();
1589
1590 return Rules(app_.config().features);
1591}
1592
1593// This is the last ledger we published to clients and can lag the validated
1594// ledger.
1597{
1599 return mPubLedger;
1600}
1601
1604{
1607}
1608
1611{
1612 uint256 hash = getHashBySeq(ledgerIndex);
1613 return hash.isNonZero() ? getCloseTimeByHash(hash, ledgerIndex)
1614 : std::nullopt;
1615}
1616
1619 LedgerHash const& ledgerHash,
1620 std::uint32_t index)
1621{
1622 auto nodeObject = app_.getNodeStore().fetchNodeObject(ledgerHash, index);
1623 if (nodeObject && (nodeObject->getData().size() >= 120))
1624 {
1625 SerialIter it(
1626 nodeObject->getData().data(), nodeObject->getData().size());
1627 if (safe_cast<HashPrefix>(it.get32()) == HashPrefix::ledgerMaster)
1628 {
1629 it.skip(
1630 4 + 8 + 32 + // seq drops parentHash
1631 32 + 32 + 4); // txHash acctHash parentClose
1633 }
1634 }
1635
1636 return std::nullopt;
1637}
1638
1639uint256
1641{
1643
1644 if (hash.isNonZero())
1645 return hash;
1646
1648}
1649
1652{
1653 std::optional<LedgerHash> ledgerHash;
1654
1655 if (auto referenceLedger = mValidLedger.get())
1656 ledgerHash = walkHashBySeq(index, referenceLedger, reason);
1657
1658 return ledgerHash;
1659}
1660
1663 std::uint32_t index,
1664 std::shared_ptr<ReadView const> const& referenceLedger,
1665 InboundLedger::Reason reason)
1666{
1667 if (!referenceLedger || (referenceLedger->info().seq < index))
1668 {
1669 // Nothing we can do. No validated ledger.
1670 return std::nullopt;
1671 }
1672
1673 // See if the hash for the ledger we need is in the reference ledger
1674 auto ledgerHash = hashOfSeq(*referenceLedger, index, m_journal);
1675 if (ledgerHash)
1676 return ledgerHash;
1677
1678 // The hash is not in the reference ledger. Get another ledger which can
1679 // be located easily and should contain the hash.
1680 LedgerIndex refIndex = getCandidateLedger(index);
1681 auto const refHash = hashOfSeq(*referenceLedger, refIndex, m_journal);
1682 XRPL_ASSERT(refHash, "ripple::LedgerMaster::walkHashBySeq : found ledger");
1683 if (refHash)
1684 {
1685 // Try the hash and sequence of a better reference ledger just found
1686 auto ledger = mLedgerHistory.getLedgerByHash(*refHash);
1687
1688 if (ledger)
1689 {
1690 try
1691 {
1692 ledgerHash = hashOfSeq(*ledger, index, m_journal);
1693 }
1694 catch (SHAMapMissingNode const&)
1695 {
1696 ledger.reset();
1697 }
1698 }
1699
1700 // Try to acquire the complete ledger
1701 if (!ledger)
1702 {
1703 if (auto const l = app_.getInboundLedgers().acquire(
1704 *refHash, refIndex, reason))
1705 {
1706 ledgerHash = hashOfSeq(*l, index, m_journal);
1707 XRPL_ASSERT(
1708 ledgerHash,
1709 "ripple::LedgerMaster::walkHashBySeq : has complete "
1710 "ledger");
1711 }
1712 }
1713 }
1714 return ledgerHash;
1715}
1716
1719{
1720 if (index <= mValidLedgerSeq)
1721 {
1722 // Always prefer a validated ledger
1723 if (auto valid = mValidLedger.get())
1724 {
1725 if (valid->info().seq == index)
1726 return valid;
1727
1728 try
1729 {
1730 auto const hash = hashOfSeq(*valid, index, m_journal);
1731
1732 if (hash)
1734 }
1735 catch (std::exception const&)
1736 {
1737 // Missing nodes are already handled
1738 }
1739 }
1740 }
1741
1742 if (auto ret = mLedgerHistory.getLedgerBySeq(index))
1743 return ret;
1744
1745 auto ret = mClosedLedger.get();
1746 if (ret && (ret->info().seq == index))
1747 return ret;
1748
1749 clearLedger(index);
1750 return {};
1751}
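// If a sequence at or below the validated sequence cannot be found in the
// history cache or the closed ledger, clearLedger(index) above drops it from
// the set of complete ledgers so that it can be re-acquired later.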
1752
1755{
1756 if (auto ret = mLedgerHistory.getLedgerByHash(hash))
1757 return ret;
1758
1759 auto ret = mClosedLedger.get();
1760 if (ret && (ret->info().hash == hash))
1761 return ret;
1762
1763 return {};
1764}
1765
1766void
1768{
1770 mCompleteLedgers.insert(range(minV, maxV));
1771}
1772
1773void
1775{
1777 fetch_packs_.sweep();
1778}
1779
1780float
1782{
1784}
1785
1786void
1788{
1790 if (seq > 0)
1791 mCompleteLedgers.erase(range(0u, seq - 1));
1792}
1793
1794void
1796{
1798}
1799
1800void
1802{
1803 replayData = std::move(replay);
1804}
1805
1808{
1809 return std::move(replayData);
1810}
1811
1812void
1814 std::uint32_t missing,
1815 bool& progress,
1816 InboundLedger::Reason reason,
1818{
1819 scope_unlock sul{sl};
1820 if (auto hash = getLedgerHashForHistory(missing, reason))
1821 {
1822 XRPL_ASSERT(
1823 hash->isNonZero(),
1824 "ripple::LedgerMaster::fetchForHistory : found ledger");
1825 auto ledger = getLedgerByHash(*hash);
1826 if (!ledger)
1827 {
1829 {
1830 ledger =
1831 app_.getInboundLedgers().acquire(*hash, missing, reason);
1832 if (!ledger && missing != fetch_seq_ &&
1833 missing > app_.getNodeStore().earliestLedgerSeq())
1834 {
1835 JLOG(m_journal.trace())
1836 << "fetchForHistory want fetch pack " << missing;
1837 fetch_seq_ = missing;
1838 getFetchPack(missing, reason);
1839 }
1840 else
1841 JLOG(m_journal.trace())
1842 << "fetchForHistory no fetch pack for " << missing;
1843 }
1844 else
1845 JLOG(m_journal.debug())
1846 << "fetchForHistory found failed acquire";
1847 }
1848 if (ledger)
1849 {
1850 auto seq = ledger->info().seq;
1851 XRPL_ASSERT(
1852 seq == missing,
1853 "ripple::LedgerMaster::fetchForHistory : sequence match");
1854 JLOG(m_journal.trace()) << "fetchForHistory acquired " << seq;
1855 setFullLedger(ledger, false, false);
1856 int fillInProgress;
1857 {
1859 mHistLedger = ledger;
1860 fillInProgress = mFillInProgress;
1861 }
1862 if (fillInProgress == 0 &&
1864 ledger->info().parentHash)
1865 {
1866 {
1867 // Previous ledger is in DB
1869 mFillInProgress = seq;
1870 }
1872 jtADVANCE, "tryFill", [this, ledger]() {
1873 tryFill(ledger);
1874 });
1875 }
1876 progress = true;
1877 }
1878 else
1879 {
1880 std::uint32_t fetchSz;
1881 // Do not fetch ledger sequences lower
1882 // than the earliest ledger sequence
1883 fetchSz = app_.getNodeStore().earliestLedgerSeq();
1884 fetchSz = missing >= fetchSz
1885 ? std::min(ledger_fetch_size_, (missing - fetchSz) + 1)
1886 : 0;
1887 try
1888 {
1889 for (std::uint32_t i = 0; i < fetchSz; ++i)
1890 {
1891 std::uint32_t seq = missing - i;
1892 if (auto h = getLedgerHashForHistory(seq, reason))
1893 {
1894 XRPL_ASSERT(
1895 h->isNonZero(),
1896 "ripple::LedgerMaster::fetchForHistory : "
1897 "prefetched ledger");
1898 app_.getInboundLedgers().acquire(*h, seq, reason);
1899 }
1900 }
1901 }
1902 catch (std::exception const& ex)
1903 {
1904 JLOG(m_journal.warn())
1905 << "Threw while prefetching: " << ex.what();
1906 }
1907 }
1908 }
1909 else
1910 {
1911 JLOG(m_journal.fatal())
1912 << "Can't find ledger following prevMissing " << missing;
1913 JLOG(m_journal.fatal())
1914 << "Pub:" << mPubLedgerSeq << " Val:" << mValidLedgerSeq;
1915 JLOG(m_journal.fatal())
1916 << "Ledgers: " << app_.getLedgerMaster().getCompleteLedgers();
1917 JLOG(m_journal.fatal())
1918 << "Acquire reason: "
1919 << (reason == InboundLedger::Reason::HISTORY ? "HISTORY"
1920 : "NOT HISTORY");
1921 clearLedger(missing + 1);
1922 progress = true;
1923 }
1924}
1925
1926// Try to publish ledgers, acquire missing ledgers
1927void
1929{
1930 do
1931 {
1932 mAdvanceWork = false; // If there's work to do, we'll make progress
1933 bool progress = false;
1934
1935 auto const pubLedgers = findNewLedgersToPublish(sl);
1936 if (pubLedgers.empty())
1937 {
1943 {
1944 // We are in sync, so can acquire
1947 {
1949 missing = prevMissing(
1951 mPubLedger->info().seq,
1953 }
1954 if (missing)
1955 {
1956 JLOG(m_journal.trace())
1957 << "tryAdvance discovered missing " << *missing;
1958 if ((mFillInProgress == 0 || *missing > mFillInProgress) &&
1963 *missing,
1964 m_journal))
1965 {
1966 JLOG(m_journal.trace())
1967 << "advanceThread should acquire";
1968 }
1969 else
1970 missing = std::nullopt;
1971 }
1972 if (missing)
1973 {
1974 fetchForHistory(*missing, progress, reason, sl);
1976 {
1977 JLOG(m_journal.debug())
1978 << "tryAdvance found last valid changed";
1979 progress = true;
1980 }
1981 }
1982 }
1983 else
1984 {
1985 mHistLedger.reset();
1986 JLOG(m_journal.trace()) << "tryAdvance not fetching history";
1987 }
1988 }
1989 else
1990 {
1991 JLOG(m_journal.trace()) << "tryAdvance found " << pubLedgers.size()
1992 << " ledgers to publish";
1993 for (auto const& ledger : pubLedgers)
1994 {
1995 {
1996 scope_unlock sul{sl};
1997 JLOG(m_journal.debug())
1998 << "tryAdvance publishing seq " << ledger->info().seq;
1999 setFullLedger(ledger, true, true);
2000 }
2001
2002 setPubLedger(ledger);
2003
2004 {
2005 scope_unlock sul{sl};
2006 app_.getOPs().pubLedger(ledger);
2007 }
2008 }
2009
2011 progress = newPFWork("pf:newLedger", sl);
2012 }
2013 if (progress)
2014 mAdvanceWork = true;
2015 } while (mAdvanceWork);
2016}
2017
2018void
2020{
2021 fetch_packs_.canonicalize_replace_client(hash, data);
2022}
2023
2026{
2027 Blob data;
2028 if (fetch_packs_.retrieve(hash, data))
2029 {
2030 fetch_packs_.del(hash, false);
2031 if (hash == sha512Half(makeSlice(data)))
2032 return data;
2033 }
2034 return std::nullopt;
2035}
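// A cached fetch-pack blob is removed from the cache on retrieval and is
// returned only if its contents still hash to the requested key; anything
// else is discarded as corrupt or stale.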
2036
2037void
2039{
2040 if (!mGotFetchPackThread.test_and_set(std::memory_order_acquire))
2041 {
2042 app_.getJobQueue().addJob(jtLEDGER_DATA, "gotFetchPack", [&]() {
2044 mGotFetchPackThread.clear(std::memory_order_release);
2045 });
2046 }
2047}
2048
2074static void
2076 SHAMap const& want,
2077 SHAMap const* have,
2078 std::uint32_t cnt,
2079 protocol::TMGetObjectByHash* into,
2080 std::uint32_t seq,
2081 bool withLeaves = true)
2082{
2083 XRPL_ASSERT(cnt, "ripple::populateFetchPack : nonzero count input");
2084
2085 Serializer s(1024);
2086
2087 want.visitDifferences(
2088 have,
2089 [&s, withLeaves, &cnt, into, seq](SHAMapTreeNode const& n) -> bool {
2090 if (!withLeaves && n.isLeaf())
2091 return true;
2092
2093 s.erase();
2095
2096 auto const& hash = n.getHash().as_uint256();
2097
2098 protocol::TMIndexedObject* obj = into->add_objects();
2099 obj->set_ledgerseq(seq);
2100 obj->set_hash(hash.data(), hash.size());
2101 obj->set_data(s.getDataPtr(), s.getLength());
2102
2103 return --cnt != 0;
2104 });
2105}
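// populateFetchPack() serializes at most cnt nodes that are present in
// 'want' but absent from 'have' (or every node of 'want' when 'have' is
// nullptr), which is why makeFetchPack() below passes nullptr for
// transaction maps: a requestor is unlikely to already share those nodes.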
2106
2107void
2109 std::weak_ptr<Peer> const& wPeer,
2111 uint256 haveLedgerHash,
2113{
2114 using namespace std::chrono_literals;
2115 if (UptimeClock::now() > uptime + 1s)
2116 {
2117 JLOG(m_journal.info()) << "Fetch pack request got stale";
2118 return;
2119 }
2120
2122 {
2123 JLOG(m_journal.info()) << "Too busy to make fetch pack";
2124 return;
2125 }
2126
2127 auto peer = wPeer.lock();
2128
2129 if (!peer)
2130 return;
2131
2132 auto have = getLedgerByHash(haveLedgerHash);
2133
2134 if (!have)
2135 {
2136 JLOG(m_journal.info())
2137 << "Peer requests fetch pack for ledger we don't have: " << have;
2138 peer->charge(Resource::feeRequestNoReply, "get_object ledger");
2139 return;
2140 }
2141
2142 if (have->open())
2143 {
2144 JLOG(m_journal.warn())
2145 << "Peer requests fetch pack from open ledger: " << have;
2146 peer->charge(Resource::feeMalformedRequest, "get_object ledger open");
2147 return;
2148 }
2149
2150 if (have->info().seq < getEarliestFetch())
2151 {
2152 JLOG(m_journal.debug()) << "Peer requests fetch pack that is too early";
2153 peer->charge(Resource::feeMalformedRequest, "get_object ledger early");
2154 return;
2155 }
2156
2157 auto want = getLedgerByHash(have->info().parentHash);
2158
2159 if (!want)
2160 {
2161 JLOG(m_journal.info())
2162 << "Peer requests fetch pack for ledger whose predecessor we "
2163 << "don't have: " << have;
2164 peer->charge(
2165 Resource::feeRequestNoReply, "get_object ledger no parent");
2166 return;
2167 }
2168
2169 try
2170 {
2171 Serializer hdr(128);
2172
2173 protocol::TMGetObjectByHash reply;
2174 reply.set_query(false);
2175
2176 if (request->has_seq())
2177 reply.set_seq(request->seq());
2178
2179 reply.set_ledgerhash(request->ledgerhash());
2180 reply.set_type(protocol::TMGetObjectByHash::otFETCH_PACK);
2181
2182 // Building a fetch pack:
2183 // 1. Add the header for the requested ledger.
2184 // 2. Add the nodes for the AccountStateMap of that ledger.
2185 // 3. If there are transactions, add the nodes for the
2186 // transactions of the ledger.
2187 // 4. If the FetchPack now contains at least 512 entries then stop.
2188 // 5. If not very much time has elapsed, then loop back and repeat
2189 // the same process adding the previous ledger to the FetchPack.
2190 do
2191 {
2192 std::uint32_t lSeq = want->info().seq;
2193
2194 {
2195 // Serialize the ledger header:
2196 hdr.erase();
2197
2199 addRaw(want->info(), hdr);
2200
2201 // Add the data
2202 protocol::TMIndexedObject* obj = reply.add_objects();
2203 obj->set_hash(
2204 want->info().hash.data(), want->info().hash.size());
2205 obj->set_data(hdr.getDataPtr(), hdr.getLength());
2206 obj->set_ledgerseq(lSeq);
2207 }
2208
2210 want->stateMap(), &have->stateMap(), 16384, &reply, lSeq);
2211
2212 // We use nullptr here because transaction maps are per ledger
2213 // and so the requestor is unlikely to already have it.
2214 if (want->info().txHash.isNonZero())
2215 populateFetchPack(want->txMap(), nullptr, 512, &reply, lSeq);
2216
2217 if (reply.objects().size() >= 512)
2218 break;
2219
2220 have = std::move(want);
2221 want = getLedgerByHash(have->info().parentHash);
2222 } while (want && UptimeClock::now() <= uptime + 1s);
2223
2224 auto msg = std::make_shared<Message>(reply, protocol::mtGET_OBJECTS);
2225
2226 JLOG(m_journal.info())
2227 << "Built fetch pack with " << reply.objects().size() << " nodes ("
2228 << msg->getBufferSize() << " bytes)";
2229
2230 peer->send(msg);
2231 }
2232 catch (std::exception const& ex)
2233 {
2234 JLOG(m_journal.warn())
 2235 << "Exception building fetch pack. Exception: " << ex.what();
2236 }
2237}
2238
2241{
2242 return fetch_packs_.getCacheSize();
2243}
2244
2245// Returns the minimum ledger sequence in SQL database, if any.
2248{
2250}
2251
2254{
2255 uint32_t first = 0, last = 0;
2256
2257 if (!getValidatedRange(first, last) || last < ledgerSeq)
2258 return {};
2259
2260 auto const lgr = getLedgerBySeq(ledgerSeq);
2261 if (!lgr || lgr->txs.empty())
2262 return {};
2263
2264 for (auto it = lgr->txs.begin(); it != lgr->txs.end(); ++it)
2265 if (it->first && it->second &&
2266 it->second->isFieldPresent(sfTransactionIndex) &&
2267 it->second->getFieldU32(sfTransactionIndex) == txnIndex)
2268 return it->first->getTransactionID();
2269
2270 return {};
2271}
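// The transaction-ID-by-index lookup above only consults ledgers inside the
// locally validated range and linearly scans for a transaction whose
// TransactionIndex metadata field matches, returning its ID on success.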
2272
2273} // namespace ripple
T back(T... args)
T back_inserter(T... args)
T begin(T... args)
Represents a JSON value.
Definition: json_value.h:147
Provide a light-weight way to check active() before string formatting.
Definition: Journal.h:194
A generic endpoint for log messages.
Definition: Journal.h:59
Stream fatal() const
Definition: Journal.h:341
Stream error() const
Definition: Journal.h:335
Stream debug() const
Definition: Journal.h:317
Stream info() const
Definition: Journal.h:323
Stream trace() const
Severity stream access functions.
Definition: Journal.h:311
Stream warn() const
Definition: Journal.h:329
typename Clock::time_point time_point
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual bool hasUnsupportedEnabled() const =0
returns true if one or more amendments on the network have been enabled that this server does not sup...
void doValidatedLedger(std::shared_ptr< ReadView const > const &lastValidatedLedger)
Called when a new fully-validated ledger is accepted.
virtual Config & config()=0
virtual Overlay & overlay()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual OpenLedger & openLedger()=0
virtual beast::Journal journal(std::string const &name)=0
virtual SHAMapStore & getSHAMapStore()=0
virtual bool isStopping() const =0
virtual NodeStore::Database & getNodeStore()=0
virtual RCLValidations & getValidations()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual LedgerReplayer & getLedgerReplayer()=0
virtual TimeKeeper & timeKeeper()=0
virtual JobQueue & getJobQueue()=0
virtual NetworkOPs & getOPs()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual ValidatorList & validators()=0
virtual LedgerMaster & getLedgerMaster()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual PathRequests & getPathRequests()=0
virtual TxQ & getTxQ()=0
virtual LedgerIndex getMaxDisallowedLedger()=0
Ensure that a newly-started validator does not sign proposals older than the last ledger it persisted...
virtual AmendmentTable & getAmendmentTable()=0
virtual PendingSaves & pendingSaves()=0
void insert(std::shared_ptr< STTx const > const &txn)
std::shared_ptr< STTx const > popAcctTransaction(std::shared_ptr< STTx const > const &tx)
void reset(LedgerHash const &salt)
bool LEDGER_REPLAY
Definition: Config.h:230
std::unordered_set< uint256, beast::uhash<> > features
Definition: Config.h:284
virtual std::shared_ptr< Ledger const > acquire(uint256 const &hash, std::uint32_t seq, InboundLedger::Reason)=0
virtual bool isFailure(uint256 const &h)=0
bool isStopping() const
Definition: JobQueue.h:230
int getJobCount(JobType t) const
Jobs waiting at this priority.
Definition: JobQueue.cpp:140
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition: JobQueue.h:166
float getCacheHitRate()
Get the ledgers_by_hash cache hit rate.
Definition: LedgerHistory.h:53
void builtLedger(std::shared_ptr< Ledger const > const &, uint256 const &consensusHash, Json::Value)
Report that we have locally built a particular ledger.
void sweep()
Remove stale cache entries.
Definition: LedgerHistory.h:76
LedgerHash getLedgerHash(LedgerIndex ledgerIndex)
Get a ledger's hash given its sequence number.
void clearLedgerCachePrior(LedgerIndex seq)
std::shared_ptr< Ledger const > getLedgerBySeq(LedgerIndex ledgerIndex)
Get a ledger given its sequence number.
bool insert(std::shared_ptr< Ledger const > const &ledger, bool validated)
Track a ledger.
bool fixIndex(LedgerIndex ledgerIndex, LedgerHash const &ledgerHash)
Repair a hash to index mapping.
void validatedLedger(std::shared_ptr< Ledger const > const &, std::optional< uint256 > const &consensusHash)
Report that we have validated a particular ledger.
std::shared_ptr< Ledger const > getLedgerByHash(LedgerHash const &ledgerHash)
Retrieve a ledger given its hash.
std::shared_ptr< Ledger const > get()
Definition: LedgerHolder.h:56
void set(std::shared_ptr< Ledger const > ledger)
Definition: LedgerHolder.h:44
bool haveLedger(std::uint32_t seq)
std::shared_ptr< Ledger const > getValidatedLedger()
void clearLedgerCachePrior(LedgerIndex seq)
RangeSet< std::uint32_t > mCompleteLedgers
Definition: LedgerMaster.h:349
void setBuildingLedger(LedgerIndex index)
std::unique_ptr< LedgerReplay > releaseReplay()
void failedSave(std::uint32_t seq, uint256 const &hash)
void takeReplay(std::unique_ptr< LedgerReplay > replay)
std::uint32_t const ledger_history_
Definition: LedgerMaster.h:377
void addHeldTransaction(std::shared_ptr< Transaction > const &trans)
void checkAccept(std::shared_ptr< Ledger const > const &ledger)
std::optional< NetClock::time_point > getCloseTimeByHash(LedgerHash const &ledgerHash, LedgerIndex ledgerIndex)
std::size_t getNeededValidations()
Determines how many validations are needed to fully validate a ledger.
std::unique_ptr< LedgerReplay > replayData
Definition: LedgerMaster.h:346
void setLedgerRangePresent(std::uint32_t minV, std::uint32_t maxV)
Application & app_
Definition: LedgerMaster.h:318
TimeKeeper::time_point upgradeWarningPrevTime_
Definition: LedgerMaster.h:390
LedgerHistory mLedgerHistory
Definition: LedgerMaster.h:341
std::optional< NetClock::time_point > getCloseTimeBySeq(LedgerIndex ledgerIndex)
void fixMismatch(ReadView const &ledger)
std::atomic< LedgerIndex > mPubLedgerSeq
Definition: LedgerMaster.h:365
void clearPriorLedgers(LedgerIndex seq)
std::shared_ptr< Ledger const > mPubLedger
Definition: LedgerMaster.h:330
void makeFetchPack(std::weak_ptr< Peer > const &wPeer, std::shared_ptr< protocol::TMGetObjectByHash > const &request, uint256 haveLedgerHash, UptimeClock::time_point uptime)
std::atomic< LedgerIndex > mBuildingLedgerSeq
Definition: LedgerMaster.h:368
std::shared_ptr< ReadView const > getCurrentLedger()
void tryFill(std::shared_ptr< Ledger const > ledger)
std::uint32_t const fetch_depth_
Definition: LedgerMaster.h:374
bool canBeCurrent(std::shared_ptr< Ledger const > const &ledger)
Check the sequence number and parent close time of a ledger against our clock and last validated ledg...
bool isValidated(ReadView const &ledger)
std::uint32_t getEarliestFetch()
std::recursive_mutex m_mutex
Definition: LedgerMaster.h:321
std::optional< LedgerHash > walkHashBySeq(std::uint32_t index, InboundLedger::Reason reason)
Walk to a ledger's hash using the skip list.
uint256 getHashBySeq(std::uint32_t index)
Get a ledger's hash by sequence number using the cache.
std::shared_ptr< STTx const > popAcctTransaction(std::shared_ptr< STTx const > const &tx)
Get the next transaction held for a particular account, if any.
LedgerIndex const max_ledger_difference_
Definition: LedgerMaster.h:387
bool fixIndex(LedgerIndex ledgerIndex, LedgerHash const &ledgerHash)
TaggedCache< uint256, Blob > fetch_packs_
Definition: LedgerMaster.h:381
bool const standalone_
Definition: LedgerMaster.h:371
bool isCaughtUp(std::string &reason)
void setPubLedger(std::shared_ptr< Ledger const > const &l)
bool newPFWork(const char *name, std::unique_lock< std::recursive_mutex > &)
A thread needs to be dispatched to handle pathfinding work of some kind.
std::optional< uint256 > txnIdFromIndex(uint32_t ledgerSeq, uint32_t txnIndex)
beast::Journal m_journal
Definition: LedgerMaster.h:319
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
void clearLedger(std::uint32_t seq)
std::pair< uint256, LedgerIndex > mLastValidLedger
Definition: LedgerMaster.h:339
std::shared_ptr< Ledger const > getClosedLedger()
Definition: LedgerMaster.h:80
std::optional< LedgerIndex > minSqlSeq()
void setFullLedger(std::shared_ptr< Ledger const > const &ledger, bool isSynchronous, bool isCurrent)
LedgerMaster(Application &app, Stopwatch &stopwatch, beast::insight::Collector::ptr const &collector, beast::Journal journal)
std::atomic< std::uint32_t > mValidLedgerSign
Definition: LedgerMaster.h:366
CanonicalTXSet mHeldTransactions
Definition: LedgerMaster.h:343
std::uint32_t const ledger_fetch_size_
Definition: LedgerMaster.h:379
void applyHeldTransactions()
Apply held transactions to the open ledger. This is normally called as we close the ledger.
std::chrono::seconds getPublishedLedgerAge()
std::shared_ptr< Ledger const > mHistLedger
Definition: LedgerMaster.h:336
std::recursive_mutex mCompleteLock
Definition: LedgerMaster.h:348
std::string getCompleteLedgers()
std::atomic< LedgerIndex > mValidLedgerSeq
Definition: LedgerMaster.h:367
std::size_t getFetchPackCacheSize() const
bool getFullValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::optional< Blob > getFetchPack(uint256 const &hash) override
Retrieves partial ledger data for the corresponding hash from peers.
void gotFetchPack(bool progress, std::uint32_t seq)
std::recursive_mutex & peekMutex()
void consensusBuilt(std::shared_ptr< Ledger const > const &ledger, uint256 const &consensusHash, Json::Value consensus)
Report that the consensus process built a particular ledger.
std::shared_ptr< Ledger const > mPathLedger
Definition: LedgerMaster.h:333
void setValidLedger(std::shared_ptr< Ledger const > const &l)
std::optional< LedgerHash > getLedgerHashForHistory(LedgerIndex index, InboundLedger::Reason reason)
void addFetchPack(uint256 const &hash, std::shared_ptr< Blob > data)
std::atomic< std::uint32_t > mPubLedgerClose
Definition: LedgerMaster.h:364
void switchLCL(std::shared_ptr< Ledger const > const &lastClosed)
LedgerHolder mValidLedger
Definition: LedgerMaster.h:327
std::shared_ptr< ReadView const > getPublishedLedger()
std::atomic_flag mGotFetchPackThread
Definition: LedgerMaster.h:361
void doAdvance(std::unique_lock< std::recursive_mutex > &)
LedgerHolder mClosedLedger
Definition: LedgerMaster.h:324
bool storeLedger(std::shared_ptr< Ledger const > ledger)
std::vector< std::shared_ptr< Ledger const > > findNewLedgersToPublish(std::unique_lock< std::recursive_mutex > &)
LedgerIndex getCurrentLedgerIndex()
bool isCompatible(ReadView const &, beast::Journal::Stream, char const *reason)
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
void fetchForHistory(std::uint32_t missing, bool &progress, InboundLedger::Reason reason, std::unique_lock< std::recursive_mutex > &)
std::shared_ptr< Ledger const > getLedgerByHash(uint256 const &hash)
std::uint32_t fetch_seq_
Definition: LedgerMaster.h:383
LedgerIndex getValidLedgerIndex()
std::chrono::seconds getValidatedLedgerAge()
void replay(InboundLedger::Reason r, uint256 const &finishLedgerHash, std::uint32_t totalNumLedgers)
Replay a range of ledgers.
void setRemoteFee(std::uint32_t f)
Definition: LoadFeeTrack.h:61
bool isLoadedLocal() const
Definition: LoadFeeTrack.h:127
std::uint32_t getLoadBase() const
Definition: LoadFeeTrack.h:90
virtual bool isBlocked()=0
virtual void setAmendmentWarned()=0
virtual void setAmendmentBlocked()=0
virtual void clearNeedNetworkLedger()=0
virtual bool isAmendmentWarned()=0
virtual bool isNeedNetworkLedger()=0
virtual void updateLocalTx(ReadView const &newValidLedger)=0
virtual void clearAmendmentWarned()=0
virtual void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted)=0
Persistence layer for NodeObject.
Definition: Database.h:50
std::shared_ptr< NodeObject > fetchNodeObject(uint256 const &hash, std::uint32_t ledgerSeq=0, FetchType fetchType=FetchType::synchronous, bool duplicate=false)
Fetch a node object.
Definition: Database.cpp:239
virtual std::int32_t getWriteLoad() const =0
Retrieve the estimated number of pending write operations.
std::uint32_t earliestLedgerSeq() const noexcept
Definition: Database.h:220
bool modify(modify_type const &f)
Modify the open ledger.
Definition: OpenLedger.cpp:57
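As a hedged sketch of how a held transaction might be re-applied through modify(): the callback signature bool(OpenView&, beast::Journal), the openLedger() accessor, and the ApplyResult member access are assumptions based on the declarations listed on this page; txn is a hypothetical STTx.
// Sketch only: re-apply one held transaction inside a modify() callback.
app_.openLedger().modify([&](OpenView& view, beast::Journal j) {
    auto const res = apply(app_, view, *txn, tapNONE, j);
    // ApplyResult's exact members vary by version; `applied` is assumed to
    // indicate whether the view changed, which is what modify() expects back.
    return res.applied;
});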
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
Definition: OpenLedger.cpp:50
Writable ledger view that accumulates state and tx changes.
Definition: OpenView.h:56
void setup(std::shared_ptr< ReadView const > const &ledger)
Definition: OrderBookDB.cpp:38
virtual PeerSequence getActivePeers() const =0
Returns a sequence representing the current list of peers.
virtual void checkTracking(std::uint32_t index)=0
Calls the checkTracking function on each peer.
bool requestsPending() const
std::map< LedgerIndex, bool > getSnapshot() const
Get a snapshot of the pending saves.
Definition: PendingSaves.h:137
A view into a ledger.
Definition: ReadView.h:55
virtual bool open() const =0
Returns true if this reflects an open ledger.
virtual LedgerInfo const & info() const =0
Returns information about the ledger.
virtual std::optional< LedgerHashPair > getHashesByIndex(LedgerIndex ledgerIndex)=0
Returns the hashes of the ledger and its parent as specified by ledgerIndex.
virtual std::optional< LedgerIndex > getMinLedgerSeq()=0
Returns the minimum ledger sequence in the Ledgers table.
virtual uint256 getHashByIndex(LedgerIndex ledgerIndex)=0
Returns the hash of the ledger with the given sequence.
Rules controlling protocol behavior.
Definition: Rules.h:35
uint256 const & as_uint256() const
Definition: SHAMapHash.h:44
virtual void onLedgerClosed(std::shared_ptr< Ledger const > const &ledger)=0
Called by LedgerMaster every time a ledger validates.
virtual std::optional< LedgerIndex > minimumOnline() const =0
The minimum ledger to try to maintain in our database.
virtual bool isLeaf() const =0
Determines if this is a leaf node.
SHAMapHash const & getHash() const
Return the hash of this node.
virtual void serializeWithPrefix(Serializer &) const =0
Serialize the node in a format appropriate for hashing.
A SHAMap is both a radix tree with a fan-out of 16 and a Merkle tree.
Definition: SHAMap.h:96
void visitDifferences(SHAMap const *have, std::function< bool(SHAMapTreeNode const &)> const &) const
Visit every node in this SHAMap that is not present in the specified SHAMap.
Definition: SHAMapSync.cpp:100
void skip(int num)
Definition: Serializer.cpp:330
std::uint32_t get32()
Definition: Serializer.cpp:364
int getLength() const
Definition: Serializer.h:233
const void * getDataPtr() const
Definition: Serializer.h:223
time_point now() const override
Returns the current time, using the server's clock.
Definition: TimeKeeper.h:64
time_point closeTime() const
Returns the predicted close time, in network time.
Definition: TimeKeeper.h:76
static time_point now()
Definition: UptimeClock.cpp:63
std::vector< WrappedValidationType > getTrustedForLedger(ID const &ledgerID, Seq const &seq)
Get trusted full validations for a specific ledger.
Definition: Validations.h:1058
std::vector< WrappedValidationType > currentTrusted()
Get the currently trusted full validations.
Definition: Validations.h:999
std::vector< std::uint32_t > fees(ID const &ledgerID, std::uint32_t baseFee)
Returns fees reported by trusted full validators in the given ledger.
Definition: Validations.h:1081
std::vector< std::shared_ptr< STValidation > > negativeUNLFilter(std::vector< std::shared_ptr< STValidation > > &&validations) const
Remove validations that are from validators on the negative UNL.
std::size_t quorum() const
Get the quorum value for the current trusted key set.
QuorumKeys getQuorumKeys() const
Get the quorum and all of the trusted keys.
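A sketch of the kind of quorum comparison checkAccept-style code could make with the declarations above; keying getTrustedForLedger by the ledger's hash and sequence, and treating quorum() as the needed count, are assumptions, and `ledger` is a hypothetical candidate ledger.
// Sketch only: count trusted validations for a candidate ledger.
auto const vals = app_.getValidations().getTrustedForLedger(
    ledger->info().hash, ledger->info().seq);
std::size_t const needed = app_.validators().quorum();
bool const fullyValidated = vals.size() >= needed;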
bool isNonZero() const
Definition: base_uint.h:544
Automatically unlocks and re-locks a unique_lock object.
Definition: scope.h:231
T clear(T... args)
T copy(T... args)
T count(T... args)
T empty(T... args)
T end(T... args)
T endl(T... args)
T find(T... args)
T load(T... args)
T lock(T... args)
T make_pair(T... args)
T max(T... args)
T min(T... args)
bool isNewerVersion(std::uint64_t version)
Check if the version is newer than the local node's rippled software version.
Definition: BuildInfo.cpp:169
bool isRippledVersion(std::uint64_t version)
Check if the encoded software version is a rippled software version.
Definition: BuildInfo.cpp:162
Charge const feeMalformedRequest
Schedule of fees charged for imposing load on the server.
Charge const feeRequestNoReply
TER valid(PreclaimContext const &ctx, AccountID const &src)
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: algorithm.h:26
SizedItem
Definition: Config.h:51
LedgerIndex getCandidateLedger(LedgerIndex requested)
Find a ledger index from which we could easily get the requested ledger.
Definition: View.h:331
static bool shouldAcquire(std::uint32_t const currentLedger, std::uint32_t const ledgerHistory, std::optional< LedgerIndex > const minimumOnline, std::uint32_t const candidateLedger, beast::Journal j)
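A call sketch for the shouldAcquire helper above; the numeric arguments are made up purely to show the decision inputs (current validated ledger, history depth, optional minimum-online floor, candidate ledger).
// Sketch only: with 256 ledgers of history and no minimum-online floor,
// candidate 900 is within range of validated ledger 1000.
bool const fetch = shouldAcquire(
    1000,          // currentLedger
    256,           // ledgerHistory
    std::nullopt,  // minimumOnline
    900,           // candidateLedger
    m_journal);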
std::optional< T > prevMissing(RangeSet< T > const &rs, T t, T minVal=0)
Find the largest value not in the set that is less than a given value.
Definition: RangeSet.h:183
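A sketch of how a back-fill gap could be located with prevMissing, using members listed on this page; the lock discipline and the choice of earliestLedgerSeq() as the minimum bound are assumptions.
// Sketch only: newest ledger below the validated sequence that we lack.
std::optional<std::uint32_t> missing;
{
    std::lock_guard sl(mCompleteLock);
    missing = prevMissing(
        mCompleteLedgers,
        mValidLedgerSeq.load(),
        app_.getNodeStore().earliestLedgerSeq());
}
if (missing)
    JLOG(m_journal.debug()) << "Candidate for back-fill: " << *missing;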
bool isCurrent(ValidationParms const &p, NetClock::time_point now, NetClock::time_point signTime, NetClock::time_point seenTime)
Whether a validation is still current.
Definition: Validations.h:148
std::optional< uint256 > hashOfSeq(ReadView const &ledger, LedgerIndex seq, beast::Journal journal)
Return the hash of a ledger by sequence.
Definition: View.cpp:836
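A sketch of resolving an earlier ledger's hash through a newer ledger's skip list with hashOfSeq; the 100-ledger offset is arbitrary and chosen only to show the call.
// Sketch only: look 100 ledgers back from the last validated ledger.
if (auto const valid = mValidLedger.get(); valid && valid->info().seq > 100)
{
    if (auto const h = hashOfSeq(*valid, valid->info().seq - 100, m_journal))
        JLOG(m_journal.trace()) << "skip-list hash: " << *h;
}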
static constexpr int MAX_LEDGER_GAP
constexpr std::size_t calculatePercent(std::size_t count, std::size_t total)
Calculate one number as a percentage of another.
Definition: MathUtilities.h:44
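For instance, a sync-progress figure could be expressed with calculatePercent; the counts below are invented, and only the call shape is meant to be informative.
// Sketch only: how much of the desired history is already held.
std::size_t const have = 300;                   // hypothetical ledgers on disk
std::size_t const want = 512;                   // hypothetical target history
auto const pct = calculatePercent(have, want);  // integral percentage, 0..100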
std::enable_if_t< std::is_same< T, char >::value||std::is_same< T, unsigned char >::value, Slice > makeSlice(std::array< T, N > const &a)
Definition: Slice.h:243
Stopwatch & stopwatch()
Returns an instance of a wall clock.
Definition: chrono.h:120
ApplyResult apply(Application &app, OpenView &view, STTx const &tx, ApplyFlags flags, beast::Journal journal)
Apply a transaction to an OpenView.
Definition: apply.cpp:109
std::string to_string(base_uint< Bits, Tag > const &a)
Definition: base_uint.h:629
ClosedInterval< T > range(T low, T high)
Create a closed range interval.
Definition: RangeSet.h:54
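A sketch of recording a contiguous span in the completed-ledger set with range(), in the spirit of what setLedgerRangePresent's name suggests; the bounds are hypothetical.
// Sketch only: mark ledgers [minV, maxV] as present locally.
std::uint32_t const minV = 32570;  // hypothetical lower bound
std::uint32_t const maxV = 32600;  // hypothetical upper bound
std::lock_guard sl(mCompleteLock);
mCompleteLedgers.insert(range(minV, maxV));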
static void populateFetchPack(SHAMap const &want, SHAMap const *have, std::uint32_t cnt, protocol::TMGetObjectByHash *into, std::uint32_t seq, bool withLeaves=true)
Populate a fetch pack with data from the map the recipient wants.
ApplyFlags
Definition: ApplyView.h:30
@ tapNONE
Definition: ApplyView.h:31
static constexpr std::chrono::minutes MAX_LEDGER_AGE_ACQUIRE
@ ledgerMaster
ledger master data for signing
static constexpr int MAX_WRITE_LOAD_ACQUIRE
void addRaw(LedgerHeader const &, Serializer &, bool includeHash=false)
@ jtLEDGER_DATA
Definition: Job.h:66
@ jtUPDATE_PF
Definition: Job.h:56
@ jtPUBOLDLEDGER
Definition: Job.h:44
@ jtADVANCE
Definition: Job.h:67
sha512_half_hasher::result_type sha512Half(Args const &... args)
Returns the SHA512-Half of a series of objects.
Definition: digest.h:223
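A sketch of computing a domain-separated digest with sha512Half; pairing it with HashPrefix::ledgerMaster and hashing a raw byte blob are assumptions made only to show the call.
// Sketch only: hash an arbitrary blob under a namespacing prefix.
Blob const blob{0x01, 0x02, 0x03};  // hypothetical payload
uint256 const digest = sha512Half(HashPrefix::ledgerMaster, makeSlice(blob));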
bool areCompatible(ReadView const &validLedger, ReadView const &testLedger, beast::Journal::Stream &s, const char *reason)
Return false if the test ledger is provably incompatible with the valid ledger, that is,...
Definition: View.cpp:674
void LogicError(std::string const &how) noexcept
Called when faulty logic causes a broken invariant.
Definition: contract.cpp:48
bool pendSaveValidated(Application &app, std::shared_ptr< Ledger const > const &ledger, bool isSynchronous, bool isCurrent)
Save, or arrange to save, a fully-validated ledger. Returns false on error.
Definition: Ledger.cpp:1002
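A usage sketch for pendSaveValidated as declared above; the flag values and the warning log are illustrative, and `ledger` is a hypothetical validated ledger.
// Sketch only: arrange for a freshly validated ledger to be persisted.
if (!pendSaveValidated(app_, ledger, /*isSynchronous=*/false, /*isCurrent=*/true))
    JLOG(m_journal.warn()) << "Failed to schedule save of ledger "
                           << ledger->info().seq;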
STL namespace.
T has_value(T... args)
T push_back(T... args)
T reserve(T... args)
T size(T... args)
T sort(T... args)
T str(T... args)
T test_and_set(T... args)
T time_since_epoch(T... args)
T what(T... args)