LedgerMaster.cpp
1//------------------------------------------------------------------------------
2/*
3 This file is part of rippled: https://github.com/ripple/rippled
4 Copyright (c) 2012, 2013 Ripple Labs Inc.
5
6 Permission to use, copy, modify, and/or distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 13 ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19
20#include <xrpld/app/consensus/RCLValidations.h>
21#include <xrpld/app/ledger/Ledger.h>
22#include <xrpld/app/ledger/LedgerMaster.h>
23#include <xrpld/app/ledger/LedgerReplayer.h>
24#include <xrpld/app/ledger/OpenLedger.h>
25#include <xrpld/app/ledger/OrderBookDB.h>
26#include <xrpld/app/ledger/PendingSaves.h>
27#include <xrpld/app/main/Application.h>
28#include <xrpld/app/misc/AmendmentTable.h>
29#include <xrpld/app/misc/HashRouter.h>
30#include <xrpld/app/misc/LoadFeeTrack.h>
31#include <xrpld/app/misc/NetworkOPs.h>
32#include <xrpld/app/misc/SHAMapStore.h>
33#include <xrpld/app/misc/Transaction.h>
34#include <xrpld/app/misc/TxQ.h>
35#include <xrpld/app/misc/ValidatorList.h>
36#include <xrpld/app/paths/PathRequests.h>
37#include <xrpld/app/rdb/RelationalDatabase.h>
38#include <xrpld/app/tx/apply.h>
39#include <xrpld/core/DatabaseCon.h>
40#include <xrpld/core/TimeKeeper.h>
41#include <xrpld/overlay/Overlay.h>
42#include <xrpld/overlay/Peer.h>
43
44#include <xrpl/basics/Log.h>
45#include <xrpl/basics/MathUtilities.h>
46#include <xrpl/basics/TaggedCache.h>
47#include <xrpl/basics/UptimeClock.h>
48#include <xrpl/basics/contract.h>
49#include <xrpl/basics/safe_cast.h>
50#include <xrpl/basics/scope.h>
51#include <xrpl/beast/utility/instrumentation.h>
52#include <xrpl/protocol/BuildInfo.h>
53#include <xrpl/protocol/HashPrefix.h>
54#include <xrpl/protocol/digest.h>
55#include <xrpl/resource/Fees.h>
56
57#include <algorithm>
58#include <chrono>
59#include <cstdlib>
60#include <limits>
61#include <memory>
62#include <vector>
63
64namespace ripple {
65
66// Don't catch up more than 100 ledgers (cannot exceed 256)
67static constexpr int MAX_LEDGER_GAP{100};
68
69// Don't acquire history if ledger is too old
71
72// Don't acquire history if write load is too high
73static constexpr int MAX_WRITE_LOAD_ACQUIRE{8192};
74
75// Helper function for LedgerMaster::doAdvance()
76// Return true if candidateLedger should be fetched from the network.
77static bool
79 std::uint32_t const currentLedger,
80 std::uint32_t const ledgerHistory,
81 std::optional<LedgerIndex> const minimumOnline,
82 std::uint32_t const candidateLedger,
84{
85 bool const ret = [&]() {
86 // Fetch ledger if it may be the current ledger
87 if (candidateLedger >= currentLedger)
88 return true;
89
90 // Or if it is within our configured history range:
91 if (currentLedger - candidateLedger <= ledgerHistory)
92 return true;
93
94 // Or if greater than or equal to a specific minimum ledger.
95 // Do nothing if the minimum ledger to keep online is unknown.
96 return minimumOnline.has_value() && candidateLedger >= *minimumOnline;
97 }();
98
99 JLOG(j.trace()) << "Missing ledger " << candidateLedger
100 << (ret ? " should" : " should NOT") << " be acquired";
101 return ret;
102}
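// Illustrative example of the predicate above (hypothetical values): with
// currentLedger = 70'000, ledgerHistory = 256 and no minimumOnline set, a
// candidate at 69'900 is acquired (it falls inside the history window),
// while a candidate at 60'000 is not unless minimumOnline is at or below it.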
103
105 Application& app,
107 beast::insight::Collector::ptr const& collector,
108 beast::Journal journal)
109 : app_(app)
110 , m_journal(journal)
111 , mLedgerHistory(collector, app)
112 , standalone_(app_.config().standalone())
113 , fetch_depth_(
114 app_.getSHAMapStore().clampFetchDepth(app_.config().FETCH_DEPTH))
115 , ledger_history_(app_.config().LEDGER_HISTORY)
116 , ledger_fetch_size_(app_.config().getValueFor(SizedItem::ledgerFetch))
117 , fetch_packs_(
118 "FetchPack",
119 65536,
120 std::chrono::seconds{45},
121 stopwatch,
122 app_.journal("TaggedCache"))
123 , m_stats(std::bind(&LedgerMaster::collect_metrics, this), collector)
124{
125}
126
129{
130 return app_.openLedger().current()->info().seq;
131}
132
135{
136 return mValidLedgerSeq;
137}
138
139bool
141 ReadView const& view,
143 char const* reason)
144{
145 auto validLedger = getValidatedLedger();
146
147 if (validLedger && !areCompatible(*validLedger, view, s, reason))
148 {
149 return false;
150 }
151
152 {
154
155 if ((mLastValidLedger.second != 0) &&
157 mLastValidLedger.first,
158 mLastValidLedger.second,
159 view,
160 s,
161 reason))
162 {
163 return false;
164 }
165 }
166
167 return true;
168}
169
172{
173 using namespace std::chrono_literals;
175 if (pubClose == 0s)
176 {
177 JLOG(m_journal.debug()) << "No published ledger";
178 return weeks{2};
179 }
180
181 std::chrono::seconds ret = app_.timeKeeper().closeTime().time_since_epoch();
182 ret -= pubClose;
183 ret = (ret > 0s) ? ret : 0s;
184 static std::chrono::seconds lastRet = -1s;
185
186 if (ret != lastRet)
187 {
188 JLOG(m_journal.trace()) << "Published ledger age is " << ret.count();
189 lastRet = ret;
190 }
191 return ret;
192}
193
196{
197 using namespace std::chrono_literals;
198
200 if (valClose == 0s)
201 {
202 JLOG(m_journal.debug()) << "No validated ledger";
203 return weeks{2};
204 }
205
206 std::chrono::seconds ret = app_.timeKeeper().closeTime().time_since_epoch();
207 ret -= valClose;
208 ret = (ret > 0s) ? ret : 0s;
209 static std::chrono::seconds lastRet = -1s;
210
211 if (ret != lastRet)
212 {
213 JLOG(m_journal.trace()) << "Validated ledger age is " << ret.count();
214 lastRet = ret;
215 }
216 return ret;
217}
218
219bool
221{
222 using namespace std::chrono_literals;
223
224 if (getPublishedLedgerAge() > 3min)
225 {
226 reason = "No recently-published ledger";
227 return false;
228 }
229 std::uint32_t validClose = mValidLedgerSign.load();
231 if (!validClose || !pubClose)
232 {
233 reason = "No published ledger";
234 return false;
235 }
236 if (validClose > (pubClose + 90))
237 {
238 reason = "Published ledger lags validated ledger";
239 return false;
240 }
241 return true;
242}
243
244void
246{
248 std::optional<uint256> consensusHash;
249
250 if (!standalone_)
251 {
252 auto validations = app_.validators().negativeUNLFilter(
254 l->info().hash, l->info().seq));
255 times.reserve(validations.size());
256 for (auto const& val : validations)
257 times.push_back(val->getSignTime());
258
259 if (!validations.empty())
260 consensusHash = validations.front()->getConsensusHash();
261 }
262
263 NetClock::time_point signTime;
264
265 if (!times.empty() && times.size() >= app_.validators().quorum())
266 {
267 // Calculate the sample median
268 std::sort(times.begin(), times.end());
269 auto const t0 = times[(times.size() - 1) / 2];
270 auto const t1 = times[times.size() / 2];
271 signTime = t0 + (t1 - t0) / 2;
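        // With an odd sample size t0 and t1 are the same element, so this is
        // simply the middle sign time; with an even size it splits the
        // difference between the two middle values (rounding toward t0).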
272 }
273 else
274 {
275 signTime = l->info().closeTime;
276 }
277
278 mValidLedger.set(l);
279 mValidLedgerSign = signTime.time_since_epoch().count();
280 XRPL_ASSERT(
282 l->info().seq + max_ledger_difference_ >
284 "ripple::LedgerMaster::setValidLedger : valid ledger sequence");
286 mValidLedgerSeq = l->info().seq;
287
290 mLedgerHistory.validatedLedger(l, consensusHash);
292 if (!app_.getOPs().isBlocked())
293 {
295 {
296 JLOG(m_journal.error()) << "One or more unsupported amendments "
297 "activated: server blocked.";
299 }
300 else if (!app_.getOPs().isAmendmentWarned() || l->isFlagLedger())
301 {
302 // Amendments can lose majority, so re-check periodically (every
303 // flag ledger), and clear the flag if appropriate. If an unknown
304 // amendment gains majority log a warning as soon as it's
305 // discovered, then again every flag ledger until the operator
306 // upgrades, the amendment loses majority, or the amendment goes
307 // live and the node gets blocked. Unlike being amendment blocked,
308 // this message may be logged more than once per session, because
309 // the node will otherwise function normally, and this gives
310 // operators an opportunity to see and resolve the warning.
311 if (auto const first =
313 {
314 JLOG(m_journal.error()) << "One or more unsupported amendments "
315 "reached majority. Upgrade before "
316 << to_string(*first)
317 << " to prevent your server from "
318 "becoming amendment blocked.";
320 }
321 else
323 }
324 }
325}
326
327void
329{
330 mPubLedger = l;
331 mPubLedgerClose = l->info().closeTime.time_since_epoch().count();
332 mPubLedgerSeq = l->info().seq;
333}
334
335void
337 std::shared_ptr<Transaction> const& transaction)
338{
340 mHeldTransactions.insert(transaction->getSTransaction());
341}
342
343// Validate a ledger's close time and sequence number if we're considering
344// jumping to that ledger. This helps defend against some rare hostile or
345// diverged majority scenarios.
346bool
348{
349 XRPL_ASSERT(ledger, "ripple::LedgerMaster::canBeCurrent : non-null input");
350
351 // Never jump to a candidate ledger that precedes our
352 // last validated ledger
353
354 auto validLedger = getValidatedLedger();
355 if (validLedger && (ledger->info().seq < validLedger->info().seq))
356 {
357 JLOG(m_journal.trace())
358 << "Candidate for current ledger has low seq " << ledger->info().seq
359 << " < " << validLedger->info().seq;
360 return false;
361 }
362
363 // Ensure this ledger's parent close time is within five minutes of
364 // our current time. If we already have a known fully-valid ledger
365 // we perform this check. Otherwise, we only do it if we've built a
366 // few ledgers as our clock can be off when we first start up
367
368 auto closeTime = app_.timeKeeper().closeTime();
369 auto ledgerClose = ledger->info().parentCloseTime;
370
371 using namespace std::chrono_literals;
372 if ((validLedger || (ledger->info().seq > 10)) &&
373 ((std::max(closeTime, ledgerClose) - std::min(closeTime, ledgerClose)) >
374 5min))
375 {
376 JLOG(m_journal.warn())
377 << "Candidate for current ledger has close time "
378 << to_string(ledgerClose) << " at network time "
379 << to_string(closeTime) << " seq " << ledger->info().seq;
380 return false;
381 }
382
383 if (validLedger)
384 {
385 // Sequence number must not be too high. We allow ten ledgers
386 // for time inaccuracies plus a maximum run rate of one ledger
387 // every two seconds. The goal is to prevent a malicious ledger
388 // from increasing our sequence unreasonably high
389
390 LedgerIndex maxSeq = validLedger->info().seq + 10;
391
392 if (closeTime > validLedger->info().parentCloseTime)
393 maxSeq += std::chrono::duration_cast<std::chrono::seconds>(
394 closeTime - validLedger->info().parentCloseTime)
395 .count() /
396 2;
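        // Illustrative example (hypothetical values): if the candidate closes
        // 60 seconds after the last validated ledger's parent close time, the
        // allowed window is validLedger seq + 10 + 60/2 = seq + 40.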
397
398 if (ledger->info().seq > maxSeq)
399 {
400 JLOG(m_journal.warn())
401 << "Candidate for current ledger has high seq "
402 << ledger->info().seq << " > " << maxSeq;
403 return false;
404 }
405
406 JLOG(m_journal.trace())
407 << "Acceptable seq range: " << validLedger->info().seq
408 << " <= " << ledger->info().seq << " <= " << maxSeq;
409 }
410
411 return true;
412}
413
414void
416{
417 XRPL_ASSERT(lastClosed, "ripple::LedgerMaster::switchLCL : non-null input");
418 if (!lastClosed->isImmutable())
419 LogicError("mutable ledger in switchLCL");
420
421 if (lastClosed->open())
422 LogicError("The new last closed ledger is open!");
423
424 {
426 mClosedLedger.set(lastClosed);
427 }
428
429 if (standalone_)
430 {
431 setFullLedger(lastClosed, true, false);
432 tryAdvance();
433 }
434 else
435 {
436 checkAccept(lastClosed);
437 }
438}
439
440bool
441LedgerMaster::fixIndex(LedgerIndex ledgerIndex, LedgerHash const& ledgerHash)
442{
443 return mLedgerHistory.fixIndex(ledgerIndex, ledgerHash);
444}
445
446bool
448{
449 bool validated = ledger->info().validated;
450 // Returns true if we already had the ledger
451 return mLedgerHistory.insert(std::move(ledger), validated);
452}
453
459void
461{
463
465 bool any = false;
466 for (auto const& it : mHeldTransactions)
467 {
468 ApplyFlags flags = tapNONE;
469 auto const result =
470 app_.getTxQ().apply(app_, view, it.second, flags, j);
471 any |= result.applied;
472 }
473 return any;
474 });
475
476 // VFALCO TODO recreate the CanonicalTxSet object instead of resetting
477 // it.
478 // VFALCO NOTE The hash for an open ledger is undefined so we use
479 // something that is a reasonable substitute.
480 mHeldTransactions.reset(app_.openLedger().current()->info().parentHash);
481}
482
485{
487
489}
490
491void
493{
494 mBuildingLedgerSeq.store(i);
495}
496
497bool
499{
501 return boost::icl::contains(mCompleteLedgers, seq);
502}
503
504void
506{
508 mCompleteLedgers.erase(seq);
509}
510
511bool
513{
514 if (ledger.open())
515 return false;
516
517 if (ledger.info().validated)
518 return true;
519
520 auto const seq = ledger.info().seq;
521 try
522 {
523 // Use the skip list in the last validated ledger to see if ledger
524 // comes before the last validated ledger (and thus has been
525 // validated).
526 auto const hash = walkHashBySeq(seq, InboundLedger::Reason::GENERIC);
527
528 if (!hash || ledger.info().hash != *hash)
529 {
530 // This ledger's hash is not the hash of the validated ledger
531 if (hash)
532 {
533 XRPL_ASSERT(
534 hash->isNonZero(),
535 "ripple::LedgerMaster::isValidated : nonzero hash");
536 uint256 valHash =
538 if (valHash == ledger.info().hash)
539 {
540 // SQL database doesn't match ledger chain
541 clearLedger(seq);
542 }
543 }
544 return false;
545 }
546 }
547 catch (SHAMapMissingNode const& mn)
548 {
549 JLOG(m_journal.warn()) << "Ledger #" << seq << ": " << mn.what();
550 return false;
551 }
552
553 // Mark ledger as validated to save time if we see it again.
554 ledger.info().validated = true;
555 return true;
556}
557
558// returns Ledgers we have all the nodes for
559bool
561 std::uint32_t& minVal,
562 std::uint32_t& maxVal)
563{
564 // Validated ledger is likely not stored in the DB yet so we use the
565 // published ledger which is.
566 maxVal = mPubLedgerSeq.load();
567
568 if (!maxVal)
569 return false;
570
572 {
574 maybeMin = prevMissing(mCompleteLedgers, maxVal);
575 }
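    // prevMissing yields the largest sequence below maxVal that is missing
    // from mCompleteLedgers (if any); the contiguous range we fully hold
    // therefore begins one past that gap.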
576
577 if (maybeMin == std::nullopt)
578 minVal = maxVal;
579 else
580 minVal = 1 + *maybeMin;
581
582 return true;
583}
584
585// Returns Ledgers we have all the nodes for and are indexed
586bool
588{
589 if (!getFullValidatedRange(minVal, maxVal))
590 return false;
591
592 // Remove from the validated range any ledger sequences that may not be
593 // fully updated in the database yet
594
595 auto const pendingSaves = app_.pendingSaves().getSnapshot();
596
597 if (!pendingSaves.empty() && ((minVal != 0) || (maxVal != 0)))
598 {
599 // Ensure we shrink the tips as much as possible. If we have 7-9 and
600 // 8,9 are invalid, we don't want to see the 8 and shrink to just 9
601 // because then we'll have nothing when we could have 7.
602 while (pendingSaves.count(maxVal) > 0)
603 --maxVal;
604 while (pendingSaves.count(minVal) > 0)
605 ++minVal;
606
607 // Best effort for remaining exclusions
608 for (auto v : pendingSaves)
609 {
610 if ((v.first >= minVal) && (v.first <= maxVal))
611 {
612 if (v.first > ((minVal + maxVal) / 2))
613 maxVal = v.first - 1;
614 else
615 minVal = v.first + 1;
616 }
617 }
618
619 if (minVal > maxVal)
620 minVal = maxVal = 0;
621 }
622
623 return true;
624}
625
626// Get the earliest ledger we will let peers fetch
629{
630 // The earliest ledger we will let people fetch is ledger zero,
631 // unless that creates a larger range than allowed
632 std::uint32_t e = getClosedLedger()->info().seq;
633
634 if (e > fetch_depth_)
635 e -= fetch_depth_;
636 else
637 e = 0;
638 return e;
639}
640
641void
643{
644 std::uint32_t seq = ledger->info().seq;
645 uint256 prevHash = ledger->info().parentHash;
646
648
649 std::uint32_t minHas = seq;
650 std::uint32_t maxHas = seq;
651
653 while (!app_.getJobQueue().isStopping() && seq > 0)
654 {
655 {
657 minHas = seq;
658 --seq;
659
660 if (haveLedger(seq))
661 break;
662 }
663
664 auto it(ledgerHashes.find(seq));
665
666 if (it == ledgerHashes.end())
667 {
668 if (app_.isStopping())
669 return;
670
671 {
673 mCompleteLedgers.insert(range(minHas, maxHas));
674 }
675 maxHas = minHas;
677 (seq < 500) ? 0 : (seq - 499), seq);
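            // Hashes come from the relational database in batches of up to 500
            // ledgers ending at the current sequence; the walk then continues
            // backwards by matching each hash against its child's parentHash.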
678 it = ledgerHashes.find(seq);
679
680 if (it == ledgerHashes.end())
681 break;
682
683 if (!nodeStore.fetchNodeObject(
684 ledgerHashes.begin()->second.ledgerHash,
685 ledgerHashes.begin()->first))
686 {
687 // The ledger is not backed by the node store
688 JLOG(m_journal.warn()) << "SQL DB ledger sequence " << seq
689 << " mismatches node store";
690 break;
691 }
692 }
693
694 if (it->second.ledgerHash != prevHash)
695 break;
696
697 prevHash = it->second.parentHash;
698 }
699
700 {
702 mCompleteLedgers.insert(range(minHas, maxHas));
703 }
704 {
706 mFillInProgress = 0;
707 tryAdvance();
708 }
709}
710
713void
715{
716 LedgerIndex const ledgerIndex = missing + 1;
717
718 auto const haveHash{getLedgerHashForHistory(ledgerIndex, reason)};
719 if (!haveHash || haveHash->isZero())
720 {
721 JLOG(m_journal.error())
722 << "No hash for fetch pack. Missing Index " << missing;
723 return;
724 }
725
726 // Select target Peer based on highest score. The score is randomized
727 // but biased in favor of Peers with low latency.
729 {
730 int maxScore = 0;
731 auto peerList = app_.overlay().getActivePeers();
732 for (auto const& peer : peerList)
733 {
734 if (peer->hasRange(missing, missing + 1))
735 {
736 int score = peer->getScore(true);
737 if (!target || (score > maxScore))
738 {
739 target = peer;
740 maxScore = score;
741 }
742 }
743 }
744 }
745
746 if (target)
747 {
748 protocol::TMGetObjectByHash tmBH;
749 tmBH.set_query(true);
750 tmBH.set_type(protocol::TMGetObjectByHash::otFETCH_PACK);
751 tmBH.set_ledgerhash(haveHash->begin(), 32);
752 auto packet = std::make_shared<Message>(tmBH, protocol::mtGET_OBJECTS);
753
754 target->send(packet);
755 JLOG(m_journal.trace()) << "Requested fetch pack for " << missing;
756 }
757 else
758 JLOG(m_journal.debug()) << "No peer for fetch pack";
759}
760
761void
763{
764 int invalidate = 0;
766
767 for (std::uint32_t lSeq = ledger.info().seq - 1; lSeq > 0; --lSeq)
768 {
769 if (haveLedger(lSeq))
770 {
771 try
772 {
773 hash = hashOfSeq(ledger, lSeq, m_journal);
774 }
775 catch (std::exception const& ex)
776 {
777 JLOG(m_journal.warn())
778 << "fixMismatch encounters partial ledger. Exception: "
779 << ex.what();
780 clearLedger(lSeq);
781 return;
782 }
783
784 if (hash)
785 {
786 // try to close the seam
787 auto otherLedger = getLedgerBySeq(lSeq);
788
789 if (otherLedger && (otherLedger->info().hash == *hash))
790 {
791 // we closed the seam
792 if (invalidate != 0)
793 {
794 JLOG(m_journal.warn())
795 << "Match at " << lSeq << ", " << invalidate
796 << " prior ledgers invalidated";
797 }
798
799 return;
800 }
801 }
802
803 clearLedger(lSeq);
804 ++invalidate;
805 }
806 }
807
808 // all prior ledgers invalidated
809 if (invalidate != 0)
810 {
811 JLOG(m_journal.warn())
812 << "All " << invalidate << " prior ledgers invalidated";
813 }
814}
815
816void
818 std::shared_ptr<Ledger const> const& ledger,
819 bool isSynchronous,
820 bool isCurrent)
821{
822 // A new ledger has been accepted as part of the trusted chain
823 JLOG(m_journal.debug()) << "Ledger " << ledger->info().seq
824 << " accepted :" << ledger->info().hash;
825 XRPL_ASSERT(
826 ledger->stateMap().getHash().isNonZero(),
827 "ripple::LedgerMaster::setFullLedger : nonzero ledger state hash");
828
829 ledger->setValidated();
830 ledger->setFull();
831
832 if (isCurrent)
833 mLedgerHistory.insert(ledger, true);
834
835 {
836 // Check the SQL database's entry for the sequence before this
837 // ledger, if it's not this ledger's parent, invalidate it
838 uint256 prevHash =
839 app_.getRelationalDatabase().getHashByIndex(ledger->info().seq - 1);
840 if (prevHash.isNonZero() && prevHash != ledger->info().parentHash)
841 clearLedger(ledger->info().seq - 1);
842 }
843
844 pendSaveValidated(app_, ledger, isSynchronous, isCurrent);
845
846 {
848 mCompleteLedgers.insert(ledger->info().seq);
849 }
850
851 {
853
854 if (ledger->info().seq > mValidLedgerSeq)
855 setValidLedger(ledger);
856 if (!mPubLedger)
857 {
858 setPubLedger(ledger);
859 app_.getOrderBookDB().setup(ledger);
860 }
861
862 if (ledger->info().seq != 0 && haveLedger(ledger->info().seq - 1))
863 {
864 // we think we have the previous ledger, double check
865 auto prevLedger = getLedgerBySeq(ledger->info().seq - 1);
866
867 if (!prevLedger ||
868 (prevLedger->info().hash != ledger->info().parentHash))
869 {
870 JLOG(m_journal.warn())
871 << "Acquired ledger invalidates previous ledger: "
872 << (prevLedger ? "hashMismatch" : "missingLedger");
873 fixMismatch(*ledger);
874 }
875 }
876 }
877}
878
879void
881{
882 clearLedger(seq);
884}
885
886// Check if the specified ledger can become the new last fully-validated
887// ledger.
888void
890{
891 std::size_t valCount = 0;
892
893 if (seq != 0)
894 {
895 // Ledger is too old
896 if (seq < mValidLedgerSeq)
897 return;
898
899 auto validations = app_.validators().negativeUNLFilter(
901 valCount = validations.size();
902 if (valCount >= app_.validators().quorum())
903 {
905 if (seq > mLastValidLedger.second)
906 mLastValidLedger = std::make_pair(hash, seq);
907 }
908
909 if (seq == mValidLedgerSeq)
910 return;
911
912 // Ledger could match the ledger we're already building
913 if (seq == mBuildingLedgerSeq)
914 return;
915 }
916
917 auto ledger = mLedgerHistory.getLedgerByHash(hash);
918
919 if (!ledger)
920 {
921 if ((seq != 0) && (getValidLedgerIndex() == 0))
922 {
923 // Set peers converged early if we can
924 if (valCount >= app_.validators().quorum())
926 }
927
928 // FIXME: We may not want to fetch a ledger with just one
929 // trusted validation
930 ledger = app_.getInboundLedgers().acquire(
932 }
933
934 if (ledger)
935 checkAccept(ledger);
936}
937
945{
946 return standalone_ ? 0 : app_.validators().quorum();
947}
948
949void
951{
952 // Can we accept this ledger as our new last fully-validated ledger
953
954 if (!canBeCurrent(ledger))
955 return;
956
957 // Can we advance the last fully-validated ledger? If so, can we
958 // publish?
960
961 if (ledger->info().seq <= mValidLedgerSeq)
962 return;
963
964 auto const minVal = getNeededValidations();
965 auto validations = app_.validators().negativeUNLFilter(
967 ledger->info().hash, ledger->info().seq));
968 auto const tvc = validations.size();
969 if (tvc < minVal) // nothing we can do
970 {
971 JLOG(m_journal.trace())
972 << "Only " << tvc << " validations for " << ledger->info().hash;
973 return;
974 }
975
976 JLOG(m_journal.info()) << "Advancing accepted ledger to "
977 << ledger->info().seq << " with >= " << minVal
978 << " validations";
979
980 ledger->setValidated();
981 ledger->setFull();
982 setValidLedger(ledger);
983 if (!mPubLedger)
984 {
985 pendSaveValidated(app_, ledger, true, true);
986 setPubLedger(ledger);
987 app_.getOrderBookDB().setup(ledger);
988 }
989
990 std::uint32_t const base = app_.getFeeTrack().getLoadBase();
991 auto fees = app_.getValidations().fees(ledger->info().hash, base);
992 {
993 auto fees2 =
994 app_.getValidations().fees(ledger->info().parentHash, base);
995 fees.reserve(fees.size() + fees2.size());
996 std::copy(fees2.begin(), fees2.end(), std::back_inserter(fees));
997 }
998 std::uint32_t fee;
999 if (!fees.empty())
1000 {
1001 std::sort(fees.begin(), fees.end());
1002 if (auto stream = m_journal.debug())
1003 {
1005 s << "Received fees from validations: (" << fees.size() << ") ";
1006 for (auto const fee1 : fees)
1007 {
1008 s << " " << fee1;
1009 }
1010 stream << s.str();
1011 }
1012 fee = fees[fees.size() / 2]; // median
1013 }
1014 else
1015 {
1016 fee = base;
1017 }
1018
1020
1021 tryAdvance();
1022
1023 if (ledger->seq() % 256 == 0)
1024 {
1025 // Check if the majority of validators run a higher version rippled
1026 // software. If so print a warning.
1027 //
1028 // Once the HardenedValidations amendment is enabled, validators include
1029 // their rippled software version in the validation messages of every
1030 // (flag - 1) ledger. We wait for one ledger time before checking the
1031 // version information to accumulate more validation messages.
1032
1033 auto currentTime = app_.timeKeeper().now();
1034 bool needPrint = false;
1035
1036 // The variable upgradeWarningPrevTime_ will be set when and only when
1037 // the warning is printed.
1039 {
1040 // Have not printed the warning before, check if need to print.
1041 auto const vals = app_.getValidations().getTrustedForLedger(
1042 ledger->info().parentHash, ledger->info().seq - 1);
1043 std::size_t higherVersionCount = 0;
1044 std::size_t rippledCount = 0;
1045 for (auto const& v : vals)
1046 {
1047 if (v->isFieldPresent(sfServerVersion))
1048 {
1049 auto version = v->getFieldU64(sfServerVersion);
1050 higherVersionCount +=
1051 BuildInfo::isNewerVersion(version) ? 1 : 0;
1052 rippledCount +=
1053 BuildInfo::isRippledVersion(version) ? 1 : 0;
1054 }
1055 }
1056 // We report only if (1) we have accumulated validation messages
1057 // from 90% validators from the UNL, (2) 60% of validators
1058 // running the rippled implementation have higher version numbers,
1059 // and (3) the calculation won't cause divide-by-zero.
1060 if (higherVersionCount > 0 && rippledCount > 0)
1061 {
1062 constexpr std::size_t reportingPercent = 90;
1063 constexpr std::size_t cutoffPercent = 60;
1064 auto const unlSize{
1065 app_.validators().getQuorumKeys().second.size()};
1066 needPrint = unlSize > 0 &&
1067 calculatePercent(vals.size(), unlSize) >=
1068 reportingPercent &&
1069 calculatePercent(higherVersionCount, rippledCount) >=
1070 cutoffPercent;
1071 }
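            // Illustrative numbers (hypothetical): with a 30-entry UNL the
            // warning requires validations from at least 27 validators (90%),
            // and at least 60% of the validators identifying as rippled must
            // report a newer version than this server.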
1072 }
1073 // To throttle the warning messages, instead of printing a warning
1074 // every flag ledger, we print every week.
1075 else if (currentTime - upgradeWarningPrevTime_ >= weeks{1})
1076 {
1077 // Printed the warning before, and assuming most validators
1078 // do not downgrade, we keep printing the warning
1079 // until the local server is restarted.
1080 needPrint = true;
1081 }
1082
1083 if (needPrint)
1084 {
1085 upgradeWarningPrevTime_ = currentTime;
1086 auto const upgradeMsg =
1087 "Check for upgrade: "
1088 "A majority of trusted validators are "
1089 "running a newer version.";
1090 std::cerr << upgradeMsg << std::endl;
1091 JLOG(m_journal.error()) << upgradeMsg;
1092 }
1093 }
1094}
1095
1097void
1099 std::shared_ptr<Ledger const> const& ledger,
1100 uint256 const& consensusHash,
1101 Json::Value consensus)
1102{
1103 // Because we just built a ledger, we are no longer building one
1105
1106 // No need to process validations in standalone mode
1107 if (standalone_)
1108 return;
1109
1110 mLedgerHistory.builtLedger(ledger, consensusHash, std::move(consensus));
1111
1112 if (ledger->info().seq <= mValidLedgerSeq)
1113 {
1114 auto stream = app_.journal("LedgerConsensus").info();
1115 JLOG(stream) << "Consensus built old ledger: " << ledger->info().seq
1116 << " <= " << mValidLedgerSeq;
1117 return;
1118 }
1119
1120 // See if this ledger can be the new fully-validated ledger
1121 checkAccept(ledger);
1122
1123 if (ledger->info().seq <= mValidLedgerSeq)
1124 {
1125 auto stream = app_.journal("LedgerConsensus").debug();
1126 JLOG(stream) << "Consensus ledger fully validated";
1127 return;
1128 }
1129
1130 // This ledger cannot be the new fully-validated ledger, but
1131 // maybe we saved up validations for some other ledger that can be
1132
1133 auto validations = app_.validators().negativeUNLFilter(
1135
1136 // Track validation counts with sequence numbers
1137 class valSeq
1138 {
1139 public:
1140 valSeq() : valCount_(0), ledgerSeq_(0)
1141 {
1142 ;
1143 }
1144
1145 void
1146 mergeValidation(LedgerIndex seq)
1147 {
1148 valCount_++;
1149
1150 // If we didn't already know the sequence, now we do
1151 if (ledgerSeq_ == 0)
1152 ledgerSeq_ = seq;
1153 }
1154
1155 std::size_t valCount_;
1156 LedgerIndex ledgerSeq_;
1157 };
1158
1159 // Count the number of current, trusted validations
1161 for (auto const& v : validations)
1162 {
1163 valSeq& vs = count[v->getLedgerHash()];
1164 vs.mergeValidation(v->getFieldU32(sfLedgerSequence));
1165 }
1166
1167 auto const neededValidations = getNeededValidations();
1168 auto maxSeq = mValidLedgerSeq.load();
1169 auto maxLedger = ledger->info().hash;
1170
1171 // Of the ledgers with sufficient validations,
1172 // find the one with the highest sequence
1173 for (auto& v : count)
1174 if (v.second.valCount_ > neededValidations)
1175 {
1176 // If we still don't know the sequence, get it
1177 if (v.second.ledgerSeq_ == 0)
1178 {
1179 if (auto l = getLedgerByHash(v.first))
1180 v.second.ledgerSeq_ = l->info().seq;
1181 }
1182
1183 if (v.second.ledgerSeq_ > maxSeq)
1184 {
1185 maxSeq = v.second.ledgerSeq_;
1186 maxLedger = v.first;
1187 }
1188 }
1189
1190 if (maxSeq > mValidLedgerSeq)
1191 {
1192 auto stream = app_.journal("LedgerConsensus").debug();
1193 JLOG(stream) << "Consensus triggered check of ledger";
1194 checkAccept(maxLedger, maxSeq);
1195 }
1196}
1197
1200 LedgerIndex index,
1201 InboundLedger::Reason reason)
1202{
1203 // Try to get the hash of a ledger we need to fetch for history
1205 auto const& l{mHistLedger};
1206
1207 if (l && l->info().seq >= index)
1208 {
1209 ret = hashOfSeq(*l, index, m_journal);
1210 if (!ret)
1211 ret = walkHashBySeq(index, l, reason);
1212 }
1213
1214 if (!ret)
1215 ret = walkHashBySeq(index, reason);
1216
1217 return ret;
1218}
1219
1223{
1225
1226 JLOG(m_journal.trace()) << "findNewLedgersToPublish<";
1227
1228 // No valid ledger, nothing to do
1229 if (mValidLedger.empty())
1230 {
 1231 JLOG(m_journal.trace()) << "No valid ledger, nothing to publish.";
1232 return {};
1233 }
1234
1235 if (!mPubLedger)
1236 {
1237 JLOG(m_journal.info())
1238 << "First published ledger will be " << mValidLedgerSeq;
1239 return {mValidLedger.get()};
1240 }
1241
1243 {
1244 JLOG(m_journal.warn()) << "Gap in validated ledger stream "
1245 << mPubLedgerSeq << " - " << mValidLedgerSeq - 1;
1246
1247 auto valLedger = mValidLedger.get();
1248 ret.push_back(valLedger);
1249 setPubLedger(valLedger);
1250 app_.getOrderBookDB().setup(valLedger);
1251
1252 return {valLedger};
1253 }
1254
1256 {
 1257 JLOG(m_journal.trace()) << "No valid ledger, nothing to publish.";
1258 return {};
1259 }
1260
1261 int acqCount = 0;
1262
1263 auto pubSeq = mPubLedgerSeq + 1; // Next sequence to publish
1264 auto valLedger = mValidLedger.get();
1265 std::uint32_t valSeq = valLedger->info().seq;
1266
1267 scope_unlock sul{sl};
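    // scope_unlock releases the LedgerMaster lock for the duration of the
    // fetch loop below and re-acquires it when it goes out of scope, so other
    // threads can make progress while ledgers are being fetched.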
1268 try
1269 {
1270 for (std::uint32_t seq = pubSeq; seq <= valSeq; ++seq)
1271 {
1272 JLOG(m_journal.trace())
1273 << "Trying to fetch/publish valid ledger " << seq;
1274
1276 // This can throw
1277 auto hash = hashOfSeq(*valLedger, seq, m_journal);
1278 // VFALCO TODO Restructure this code so that zero is not
1279 // used.
1280 if (!hash)
1281 hash = beast::zero; // kludge
1282 if (seq == valSeq)
1283 {
1284 // We need to publish the ledger we just fully validated
1285 ledger = valLedger;
1286 }
1287 else if (hash->isZero())
1288 {
1289 JLOG(m_journal.fatal()) << "Ledger: " << valSeq
1290 << " does not have hash for " << seq;
1291 UNREACHABLE(
1292 "ripple::LedgerMaster::findNewLedgersToPublish : ledger "
1293 "not found");
1294 }
1295 else
1296 {
1297 ledger = mLedgerHistory.getLedgerByHash(*hash);
1298 }
1299
1300 if (!app_.config().LEDGER_REPLAY)
1301 {
1302 // Can we try to acquire the ledger we need?
1303 if (!ledger && (++acqCount < ledger_fetch_size_))
1304 ledger = app_.getInboundLedgers().acquire(
1305 *hash, seq, InboundLedger::Reason::GENERIC);
1306 }
1307
1308 // Did we acquire the next ledger we need to publish?
1309 if (ledger && (ledger->info().seq == pubSeq))
1310 {
1311 ledger->setValidated();
1312 ret.push_back(ledger);
1313 ++pubSeq;
1314 }
1315 }
1316
1317 JLOG(m_journal.trace())
1318 << "ready to publish " << ret.size() << " ledgers.";
1319 }
1320 catch (std::exception const& ex)
1321 {
1322 JLOG(m_journal.error())
1323 << "Exception while trying to find ledgers to publish: "
1324 << ex.what();
1325 }
1326
1328 {
1329 /* Narrow down the gap of ledgers, and try to replay them.
1330 * When replaying a ledger gap, if the local node has
1331 * the start ledger, it saves an expensive InboundLedger
1332 * acquire. If the local node has the finish ledger, it
1333 * saves a skip list acquire.
1334 */
1335 auto const& startLedger = ret.empty() ? mPubLedger : ret.back();
1336 auto finishLedger = valLedger;
1337 while (startLedger->seq() + 1 < finishLedger->seq())
1338 {
1339 if (auto const parent = mLedgerHistory.getLedgerByHash(
1340 finishLedger->info().parentHash);
1341 parent)
1342 {
1343 finishLedger = parent;
1344 }
1345 else
1346 {
1347 auto numberLedgers =
1348 finishLedger->seq() - startLedger->seq() + 1;
1349 JLOG(m_journal.debug())
1350 << "Publish LedgerReplays " << numberLedgers
1351 << " ledgers, from seq=" << startLedger->info().seq << ", "
1352 << startLedger->info().hash
1353 << " to seq=" << finishLedger->info().seq << ", "
1354 << finishLedger->info().hash;
1357 finishLedger->info().hash,
1358 numberLedgers);
1359 break;
1360 }
1361 }
1362 }
1363
1364 return ret;
1365}
1366
1367void
1369{
1371
1372 // Can't advance without at least one fully-valid ledger
1373 mAdvanceWork = true;
1375 {
1376 mAdvanceThread = true;
1377 app_.getJobQueue().addJob(jtADVANCE, "advanceLedger", [this]() {
1379
1380 XRPL_ASSERT(
1382 "ripple::LedgerMaster::tryAdvance : has valid ledger");
1383
1384 JLOG(m_journal.trace()) << "advanceThread<";
1385
1386 try
1387 {
1388 doAdvance(sl);
1389 }
1390 catch (std::exception const& ex)
1391 {
1392 JLOG(m_journal.fatal()) << "doAdvance throws: " << ex.what();
1393 }
1394
1395 mAdvanceThread = false;
1396 JLOG(m_journal.trace()) << "advanceThread>";
1397 });
1398 }
1399}
1400
1401void
1403{
1404 {
1407 {
1409 mPathLedger.reset();
1410 JLOG(m_journal.debug()) << "Need network ledger for updating paths";
1411 return;
1412 }
1413 }
1414
1415 while (!app_.getJobQueue().isStopping())
1416 {
1417 JLOG(m_journal.debug()) << "updatePaths running";
1419 {
1421
1422 if (!mValidLedger.empty() &&
1423 (!mPathLedger || (mPathLedger->info().seq != mValidLedgerSeq)))
1424 { // We have a new valid ledger since the last full pathfinding
1426 lastLedger = mPathLedger;
1427 }
1428 else if (mPathFindNewRequest)
1429 { // We have a new request but no new ledger
1430 lastLedger = app_.openLedger().current();
1431 }
1432 else
1433 { // Nothing to do
1435 mPathLedger.reset();
1436 JLOG(m_journal.debug()) << "Nothing to do for updating paths";
1437 return;
1438 }
1439 }
1440
1441 if (!standalone_)
1442 { // don't pathfind with a ledger that's more than 60 seconds old
1443 using namespace std::chrono;
1444 auto age = time_point_cast<seconds>(app_.timeKeeper().closeTime()) -
1445 lastLedger->info().closeTime;
1446 if (age > 1min)
1447 {
1448 JLOG(m_journal.debug())
1449 << "Published ledger too old for updating paths";
1452 mPathLedger.reset();
1453 return;
1454 }
1455 }
1456
1457 try
1458 {
1459 auto& pathRequests = app_.getPathRequests();
1460 {
1462 if (!pathRequests.requestsPending())
1463 {
1465 mPathLedger.reset();
1466 JLOG(m_journal.debug())
1467 << "No path requests found. Nothing to do for updating "
1468 "paths. "
1469 << mPathFindThread << " jobs remaining";
1470 return;
1471 }
1472 }
1473 JLOG(m_journal.debug()) << "Updating paths";
1474 pathRequests.updateAll(lastLedger);
1475
1477 if (!pathRequests.requestsPending())
1478 {
1479 JLOG(m_journal.debug())
1480 << "No path requests left. No need for further updating "
1481 "paths";
1483 mPathLedger.reset();
1484 return;
1485 }
1486 }
1487 catch (SHAMapMissingNode const& mn)
1488 {
1489 JLOG(m_journal.info()) << "During pathfinding: " << mn.what();
1490 if (lastLedger->open())
1491 {
1492 // our parent is the problem
1494 lastLedger->info().parentHash,
1495 lastLedger->info().seq - 1,
1497 }
1498 else
1499 {
1500 // this ledger is the problem
1502 lastLedger->info().hash,
1503 lastLedger->info().seq,
1505 }
1506 }
1507 }
1508}
1509
1510bool
1512{
1514 mPathFindNewRequest = newPFWork("pf:newRequest", ml);
1515 return mPathFindNewRequest;
1516}
1517
1518bool
1520{
1522 bool const ret = mPathFindNewRequest;
1523 mPathFindNewRequest = false;
1524 return ret;
1525}
1526
1527// If the order book is radically updated, we need to reprocess all
1528// pathfinding requests.
1529bool
1531{
1533 mPathLedger.reset();
1534
1535 return newPFWork("pf:newOBDB", ml);
1536}
1537
1540bool
1542 const char* name,
1544{
1545 if (!app_.isStopping() && mPathFindThread < 2 &&
1547 {
1548 JLOG(m_journal.debug())
1549 << "newPFWork: Creating job. path find threads: "
1550 << mPathFindThread;
1551 if (app_.getJobQueue().addJob(
1552 jtUPDATE_PF, name, [this]() { updatePaths(); }))
1553 {
1555 }
1556 }
1557 // If we're stopping don't give callers the expectation that their
1558 // request will be fulfilled, even if it may be serviced.
1559 return mPathFindThread > 0 && !app_.isStopping();
1560}
1561
1564{
1565 return m_mutex;
1566}
1567
1568// The current ledger is the ledger we believe new transactions should go in
1571{
1572 return app_.openLedger().current();
1573}
1574
1577{
1578 return mValidLedger.get();
1579}
1580
1581Rules
1583{
1584 // Once we have a guarantee that there's always a last validated
1585 // ledger then we can dispense with the if.
1586
1587 // Return the Rules from the last validated ledger.
1588 if (auto const ledger = getValidatedLedger())
1589 return ledger->rules();
1590
1591 return Rules(app_.config().features);
1592}
1593
1594// This is the last ledger we published to clients and can lag the validated
1595// ledger.
1598{
1600 return mPubLedger;
1601}
1602
1605{
1608}
1609
1612{
1613 uint256 hash = getHashBySeq(ledgerIndex);
1614 return hash.isNonZero() ? getCloseTimeByHash(hash, ledgerIndex)
1615 : std::nullopt;
1616}
1617
1620 LedgerHash const& ledgerHash,
1621 std::uint32_t index)
1622{
1623 auto nodeObject = app_.getNodeStore().fetchNodeObject(ledgerHash, index);
1624 if (nodeObject && (nodeObject->getData().size() >= 120))
1625 {
1626 SerialIter it(
1627 nodeObject->getData().data(), nodeObject->getData().size());
1628 if (safe_cast<HashPrefix>(it.get32()) == HashPrefix::ledgerMaster)
1629 {
1630 it.skip(
1631 4 + 8 + 32 + // seq drops parentHash
1632 32 + 32 + 4); // txHash acctHash parentClose
1634 }
1635 }
1636
1637 return std::nullopt;
1638}
1639
1640uint256
1642{
1644
1645 if (hash.isNonZero())
1646 return hash;
1647
1649}
1650
1653{
1654 std::optional<LedgerHash> ledgerHash;
1655
1656 if (auto referenceLedger = mValidLedger.get())
1657 ledgerHash = walkHashBySeq(index, referenceLedger, reason);
1658
1659 return ledgerHash;
1660}
1661
1664 std::uint32_t index,
1665 std::shared_ptr<ReadView const> const& referenceLedger,
1666 InboundLedger::Reason reason)
1667{
1668 if (!referenceLedger || (referenceLedger->info().seq < index))
1669 {
1670 // Nothing we can do. No validated ledger.
1671 return std::nullopt;
1672 }
1673
1674 // See if the hash for the ledger we need is in the reference ledger
1675 auto ledgerHash = hashOfSeq(*referenceLedger, index, m_journal);
1676 if (ledgerHash)
1677 return ledgerHash;
1678
1679 // The hash is not in the reference ledger. Get another ledger which can
1680 // be located easily and should contain the hash.
1681 LedgerIndex refIndex = getCandidateLedger(index);
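    // getCandidateLedger rounds the requested index up to a ledger whose skip
    // list should cover it (typically the next multiple of 256), giving a
    // reference ledger that is easy to locate on the network.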
1682 auto const refHash = hashOfSeq(*referenceLedger, refIndex, m_journal);
1683 XRPL_ASSERT(refHash, "ripple::LedgerMaster::walkHashBySeq : found ledger");
1684 if (refHash)
1685 {
1686 // Try the hash and sequence of a better reference ledger just found
1687 auto ledger = mLedgerHistory.getLedgerByHash(*refHash);
1688
1689 if (ledger)
1690 {
1691 try
1692 {
1693 ledgerHash = hashOfSeq(*ledger, index, m_journal);
1694 }
1695 catch (SHAMapMissingNode const&)
1696 {
1697 ledger.reset();
1698 }
1699 }
1700
1701 // Try to acquire the complete ledger
1702 if (!ledger)
1703 {
1704 if (auto const l = app_.getInboundLedgers().acquire(
1705 *refHash, refIndex, reason))
1706 {
1707 ledgerHash = hashOfSeq(*l, index, m_journal);
1708 XRPL_ASSERT(
1709 ledgerHash,
1710 "ripple::LedgerMaster::walkHashBySeq : has complete "
1711 "ledger");
1712 }
1713 }
1714 }
1715 return ledgerHash;
1716}
1717
1720{
1721 if (index <= mValidLedgerSeq)
1722 {
1723 // Always prefer a validated ledger
1724 if (auto valid = mValidLedger.get())
1725 {
1726 if (valid->info().seq == index)
1727 return valid;
1728
1729 try
1730 {
1731 auto const hash = hashOfSeq(*valid, index, m_journal);
1732
1733 if (hash)
1735 }
1736 catch (std::exception const&)
1737 {
1738 // Missing nodes are already handled
1739 }
1740 }
1741 }
1742
1743 if (auto ret = mLedgerHistory.getLedgerBySeq(index))
1744 return ret;
1745
1746 auto ret = mClosedLedger.get();
1747 if (ret && (ret->info().seq == index))
1748 return ret;
1749
1750 clearLedger(index);
1751 return {};
1752}
1753
1756{
1757 if (auto ret = mLedgerHistory.getLedgerByHash(hash))
1758 return ret;
1759
1760 auto ret = mClosedLedger.get();
1761 if (ret && (ret->info().hash == hash))
1762 return ret;
1763
1764 return {};
1765}
1766
1767void
1769{
1771 mCompleteLedgers.insert(range(minV, maxV));
1772}
1773
1774void
1776{
1778 fetch_packs_.sweep();
1779}
1780
1781float
1783{
1785}
1786
1787void
1789{
1791 if (seq > 0)
1792 mCompleteLedgers.erase(range(0u, seq - 1));
1793}
1794
1795void
1797{
1799}
1800
1801void
1803{
1804 replayData = std::move(replay);
1805}
1806
1809{
1810 return std::move(replayData);
1811}
1812
1813void
1815 std::uint32_t missing,
1816 bool& progress,
1817 InboundLedger::Reason reason,
1819{
1820 scope_unlock sul{sl};
1821 if (auto hash = getLedgerHashForHistory(missing, reason))
1822 {
1823 XRPL_ASSERT(
1824 hash->isNonZero(),
1825 "ripple::LedgerMaster::fetchForHistory : found ledger");
1826 auto ledger = getLedgerByHash(*hash);
1827 if (!ledger)
1828 {
1830 {
1831 ledger =
1832 app_.getInboundLedgers().acquire(*hash, missing, reason);
1833 if (!ledger && missing != fetch_seq_ &&
1834 missing > app_.getNodeStore().earliestLedgerSeq())
1835 {
1836 JLOG(m_journal.trace())
1837 << "fetchForHistory want fetch pack " << missing;
1838 fetch_seq_ = missing;
1839 getFetchPack(missing, reason);
1840 }
1841 else
1842 JLOG(m_journal.trace())
1843 << "fetchForHistory no fetch pack for " << missing;
1844 }
1845 else
1846 JLOG(m_journal.debug())
1847 << "fetchForHistory found failed acquire";
1848 }
1849 if (ledger)
1850 {
1851 auto seq = ledger->info().seq;
1852 XRPL_ASSERT(
1853 seq == missing,
1854 "ripple::LedgerMaster::fetchForHistory : sequence match");
1855 JLOG(m_journal.trace()) << "fetchForHistory acquired " << seq;
1856 setFullLedger(ledger, false, false);
1857 int fillInProgress;
1858 {
1860 mHistLedger = ledger;
1861 fillInProgress = mFillInProgress;
1862 }
1863 if (fillInProgress == 0 &&
1865 ledger->info().parentHash)
1866 {
1867 {
1868 // Previous ledger is in DB
1870 mFillInProgress = seq;
1871 }
1873 jtADVANCE, "tryFill", [this, ledger]() {
1874 tryFill(ledger);
1875 });
1876 }
1877 progress = true;
1878 }
1879 else
1880 {
1881 std::uint32_t fetchSz;
1882 // Do not fetch ledger sequences lower
1883 // than the earliest ledger sequence
1884 fetchSz = app_.getNodeStore().earliestLedgerSeq();
1885 fetchSz = missing >= fetchSz
1886 ? std::min(ledger_fetch_size_, (missing - fetchSz) + 1)
1887 : 0;
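            // Prefetch up to ledger_fetch_size_ ledgers ending at `missing`,
            // never reaching below the node store's earliest ledger sequence.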
1888 try
1889 {
1890 for (std::uint32_t i = 0; i < fetchSz; ++i)
1891 {
1892 std::uint32_t seq = missing - i;
1893 if (auto h = getLedgerHashForHistory(seq, reason))
1894 {
1895 XRPL_ASSERT(
1896 h->isNonZero(),
1897 "ripple::LedgerMaster::fetchForHistory : "
1898 "prefetched ledger");
1899 app_.getInboundLedgers().acquire(*h, seq, reason);
1900 }
1901 }
1902 }
1903 catch (std::exception const& ex)
1904 {
1905 JLOG(m_journal.warn())
1906 << "Threw while prefetching: " << ex.what();
1907 }
1908 }
1909 }
1910 else
1911 {
1912 JLOG(m_journal.fatal())
1913 << "Can't find ledger following prevMissing " << missing;
1914 JLOG(m_journal.fatal())
1915 << "Pub:" << mPubLedgerSeq << " Val:" << mValidLedgerSeq;
1916 JLOG(m_journal.fatal())
1917 << "Ledgers: " << app_.getLedgerMaster().getCompleteLedgers();
1918 JLOG(m_journal.fatal())
1919 << "Acquire reason: "
1920 << (reason == InboundLedger::Reason::HISTORY ? "HISTORY"
1921 : "NOT HISTORY");
1922 clearLedger(missing + 1);
1923 progress = true;
1924 }
1925}
1926
1927// Try to publish ledgers, acquire missing ledgers
1928void
1930{
1931 do
1932 {
1933 mAdvanceWork = false; // If there's work to do, we'll make progress
1934 bool progress = false;
1935
1936 auto const pubLedgers = findNewLedgersToPublish(sl);
1937 if (pubLedgers.empty())
1938 {
1944 {
1945 // We are in sync, so can acquire
1948 {
1950 missing = prevMissing(
1952 mPubLedger->info().seq,
1954 }
1955 if (missing)
1956 {
1957 JLOG(m_journal.trace())
1958 << "tryAdvance discovered missing " << *missing;
1959 if ((mFillInProgress == 0 || *missing > mFillInProgress) &&
1964 *missing,
1965 m_journal))
1966 {
1967 JLOG(m_journal.trace())
1968 << "advanceThread should acquire";
1969 }
1970 else
1971 missing = std::nullopt;
1972 }
1973 if (missing)
1974 {
1975 fetchForHistory(*missing, progress, reason, sl);
1977 {
1978 JLOG(m_journal.debug())
1979 << "tryAdvance found last valid changed";
1980 progress = true;
1981 }
1982 }
1983 }
1984 else
1985 {
1986 mHistLedger.reset();
1987 JLOG(m_journal.trace()) << "tryAdvance not fetching history";
1988 }
1989 }
1990 else
1991 {
1992 JLOG(m_journal.trace()) << "tryAdvance found " << pubLedgers.size()
1993 << " ledgers to publish";
1994 for (auto const& ledger : pubLedgers)
1995 {
1996 {
1997 scope_unlock sul{sl};
1998 JLOG(m_journal.debug())
1999 << "tryAdvance publishing seq " << ledger->info().seq;
2000 setFullLedger(ledger, true, true);
2001 }
2002
2003 setPubLedger(ledger);
2004
2005 {
2006 scope_unlock sul{sl};
2007 app_.getOPs().pubLedger(ledger);
2008 }
2009 }
2010
2012 progress = newPFWork("pf:newLedger", sl);
2013 }
2014 if (progress)
2015 mAdvanceWork = true;
2016 } while (mAdvanceWork);
2017}
2018
2019void
2021{
2022 fetch_packs_.canonicalize_replace_client(hash, data);
2023}
2024
2027{
2028 Blob data;
2029 if (fetch_packs_.retrieve(hash, data))
2030 {
2031 fetch_packs_.del(hash, false);
2032 if (hash == sha512Half(makeSlice(data)))
2033 return data;
2034 }
2035 return std::nullopt;
2036}
2037
2038void
2040{
2041 if (!mGotFetchPackThread.test_and_set(std::memory_order_acquire))
2042 {
2043 app_.getJobQueue().addJob(jtLEDGER_DATA, "gotFetchPack", [&]() {
2045 mGotFetchPackThread.clear(std::memory_order_release);
2046 });
2047 }
2048}
2049
2075static void
2077 SHAMap const& want,
2078 SHAMap const* have,
2079 std::uint32_t cnt,
2080 protocol::TMGetObjectByHash* into,
2081 std::uint32_t seq,
2082 bool withLeaves = true)
2083{
2084 XRPL_ASSERT(cnt, "ripple::populateFetchPack : nonzero count input");
2085
2086 Serializer s(1024);
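    // visitDifferences walks the nodes present in `want` but absent from
    // `have` (or every node of `want` when `have` is null), serializing each
    // into the reply until cnt nodes have been added.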
2087
2088 want.visitDifferences(
2089 have,
2090 [&s, withLeaves, &cnt, into, seq](SHAMapTreeNode const& n) -> bool {
2091 if (!withLeaves && n.isLeaf())
2092 return true;
2093
2094 s.erase();
2096
2097 auto const& hash = n.getHash().as_uint256();
2098
2099 protocol::TMIndexedObject* obj = into->add_objects();
2100 obj->set_ledgerseq(seq);
2101 obj->set_hash(hash.data(), hash.size());
2102 obj->set_data(s.getDataPtr(), s.getLength());
2103
2104 return --cnt != 0;
2105 });
2106}
2107
2108void
2110 std::weak_ptr<Peer> const& wPeer,
2112 uint256 haveLedgerHash,
2114{
2115 using namespace std::chrono_literals;
2116 if (UptimeClock::now() > uptime + 1s)
2117 {
2118 JLOG(m_journal.info()) << "Fetch pack request got stale";
2119 return;
2120 }
2121
2123 {
2124 JLOG(m_journal.info()) << "Too busy to make fetch pack";
2125 return;
2126 }
2127
2128 auto peer = wPeer.lock();
2129
2130 if (!peer)
2131 return;
2132
2133 auto have = getLedgerByHash(haveLedgerHash);
2134
2135 if (!have)
2136 {
2137 JLOG(m_journal.info())
2138 << "Peer requests fetch pack for ledger we don't have: " << have;
2139 peer->charge(Resource::feeRequestNoReply, "get_object ledger");
2140 return;
2141 }
2142
2143 if (have->open())
2144 {
2145 JLOG(m_journal.warn())
2146 << "Peer requests fetch pack from open ledger: " << have;
2147 peer->charge(Resource::feeMalformedRequest, "get_object ledger open");
2148 return;
2149 }
2150
2151 if (have->info().seq < getEarliestFetch())
2152 {
2153 JLOG(m_journal.debug()) << "Peer requests fetch pack that is too early";
2154 peer->charge(Resource::feeMalformedRequest, "get_object ledger early");
2155 return;
2156 }
2157
2158 auto want = getLedgerByHash(have->info().parentHash);
2159
2160 if (!want)
2161 {
2162 JLOG(m_journal.info())
2163 << "Peer requests fetch pack for ledger whose predecessor we "
2164 << "don't have: " << have;
2165 peer->charge(
2166 Resource::feeRequestNoReply, "get_object ledger no parent");
2167 return;
2168 }
2169
2170 try
2171 {
2172 Serializer hdr(128);
2173
2174 protocol::TMGetObjectByHash reply;
2175 reply.set_query(false);
2176
2177 if (request->has_seq())
2178 reply.set_seq(request->seq());
2179
2180 reply.set_ledgerhash(request->ledgerhash());
2181 reply.set_type(protocol::TMGetObjectByHash::otFETCH_PACK);
2182
2183 // Building a fetch pack:
2184 // 1. Add the header for the requested ledger.
2185 // 2. Add the nodes for the AccountStateMap of that ledger.
2186 // 3. If there are transactions, add the nodes for the
2187 // transactions of the ledger.
2188 // 4. If the FetchPack now contains at least 512 entries then stop.
2189 // 5. If not very much time has elapsed, then loop back and repeat
2190 // the same process adding the previous ledger to the FetchPack.
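        // The 512-entry threshold and the one-second wall-clock check in the
        // loop condition below bound both the reply size and the time spent
        // building it.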
2191 do
2192 {
2193 std::uint32_t lSeq = want->info().seq;
2194
2195 {
2196 // Serialize the ledger header:
2197 hdr.erase();
2198
2200 addRaw(want->info(), hdr);
2201
2202 // Add the data
2203 protocol::TMIndexedObject* obj = reply.add_objects();
2204 obj->set_hash(
2205 want->info().hash.data(), want->info().hash.size());
2206 obj->set_data(hdr.getDataPtr(), hdr.getLength());
2207 obj->set_ledgerseq(lSeq);
2208 }
2209
2211 want->stateMap(), &have->stateMap(), 16384, &reply, lSeq);
2212
2213 // We use nullptr here because transaction maps are per ledger
2214 // and so the requestor is unlikely to already have it.
2215 if (want->info().txHash.isNonZero())
2216 populateFetchPack(want->txMap(), nullptr, 512, &reply, lSeq);
2217
2218 if (reply.objects().size() >= 512)
2219 break;
2220
2221 have = std::move(want);
2222 want = getLedgerByHash(have->info().parentHash);
2223 } while (want && UptimeClock::now() <= uptime + 1s);
2224
2225 auto msg = std::make_shared<Message>(reply, protocol::mtGET_OBJECTS);
2226
2227 JLOG(m_journal.info())
2228 << "Built fetch pack with " << reply.objects().size() << " nodes ("
2229 << msg->getBufferSize() << " bytes)";
2230
2231 peer->send(msg);
2232 }
2233 catch (std::exception const& ex)
2234 {
2235 JLOG(m_journal.warn())
 2236 << "Exception building fetch pack. Exception: " << ex.what();
2237 }
2238}
2239
2242{
2243 return fetch_packs_.getCacheSize();
2244}
2245
2246// Returns the minimum ledger sequence in SQL database, if any.
2249{
2251}
2252
2255{
2256 uint32_t first = 0, last = 0;
2257
2258 if (!getValidatedRange(first, last) || last < ledgerSeq)
2259 return {};
2260
2261 auto const lgr = getLedgerBySeq(ledgerSeq);
2262 if (!lgr || lgr->txs.empty())
2263 return {};
2264
2265 for (auto it = lgr->txs.begin(); it != lgr->txs.end(); ++it)
2266 if (it->first && it->second &&
2267 it->second->isFieldPresent(sfTransactionIndex) &&
2268 it->second->getFieldU32(sfTransactionIndex) == txnIndex)
2269 return it->first->getTransactionID();
2270
2271 return {};
2272}
2273
2274} // namespace ripple
T back(T... args)
T back_inserter(T... args)
T begin(T... args)
Represents a JSON value.
Definition: json_value.h:148
Provide a light-weight way to check active() before string formatting.
Definition: Journal.h:205
A generic endpoint for log messages.
Definition: Journal.h:60
Stream fatal() const
Definition: Journal.h:352
Stream error() const
Definition: Journal.h:346
Stream debug() const
Definition: Journal.h:328
Stream info() const
Definition: Journal.h:334
Stream trace() const
Severity stream access functions.
Definition: Journal.h:322
Stream warn() const
Definition: Journal.h:340
typename Clock::time_point time_point
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual bool hasUnsupportedEnabled() const =0
returns true if one or more amendments on the network have been enabled that this server does not sup...
void doValidatedLedger(std::shared_ptr< ReadView const > const &lastValidatedLedger)
Called when a new fully-validated ledger is accepted.
virtual Config & config()=0
virtual Overlay & overlay()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual OpenLedger & openLedger()=0
virtual beast::Journal journal(std::string const &name)=0
virtual SHAMapStore & getSHAMapStore()=0
virtual bool isStopping() const =0
virtual NodeStore::Database & getNodeStore()=0
virtual RCLValidations & getValidations()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual LedgerReplayer & getLedgerReplayer()=0
virtual TimeKeeper & timeKeeper()=0
virtual JobQueue & getJobQueue()=0
virtual NetworkOPs & getOPs()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual ValidatorList & validators()=0
virtual LedgerMaster & getLedgerMaster()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual PathRequests & getPathRequests()=0
virtual TxQ & getTxQ()=0
virtual LedgerIndex getMaxDisallowedLedger()=0
Ensure that a newly-started validator does not sign proposals older than the last ledger it persisted...
virtual AmendmentTable & getAmendmentTable()=0
virtual PendingSaves & pendingSaves()=0
void insert(std::shared_ptr< STTx const > const &txn)
std::shared_ptr< STTx const > popAcctTransaction(std::shared_ptr< STTx const > const &tx)
void reset(LedgerHash const &salt)
bool LEDGER_REPLAY
Definition: Config.h:223
std::unordered_set< uint256, beast::uhash<> > features
Definition: Config.h:277
virtual std::shared_ptr< Ledger const > acquire(uint256 const &hash, std::uint32_t seq, InboundLedger::Reason)=0
virtual bool isFailure(uint256 const &h)=0
bool isStopping() const
Definition: JobQueue.h:230
int getJobCount(JobType t) const
Jobs waiting at this priority.
Definition: JobQueue.cpp:142
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition: JobQueue.h:166
float getCacheHitRate()
Get the ledgers_by_hash cache hit rate.
Definition: LedgerHistory.h:53
void builtLedger(std::shared_ptr< Ledger const > const &, uint256 const &consensusHash, Json::Value)
Report that we have locally built a particular ledger.
void sweep()
Remove stale cache entries.
Definition: LedgerHistory.h:76
LedgerHash getLedgerHash(LedgerIndex ledgerIndex)
Get a ledger's hash given its sequence number.
void clearLedgerCachePrior(LedgerIndex seq)
std::shared_ptr< Ledger const > getLedgerBySeq(LedgerIndex ledgerIndex)
Get a ledger given its sequence number.
bool insert(std::shared_ptr< Ledger const > const &ledger, bool validated)
Track a ledger.
bool fixIndex(LedgerIndex ledgerIndex, LedgerHash const &ledgerHash)
Repair a hash to index mapping.
void validatedLedger(std::shared_ptr< Ledger const > const &, std::optional< uint256 > const &consensusHash)
Report that we have validated a particular ledger.
std::shared_ptr< Ledger const > getLedgerByHash(LedgerHash const &ledgerHash)
Retrieve a ledger given its hash.
std::shared_ptr< Ledger const > get()
Definition: LedgerHolder.h:57
void set(std::shared_ptr< Ledger const > ledger)
Definition: LedgerHolder.h:45
bool haveLedger(std::uint32_t seq)
std::shared_ptr< Ledger const > getValidatedLedger()
void clearLedgerCachePrior(LedgerIndex seq)
RangeSet< std::uint32_t > mCompleteLedgers
Definition: LedgerMaster.h:348
void setBuildingLedger(LedgerIndex index)
std::unique_ptr< LedgerReplay > releaseReplay()
void failedSave(std::uint32_t seq, uint256 const &hash)
void takeReplay(std::unique_ptr< LedgerReplay > replay)
std::uint32_t const ledger_history_
Definition: LedgerMaster.h:376
void addHeldTransaction(std::shared_ptr< Transaction > const &trans)
void checkAccept(std::shared_ptr< Ledger const > const &ledger)
std::optional< NetClock::time_point > getCloseTimeByHash(LedgerHash const &ledgerHash, LedgerIndex ledgerIndex)
std::size_t getNeededValidations()
Determines how many validations are needed to fully validate a ledger.
std::unique_ptr< LedgerReplay > replayData
Definition: LedgerMaster.h:345
void setLedgerRangePresent(std::uint32_t minV, std::uint32_t maxV)
Application & app_
Definition: LedgerMaster.h:317
TimeKeeper::time_point upgradeWarningPrevTime_
Definition: LedgerMaster.h:389
LedgerHistory mLedgerHistory
Definition: LedgerMaster.h:340
std::optional< NetClock::time_point > getCloseTimeBySeq(LedgerIndex ledgerIndex)
void fixMismatch(ReadView const &ledger)
std::atomic< LedgerIndex > mPubLedgerSeq
Definition: LedgerMaster.h:364
void clearPriorLedgers(LedgerIndex seq)
std::shared_ptr< Ledger const > mPubLedger
Definition: LedgerMaster.h:329
void makeFetchPack(std::weak_ptr< Peer > const &wPeer, std::shared_ptr< protocol::TMGetObjectByHash > const &request, uint256 haveLedgerHash, UptimeClock::time_point uptime)
std::atomic< LedgerIndex > mBuildingLedgerSeq
Definition: LedgerMaster.h:367
std::shared_ptr< ReadView const > getCurrentLedger()
void tryFill(std::shared_ptr< Ledger const > ledger)
std::uint32_t const fetch_depth_
Definition: LedgerMaster.h:373
bool canBeCurrent(std::shared_ptr< Ledger const > const &ledger)
Check the sequence number and parent close time of a ledger against our clock and last validated ledg...
bool isValidated(ReadView const &ledger)
std::uint32_t getEarliestFetch()
std::recursive_mutex m_mutex
Definition: LedgerMaster.h:320
std::optional< LedgerHash > walkHashBySeq(std::uint32_t index, InboundLedger::Reason reason)
Walk to a ledger's hash using the skip list.
uint256 getHashBySeq(std::uint32_t index)
Get a ledger's hash by sequence number using the cache.
std::shared_ptr< STTx const > popAcctTransaction(std::shared_ptr< STTx const > const &tx)
Get the next transaction held for a particular account, if any.
LedgerIndex const max_ledger_difference_
Definition: LedgerMaster.h:386
bool fixIndex(LedgerIndex ledgerIndex, LedgerHash const &ledgerHash)
TaggedCache< uint256, Blob > fetch_packs_
Definition: LedgerMaster.h:380
bool const standalone_
Definition: LedgerMaster.h:370
bool isCaughtUp(std::string &reason)
void setPubLedger(std::shared_ptr< Ledger const > const &l)
bool newPFWork(const char *name, std::unique_lock< std::recursive_mutex > &)
A thread needs to be dispatched to handle pathfinding work of some kind.
std::optional< uint256 > txnIdFromIndex(uint32_t ledgerSeq, uint32_t txnIndex)
beast::Journal m_journal
Definition: LedgerMaster.h:318
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
void clearLedger(std::uint32_t seq)
std::pair< uint256, LedgerIndex > mLastValidLedger
Definition: LedgerMaster.h:338
std::shared_ptr< Ledger const > getClosedLedger()
Definition: LedgerMaster.h:79
std::optional< LedgerIndex > minSqlSeq()
void setFullLedger(std::shared_ptr< Ledger const > const &ledger, bool isSynchronous, bool isCurrent)
LedgerMaster(Application &app, Stopwatch &stopwatch, beast::insight::Collector::ptr const &collector, beast::Journal journal)
std::atomic< std::uint32_t > mValidLedgerSign
Definition: LedgerMaster.h:365
CanonicalTXSet mHeldTransactions
Definition: LedgerMaster.h:342
std::uint32_t const ledger_fetch_size_
Definition: LedgerMaster.h:378
void applyHeldTransactions()
Apply held transactions to the open ledger. This is normally called as we close the ledger.
std::chrono::seconds getPublishedLedgerAge()
std::shared_ptr< Ledger const > mHistLedger
Definition: LedgerMaster.h:335
std::recursive_mutex mCompleteLock
Definition: LedgerMaster.h:347
std::string getCompleteLedgers()
std::atomic< LedgerIndex > mValidLedgerSeq
Definition: LedgerMaster.h:366
std::size_t getFetchPackCacheSize() const
bool getFullValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::optional< Blob > getFetchPack(uint256 const &hash) override
Retrieves partial ledger data for the corresponding hash from peers.
void gotFetchPack(bool progress, std::uint32_t seq)
std::recursive_mutex & peekMutex()
void consensusBuilt(std::shared_ptr< Ledger const > const &ledger, uint256 const &consensusHash, Json::Value consensus)
Report that the consensus process built a particular ledger.
std::shared_ptr< Ledger const > mPathLedger
Definition: LedgerMaster.h:332
void setValidLedger(std::shared_ptr< Ledger const > const &l)
std::optional< LedgerHash > getLedgerHashForHistory(LedgerIndex index, InboundLedger::Reason reason)
void addFetchPack(uint256 const &hash, std::shared_ptr< Blob > data)
std::atomic< std::uint32_t > mPubLedgerClose
Definition: LedgerMaster.h:363
void switchLCL(std::shared_ptr< Ledger const > const &lastClosed)
LedgerHolder mValidLedger
Definition: LedgerMaster.h:326
std::shared_ptr< ReadView const > getPublishedLedger()
std::atomic_flag mGotFetchPackThread
Definition: LedgerMaster.h:360
void doAdvance(std::unique_lock< std::recursive_mutex > &)
LedgerHolder mClosedLedger
Definition: LedgerMaster.h:323
bool storeLedger(std::shared_ptr< Ledger const > ledger)
std::vector< std::shared_ptr< Ledger const > > findNewLedgersToPublish(std::unique_lock< std::recursive_mutex > &)
LedgerIndex getCurrentLedgerIndex()
bool isCompatible(ReadView const &, beast::Journal::Stream, char const *reason)
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
void fetchForHistory(std::uint32_t missing, bool &progress, InboundLedger::Reason reason, std::unique_lock< std::recursive_mutex > &)
std::shared_ptr< Ledger const > getLedgerByHash(uint256 const &hash)
std::uint32_t fetch_seq_
Definition: LedgerMaster.h:382
LedgerIndex getValidLedgerIndex()
std::chrono::seconds getValidatedLedgerAge()
void replay(InboundLedger::Reason r, uint256 const &finishLedgerHash, std::uint32_t totalNumLedgers)
Replay a range of ledgers.
void setRemoteFee(std::uint32_t f)
Definition: LoadFeeTrack.h:60
bool isLoadedLocal() const
Definition: LoadFeeTrack.h:126
std::uint32_t getLoadBase() const
Definition: LoadFeeTrack.h:89
virtual bool isBlocked()=0
virtual void setAmendmentWarned()=0
virtual void setAmendmentBlocked()=0
virtual void clearNeedNetworkLedger()=0
virtual bool isAmendmentWarned()=0
virtual bool isNeedNetworkLedger()=0
virtual void updateLocalTx(ReadView const &newValidLedger)=0
virtual void clearAmendmentWarned()=0
virtual void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted)=0
Persistency layer for NodeObject.
Definition: Database.h:51
std::shared_ptr< NodeObject > fetchNodeObject(uint256 const &hash, std::uint32_t ledgerSeq=0, FetchType fetchType=FetchType::synchronous, bool duplicate=false)
Fetch a node object.
Definition: Database.cpp:241
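For context, a minimal usage sketch of fetchNodeObject as declared above. nodeHash, ledgerSeq, and handleBlob are hypothetical placeholders, and the surrounding setup is assumed rather than taken from this page.

// Hypothetical usage sketch: synchronously fetch a node by hash from the
// node store and hand its payload to a caller-supplied function.
if (auto const obj = app_.getNodeStore().fetchNodeObject(nodeHash, ledgerSeq))
    handleBlob(obj->getData());   // handleBlob is a placeholder; getData() is
                                  // assumed to expose the stored bytes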
virtual std::int32_t getWriteLoad() const =0
Retrieve the estimated number of pending write operations.
std::uint32_t earliestLedgerSeq() const noexcept
Definition: Database.h:221
bool modify(modify_type const &f)
Modify the open ledger.
Definition: OpenLedger.cpp:56
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
Definition: OpenLedger.cpp:49
Writable ledger view that accumulates state and tx changes.
Definition: OpenView.h:57
void setup(std::shared_ptr< ReadView const > const &ledger)
Definition: OrderBookDB.cpp:39
virtual PeerSequence getActivePeers() const =0
Returns a sequence representing the current list of peers.
virtual void checkTracking(std::uint32_t index)=0
Calls the checkTracking function on each peer.
bool requestsPending() const
std::map< LedgerIndex, bool > getSnapshot() const
Get a snapshot of the pending saves.
Definition: PendingSaves.h:138
A view into a ledger.
Definition: ReadView.h:52
virtual bool open() const =0
Returns true if this reflects an open ledger.
virtual LedgerInfo const & info() const =0
Returns information about the ledger.
virtual std::optional< LedgerHashPair > getHashesByIndex(LedgerIndex ledgerIndex)=0
Returns the hashes of the ledger and its parent as specified by the ledgerIndex.
virtual std::optional< LedgerIndex > getMinLedgerSeq()=0
Returns the minimum ledger sequence in the Ledgers table.
virtual uint256 getHashByIndex(LedgerIndex ledgerIndex)=0
Returns the hash of the ledger with the given sequence.
Rules controlling protocol behavior.
Definition: Rules.h:35
uint256 const & as_uint256() const
Definition: SHAMapHash.h:44
virtual void onLedgerClosed(std::shared_ptr< Ledger const > const &ledger)=0
Called by LedgerMaster every time a ledger validates.
virtual std::optional< LedgerIndex > minimumOnline() const =0
The minimum ledger to try to maintain in our database.
virtual bool isLeaf() const =0
Determines if this is a leaf node.
SHAMapHash const & getHash() const
Return the hash of this node.
virtual void serializeWithPrefix(Serializer &) const =0
Serialize the node in a format appropriate for hashing.
A SHAMap is both a radix tree with a fan-out of 16 and a Merkle tree.
Definition: SHAMap.h:95
void visitDifferences(SHAMap const *have, std::function< bool(SHAMapTreeNode const &)> const &) const
Visit every node in this SHAMap that is not present in the specified SHAMap.
Definition: SHAMapSync.cpp:101
void skip(int num)
Definition: Serializer.cpp:343
std::uint32_t get32()
Definition: Serializer.cpp:377
int getLength() const
Definition: Serializer.h:234
const void * getDataPtr() const
Definition: Serializer.h:224
time_point now() const override
Returns the current time, using the server's clock.
Definition: TimeKeeper.h:64
time_point closeTime() const
Returns the predicted close time, in network time.
Definition: TimeKeeper.h:76
static time_point now()
Definition: UptimeClock.cpp:67
std::vector< WrappedValidationType > getTrustedForLedger(ID const &ledgerID, Seq const &seq)
Get trusted full validations for a specific ledger.
Definition: Validations.h:1059
std::vector< WrappedValidationType > currentTrusted()
Get the currently trusted full validations.
Definition: Validations.h:1000
std::vector< std::uint32_t > fees(ID const &ledgerID, std::uint32_t baseFee)
Returns fees reported by trusted full validators in the given ledger.
Definition: Validations.h:1082
std::vector< std::shared_ptr< STValidation > > negativeUNLFilter(std::vector< std::shared_ptr< STValidation > > &&validations) const
Remove validations that are from validators on the negative UNL.
std::size_t quorum() const
Get quorum value for current trusted key set.
QuorumKeys getQuorumKeys() const
Get the quorum and all of the trusted keys.
bool isNonZero() const
Definition: base_uint.h:545
Automatically unlocks and re-locks a unique_lock object.
Definition: scope.h:231
T clear(T... args)
T copy(T... args)
T count(T... args)
T empty(T... args)
T end(T... args)
T endl(T... args)
T find(T... args)
T load(T... args)
T lock(T... args)
T make_pair(T... args)
T max(T... args)
T min(T... args)
bool isNewerVersion(std::uint64_t version)
Check if the version is newer than the local node's rippled software version.
Definition: BuildInfo.cpp:172
bool isRippledVersion(std::uint64_t version)
Check if the encoded software version is a rippled software version.
Definition: BuildInfo.cpp:165
Charge const feeMalformedRequest
Schedule of fees charged for imposing load on the server.
Charge const feeRequestNoReply
TER valid(PreclaimContext const &ctx, AccountID const &src)
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: algorithm.h:26
SizedItem
Definition: Config.h:44
LedgerIndex getCandidateLedger(LedgerIndex requested)
Find a ledger index from which we could easily get the requested ledger.
Definition: View.h:327
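The brief above says getCandidateLedger picks an index from which the requested ledger is easy to reach. A plausible reading, given the 256-ledger skip-list interval used elsewhere in the protocol, is rounding up to the next multiple of 256; the standalone sketch below shows that rounding only as an assumption, not as the actual View.h implementation.

#include <cstdint>

// Assumption-only sketch: round a requested ledger index up to the next
// 256 boundary, since skip-list entries are kept every 256 ledgers.
std::uint32_t
candidateLedgerSketch(std::uint32_t requested)
{
    return (requested + 255u) & ~255u;
}
// candidateLedgerSketch(1000) == 1024, candidateLedgerSketch(1024) == 1024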
static bool shouldAcquire(std::uint32_t const currentLedger, std::uint32_t const ledgerHistory, std::optional< LedgerIndex > const minimumOnline, std::uint32_t const candidateLedger, beast::Journal j)
std::optional< T > prevMissing(RangeSet< T > const &rs, T t, T minVal=0)
Find the largest value not in the set that is less than a given value.
Definition: RangeSet.h:183
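prevMissing, documented above, is what lets doAdvance pick the next historical ledger to backfill: given the set of complete ledgers, it returns the largest absent index below a given value. The standalone toy below mirrors that behavior over a plain list of closed ranges; rippled's actual RangeSet is an interval-set type, so this is a sketch of the semantics, not the real code.

#include <cstdint>
#include <optional>
#include <utility>
#include <vector>

// Toy stand-in for a RangeSet: a list of closed [first, last] intervals.
using ToyRanges = std::vector<std::pair<std::uint32_t, std::uint32_t>>;

static bool
toyContains(ToyRanges const& rs, std::uint32_t v)
{
    for (auto const& [lo, hi] : rs)
        if (v >= lo && v <= hi)
            return true;
    return false;
}

// Largest value below t (down to minVal) that is NOT in the set, if any.
static std::optional<std::uint32_t>
toyPrevMissing(ToyRanges const& rs, std::uint32_t t, std::uint32_t minVal = 0)
{
    for (std::uint32_t v = t; v-- > minVal;)   // scans t-1 down to minVal
        if (!toyContains(rs, v))
            return v;
    return std::nullopt;
}
// With ranges {[3,5],[8,10]}, toyPrevMissing(rs, 10) == 7.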
bool isCurrent(ValidationParms const &p, NetClock::time_point now, NetClock::time_point signTime, NetClock::time_point seenTime)
Whether a validation is still current.
Definition: Validations.h:149
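isCurrent, above, decides whether a validation is still recent enough to count. A hedged standalone sketch of that kind of timing window follows; the window widths and the use of sign time alone are illustrative assumptions, not the parameters from Validations.h.

#include <chrono>

// Assumption-only sketch of a "still current" check: reject validations
// signed too far in the past or too far in the future relative to now.
bool
isCurrentSketch(
    std::chrono::system_clock::time_point now,
    std::chrono::system_clock::time_point signTime)
{
    using namespace std::chrono_literals;
    auto const maxPast = 5min;      // hypothetical staleness window
    auto const maxFuture = 3min;    // hypothetical clock-skew allowance
    return signTime >= now - maxPast && signTime <= now + maxFuture;
}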
std::optional< uint256 > hashOfSeq(ReadView const &ledger, LedgerIndex seq, beast::Journal journal)
Return the hash of a ledger by sequence.
Definition: View.cpp:836
static constexpr int MAX_LEDGER_GAP
constexpr std::size_t calculatePercent(std::size_t count, std::size_t total)
Calculate one number as a percentage of another.
Definition: MathUtilities.h:44
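calculatePercent, as declared above, is the helper used to report things like validation or sync coverage. The standalone sketch below reproduces the documented meaning (one count as a percentage of a total); the real MathUtilities.h version's rounding and clamping details are assumptions here, so treat this as an approximation.

#include <algorithm>
#include <cstddef>

// Sketch with the same signature as the declaration above; the real
// helper's rounding/clamping behavior is assumed, not copied.
constexpr std::size_t
calculatePercentSketch(std::size_t count, std::size_t total)
{
    return total == 0 ? 0 : (std::min(count, total) * 100) / total;
}
// calculatePercentSketch(80, 256) == 31   (80 of 256 ledgers is about 31%)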
std::enable_if_t< std::is_same< T, char >::value||std::is_same< T, unsigned char >::value, Slice > makeSlice(std::array< T, N > const &a)
Definition: Slice.h:244
Stopwatch & stopwatch()
Returns an instance of a wall clock.
Definition: chrono.h:119
ApplyResult apply(Application &app, OpenView &view, STTx const &tx, ApplyFlags flags, beast::Journal journal)
Apply a transaction to an OpenView.
Definition: apply.cpp:110
std::string to_string(base_uint< Bits, Tag > const &a)
Definition: base_uint.h:630
ClosedInterval< T > range(T low, T high)
Create a closed range interval.
Definition: RangeSet.h:54
static void populateFetchPack(SHAMap const &want, SHAMap const *have, std::uint32_t cnt, protocol::TMGetObjectByHash *into, std::uint32_t seq, bool withLeaves=true)
Populate a fetch pack with data from the map the recipient wants.
ApplyFlags
Definition: ApplyView.h:31
@ tapNONE
Definition: ApplyView.h:32
static constexpr std::chrono::minutes MAX_LEDGER_AGE_ACQUIRE
@ ledgerMaster
ledger master data for signing
static constexpr int MAX_WRITE_LOAD_ACQUIRE
void addRaw(LedgerHeader const &, Serializer &, bool includeHash=false)
@ jtLEDGER_DATA
Definition: Job.h:66
@ jtUPDATE_PF
Definition: Job.h:56
@ jtPUBOLDLEDGER
Definition: Job.h:44
@ jtADVANCE
Definition: Job.h:67
sha512_half_hasher::result_type sha512Half(Args const &... args)
Returns the SHA512-Half of a series of objects.
Definition: digest.h:225
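sha512Half, documented above, names the hashing scheme used throughout the protocol: SHA-512 over the input, truncated to the first 256 bits. The standalone sketch below shows that idea over a single buffer using OpenSSL rather than rippled's digest.h, so the includes and types are assumptions about the environment, not the project's own API (the real helper is variadic over a series of objects).

#include <openssl/sha.h>

#include <algorithm>
#include <array>
#include <string>

// Sketch of the SHA512-Half idea: hash with SHA-512, keep the first 32 bytes.
// Uses OpenSSL's one-shot SHA512(); rippled's own hasher is not used here.
std::array<unsigned char, 32>
sha512HalfSketch(std::string const& data)
{
    std::array<unsigned char, 64> full{};
    SHA512(
        reinterpret_cast<unsigned char const*>(data.data()),
        data.size(),
        full.data());

    std::array<unsigned char, 32> half{};
    std::copy_n(full.begin(), half.size(), half.begin());
    return half;
}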
bool areCompatible(ReadView const &validLedger, ReadView const &testLedger, beast::Journal::Stream &s, const char *reason)
Return false if the test ledger is provably incompatible with the valid ledger, that is,...
Definition: View.cpp:674
void LogicError(std::string const &how) noexcept
Called when faulty logic causes a broken invariant.
Definition: contract.cpp:50
bool pendSaveValidated(Application &app, std::shared_ptr< Ledger const > const &ledger, bool isSynchronous, bool isCurrent)
Save, or arrange to save, a fully-validated ledger. Returns false on error.
Definition: Ledger.cpp:993
STL namespace.
T has_value(T... args)
T push_back(T... args)
T reserve(T... args)
T size(T... args)
T sort(T... args)
T str(T... args)
T test_and_set(T... args)
T time_since_epoch(T... args)
T what(T... args)