LedgerMaster.cpp
1//------------------------------------------------------------------------------
2/*
3 This file is part of rippled: https://github.com/ripple/rippled
4 Copyright (c) 2012, 2013 Ripple Labs Inc.
5
6 Permission to use, copy, modify, and/or distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19
20#include <xrpld/app/consensus/RCLValidations.h>
21#include <xrpld/app/ledger/Ledger.h>
22#include <xrpld/app/ledger/LedgerMaster.h>
23#include <xrpld/app/ledger/LedgerReplayer.h>
24#include <xrpld/app/ledger/OpenLedger.h>
25#include <xrpld/app/ledger/OrderBookDB.h>
26#include <xrpld/app/ledger/PendingSaves.h>
27#include <xrpld/app/main/Application.h>
28#include <xrpld/app/misc/AmendmentTable.h>
29#include <xrpld/app/misc/HashRouter.h>
30#include <xrpld/app/misc/LoadFeeTrack.h>
31#include <xrpld/app/misc/NetworkOPs.h>
32#include <xrpld/app/misc/SHAMapStore.h>
33#include <xrpld/app/misc/Transaction.h>
34#include <xrpld/app/misc/TxQ.h>
35#include <xrpld/app/misc/ValidatorList.h>
36#include <xrpld/app/paths/PathRequests.h>
37#include <xrpld/app/rdb/RelationalDatabase.h>
38#include <xrpld/app/tx/apply.h>
39#include <xrpld/core/DatabaseCon.h>
40#include <xrpld/core/TimeKeeper.h>
41#include <xrpld/overlay/Overlay.h>
42#include <xrpld/overlay/Peer.h>
43#include <xrpl/basics/Log.h>
44#include <xrpl/basics/MathUtilities.h>
45#include <xrpl/basics/TaggedCache.h>
46#include <xrpl/basics/UptimeClock.h>
47#include <xrpl/basics/contract.h>
48#include <xrpl/basics/safe_cast.h>
49#include <xrpl/basics/scope.h>
50#include <xrpl/beast/utility/instrumentation.h>
51#include <xrpl/protocol/BuildInfo.h>
52#include <xrpl/protocol/HashPrefix.h>
53#include <xrpl/protocol/digest.h>
54#include <xrpl/resource/Fees.h>
55
56#include <algorithm>
57#include <chrono>
58#include <cstdlib>
59#include <limits>
60#include <memory>
61#include <vector>
62
63namespace ripple {
64
65// Don't catch up more than 100 ledgers (cannot exceed 256)
66static constexpr int MAX_LEDGER_GAP{100};
67
68// Don't acquire history if ledger is too old
70
71// Don't acquire history if write load is too high
72static constexpr int MAX_WRITE_LOAD_ACQUIRE{8192};
73
74// Helper function for LedgerMaster::doAdvance()
75// Return true if candidateLedger should be fetched from the network.
76static bool
77shouldAcquire(
78 std::uint32_t const currentLedger,
79 std::uint32_t const ledgerHistory,
80 std::optional<LedgerIndex> const minimumOnline,
81 std::uint32_t const candidateLedger,
82 beast::Journal j)
83{
84 bool const ret = [&]() {
85 // Fetch ledger if it may be the current ledger
86 if (candidateLedger >= currentLedger)
87 return true;
88
89 // Or if it is within our configured history range:
90 if (currentLedger - candidateLedger <= ledgerHistory)
91 return true;
92
93 // Or if greater than or equal to a specific minimum ledger.
94 // Do nothing if the minimum ledger to keep online is unknown.
95 return minimumOnline.has_value() && candidateLedger >= *minimumOnline;
96 }();
97
98 JLOG(j.trace()) << "Missing ledger " << candidateLedger
99 << (ret ? " should" : " should NOT") << " be acquired";
100 return ret;
101}
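// Illustrative example (hypothetical values, not from the original file):
// with currentLedger = 1000, ledgerHistory = 256 and minimumOnline unset,
// shouldAcquire() returns true for candidateLedger 800 (a gap of 200 is
// within the history range) and false for candidateLedger 700 (a gap of
// 300 exceeds it and no minimum-online floor applies).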
102
103LedgerMaster::LedgerMaster(
104 Application& app,
105 Stopwatch& stopwatch,
106 beast::insight::Collector::ptr const& collector,
107 beast::Journal journal)
108 : app_(app)
109 , m_journal(journal)
110 , mLedgerHistory(collector, app)
111 , standalone_(app_.config().standalone())
112 , fetch_depth_(
113 app_.getSHAMapStore().clampFetchDepth(app_.config().FETCH_DEPTH))
114 , ledger_history_(app_.config().LEDGER_HISTORY)
115 , ledger_fetch_size_(app_.config().getValueFor(SizedItem::ledgerFetch))
116 , fetch_packs_(
117 "FetchPack",
118 65536,
119 std::chrono::seconds{45},
120 stopwatch,
121 app_.journal("TaggedCache"))
122 , m_stats(std::bind(&LedgerMaster::collect_metrics, this), collector)
123{
124}
125
126LedgerIndex
127LedgerMaster::getCurrentLedgerIndex()
128{
129 return app_.openLedger().current()->info().seq;
130}
131
132LedgerIndex
133LedgerMaster::getValidLedgerIndex()
134{
135 return mValidLedgerSeq;
136}
137
138bool
139LedgerMaster::isCompatible(
140 ReadView const& view,
141 beast::Journal::Stream s,
142 char const* reason)
143{
144 auto validLedger = getValidatedLedger();
145
146 if (validLedger && !areCompatible(*validLedger, view, s, reason))
147 {
148 return false;
149 }
150
151 {
152 std::lock_guard sl(m_mutex);
153
154 if ((mLastValidLedger.second != 0) &&
155 !areCompatible(
156 mLastValidLedger.first,
157 mLastValidLedger.second,
158 view,
159 s,
160 reason))
161 {
162 return false;
163 }
164 }
165
166 return true;
167}
168
169std::chrono::seconds
170LedgerMaster::getPublishedLedgerAge()
171{
172 using namespace std::chrono_literals;
173 std::chrono::seconds pubClose{mPubLedgerClose.load()};
174 if (pubClose == 0s)
175 {
176 JLOG(m_journal.debug()) << "No published ledger";
177 return weeks{2};
178 }
179
180 std::chrono::seconds ret = app_.timeKeeper().closeTime().time_since_epoch();
181 ret -= pubClose;
182 ret = (ret > 0s) ? ret : 0s;
183 static std::chrono::seconds lastRet = -1s;
184
185 if (ret != lastRet)
186 {
187 JLOG(m_journal.trace()) << "Published ledger age is " << ret.count();
188 lastRet = ret;
189 }
190 return ret;
191}
192
193std::chrono::seconds
194LedgerMaster::getValidatedLedgerAge()
195{
196 using namespace std::chrono_literals;
197
198 std::chrono::seconds valClose{mValidLedgerSign.load()};
199 if (valClose == 0s)
200 {
201 JLOG(m_journal.debug()) << "No validated ledger";
202 return weeks{2};
203 }
204
205 std::chrono::seconds ret = app_.timeKeeper().closeTime().time_since_epoch();
206 ret -= valClose;
207 ret = (ret > 0s) ? ret : 0s;
208 static std::chrono::seconds lastRet = -1s;
209
210 if (ret != lastRet)
211 {
212 JLOG(m_journal.trace()) << "Validated ledger age is " << ret.count();
213 lastRet = ret;
214 }
215 return ret;
216}
217
218bool
219LedgerMaster::isCaughtUp(std::string& reason)
220{
221 using namespace std::chrono_literals;
222
223 if (getPublishedLedgerAge() > 3min)
224 {
225 reason = "No recently-published ledger";
226 return false;
227 }
228 std::uint32_t validClose = mValidLedgerSign.load();
229 std::uint32_t pubClose = mPubLedgerClose.load();
230 if (!validClose || !pubClose)
231 {
232 reason = "No published ledger";
233 return false;
234 }
235 if (validClose > (pubClose + 90))
236 {
237 reason = "Published ledger lags validated ledger";
238 return false;
239 }
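 // Illustrative example (hypothetical values): a validated ledger signed
 // at network time 1,000,000s against a published ledger that closed at
 // 999,880s differs by 120s, which exceeds the 90s allowance above, so the
 // server would report the published ledger as lagging.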
240 return true;
241}
242
243void
244LedgerMaster::setValidLedger(std::shared_ptr<Ledger const> const& l)
245{
246 std::vector<NetClock::time_point> times;
247 std::optional<uint256> consensusHash;
248
249 if (!standalone_)
250 {
251 auto validations = app_.validators().negativeUNLFilter(
252 app_.getValidations().getTrustedForLedger(
253 l->info().hash, l->info().seq));
254 times.reserve(validations.size());
255 for (auto const& val : validations)
256 times.push_back(val->getSignTime());
257
258 if (!validations.empty())
259 consensusHash = validations.front()->getConsensusHash();
260 }
261
262 NetClock::time_point signTime;
263
264 if (!times.empty() && times.size() >= app_.validators().quorum())
265 {
266 // Calculate the sample median
267 std::sort(times.begin(), times.end());
268 auto const t0 = times[(times.size() - 1) / 2];
269 auto const t1 = times[times.size() / 2];
270 signTime = t0 + (t1 - t0) / 2;
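 // Illustrative example (hypothetical values): with four sorted sign
 // times, t0 = times[1] and t1 = times[2], so signTime is their midpoint;
 // with an odd count both indices select the same middle element.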
271 }
272 else
273 {
274 signTime = l->info().closeTime;
275 }
276
277 mValidLedger.set(l);
278 mValidLedgerSign = signTime.time_since_epoch().count();
279 XRPL_ASSERT(
281 l->info().seq + max_ledger_difference_ >
283 "ripple::LedgerMaster::setValidLedger : valid ledger sequence");
285 mValidLedgerSeq = l->info().seq;
286
289 mLedgerHistory.validatedLedger(l, consensusHash);
291 if (!app_.getOPs().isBlocked())
292 {
294 {
295 JLOG(m_journal.error()) << "One or more unsupported amendments "
296 "activated: server blocked.";
298 }
299 else if (!app_.getOPs().isAmendmentWarned() || l->isFlagLedger())
300 {
301 // Amendments can lose majority, so re-check periodically (every
302 // flag ledger), and clear the flag if appropriate. If an unknown
303 // amendment gains majority log a warning as soon as it's
304 // discovered, then again every flag ledger until the operator
305 // upgrades, the amendment loses majority, or the amendment goes
306 // live and the node gets blocked. Unlike being amendment blocked,
307 // this message may be logged more than once per session, because
308 // the node will otherwise function normally, and this gives
309 // operators an opportunity to see and resolve the warning.
310 if (auto const first =
312 {
313 JLOG(m_journal.error()) << "One or more unsupported amendments "
314 "reached majority. Upgrade before "
315 << to_string(*first)
316 << " to prevent your server from "
317 "becoming amendment blocked.";
319 }
320 else
322 }
323 }
324}
325
326void
328{
329 mPubLedger = l;
330 mPubLedgerClose = l->info().closeTime.time_since_epoch().count();
331 mPubLedgerSeq = l->info().seq;
332}
333
334void
336 std::shared_ptr<Transaction> const& transaction)
337{
339 mHeldTransactions.insert(transaction->getSTransaction());
340}
341
342// Validate a ledger's close time and sequence number if we're considering
343// jumping to that ledger. This helps defend against some rare hostile or
344// diverged majority scenarios.
345bool
347{
348 XRPL_ASSERT(ledger, "ripple::LedgerMaster::canBeCurrent : non-null input");
349
350 // Never jump to a candidate ledger that precedes our
351 // last validated ledger
352
353 auto validLedger = getValidatedLedger();
354 if (validLedger && (ledger->info().seq < validLedger->info().seq))
355 {
356 JLOG(m_journal.trace())
357 << "Candidate for current ledger has low seq " << ledger->info().seq
358 << " < " << validLedger->info().seq;
359 return false;
360 }
361
362 // Ensure this ledger's parent close time is within five minutes of
363 // our current time. If we already have a known fully-valid ledger
364 // we perform this check. Otherwise, we only do it if we've built a
365 // few ledgers as our clock can be off when we first start up
366
367 auto closeTime = app_.timeKeeper().closeTime();
368 auto ledgerClose = ledger->info().parentCloseTime;
369
370 using namespace std::chrono_literals;
371 if ((validLedger || (ledger->info().seq > 10)) &&
372 ((std::max(closeTime, ledgerClose) - std::min(closeTime, ledgerClose)) >
373 5min))
374 {
375 JLOG(m_journal.warn())
376 << "Candidate for current ledger has close time "
377 << to_string(ledgerClose) << " at network time "
378 << to_string(closeTime) << " seq " << ledger->info().seq;
379 return false;
380 }
381
382 if (validLedger)
383 {
384 // Sequence number must not be too high. We allow ten ledgers
385 // for time inaccuracies plus a maximum run rate of one ledger
386 // every two seconds. The goal is to prevent a malicious ledger
387 // from increasing our sequence unreasonably high
388
389 LedgerIndex maxSeq = validLedger->info().seq + 10;
390
391 if (closeTime > validLedger->info().parentCloseTime)
392 maxSeq += std::chrono::duration_cast<std::chrono::seconds>(
393 closeTime - validLedger->info().parentCloseTime)
394 .count() /
395 2;
396
397 if (ledger->info().seq > maxSeq)
398 {
399 JLOG(m_journal.warn())
400 << "Candidate for current ledger has high seq "
401 << ledger->info().seq << " > " << maxSeq;
402 return false;
403 }
404
405 JLOG(m_journal.trace())
406 << "Acceptable seq range: " << validLedger->info().seq
407 << " <= " << ledger->info().seq << " <= " << maxSeq;
408 }
409
410 return true;
411}
412
413void
415{
416 XRPL_ASSERT(lastClosed, "ripple::LedgerMaster::switchLCL : non-null input");
417 if (!lastClosed->isImmutable())
418 LogicError("mutable ledger in switchLCL");
419
420 if (lastClosed->open())
421 LogicError("The new last closed ledger is open!");
422
423 {
425 mClosedLedger.set(lastClosed);
426 }
427
428 if (standalone_)
429 {
430 setFullLedger(lastClosed, true, false);
431 tryAdvance();
432 }
433 else
434 {
435 checkAccept(lastClosed);
436 }
437}
438
439bool
440LedgerMaster::fixIndex(LedgerIndex ledgerIndex, LedgerHash const& ledgerHash)
441{
442 return mLedgerHistory.fixIndex(ledgerIndex, ledgerHash);
443}
444
445bool
447{
448 bool validated = ledger->info().validated;
449 // Returns true if we already had the ledger
450 return mLedgerHistory.insert(std::move(ledger), validated);
451}
452
458void
460{
462
464 bool any = false;
465 for (auto const& it : mHeldTransactions)
466 {
467 ApplyFlags flags = tapNONE;
468 auto const result =
469 app_.getTxQ().apply(app_, view, it.second, flags, j);
470 if (result.second)
471 any = true;
472 }
473 return any;
474 });
475
476 // VFALCO TODO recreate the CanonicalTxSet object instead of resetting
477 // it.
478 // VFALCO NOTE The hash for an open ledger is undefined so we use
479 // something that is a reasonable substitute.
480 mHeldTransactions.reset(app_.openLedger().current()->info().parentHash);
481}
482
483std::shared_ptr<STTx const>
484LedgerMaster::popAcctTransaction(std::shared_ptr<STTx const> const& tx)
485{
486 std::lock_guard sl(m_mutex);
487
488 return mHeldTransactions.popAcctTransaction(tx);
489}
490
491void
492LedgerMaster::setBuildingLedger(LedgerIndex i)
493{
494 mBuildingLedgerSeq.store(i);
495}
496
497bool
498LedgerMaster::haveLedger(std::uint32_t seq)
499{
500 std::lock_guard sl(mCompleteLock);
501 return boost::icl::contains(mCompleteLedgers, seq);
502}
503
504void
505LedgerMaster::clearLedger(std::uint32_t seq)
506{
507 std::lock_guard sl(mCompleteLock);
508 mCompleteLedgers.erase(seq);
509}
510
511bool
512LedgerMaster::isValidated(ReadView const& ledger)
513{
514 if (ledger.open())
515 return false;
516
517 if (ledger.info().validated)
518 return true;
519
520 auto const seq = ledger.info().seq;
521 try
522 {
523 // Use the skip list in the last validated ledger to see if ledger
524 // comes before the last validated ledger (and thus has been
525 // validated).
526 auto const hash = walkHashBySeq(seq, InboundLedger::Reason::GENERIC);
527
528 if (!hash || ledger.info().hash != *hash)
529 {
530 // This ledger's hash is not the hash of the validated ledger
531 if (hash)
532 {
533 XRPL_ASSERT(
534 hash->isNonZero(),
535 "ripple::LedgerMaster::isValidated : nonzero hash");
536 uint256 valHash =
537 app_.getRelationalDatabase().getHashByIndex(seq);
538 if (valHash == ledger.info().hash)
539 {
540 // SQL database doesn't match ledger chain
541 clearLedger(seq);
542 }
543 }
544 return false;
545 }
546 }
547 catch (SHAMapMissingNode const& mn)
548 {
549 JLOG(m_journal.warn()) << "Ledger #" << seq << ": " << mn.what();
550 return false;
551 }
552
553 // Mark ledger as validated to save time if we see it again.
554 ledger.info().validated = true;
555 return true;
556}
557
558// returns Ledgers we have all the nodes for
559bool
560LedgerMaster::getFullValidatedRange(
561 std::uint32_t& minVal,
562 std::uint32_t& maxVal)
563{
564 // Validated ledger is likely not stored in the DB yet so we use the
565 // published ledger which is.
566 maxVal = mPubLedgerSeq.load();
567
568 if (!maxVal)
569 return false;
570
571 std::optional<std::uint32_t> maybeMin;
572 {
573 std::lock_guard sl(mCompleteLock);
574 maybeMin = prevMissing(mCompleteLedgers, maxVal);
575 }
576
577 if (maybeMin == std::nullopt)
578 minVal = maxVal;
579 else
580 minVal = 1 + *maybeMin;
581
582 return true;
583}
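// Illustrative example (hypothetical values): if the published ledger is
// 1000 and mCompleteLedgers holds exactly the contiguous range 900-1000,
// prevMissing() reports 899 as the first missing sequence below 1000, so
// the returned range is [900, 1000].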
584
585// Returns Ledgers we have all the nodes for and are indexed
586bool
587LedgerMaster::getValidatedRange(std::uint32_t& minVal, std::uint32_t& maxVal)
588{
589 if (!getFullValidatedRange(minVal, maxVal))
590 return false;
591
592 // Remove from the validated range any ledger sequences that may not be
593 // fully updated in the database yet
594
595 auto const pendingSaves = app_.pendingSaves().getSnapshot();
596
597 if (!pendingSaves.empty() && ((minVal != 0) || (maxVal != 0)))
598 {
599 // Ensure we shrink the tips as much as possible. If we have 7-9 and
600 // 8,9 are invalid, we don't want to see the 8 and shrink to just 9
601 // because then we'll have nothing when we could have 7.
602 while (pendingSaves.count(maxVal) > 0)
603 --maxVal;
604 while (pendingSaves.count(minVal) > 0)
605 ++minVal;
606
607 // Best effort for remaining exclusions
608 for (auto v : pendingSaves)
609 {
610 if ((v.first >= minVal) && (v.first <= maxVal))
611 {
612 if (v.first > ((minVal + maxVal) / 2))
613 maxVal = v.first - 1;
614 else
615 minVal = v.first + 1;
616 }
617 }
618
619 if (minVal > maxVal)
620 minVal = maxVal = 0;
621 }
622
623 return true;
624}
625
626// Get the earliest ledger we will let peers fetch
627std::uint32_t
628LedgerMaster::getEarliestFetch()
629{
630 // The earliest ledger we will let people fetch is ledger zero,
631 // unless that creates a larger range than allowed
632 std::uint32_t e = getClosedLedger()->info().seq;
633
634 if (e > fetch_depth_)
635 e -= fetch_depth_;
636 else
637 e = 0;
638 return e;
639}
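// Illustrative example (hypothetical values): with a closed-ledger sequence
// of 90000 and a fetch_depth_ of 1000, peers may fetch back to sequence
// 89000; if the closed sequence were smaller than fetch_depth_, the floor
// would be ledger zero.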
640
641void
642LedgerMaster::tryFill(std::shared_ptr<Ledger const> ledger)
643{
644 std::uint32_t seq = ledger->info().seq;
645 uint256 prevHash = ledger->info().parentHash;
646
648
649 std::uint32_t minHas = seq;
650 std::uint32_t maxHas = seq;
651
653 while (!app_.getJobQueue().isStopping() && seq > 0)
654 {
655 {
657 minHas = seq;
658 --seq;
659
660 if (haveLedger(seq))
661 break;
662 }
663
664 auto it(ledgerHashes.find(seq));
665
666 if (it == ledgerHashes.end())
667 {
668 if (app_.isStopping())
669 return;
670
671 {
673 mCompleteLedgers.insert(range(minHas, maxHas));
674 }
675 maxHas = minHas;
677 (seq < 500) ? 0 : (seq - 499), seq);
678 it = ledgerHashes.find(seq);
679
680 if (it == ledgerHashes.end())
681 break;
682
683 if (!nodeStore.fetchNodeObject(
684 ledgerHashes.begin()->second.ledgerHash,
685 ledgerHashes.begin()->first))
686 {
687 // The ledger is not backed by the node store
688 JLOG(m_journal.warn()) << "SQL DB ledger sequence " << seq
689 << " mismatches node store";
690 break;
691 }
692 }
693
694 if (it->second.ledgerHash != prevHash)
695 break;
696
697 prevHash = it->second.parentHash;
698 }
699
700 {
702 mCompleteLedgers.insert(range(minHas, maxHas));
703 }
704 {
706 mFillInProgress = 0;
707 tryAdvance();
708 }
709}
710
713void
714LedgerMaster::getFetchPack(LedgerIndex missing, InboundLedger::Reason reason)
715{
716 LedgerIndex const ledgerIndex = missing + 1;
717
718 auto const haveHash{getLedgerHashForHistory(ledgerIndex, reason)};
719 if (!haveHash || haveHash->isZero())
720 {
721 JLOG(m_journal.error())
722 << "No hash for fetch pack. Missing Index " << missing;
723 return;
724 }
725
726 // Select target Peer based on highest score. The score is randomized
727 // but biased in favor of Peers with low latency.
728 std::shared_ptr<Peer> target;
729 {
730 int maxScore = 0;
731 auto peerList = app_.overlay().getActivePeers();
732 for (auto const& peer : peerList)
733 {
734 if (peer->hasRange(missing, missing + 1))
735 {
736 int score = peer->getScore(true);
737 if (!target || (score > maxScore))
738 {
739 target = peer;
740 maxScore = score;
741 }
742 }
743 }
744 }
745
746 if (target)
747 {
748 protocol::TMGetObjectByHash tmBH;
749 tmBH.set_query(true);
750 tmBH.set_type(protocol::TMGetObjectByHash::otFETCH_PACK);
751 tmBH.set_ledgerhash(haveHash->begin(), 32);
752 auto packet = std::make_shared<Message>(tmBH, protocol::mtGET_OBJECTS);
753
754 target->send(packet);
755 JLOG(m_journal.trace()) << "Requested fetch pack for " << missing;
756 }
757 else
758 JLOG(m_journal.debug()) << "No peer for fetch pack";
759}
760
761void
762LedgerMaster::fixMismatch(ReadView const& ledger)
763{
764 int invalidate = 0;
765 std::optional<uint256> hash;
766
767 for (std::uint32_t lSeq = ledger.info().seq - 1; lSeq > 0; --lSeq)
768 {
769 if (haveLedger(lSeq))
770 {
771 try
772 {
773 hash = hashOfSeq(ledger, lSeq, m_journal);
774 }
775 catch (std::exception const& ex)
776 {
777 JLOG(m_journal.warn())
778 << "fixMismatch encounters partial ledger. Exception: "
779 << ex.what();
780 clearLedger(lSeq);
781 return;
782 }
783
784 if (hash)
785 {
786 // try to close the seam
787 auto otherLedger = getLedgerBySeq(lSeq);
788
789 if (otherLedger && (otherLedger->info().hash == *hash))
790 {
791 // we closed the seam
792 if (invalidate != 0)
793 {
794 JLOG(m_journal.warn())
795 << "Match at " << lSeq << ", " << invalidate
796 << " prior ledgers invalidated";
797 }
798
799 return;
800 }
801 }
802
803 clearLedger(lSeq);
804 ++invalidate;
805 }
806 }
807
808 // all prior ledgers invalidated
809 if (invalidate != 0)
810 {
811 JLOG(m_journal.warn())
812 << "All " << invalidate << " prior ledgers invalidated";
813 }
814}
815
816void
817LedgerMaster::setFullLedger(
818 std::shared_ptr<Ledger const> const& ledger,
819 bool isSynchronous,
820 bool isCurrent)
821{
822 // A new ledger has been accepted as part of the trusted chain
823 JLOG(m_journal.debug()) << "Ledger " << ledger->info().seq
824 << " accepted :" << ledger->info().hash;
825 XRPL_ASSERT(
826 ledger->stateMap().getHash().isNonZero(),
827 "ripple::LedgerMaster::setFullLedger : nonzero ledger state hash");
828
829 ledger->setValidated();
830 ledger->setFull();
831
832 if (isCurrent)
833 mLedgerHistory.insert(ledger, true);
834
835 {
836 // Check the SQL database's entry for the sequence before this
837 // ledger, if it's not this ledger's parent, invalidate it
838 uint256 prevHash =
839 app_.getRelationalDatabase().getHashByIndex(ledger->info().seq - 1);
840 if (prevHash.isNonZero() && prevHash != ledger->info().parentHash)
841 clearLedger(ledger->info().seq - 1);
842 }
843
844 pendSaveValidated(app_, ledger, isSynchronous, isCurrent);
845
846 {
848 mCompleteLedgers.insert(ledger->info().seq);
849 }
850
851 {
853
854 if (ledger->info().seq > mValidLedgerSeq)
855 setValidLedger(ledger);
856 if (!mPubLedger)
857 {
858 setPubLedger(ledger);
859 app_.getOrderBookDB().setup(ledger);
860 }
861
862 if (ledger->info().seq != 0 && haveLedger(ledger->info().seq - 1))
863 {
864 // we think we have the previous ledger, double check
865 auto prevLedger = getLedgerBySeq(ledger->info().seq - 1);
866
867 if (!prevLedger ||
868 (prevLedger->info().hash != ledger->info().parentHash))
869 {
870 JLOG(m_journal.warn())
871 << "Acquired ledger invalidates previous ledger: "
872 << (prevLedger ? "hashMismatch" : "missingLedger");
873 fixMismatch(*ledger);
874 }
875 }
876 }
877}
878
879void
880LedgerMaster::failedSave(std::uint32_t seq, uint256 const& hash)
881{
882 clearLedger(seq);
883 app_.getInboundLedgers().acquire(hash, seq, InboundLedger::Reason::GENERIC);
884}
885
886// Check if the specified ledger can become the new last fully-validated
887// ledger.
888void
889LedgerMaster::checkAccept(uint256 const& hash, std::uint32_t seq)
890{
891 std::size_t valCount = 0;
892
893 if (seq != 0)
894 {
895 // Ledger is too old
896 if (seq < mValidLedgerSeq)
897 return;
898
899 auto validations = app_.validators().negativeUNLFilter(
901 valCount = validations.size();
902 if (valCount >= app_.validators().quorum())
903 {
905 if (seq > mLastValidLedger.second)
906 mLastValidLedger = std::make_pair(hash, seq);
907 }
908
909 if (seq == mValidLedgerSeq)
910 return;
911
912 // Ledger could match the ledger we're already building
913 if (seq == mBuildingLedgerSeq)
914 return;
915 }
916
917 auto ledger = mLedgerHistory.getLedgerByHash(hash);
918
919 if (!ledger)
920 {
921 if ((seq != 0) && (getValidLedgerIndex() == 0))
922 {
923 // Set peers converged early if we can
924 if (valCount >= app_.validators().quorum())
926 }
927
928 // FIXME: We may not want to fetch a ledger with just one
929 // trusted validation
930 ledger = app_.getInboundLedgers().acquire(
932 }
933
934 if (ledger)
935 checkAccept(ledger);
936}
937
943std::size_t
944LedgerMaster::getNeededValidations()
945{
946 return standalone_ ? 0 : app_.validators().quorum();
947}
948
949void
950LedgerMaster::checkAccept(std::shared_ptr<Ledger const> const& ledger)
951{
952 // Can we accept this ledger as our new last fully-validated ledger
953
954 if (!canBeCurrent(ledger))
955 return;
956
957 // Can we advance the last fully-validated ledger? If so, can we
958 // publish?
960
961 if (ledger->info().seq <= mValidLedgerSeq)
962 return;
963
964 auto const minVal = getNeededValidations();
965 auto validations = app_.validators().negativeUNLFilter(
966 app_.getValidations().getTrustedForLedger(
967 ledger->info().hash, ledger->info().seq));
968 auto const tvc = validations.size();
969 if (tvc < minVal) // nothing we can do
970 {
971 JLOG(m_journal.trace())
972 << "Only " << tvc << " validations for " << ledger->info().hash;
973 return;
974 }
975
976 JLOG(m_journal.info()) << "Advancing accepted ledger to "
977 << ledger->info().seq << " with >= " << minVal
978 << " validations";
979
980 ledger->setValidated();
981 ledger->setFull();
982 setValidLedger(ledger);
983 if (!mPubLedger)
984 {
985 pendSaveValidated(app_, ledger, true, true);
986 setPubLedger(ledger);
987 app_.getOrderBookDB().setup(ledger);
988 }
989
990 std::uint32_t const base = app_.getFeeTrack().getLoadBase();
991 auto fees = app_.getValidations().fees(ledger->info().hash, base);
992 {
993 auto fees2 =
994 app_.getValidations().fees(ledger->info().parentHash, base);
995 fees.reserve(fees.size() + fees2.size());
996 std::copy(fees2.begin(), fees2.end(), std::back_inserter(fees));
997 }
998 std::uint32_t fee;
999 if (!fees.empty())
1000 {
1001 std::sort(fees.begin(), fees.end());
1002 if (auto stream = m_journal.debug())
1003 {
1004 std::stringstream s;
1005 s << "Received fees from validations: (" << fees.size() << ") ";
1006 for (auto const fee1 : fees)
1007 {
1008 s << " " << fee1;
1009 }
1010 stream << s.str();
1011 }
1012 fee = fees[fees.size() / 2]; // median
1013 }
1014 else
1015 {
1016 fee = base;
1017 }
1018
1019 app_.getFeeTrack().setRemoteFee(fee);
1020
1021 tryAdvance();
1022
1023 if (ledger->seq() % 256 == 0)
1024 {
1025 // Check if the majority of validators run a higher version rippled
1026 // software. If so print a warning.
1027 //
1028 // Once the HardenedValidations amendment is enabled, validators include
1029 // their rippled software version in the validation messages of every
1030 // (flag - 1) ledger. We wait for one ledger time before checking the
1031 // version information to accumulate more validation messages.
1032
1033 auto currentTime = app_.timeKeeper().now();
1034 bool needPrint = false;
1035
1036 // The variable upgradeWarningPrevTime_ will be set when and only when
1037 // the warning is printed.
1039 {
1040 // Have not printed the warning before, check if need to print.
1041 auto const vals = app_.getValidations().getTrustedForLedger(
1042 ledger->info().parentHash, ledger->info().seq - 1);
1043 std::size_t higherVersionCount = 0;
1044 std::size_t rippledCount = 0;
1045 for (auto const& v : vals)
1046 {
1047 if (v->isFieldPresent(sfServerVersion))
1048 {
1049 auto version = v->getFieldU64(sfServerVersion);
1050 higherVersionCount +=
1051 BuildInfo::isNewerVersion(version) ? 1 : 0;
1052 rippledCount +=
1053 BuildInfo::isRippledVersion(version) ? 1 : 0;
1054 }
1055 }
1056 // We report only if (1) we have accumulated validation messages
1057 // from 90% validators from the UNL, (2) 60% of validators
1058 // running the rippled implementation have higher version numbers,
1059 // and (3) the calculation won't cause divide-by-zero.
1060 if (higherVersionCount > 0 && rippledCount > 0)
1061 {
1062 constexpr std::size_t reportingPercent = 90;
1063 constexpr std::size_t cutoffPercent = 60;
1064 auto const unlSize{
1065 app_.validators().getQuorumKeys().second.size()};
1066 needPrint = unlSize > 0 &&
1067 calculatePercent(vals.size(), unlSize) >=
1068 reportingPercent &&
1069 calculatePercent(higherVersionCount, rippledCount) >=
1070 cutoffPercent;
1071 }
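 // Illustrative example (hypothetical values): with a UNL of 30
 // validators, 28 validations seen (93% >= 90%) and 20 of 28 rippled
 // validators reporting a newer version (71% >= 60%), needPrint becomes
 // true and the upgrade warning below is logged.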
1072 }
1073 // To throttle the warning messages, instead of printing a warning
1074 // every flag ledger, we print every week.
1075 else if (currentTime - upgradeWarningPrevTime_ >= weeks{1})
1076 {
1077 // Printed the warning before, and assuming most validators
1078 // do not downgrade, we keep printing the warning
1079 // until the local server is restarted.
1080 needPrint = true;
1081 }
1082
1083 if (needPrint)
1084 {
1085 upgradeWarningPrevTime_ = currentTime;
1086 auto const upgradeMsg =
1087 "Check for upgrade: "
1088 "A majority of trusted validators are "
1089 "running a newer version.";
1090 std::cerr << upgradeMsg << std::endl;
1091 JLOG(m_journal.error()) << upgradeMsg;
1092 }
1093 }
1094}
1095
1097void
1098LedgerMaster::consensusBuilt(
1099 std::shared_ptr<Ledger const> const& ledger,
1100 uint256 const& consensusHash,
1101 Json::Value consensus)
1102{
1103 // Because we just built a ledger, we are no longer building one
1104 setBuildingLedger(0);
1105
1106 // No need to process validations in standalone mode
1107 if (standalone_)
1108 return;
1109
1110 mLedgerHistory.builtLedger(ledger, consensusHash, std::move(consensus));
1111
1112 if (ledger->info().seq <= mValidLedgerSeq)
1113 {
1114 auto stream = app_.journal("LedgerConsensus").info();
1115 JLOG(stream) << "Consensus built old ledger: " << ledger->info().seq
1116 << " <= " << mValidLedgerSeq;
1117 return;
1118 }
1119
1120 // See if this ledger can be the new fully-validated ledger
1121 checkAccept(ledger);
1122
1123 if (ledger->info().seq <= mValidLedgerSeq)
1124 {
1125 auto stream = app_.journal("LedgerConsensus").debug();
1126 JLOG(stream) << "Consensus ledger fully validated";
1127 return;
1128 }
1129
1130 // This ledger cannot be the new fully-validated ledger, but
1131 // maybe we saved up validations for some other ledger that can be
1132
1133 auto validations = app_.validators().negativeUNLFilter(
1135
1136 // Track validation counts with sequence numbers
1137 class valSeq
1138 {
1139 public:
1140 valSeq() : valCount_(0), ledgerSeq_(0)
1141 {
1142 ;
1143 }
1144
1145 void
1146 mergeValidation(LedgerIndex seq)
1147 {
1148 valCount_++;
1149
1150 // If we didn't already know the sequence, now we do
1151 if (ledgerSeq_ == 0)
1152 ledgerSeq_ = seq;
1153 }
1154
1155 std::size_t valCount_;
1156 LedgerIndex ledgerSeq_;
1157 };
1158
1159 // Count the number of current, trusted validations
1161 for (auto const& v : validations)
1162 {
1163 valSeq& vs = count[v->getLedgerHash()];
1164 vs.mergeValidation(v->getFieldU32(sfLedgerSequence));
1165 }
1166
1167 auto const neededValidations = getNeededValidations();
1168 auto maxSeq = mValidLedgerSeq.load();
1169 auto maxLedger = ledger->info().hash;
1170
1171 // Of the ledgers with sufficient validations,
1172 // find the one with the highest sequence
1173 for (auto& v : count)
1174 if (v.second.valCount_ > neededValidations)
1175 {
1176 // If we still don't know the sequence, get it
1177 if (v.second.ledgerSeq_ == 0)
1178 {
1179 if (auto l = getLedgerByHash(v.first))
1180 v.second.ledgerSeq_ = l->info().seq;
1181 }
1182
1183 if (v.second.ledgerSeq_ > maxSeq)
1184 {
1185 maxSeq = v.second.ledgerSeq_;
1186 maxLedger = v.first;
1187 }
1188 }
1189
1190 if (maxSeq > mValidLedgerSeq)
1191 {
1192 auto stream = app_.journal("LedgerConsensus").debug();
1193 JLOG(stream) << "Consensus triggered check of ledger";
1194 checkAccept(maxLedger, maxSeq);
1195 }
1196}
1197
1198std::optional<LedgerHash>
1199LedgerMaster::getLedgerHashForHistory(
1200 LedgerIndex index,
1201 InboundLedger::Reason reason)
1202{
1203 // Try to get the hash of a ledger we need to fetch for history
1204 std::optional<LedgerHash> ret;
1205 auto const& l{mHistLedger};
1206
1207 if (l && l->info().seq >= index)
1208 {
1209 ret = hashOfSeq(*l, index, m_journal);
1210 if (!ret)
1211 ret = walkHashBySeq(index, l, reason);
1212 }
1213
1214 if (!ret)
1215 ret = walkHashBySeq(index, reason);
1216
1217 return ret;
1218}
1219
1220std::vector<std::shared_ptr<Ledger const>>
1221LedgerMaster::findNewLedgersToPublish(
1222 std::unique_lock<std::recursive_mutex>& sl)
1223{
1224 std::vector<std::shared_ptr<Ledger const>> ret;
1225
1226 JLOG(m_journal.trace()) << "findNewLedgersToPublish<";
1227
1228 // No valid ledger, nothing to do
1229 if (mValidLedger.empty())
1230 {
1231 JLOG(m_journal.trace()) << "No valid journal, nothing to publish.";
1232 return {};
1233 }
1234
1235 if (!mPubLedger)
1236 {
1237 JLOG(m_journal.info())
1238 << "First published ledger will be " << mValidLedgerSeq;
1239 return {mValidLedger.get()};
1240 }
1241
1243 {
1244 JLOG(m_journal.warn()) << "Gap in validated ledger stream "
1245 << mPubLedgerSeq << " - " << mValidLedgerSeq - 1;
1246
1247 auto valLedger = mValidLedger.get();
1248 ret.push_back(valLedger);
1249 setPubLedger(valLedger);
1250 app_.getOrderBookDB().setup(valLedger);
1251
1252 return {valLedger};
1253 }
1254
1256 {
1257 JLOG(m_journal.trace()) << "No valid journal, nothing to publish.";
1258 return {};
1259 }
1260
1261 int acqCount = 0;
1262
1263 auto pubSeq = mPubLedgerSeq + 1; // Next sequence to publish
1264 auto valLedger = mValidLedger.get();
1265 std::uint32_t valSeq = valLedger->info().seq;
1266
1267 scope_unlock sul{sl};
1268 try
1269 {
1270 for (std::uint32_t seq = pubSeq; seq <= valSeq; ++seq)
1271 {
1272 JLOG(m_journal.trace())
1273 << "Trying to fetch/publish valid ledger " << seq;
1274
1275 std::shared_ptr<Ledger const> ledger;
1276 // This can throw
1277 auto hash = hashOfSeq(*valLedger, seq, m_journal);
1278 // VFALCO TODO Restructure this code so that zero is not
1279 // used.
1280 if (!hash)
1281 hash = beast::zero; // kludge
1282 if (seq == valSeq)
1283 {
1284 // We need to publish the ledger we just fully validated
1285 ledger = valLedger;
1286 }
1287 else if (hash->isZero())
1288 {
1289 JLOG(m_journal.fatal()) << "Ledger: " << valSeq
1290 << " does not have hash for " << seq;
1291 UNREACHABLE(
1292 "ripple::LedgerMaster::findNewLedgersToPublish : ledger "
1293 "not found");
1294 }
1295 else
1296 {
1297 ledger = mLedgerHistory.getLedgerByHash(*hash);
1298 }
1299
1300 if (!app_.config().LEDGER_REPLAY)
1301 {
1302 // Can we try to acquire the ledger we need?
1303 if (!ledger && (++acqCount < ledger_fetch_size_))
1304 ledger = app_.getInboundLedgers().acquire(
1305 *hash, seq, InboundLedger::Reason::GENERIC);
1306 }
1307
1308 // Did we acquire the next ledger we need to publish?
1309 if (ledger && (ledger->info().seq == pubSeq))
1310 {
1311 ledger->setValidated();
1312 ret.push_back(ledger);
1313 ++pubSeq;
1314 }
1315 }
1316
1317 JLOG(m_journal.trace())
1318 << "ready to publish " << ret.size() << " ledgers.";
1319 }
1320 catch (std::exception const& ex)
1321 {
1322 JLOG(m_journal.error())
1323 << "Exception while trying to find ledgers to publish: "
1324 << ex.what();
1325 }
1326
1327 if (app_.config().LEDGER_REPLAY)
1328 {
1329 /* Narrow down the gap of ledgers, and try to replay them.
1330 * When replaying a ledger gap, if the local node has
1331 * the start ledger, it saves an expensive InboundLedger
1332 * acquire. If the local node has the finish ledger, it
1333 * saves a skip list acquire.
1334 */
1335 auto const& startLedger = ret.empty() ? mPubLedger : ret.back();
1336 auto finishLedger = valLedger;
1337 while (startLedger->seq() + 1 < finishLedger->seq())
1338 {
1339 if (auto const parent = mLedgerHistory.getLedgerByHash(
1340 finishLedger->info().parentHash);
1341 parent)
1342 {
1343 finishLedger = parent;
1344 }
1345 else
1346 {
1347 auto numberLedgers =
1348 finishLedger->seq() - startLedger->seq() + 1;
1349 JLOG(m_journal.debug())
1350 << "Publish LedgerReplays " << numberLedgers
1351 << " ledgers, from seq=" << startLedger->info().seq << ", "
1352 << startLedger->info().hash
1353 << " to seq=" << finishLedger->info().seq << ", "
1354 << finishLedger->info().hash;
1357 finishLedger->info().hash,
1358 numberLedgers);
1359 break;
1360 }
1361 }
1362 }
1363
1364 return ret;
1365}
1366
1367void
1368LedgerMaster::tryAdvance()
1369{
1370 std::lock_guard ml(m_mutex);
1371
1372 // Can't advance without at least one fully-valid ledger
1373 mAdvanceWork = true;
1374 if (!mAdvanceThread && !mValidLedger.empty())
1375 {
1376 mAdvanceThread = true;
1377 app_.getJobQueue().addJob(jtADVANCE, "advanceLedger", [this]() {
1379
1380 XRPL_ASSERT(
1382 "ripple::LedgerMaster::tryAdvance : has valid ledger");
1383
1384 JLOG(m_journal.trace()) << "advanceThread<";
1385
1386 try
1387 {
1388 doAdvance(sl);
1389 }
1390 catch (std::exception const& ex)
1391 {
1392 JLOG(m_journal.fatal()) << "doAdvance throws: " << ex.what();
1393 }
1394
1395 mAdvanceThread = false;
1396 JLOG(m_journal.trace()) << "advanceThread>";
1397 });
1398 }
1399}
1400
1401void
1403{
1404 {
1407 {
1409 mPathLedger.reset();
1410 JLOG(m_journal.debug()) << "Need network ledger for updating paths";
1411 return;
1412 }
1413 }
1414
1415 while (!app_.getJobQueue().isStopping())
1416 {
1417 JLOG(m_journal.debug()) << "updatePaths running";
1419 {
1421
1422 if (!mValidLedger.empty() &&
1423 (!mPathLedger || (mPathLedger->info().seq != mValidLedgerSeq)))
1424 { // We have a new valid ledger since the last full pathfinding
1426 lastLedger = mPathLedger;
1427 }
1428 else if (mPathFindNewRequest)
1429 { // We have a new request but no new ledger
1430 lastLedger = app_.openLedger().current();
1431 }
1432 else
1433 { // Nothing to do
1435 mPathLedger.reset();
1436 JLOG(m_journal.debug()) << "Nothing to do for updating paths";
1437 return;
1438 }
1439 }
1440
1441 if (!standalone_)
1442 { // don't pathfind with a ledger that's more than 60 seconds old
1443 using namespace std::chrono;
1444 auto age = time_point_cast<seconds>(app_.timeKeeper().closeTime()) -
1445 lastLedger->info().closeTime;
1446 if (age > 1min)
1447 {
1448 JLOG(m_journal.debug())
1449 << "Published ledger too old for updating paths";
1452 mPathLedger.reset();
1453 return;
1454 }
1455 }
1456
1457 try
1458 {
1459 auto& pathRequests = app_.getPathRequests();
1460 {
1462 if (!pathRequests.requestsPending())
1463 {
1465 mPathLedger.reset();
1466 JLOG(m_journal.debug())
1467 << "No path requests found. Nothing to do for updating "
1468 "paths. "
1469 << mPathFindThread << " jobs remaining";
1470 return;
1471 }
1472 }
1473 JLOG(m_journal.debug()) << "Updating paths";
1474 pathRequests.updateAll(lastLedger);
1475
1477 if (!pathRequests.requestsPending())
1478 {
1479 JLOG(m_journal.debug())
1480 << "No path requests left. No need for further updating "
1481 "paths";
1483 mPathLedger.reset();
1484 return;
1485 }
1486 }
1487 catch (SHAMapMissingNode const& mn)
1488 {
1489 JLOG(m_journal.info()) << "During pathfinding: " << mn.what();
1490 if (lastLedger->open())
1491 {
1492 // our parent is the problem
1494 lastLedger->info().parentHash,
1495 lastLedger->info().seq - 1,
1497 }
1498 else
1499 {
1500 // this ledger is the problem
1502 lastLedger->info().hash,
1503 lastLedger->info().seq,
1505 }
1506 }
1507 }
1508}
1509
1510bool
1511LedgerMaster::newPathRequest()
1512{
1513 std::unique_lock ml(m_mutex);
1514 mPathFindNewRequest = newPFWork("pf:newRequest", ml);
1515 return mPathFindNewRequest;
1516}
1517
1518bool
1519LedgerMaster::isNewPathRequest()
1520{
1521 std::unique_lock ml(m_mutex);
1522 bool const ret = mPathFindNewRequest;
1523 mPathFindNewRequest = false;
1524 return ret;
1525}
1526
1527// If the order book is radically updated, we need to reprocess all
1528// pathfinding requests.
1529bool
1531{
1533 mPathLedger.reset();
1534
1535 return newPFWork("pf:newOBDB", ml);
1536}
1537
1540bool
1542 const char* name,
1544{
1545 if (!app_.isStopping() && mPathFindThread < 2 &&
1547 {
1548 JLOG(m_journal.debug())
1549 << "newPFWork: Creating job. path find threads: "
1550 << mPathFindThread;
1551 if (app_.getJobQueue().addJob(
1552 jtUPDATE_PF, name, [this]() { updatePaths(); }))
1553 {
1555 }
1556 }
1557 // If we're stopping don't give callers the expectation that their
1558 // request will be fulfilled, even if it may be serviced.
1559 return mPathFindThread > 0 && !app_.isStopping();
1560}
1561
1562std::recursive_mutex&
1563LedgerMaster::peekMutex()
1564{
1565 return m_mutex;
1566}
1567
1568// The current ledger is the ledger we believe new transactions should go in
1569std::shared_ptr<ReadView const>
1570LedgerMaster::getCurrentLedger()
1571{
1572 return app_.openLedger().current();
1573}
1574
1575std::shared_ptr<Ledger const>
1576LedgerMaster::getValidatedLedger()
1577{
1578 return mValidLedger.get();
1579}
1580
1581Rules
1582LedgerMaster::getValidatedRules()
1583{
1584 // Once we have a guarantee that there's always a last validated
1585 // ledger then we can dispense with the if.
1586
1587 // Return the Rules from the last validated ledger.
1588 if (auto const ledger = getValidatedLedger())
1589 return ledger->rules();
1590
1591 return Rules(app_.config().features);
1592}
1593
1594// This is the last ledger we published to clients and can lag the validated
1595// ledger.
1596std::shared_ptr<ReadView const>
1597LedgerMaster::getPublishedLedger()
1598{
1599 std::lock_guard lock(m_mutex);
1600 return mPubLedger;
1601}
1602
1603std::string
1604LedgerMaster::getCompleteLedgers()
1605{
1606 std::lock_guard sl(mCompleteLock);
1607 return to_string(mCompleteLedgers);
1608}
1609
1610std::optional<NetClock::time_point>
1611LedgerMaster::getCloseTimeBySeq(LedgerIndex ledgerIndex)
1612{
1613 uint256 hash = getHashBySeq(ledgerIndex);
1614 return hash.isNonZero() ? getCloseTimeByHash(hash, ledgerIndex)
1615 : std::nullopt;
1616}
1617
1618std::optional<NetClock::time_point>
1619LedgerMaster::getCloseTimeByHash(
1620 LedgerHash const& ledgerHash,
1621 std::uint32_t index)
1622{
1623 auto nodeObject = app_.getNodeStore().fetchNodeObject(ledgerHash, index);
1624 if (nodeObject && (nodeObject->getData().size() >= 120))
1625 {
1626 SerialIter it(
1627 nodeObject->getData().data(), nodeObject->getData().size());
1628 if (safe_cast<HashPrefix>(it.get32()) == HashPrefix::ledgerMaster)
1629 {
1630 it.skip(
1631 4 + 8 + 32 + // seq drops parentHash
1632 32 + 32 + 4); // txHash acctHash parentClose
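 // Note: the 4-byte hash prefix plus the 112 bytes skipped here leave the
 // close time as the next 32-bit field, which is why at least 120 bytes of
 // data are required above.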
1633 return NetClock::time_point{NetClock::duration{it.get32()}};
1634 }
1635 }
1636
1637 return std::nullopt;
1638}
1639
1640uint256
1641LedgerMaster::getHashBySeq(std::uint32_t index)
1642{
1643 uint256 hash = mLedgerHistory.getLedgerHash(index);
1644
1645 if (hash.isNonZero())
1646 return hash;
1647
1648 return app_.getRelationalDatabase().getHashByIndex(index);
1649}
1650
1651std::optional<LedgerHash>
1652LedgerMaster::walkHashBySeq(std::uint32_t index, InboundLedger::Reason reason)
1653{
1654 std::optional<LedgerHash> ledgerHash;
1655
1656 if (auto referenceLedger = mValidLedger.get())
1657 ledgerHash = walkHashBySeq(index, referenceLedger, reason);
1658
1659 return ledgerHash;
1660}
1661
1662std::optional<LedgerHash>
1663LedgerMaster::walkHashBySeq(
1664 std::uint32_t index,
1665 std::shared_ptr<ReadView const> const& referenceLedger,
1666 InboundLedger::Reason reason)
1667{
1668 if (!referenceLedger || (referenceLedger->info().seq < index))
1669 {
1670 // Nothing we can do. No validated ledger.
1671 return std::nullopt;
1672 }
1673
1674 // See if the hash for the ledger we need is in the reference ledger
1675 auto ledgerHash = hashOfSeq(*referenceLedger, index, m_journal);
1676 if (ledgerHash)
1677 return ledgerHash;
1678
1679 // The hash is not in the reference ledger. Get another ledger which can
1680 // be located easily and should contain the hash.
1681 LedgerIndex refIndex = getCandidateLedger(index);
1682 auto const refHash = hashOfSeq(*referenceLedger, refIndex, m_journal);
1683 XRPL_ASSERT(refHash, "ripple::LedgerMaster::walkHashBySeq : found ledger");
1684 if (refHash)
1685 {
1686 // Try the hash and sequence of a better reference ledger just found
1687 auto ledger = mLedgerHistory.getLedgerByHash(*refHash);
1688
1689 if (ledger)
1690 {
1691 try
1692 {
1693 ledgerHash = hashOfSeq(*ledger, index, m_journal);
1694 }
1695 catch (SHAMapMissingNode const&)
1696 {
1697 ledger.reset();
1698 }
1699 }
1700
1701 // Try to acquire the complete ledger
1702 if (!ledger)
1703 {
1704 if (auto const l = app_.getInboundLedgers().acquire(
1705 *refHash, refIndex, reason))
1706 {
1707 ledgerHash = hashOfSeq(*l, index, m_journal);
1708 XRPL_ASSERT(
1709 ledgerHash,
1710 "ripple::LedgerMaster::walkHashBySeq : has complete "
1711 "ledger");
1712 }
1713 }
1714 }
1715 return ledgerHash;
1716}
1717
1718std::shared_ptr<Ledger const>
1719LedgerMaster::getLedgerBySeq(std::uint32_t index)
1720{
1721 if (index <= mValidLedgerSeq)
1722 {
1723 // Always prefer a validated ledger
1724 if (auto valid = mValidLedger.get())
1725 {
1726 if (valid->info().seq == index)
1727 return valid;
1728
1729 try
1730 {
1731 auto const hash = hashOfSeq(*valid, index, m_journal);
1732
1733 if (hash)
1735 }
1736 catch (std::exception const&)
1737 {
1738 // Missing nodes are already handled
1739 }
1740 }
1741 }
1742
1743 if (auto ret = mLedgerHistory.getLedgerBySeq(index))
1744 return ret;
1745
1746 auto ret = mClosedLedger.get();
1747 if (ret && (ret->info().seq == index))
1748 return ret;
1749
1750 clearLedger(index);
1751 return {};
1752}
1753
1754std::shared_ptr<Ledger const>
1755LedgerMaster::getLedgerByHash(uint256 const& hash)
1756{
1757 if (auto ret = mLedgerHistory.getLedgerByHash(hash))
1758 return ret;
1759
1760 auto ret = mClosedLedger.get();
1761 if (ret && (ret->info().hash == hash))
1762 return ret;
1763
1764 return {};
1765}
1766
1767void
1768LedgerMaster::setLedgerRangePresent(std::uint32_t minV, std::uint32_t maxV)
1769{
1770 std::lock_guard sl(mCompleteLock);
1771 mCompleteLedgers.insert(range(minV, maxV));
1772}
1773
1774void
1775LedgerMaster::sweep()
1776{
1777 mLedgerHistory.sweep();
1778 fetch_packs_.sweep();
1779}
1780
1781float
1782LedgerMaster::getCacheHitRate()
1783{
1784 return mLedgerHistory.getCacheHitRate();
1785}
1786
1787void
1788LedgerMaster::clearPriorLedgers(LedgerIndex seq)
1789{
1790 std::lock_guard sl(mCompleteLock);
1791 if (seq > 0)
1792 mCompleteLedgers.erase(range(0u, seq - 1));
1793}
1794
1795void
1796LedgerMaster::clearLedgerCachePrior(LedgerIndex seq)
1797{
1798 mLedgerHistory.clearLedgerCachePrior(seq);
1799}
1800
1801void
1802LedgerMaster::takeReplay(std::unique_ptr<LedgerReplay> replay)
1803{
1804 replayData = std::move(replay);
1805}
1806
1807std::unique_ptr<LedgerReplay>
1808LedgerMaster::releaseReplay()
1809{
1810 return std::move(replayData);
1811}
1812
1813void
1814LedgerMaster::fetchForHistory(
1815 std::uint32_t missing,
1816 bool& progress,
1817 InboundLedger::Reason reason,
1818 std::unique_lock<std::recursive_mutex>& sl)
1819{
1820 scope_unlock sul{sl};
1821 if (auto hash = getLedgerHashForHistory(missing, reason))
1822 {
1823 XRPL_ASSERT(
1824 hash->isNonZero(),
1825 "ripple::LedgerMaster::fetchForHistory : found ledger");
1826 auto ledger = getLedgerByHash(*hash);
1827 if (!ledger)
1828 {
1830 {
1831 ledger =
1832 app_.getInboundLedgers().acquire(*hash, missing, reason);
1833 if (!ledger && missing != fetch_seq_ &&
1834 missing > app_.getNodeStore().earliestLedgerSeq())
1835 {
1836 JLOG(m_journal.trace())
1837 << "fetchForHistory want fetch pack " << missing;
1838 fetch_seq_ = missing;
1839 getFetchPack(missing, reason);
1840 }
1841 else
1842 JLOG(m_journal.trace())
1843 << "fetchForHistory no fetch pack for " << missing;
1844 }
1845 else
1846 JLOG(m_journal.debug())
1847 << "fetchForHistory found failed acquire";
1848 }
1849 if (ledger)
1850 {
1851 auto seq = ledger->info().seq;
1852 XRPL_ASSERT(
1853 seq == missing,
1854 "ripple::LedgerMaster::fetchForHistory : sequence match");
1855 JLOG(m_journal.trace()) << "fetchForHistory acquired " << seq;
1856 setFullLedger(ledger, false, false);
1857 int fillInProgress;
1858 {
1860 mHistLedger = ledger;
1861 fillInProgress = mFillInProgress;
1862 }
1863 if (fillInProgress == 0 &&
1865 ledger->info().parentHash)
1866 {
1867 {
1868 // Previous ledger is in DB
1870 mFillInProgress = seq;
1871 }
1873 jtADVANCE, "tryFill", [this, ledger]() {
1874 tryFill(ledger);
1875 });
1876 }
1877 progress = true;
1878 }
1879 else
1880 {
1881 std::uint32_t fetchSz;
1882 // Do not fetch ledger sequences lower
1883 // than the earliest ledger sequence
1884 fetchSz = app_.getNodeStore().earliestLedgerSeq();
1885 fetchSz = missing >= fetchSz
1886 ? std::min(ledger_fetch_size_, (missing - fetchSz) + 1)
1887 : 0;
1888 try
1889 {
1890 for (std::uint32_t i = 0; i < fetchSz; ++i)
1891 {
1892 std::uint32_t seq = missing - i;
1893 if (auto h = getLedgerHashForHistory(seq, reason))
1894 {
1895 XRPL_ASSERT(
1896 h->isNonZero(),
1897 "ripple::LedgerMaster::fetchForHistory : "
1898 "prefetched ledger");
1899 app_.getInboundLedgers().acquire(*h, seq, reason);
1900 }
1901 }
1902 }
1903 catch (std::exception const& ex)
1904 {
1905 JLOG(m_journal.warn())
1906 << "Threw while prefetching: " << ex.what();
1907 }
1908 }
1909 }
1910 else
1911 {
1912 JLOG(m_journal.fatal())
1913 << "Can't find ledger following prevMissing " << missing;
1914 JLOG(m_journal.fatal())
1915 << "Pub:" << mPubLedgerSeq << " Val:" << mValidLedgerSeq;
1916 JLOG(m_journal.fatal())
1917 << "Ledgers: " << app_.getLedgerMaster().getCompleteLedgers();
1918 JLOG(m_journal.fatal())
1919 << "Acquire reason: "
1920 << (reason == InboundLedger::Reason::HISTORY ? "HISTORY"
1921 : "NOT HISTORY");
1922 clearLedger(missing + 1);
1923 progress = true;
1924 }
1925}
1926
1927// Try to publish ledgers, acquire missing ledgers
1928void
1929LedgerMaster::doAdvance(std::unique_lock<std::recursive_mutex>& sl)
1930{
1931 do
1932 {
1933 mAdvanceWork = false; // If there's work to do, we'll make progress
1934 bool progress = false;
1935
1936 auto const pubLedgers = findNewLedgersToPublish(sl);
1937 if (pubLedgers.empty())
1938 {
1944 {
1945 // We are in sync, so can acquire
1948 {
1950 missing = prevMissing(
1952 mPubLedger->info().seq,
1954 }
1955 if (missing)
1956 {
1957 JLOG(m_journal.trace())
1958 << "tryAdvance discovered missing " << *missing;
1959 if ((mFillInProgress == 0 || *missing > mFillInProgress) &&
1964 *missing,
1965 m_journal))
1966 {
1967 JLOG(m_journal.trace())
1968 << "advanceThread should acquire";
1969 }
1970 else
1971 missing = std::nullopt;
1972 }
1973 if (missing)
1974 {
1975 fetchForHistory(*missing, progress, reason, sl);
1977 {
1978 JLOG(m_journal.debug())
1979 << "tryAdvance found last valid changed";
1980 progress = true;
1981 }
1982 }
1983 }
1984 else
1985 {
1986 mHistLedger.reset();
1987 JLOG(m_journal.trace()) << "tryAdvance not fetching history";
1988 }
1989 }
1990 else
1991 {
1992 JLOG(m_journal.trace()) << "tryAdvance found " << pubLedgers.size()
1993 << " ledgers to publish";
1994 for (auto const& ledger : pubLedgers)
1995 {
1996 {
1997 scope_unlock sul{sl};
1998 JLOG(m_journal.debug())
1999 << "tryAdvance publishing seq " << ledger->info().seq;
2000 setFullLedger(ledger, true, true);
2001 }
2002
2003 setPubLedger(ledger);
2004
2005 {
2006 scope_unlock sul{sl};
2007 app_.getOPs().pubLedger(ledger);
2008 }
2009 }
2010
2012 progress = newPFWork("pf:newLedger", sl);
2013 }
2014 if (progress)
2015 mAdvanceWork = true;
2016 } while (mAdvanceWork);
2017}
2018
2019void
2021{
2022 fetch_packs_.canonicalize_replace_client(hash, data);
2023}
2024
2025std::optional<Blob>
2026LedgerMaster::getFetchPack(uint256 const& hash)
2027{
2028 Blob data;
2029 if (fetch_packs_.retrieve(hash, data))
2030 {
2031 fetch_packs_.del(hash, false);
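 // The cache is content-addressed: return the blob only if it still hashes
 // to the requested key; otherwise fall through and report that no fetch
 // pack is available.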
2032 if (hash == sha512Half(makeSlice(data)))
2033 return data;
2034 }
2035 return std::nullopt;
2036}
2037
2038void
2040{
2041 if (!mGotFetchPackThread.test_and_set(std::memory_order_acquire))
2042 {
2043 app_.getJobQueue().addJob(jtLEDGER_DATA, "gotFetchPack", [&]() {
2045 mGotFetchPackThread.clear(std::memory_order_release);
2046 });
2047 }
2048}
2049
2075static void
2077 SHAMap const& want,
2078 SHAMap const* have,
2079 std::uint32_t cnt,
2080 protocol::TMGetObjectByHash* into,
2081 std::uint32_t seq,
2082 bool withLeaves = true)
2083{
2084 XRPL_ASSERT(cnt, "ripple::populateFetchPack : nonzero count input");
2085
2086 Serializer s(1024);
2087
2088 want.visitDifferences(
2089 have,
2090 [&s, withLeaves, &cnt, into, seq](SHAMapTreeNode const& n) -> bool {
2091 if (!withLeaves && n.isLeaf())
2092 return true;
2093
2094 s.erase();
2096
2097 auto const& hash = n.getHash().as_uint256();
2098
2099 protocol::TMIndexedObject* obj = into->add_objects();
2100 obj->set_ledgerseq(seq);
2101 obj->set_hash(hash.data(), hash.size());
2102 obj->set_data(s.getDataPtr(), s.getLength());
2103
2104 return --cnt != 0;
2105 });
2106}
2107
2108void
2109LedgerMaster::makeFetchPack(
2110 std::weak_ptr<Peer> const& wPeer,
2111 std::shared_ptr<protocol::TMGetObjectByHash> const& request,
2112 uint256 haveLedgerHash,
2113 UptimeClock::time_point uptime)
2114{
2115 using namespace std::chrono_literals;
2116 if (UptimeClock::now() > uptime + 1s)
2117 {
2118 JLOG(m_journal.info()) << "Fetch pack request got stale";
2119 return;
2120 }
2121
2123 {
2124 JLOG(m_journal.info()) << "Too busy to make fetch pack";
2125 return;
2126 }
2127
2128 auto peer = wPeer.lock();
2129
2130 if (!peer)
2131 return;
2132
2133 auto have = getLedgerByHash(haveLedgerHash);
2134
2135 if (!have)
2136 {
2137 JLOG(m_journal.info())
2138 << "Peer requests fetch pack for ledger we don't have: " << have;
2139 peer->charge(Resource::feeRequestNoReply);
2140 return;
2141 }
2142
2143 if (have->open())
2144 {
2145 JLOG(m_journal.warn())
2146 << "Peer requests fetch pack from open ledger: " << have;
2147 peer->charge(Resource::feeInvalidRequest);
2148 return;
2149 }
2150
2151 if (have->info().seq < getEarliestFetch())
2152 {
2153 JLOG(m_journal.debug()) << "Peer requests fetch pack that is too early";
2154 peer->charge(Resource::feeInvalidRequest);
2155 return;
2156 }
2157
2158 auto want = getLedgerByHash(have->info().parentHash);
2159
2160 if (!want)
2161 {
2162 JLOG(m_journal.info())
2163 << "Peer requests fetch pack for ledger whose predecessor we "
2164 << "don't have: " << have;
2165 peer->charge(Resource::feeRequestNoReply);
2166 return;
2167 }
2168
2169 try
2170 {
2171 Serializer hdr(128);
2172
2173 protocol::TMGetObjectByHash reply;
2174 reply.set_query(false);
2175
2176 if (request->has_seq())
2177 reply.set_seq(request->seq());
2178
2179 reply.set_ledgerhash(request->ledgerhash());
2180 reply.set_type(protocol::TMGetObjectByHash::otFETCH_PACK);
2181
2182 // Building a fetch pack:
2183 // 1. Add the header for the requested ledger.
2184 // 2. Add the nodes for the AccountStateMap of that ledger.
2185 // 3. If there are transactions, add the nodes for the
2186 // transactions of the ledger.
2187 // 4. If the FetchPack now contains at least 512 entries then stop.
2188 // 5. If not very much time has elapsed, then loop back and repeat
2189 // the same process adding the previous ledger to the FetchPack.
2190 do
2191 {
2192 std::uint32_t lSeq = want->info().seq;
2193
2194 {
2195 // Serialize the ledger header:
2196 hdr.erase();
2197
2199 addRaw(want->info(), hdr);
2200
2201 // Add the data
2202 protocol::TMIndexedObject* obj = reply.add_objects();
2203 obj->set_hash(
2204 want->info().hash.data(), want->info().hash.size());
2205 obj->set_data(hdr.getDataPtr(), hdr.getLength());
2206 obj->set_ledgerseq(lSeq);
2207 }
2208
2209 populateFetchPack(
2210 want->stateMap(), &have->stateMap(), 16384, &reply, lSeq);
2211
2212 // We use nullptr here because transaction maps are per ledger
2213 // and so the requestor is unlikely to already have it.
2214 if (want->info().txHash.isNonZero())
2215 populateFetchPack(want->txMap(), nullptr, 512, &reply, lSeq);
2216
2217 if (reply.objects().size() >= 512)
2218 break;
2219
2220 have = std::move(want);
2221 want = getLedgerByHash(have->info().parentHash);
2222 } while (want && UptimeClock::now() <= uptime + 1s);
2223
2224 auto msg = std::make_shared<Message>(reply, protocol::mtGET_OBJECTS);
2225
2226 JLOG(m_journal.info())
2227 << "Built fetch pack with " << reply.objects().size() << " nodes ("
2228 << msg->getBufferSize() << " bytes)";
2229
2230 peer->send(msg);
2231 }
2232 catch (std::exception const& ex)
2233 {
2234 JLOG(m_journal.warn())
2235 << "Exception building fetch pack. Exception: " << ex.what();
2236 }
2237}
2238
2239std::size_t
2240LedgerMaster::getFetchPackCacheSize() const
2241{
2242 return fetch_packs_.getCacheSize();
2243}
2244
2245// Returns the minimum ledger sequence in SQL database, if any.
2246std::optional<LedgerIndex>
2247LedgerMaster::minSqlSeq()
2248{
2249 return app_.getRelationalDatabase().getMinLedgerSeq();
2250}
2251
2252std::optional<uint256>
2253LedgerMaster::txnIdFromIndex(uint32_t ledgerSeq, uint32_t txnIndex)
2254{
2255 uint32_t first = 0, last = 0;
2256
2257 if (!getValidatedRange(first, last) || last < ledgerSeq)
2258 return {};
2259
2260 auto const lgr = getLedgerBySeq(ledgerSeq);
2261 if (!lgr || lgr->txs.empty())
2262 return {};
2263
2264 for (auto it = lgr->txs.begin(); it != lgr->txs.end(); ++it)
2265 if (it->first && it->second &&
2266 it->second->isFieldPresent(sfTransactionIndex) &&
2267 it->second->getFieldU32(sfTransactionIndex) == txnIndex)
2268 return it->first->getTransactionID();
2269
2270 return {};
2271}
2272
2273} // namespace ripple
T back(T... args)
T back_inserter(T... args)
T begin(T... args)
Represents a JSON value.
Definition: json_value.h:147
Provide a light-weight way to check active() before string formatting.
Definition: Journal.h:194
A generic endpoint for log messages.
Definition: Journal.h:59
Stream fatal() const
Definition: Journal.h:341
Stream error() const
Definition: Journal.h:335
Stream debug() const
Definition: Journal.h:317
Stream info() const
Definition: Journal.h:323
Stream trace() const
Severity stream access functions.
Definition: Journal.h:311
Stream warn() const
Definition: Journal.h:329
typename Clock::time_point time_point
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual bool hasUnsupportedEnabled() const =0
returns true if one or more amendments on the network have been enabled that this server does not sup...
void doValidatedLedger(std::shared_ptr< ReadView const > const &lastValidatedLedger)
Called when a new fully-validated ledger is accepted.
virtual Config & config()=0
virtual Overlay & overlay()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual OpenLedger & openLedger()=0
virtual beast::Journal journal(std::string const &name)=0
virtual SHAMapStore & getSHAMapStore()=0
virtual bool isStopping() const =0
virtual NodeStore::Database & getNodeStore()=0
virtual RCLValidations & getValidations()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual LedgerReplayer & getLedgerReplayer()=0
virtual TimeKeeper & timeKeeper()=0
virtual JobQueue & getJobQueue()=0
virtual NetworkOPs & getOPs()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual ValidatorList & validators()=0
virtual LedgerMaster & getLedgerMaster()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual PathRequests & getPathRequests()=0
virtual TxQ & getTxQ()=0
virtual LedgerIndex getMaxDisallowedLedger()=0
Ensure that a newly-started validator does not sign proposals older than the last ledger it persisted...
virtual AmendmentTable & getAmendmentTable()=0
virtual PendingSaves & pendingSaves()=0
void insert(std::shared_ptr< STTx const > const &txn)
std::shared_ptr< STTx const > popAcctTransaction(std::shared_ptr< STTx const > const &tx)
void reset(LedgerHash const &salt)
bool LEDGER_REPLAY
Definition: Config.h:230
std::unordered_set< uint256, beast::uhash<> > features
Definition: Config.h:284
virtual std::shared_ptr< Ledger const > acquire(uint256 const &hash, std::uint32_t seq, InboundLedger::Reason)=0
virtual bool isFailure(uint256 const &h)=0
bool isStopping() const
Definition: JobQueue.h:230
int getJobCount(JobType t) const
Jobs waiting at this priority.
Definition: JobQueue.cpp:140
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition: JobQueue.h:166
float getCacheHitRate()
Get the ledgers_by_hash cache hit rate.
Definition: LedgerHistory.h:53
void builtLedger(std::shared_ptr< Ledger const > const &, uint256 const &consensusHash, Json::Value)
Report that we have locally built a particular ledger.
void sweep()
Remove stale cache entries.
Definition: LedgerHistory.h:76
LedgerHash getLedgerHash(LedgerIndex ledgerIndex)
Get a ledger's hash given its sequence number.
void clearLedgerCachePrior(LedgerIndex seq)
std::shared_ptr< Ledger const > getLedgerBySeq(LedgerIndex ledgerIndex)
Get a ledger given its sequence number.
bool insert(std::shared_ptr< Ledger const > const &ledger, bool validated)
Track a ledger.
bool fixIndex(LedgerIndex ledgerIndex, LedgerHash const &ledgerHash)
Repair a hash to index mapping.
void validatedLedger(std::shared_ptr< Ledger const > const &, std::optional< uint256 > const &consensusHash)
Report that we have validated a particular ledger.
std::shared_ptr< Ledger const > getLedgerByHash(LedgerHash const &ledgerHash)
Retrieve a ledger given its hash.
std::shared_ptr< Ledger const > get()
Definition: LedgerHolder.h:56
void set(std::shared_ptr< Ledger const > ledger)
Definition: LedgerHolder.h:44
bool haveLedger(std::uint32_t seq)
std::shared_ptr< Ledger const > getValidatedLedger()
void clearLedgerCachePrior(LedgerIndex seq)
RangeSet< std::uint32_t > mCompleteLedgers
Definition: LedgerMaster.h:349
void setBuildingLedger(LedgerIndex index)
std::unique_ptr< LedgerReplay > releaseReplay()
void failedSave(std::uint32_t seq, uint256 const &hash)
void takeReplay(std::unique_ptr< LedgerReplay > replay)
std::uint32_t const ledger_history_
Definition: LedgerMaster.h:377
void addHeldTransaction(std::shared_ptr< Transaction > const &trans)
void checkAccept(std::shared_ptr< Ledger const > const &ledger)
std::optional< NetClock::time_point > getCloseTimeByHash(LedgerHash const &ledgerHash, LedgerIndex ledgerIndex)
std::size_t getNeededValidations()
Determines how many validations are needed to fully validate a ledger.
std::unique_ptr< LedgerReplay > replayData
Definition: LedgerMaster.h:346
void setLedgerRangePresent(std::uint32_t minV, std::uint32_t maxV)
Application & app_
Definition: LedgerMaster.h:318
TimeKeeper::time_point upgradeWarningPrevTime_
Definition: LedgerMaster.h:390
LedgerHistory mLedgerHistory
Definition: LedgerMaster.h:341
std::optional< NetClock::time_point > getCloseTimeBySeq(LedgerIndex ledgerIndex)
void fixMismatch(ReadView const &ledger)
std::atomic< LedgerIndex > mPubLedgerSeq
Definition: LedgerMaster.h:365
void clearPriorLedgers(LedgerIndex seq)
std::shared_ptr< Ledger const > mPubLedger
Definition: LedgerMaster.h:330
void makeFetchPack(std::weak_ptr< Peer > const &wPeer, std::shared_ptr< protocol::TMGetObjectByHash > const &request, uint256 haveLedgerHash, UptimeClock::time_point uptime)
std::atomic< LedgerIndex > mBuildingLedgerSeq
Definition: LedgerMaster.h:368
std::shared_ptr< ReadView const > getCurrentLedger()
void tryFill(std::shared_ptr< Ledger const > ledger)
std::uint32_t const fetch_depth_
Definition: LedgerMaster.h:374
bool canBeCurrent(std::shared_ptr< Ledger const > const &ledger)
Check the sequence number and parent close time of a ledger against our clock and last validated ledg...
bool isValidated(ReadView const &ledger)
std::uint32_t getEarliestFetch()
std::recursive_mutex m_mutex
Definition: LedgerMaster.h:321
std::optional< LedgerHash > walkHashBySeq(std::uint32_t index, InboundLedger::Reason reason)
Walk to a ledger's hash using the skip list.
uint256 getHashBySeq(std::uint32_t index)
Get a ledger's hash by sequence number using the cache.
std::shared_ptr< STTx const > popAcctTransaction(std::shared_ptr< STTx const > const &tx)
Get the next transaction held for a particular account if any.
LedgerIndex const max_ledger_difference_
Definition: LedgerMaster.h:387
bool fixIndex(LedgerIndex ledgerIndex, LedgerHash const &ledgerHash)
TaggedCache< uint256, Blob > fetch_packs_
Definition: LedgerMaster.h:381
bool const standalone_
Definition: LedgerMaster.h:371
bool isCaughtUp(std::string &reason)
void setPubLedger(std::shared_ptr< Ledger const > const &l)
bool newPFWork(const char *name, std::unique_lock< std::recursive_mutex > &)
A thread needs to be dispatched to handle pathfinding work of some kind.
std::optional< uint256 > txnIdFromIndex(uint32_t ledgerSeq, uint32_t txnIndex)
beast::Journal m_journal
Definition: LedgerMaster.h:319
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
void clearLedger(std::uint32_t seq)
std::pair< uint256, LedgerIndex > mLastValidLedger
Definition: LedgerMaster.h:339
std::shared_ptr< Ledger const > getClosedLedger()
Definition: LedgerMaster.h:80
std::optional< LedgerIndex > minSqlSeq()
void setFullLedger(std::shared_ptr< Ledger const > const &ledger, bool isSynchronous, bool isCurrent)
LedgerMaster(Application &app, Stopwatch &stopwatch, beast::insight::Collector::ptr const &collector, beast::Journal journal)
std::atomic< std::uint32_t > mValidLedgerSign
Definition: LedgerMaster.h:366
CanonicalTXSet mHeldTransactions
Definition: LedgerMaster.h:343
std::uint32_t const ledger_fetch_size_
Definition: LedgerMaster.h:379
void applyHeldTransactions()
Apply held transactions to the open ledger. This is normally called as we close the ledger.
std::chrono::seconds getPublishedLedgerAge()
std::shared_ptr< Ledger const > mHistLedger
Definition: LedgerMaster.h:336
std::recursive_mutex mCompleteLock
Definition: LedgerMaster.h:348
std::string getCompleteLedgers()
std::atomic< LedgerIndex > mValidLedgerSeq
Definition: LedgerMaster.h:367
std::size_t getFetchPackCacheSize() const
bool getFullValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::optional< Blob > getFetchPack(uint256 const &hash) override
Retrieves partial ledger data for the corresponding hash from peers.
void gotFetchPack(bool progress, std::uint32_t seq)
std::recursive_mutex & peekMutex()
void consensusBuilt(std::shared_ptr< Ledger const > const &ledger, uint256 const &consensusHash, Json::Value consensus)
Report that the consensus process built a particular ledger.
std::shared_ptr< Ledger const > mPathLedger
Definition: LedgerMaster.h:333
void setValidLedger(std::shared_ptr< Ledger const > const &l)
std::optional< LedgerHash > getLedgerHashForHistory(LedgerIndex index, InboundLedger::Reason reason)
void addFetchPack(uint256 const &hash, std::shared_ptr< Blob > data)
std::atomic< std::uint32_t > mPubLedgerClose
Definition: LedgerMaster.h:364
void switchLCL(std::shared_ptr< Ledger const > const &lastClosed)
LedgerHolder mValidLedger
Definition: LedgerMaster.h:327
std::shared_ptr< ReadView const > getPublishedLedger()
std::atomic_flag mGotFetchPackThread
Definition: LedgerMaster.h:361
void doAdvance(std::unique_lock< std::recursive_mutex > &)
LedgerHolder mClosedLedger
Definition: LedgerMaster.h:324
bool storeLedger(std::shared_ptr< Ledger const > ledger)
std::vector< std::shared_ptr< Ledger const > > findNewLedgersToPublish(std::unique_lock< std::recursive_mutex > &)
LedgerIndex getCurrentLedgerIndex()
bool isCompatible(ReadView const &, beast::Journal::Stream, char const *reason)
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
void fetchForHistory(std::uint32_t missing, bool &progress, InboundLedger::Reason reason, std::unique_lock< std::recursive_mutex > &)
std::shared_ptr< Ledger const > getLedgerByHash(uint256 const &hash)
std::uint32_t fetch_seq_
Definition: LedgerMaster.h:383
LedgerIndex getValidLedgerIndex()
std::chrono::seconds getValidatedLedgerAge()
void replay(InboundLedger::Reason r, uint256 const &finishLedgerHash, std::uint32_t totalNumLedgers)
Replay a range of ledgers.
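To make the replay() entry above concrete, a hedged sketch follows. The hash and ledger count are placeholders, `app` is an assumed Application reference, and the InboundLedger::Reason::GENERIC enumerator is an assumption rather than a quote of the API.

// Sketch: replay the 16 ledgers that end at finishHash.
uint256 const finishHash{};  // placeholder: hash of the last ledger to replay
app.getLedgerMaster().replay(InboundLedger::Reason::GENERIC, finishHash, 16);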
void setRemoteFee(std::uint32_t f)
Definition: LoadFeeTrack.h:61
bool isLoadedLocal() const
Definition: LoadFeeTrack.h:127
std::uint32_t getLoadBase() const
Definition: LoadFeeTrack.h:90
virtual bool isBlocked()=0
virtual void setAmendmentWarned()=0
virtual void setAmendmentBlocked()=0
virtual void clearNeedNetworkLedger()=0
virtual bool isAmendmentWarned()=0
virtual bool isNeedNetworkLedger()=0
virtual void updateLocalTx(ReadView const &newValidLedger)=0
virtual void clearAmendmentWarned()=0
virtual void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted)=0
Persistency layer for NodeObject.
Definition: Database.h:50
std::shared_ptr< NodeObject > fetchNodeObject(uint256 const &hash, std::uint32_t ledgerSeq=0, FetchType fetchType=FetchType::synchronous, bool duplicate=false)
Fetch a node object.
Definition: Database.cpp:239
virtual std::int32_t getWriteLoad() const =0
Retrieve the estimated number of pending write operations.
std::uint32_t earliestLedgerSeq() const noexcept
Definition: Database.h:220
bool modify(modify_type const &f)
Modify the open ledger.
Definition: OpenLedger.cpp:57
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
Definition: OpenLedger.cpp:50
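A hedged sketch of how modify() above is typically combined with apply() (documented further down this listing) to push a held transaction back into the open ledger. The callback signature (OpenView&, beast::Journal), the app.openLedger() accessor, and the `txn` placeholder are assumptions; this is not a quote of applyHeldTransactions().

// Sketch only: re-apply one held transaction to the open ledger.
// `txn` is assumed to be a std::shared_ptr<STTx const>.
app.openLedger().modify([&](OpenView& view, beast::Journal j) {
    auto const result = apply(app, view, *txn, tapNONE, j);
    return result.second;  // true if the open view changed
});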
Writable ledger view that accumulates state and tx changes.
Definition: OpenView.h:56
void setup(std::shared_ptr< ReadView const > const &ledger)
Definition: OrderBookDB.cpp:38
virtual PeerSequence getActivePeers() const =0
Returns a sequence representing the current list of peers.
virtual void checkTracking(std::uint32_t index)=0
Calls the checkTracking function on each peer.
bool requestsPending() const
std::map< LedgerIndex, bool > getSnapshot() const
Get a snapshot of the pending saves.
Definition: PendingSaves.h:137
A view into a ledger.
Definition: ReadView.h:55
virtual bool open() const =0
Returns true if this reflects an open ledger.
virtual LedgerInfo const & info() const =0
Returns information about the ledger.
virtual std::optional< LedgerHashPair > getHashesByIndex(LedgerIndex ledgerIndex)=0
Returns the hashes of the ledger and its parent as specified by ledgerIndex.
virtual std::optional< LedgerIndex > getMinLedgerSeq()=0
Returns the minimum ledger sequence in the Ledgers table.
virtual uint256 getHashByIndex(LedgerIndex ledgerIndex)=0
Returns the hash of the ledger with the given sequence.
Rules controlling protocol behavior.
Definition: Rules.h:35
uint256 const & as_uint256() const
Definition: SHAMapHash.h:44
virtual void onLedgerClosed(std::shared_ptr< Ledger const > const &ledger)=0
Called by LedgerMaster every time a ledger validates.
virtual std::optional< LedgerIndex > minimumOnline() const =0
The minimum ledger to try and maintain in our database.
virtual bool isLeaf() const =0
Determines if this is a leaf node.
SHAMapHash const & getHash() const
Return the hash of this node.
virtual void serializeWithPrefix(Serializer &) const =0
Serialize the node in a format appropriate for hashing.
A SHAMap is both a radix tree with a fan-out of 16 and a Merkle tree.
Definition: SHAMap.h:96
void visitDifferences(SHAMap const *have, std::function< bool(SHAMapTreeNode const &)> const &) const
Visit every node in this SHAMap that is not present in the specified SHAMap.
Definition: SHAMapSync.cpp:100
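As a rough, hedged sketch of how visitDifferences() pairs with SHAMapTreeNode::serializeWithPrefix() and Serializer (both documented in this listing) when assembling fetch-pack style data. `want`, `have`, `limit`, and the collection step are placeholders; this is not the populateFetchPack() implementation.

// Sketch: walk up to `limit` nodes present in `want` but not in `have`.
std::uint32_t count = 0;
want.visitDifferences(have, [&](SHAMapTreeNode const& node) {
    Serializer s;
    node.serializeWithPrefix(s);
    // hand node.getHash().as_uint256() plus (s.getDataPtr(), s.getLength())
    // to whatever is collecting the entries
    return ++count < limit;  // assumed: returning false ends the walk early
});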
void skip(int num)
Definition: Serializer.cpp:330
std::uint32_t get32()
Definition: Serializer.cpp:364
int getLength() const
Definition: Serializer.h:233
const void * getDataPtr() const
Definition: Serializer.h:223
time_point now() const override
Returns the current time, using the server's clock.
Definition: TimeKeeper.h:64
time_point closeTime() const
Returns the predicted close time, in network time.
Definition: TimeKeeper.h:76
static time_point now()
Definition: UptimeClock.cpp:63
std::vector< WrappedValidationType > getTrustedForLedger(ID const &ledgerID, Seq const &seq)
Get trusted full validations for a specific ledger.
Definition: Validations.h:1058
std::vector< WrappedValidationType > currentTrusted()
Get the currently trusted full validations.
Definition: Validations.h:999
std::vector< std::uint32_t > fees(ID const &ledgerID, std::uint32_t baseFee)
Returns fees reported by trusted full validators in the given ledger.
Definition: Validations.h:1081
std::vector< std::shared_ptr< STValidation > > negativeUNLFilter(std::vector< std::shared_ptr< STValidation > > &&validations) const
Remove validations that are from validators on the negative UNL.
std::size_t quorum() const
Get quorum value for current trusted key set.
QuorumKeys getQuorumKeys() const
Get the quorum and all of the trusted keys.
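A hedged sketch of the sort of quorum check checkAccept() performs with the validation entries above. The app.getValidations() accessor is an assumption, `hash` and `seq` are placeholders, and negative-UNL filtering and other details are omitted.

// Sketch: does ledger (hash, seq) have enough trusted full validations?
auto const vals = app.getValidations().getTrustedForLedger(hash, seq);
bool const meetsQuorum = vals.size() >= app.validators().quorum();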
bool isNonZero() const
Definition: base_uint.h:544
Automatically unlocks and re-locks a unique_lock object.
Definition: scope.h:231
T clear(T... args)
T copy(T... args)
T count(T... args)
T empty(T... args)
T end(T... args)
T endl(T... args)
T find(T... args)
T load(T... args)
T lock(T... args)
T make_pair(T... args)
T max(T... args)
T min(T... args)
bool isNewerVersion(std::uint64_t version)
Check if the version is newer than the local node's rippled software version.
Definition: BuildInfo.cpp:169
bool isRippledVersion(std::uint64_t version)
Check if the encoded software version is a rippled software version.
Definition: BuildInfo.cpp:162
Charge const feeInvalidRequest
Schedule of fees charged for imposing load on the server.
Charge const feeRequestNoReply
TER valid(PreclaimContext const &ctx, AccountID const &src)
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: algorithm.h:26
std::pair< TER, bool > apply(Application &app, OpenView &view, STTx const &tx, ApplyFlags flags, beast::Journal journal)
Apply a transaction to an OpenView.
Definition: apply.cpp:109
SizedItem
Definition: Config.h:51
LedgerIndex getCandidateLedger(LedgerIndex requested)
Find a ledger index from which we could easily get the requested ledger.
Definition: View.h:317
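If memory serves, getCandidateLedger() rounds the requested index up to the next skip-list boundary (a multiple of 256); the values below are therefore an assumption about its behaviour, not a quote of View.h.

// Assumed behaviour (snap up to a 256-ledger skip-list boundary):
//   getCandidateLedger(1)   == 256
//   getCandidateLedger(256) == 256
//   getCandidateLedger(300) == 512
LedgerIndex const candidate = getCandidateLedger(missingSeq);  // missingSeq: placeholder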
static bool shouldAcquire(std::uint32_t const currentLedger, std::uint32_t const ledgerHistory, std::optional< LedgerIndex > const minimumOnline, std::uint32_t const candidateLedger, beast::Journal j)
std::optional< T > prevMissing(RangeSet< T > const &rs, T t, T minVal=0)
Find the largest value not in the set that is less than a given value.
Definition: RangeSet.h:183
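A small sketch tying prevMissing() to RangeSet and the range() helper that appears later in this listing; the numbers are arbitrary.

// Sketch: find the highest gap below 900 in a set of complete ledgers.
RangeSet<std::uint32_t> complete;
complete.insert(range<std::uint32_t>(800, 850));
complete.insert(range<std::uint32_t>(870, 900));
auto const gap = prevMissing(complete, std::uint32_t{900});  // expected: 869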
bool isCurrent(ValidationParms const &p, NetClock::time_point now, NetClock::time_point signTime, NetClock::time_point seenTime)
Whether a validation is still current.
Definition: Validations.h:148
std::optional< uint256 > hashOfSeq(ReadView const &ledger, LedgerIndex seq, beast::Journal journal)
Return the hash of a ledger by sequence.
Definition: View.cpp:759
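A minimal sketch of hashOfSeq() run against the current validated ledger, using the LedgerMaster accessor from this listing; `someSeq` and `journal` are placeholders.

// Sketch: resolve an older ledger's hash through the skip list.
if (auto const valid = app.getLedgerMaster().getValidatedLedger())
{
    // someSeq and journal are placeholders supplied by the caller
    std::optional<uint256> const hash = hashOfSeq(*valid, someSeq, journal);
    // std::nullopt when the hash cannot be determined from this ledger
}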
static constexpr int MAX_LEDGER_GAP
constexpr std::size_t calculatePercent(std::size_t count, std::size_t total)
Calculate one number as a percentage of another.
Definition: MathUtilities.h:44
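For instance (a hedged reading of the helper above):

// e.g. 50 of 200 ledgers present -> 25 (percent)
auto const pct = calculatePercent(50, 200);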
std::enable_if_t< std::is_same< T, char >::value||std::is_same< T, unsigned char >::value, Slice > makeSlice(std::array< T, N > const &a)
Definition: Slice.h:243
Stopwatch & stopwatch()
Returns an instance of a wall clock.
Definition: chrono.h:120
std::string to_string(base_uint< Bits, Tag > const &a)
Definition: base_uint.h:629
ClosedInterval< T > range(T low, T high)
Create a closed range interval.
Definition: RangeSet.h:54
static void populateFetchPack(SHAMap const &want, SHAMap const *have, std::uint32_t cnt, protocol::TMGetObjectByHash *into, std::uint32_t seq, bool withLeaves=true)
Populate a fetch pack with data from the map the recipient wants.
ApplyFlags
Definition: ApplyView.h:30
@ tapNONE
Definition: ApplyView.h:31
static constexpr std::chrono::minutes MAX_LEDGER_AGE_ACQUIRE
@ ledgerMaster
ledger master data for signing
static constexpr int MAX_WRITE_LOAD_ACQUIRE
void addRaw(LedgerHeader const &, Serializer &, bool includeHash=false)
@ jtLEDGER_DATA
Definition: Job.h:66
@ jtUPDATE_PF
Definition: Job.h:56
@ jtPUBOLDLEDGER
Definition: Job.h:44
@ jtADVANCE
Definition: Job.h:67
sha512_half_hasher::result_type sha512Half(Args const &... args)
Returns the SHA512-Half of a series of objects.
Definition: digest.h:223
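A small sketch combining sha512Half() with makeSlice() from earlier in this listing; the payload is arbitrary and the std::vector (Blob) overload of makeSlice is assumed.

// Sketch: SHA512-Half digest of a small byte payload.
Blob const payload{'x', 'r', 'p'};
uint256 const digest = sha512Half(makeSlice(payload));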
bool areCompatible(ReadView const &validLedger, ReadView const &testLedger, beast::Journal::Stream &s, const char *reason)
Return false if the test ledger is provably incompatible with the valid ledger, that is,...
Definition: View.cpp:597
void LogicError(std::string const &how) noexcept
Called when faulty logic causes a broken invariant.
Definition: contract.cpp:48
bool pendSaveValidated(Application &app, std::shared_ptr< Ledger const > const &ledger, bool isSynchronous, bool isCurrent)
Save, or arrange to save, a fully-validated ledger. Returns false on error.
Definition: Ledger.cpp:1002
STL namespace.
T has_value(T... args)
T push_back(T... args)
T reserve(T... args)
T size(T... args)
T sort(T... args)
T str(T... args)
T test_and_set(T... args)
T time_since_epoch(T... args)
T what(T... args)