rippled
Loading...
Searching...
No Matches
InboundLedger.cpp
1//------------------------------------------------------------------------------
2/*
3 This file is part of rippled: https://github.com/ripple/rippled
4 Copyright (c) 2012, 2013 Ripple Labs Inc.
5
6 Permission to use, copy, modify, and/or distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19
20#include <xrpld/app/ledger/AccountStateSF.h>
21#include <xrpld/app/ledger/InboundLedger.h>
22#include <xrpld/app/ledger/InboundLedgers.h>
23#include <xrpld/app/ledger/LedgerMaster.h>
24#include <xrpld/app/ledger/TransactionStateSF.h>
25#include <xrpld/app/main/Application.h>
26#include <xrpld/core/JobQueue.h>
27#include <xrpld/overlay/Overlay.h>
28#include <xrpld/shamap/SHAMapNodeID.h>
29#include <xrpl/basics/Log.h>
30#include <xrpl/protocol/HashPrefix.h>
31#include <xrpl/protocol/jss.h>
32#include <xrpl/resource/Fees.h>
33
34#include <boost/iterator/function_output_iterator.hpp>
35
36#include <algorithm>
37#include <random>
38
39namespace ripple {
40
41using namespace std::chrono_literals;
42
// Tuning constants controlling ledger acquisition.
// NOTE(review): several enumerators were elided by the extraction; per the
// symbol index they include peerCountStart, ledgerTimeoutRetriesMax,
// ledgerBecomeAggressiveThreshold and missingNodesFind — values TODO confirm
// against the repository.
43enum {
 44 // Number of peers to start with
 46
 47 // Number of peers to add on a timeout
 48 ,
 49 peerCountAdd = 3
 50
 51 // how many timeouts before we give up
 52 ,
 54
 55 // how many timeouts before we get aggressive
 56 ,
 58
 59 // Number of nodes to find initially
 60 ,
 62
 63 // Number of nodes to request for a reply
 64 ,
 65 reqNodesReply = 128
 66
 67 // Number of nodes to request blindly
 68 ,
 69 reqNodes = 12
 70};
 71
// milliseconds for each ledger timeout
 73auto constexpr ledgerAcquireTimeout = 3000ms;
74
// Constructor: records the target hash/sequence/reason, takes ownership of
// the PeerSet, and initializes all acquisition-state flags to "nothing yet".
// NOTE(review): the constructor name line, the peerSet parameter line, and
// the TimeoutCounter base-initializer line (presumably passing
// ledgerAcquireTimeout) were elided by the extraction — confirm against the
// repository.
 76 Application& app,
 77 uint256 const& hash,
 78 std::uint32_t seq,
 79 Reason reason,
 80 clock_type& clock,
 83 app,
 84 hash,
 86 {jtLEDGER_DATA, "InboundLedger", 5},
 87 app.journal("InboundLedger"))
 88 , m_clock(clock)
 89 , mHaveHeader(false)
 90 , mHaveState(false)
 91 , mHaveTransactions(false)
 92 , mSignaled(false)
 93 , mByHash(true)
 94 , mSeq(seq)
 95 , mReason(reason)
 96 , mReceiveDispatched(false)
 97 , mPeerSet(std::move(peerSet))
 98{
 99 JLOG(journal_.trace()) << "Acquiring ledger " << hash_;
 100 touch();
 101}
102
// Start the acquisition: if the ledger was not already found locally, add
// peers and queue the job; otherwise finalize the locally-found ledger.
// NOTE(review): the signature, the local tryDB call, and the tail statements
// (storing/accepting the completed ledger) were elided by the extraction.
103void
 105{
 107 collectionLock.unlock();
 108
 110 if (failed_)
 111 return;
 112
 113 if (!complete_)
 114 {
 115 addPeers();
 116 queueJob(sl);
 117 return;
 118 }
 119
 120 JLOG(journal_.debug()) << "Acquiring ledger we already have in "
 121 << " local store. " << hash_;
 122 XRPL_ASSERT(
 123 mLedger->info().seq < XRP_LEDGER_EARLIEST_FEES ||
 124 mLedger->read(keylet::fees()),
 125 "ripple::InboundLedger::init : valid ledger fees");
 126 mLedger->setImmutable();
 127
 129 return;
 130
 132
 133 // Check if this could be a newer fully-validated ledger
 136}
137
// Count how many peers in our set are still connected to the overlay.
// NOTE(review): the return-type/name lines were elided by the extraction.
140{
 141 auto const& peerIds = mPeerSet->getPeerIds();
 142 return std::count_if(peerIds.begin(), peerIds.end(), [this](auto id) {
 143 return (app_.overlay().findPeerByShortID(id) != nullptr);
 144 });
 145}
146
// Record a newly-learned sequence number and refresh the sweep timer.
// NOTE(review): the signature line (taking std::uint32_t seq) was elided.
147void
 149{
 151
 152 // If we didn't know the sequence number, but now do, save it
 153 if ((seq != 0) && (mSeq == 0))
 154 mSeq = seq;
 155
 156 // Prevent this from being swept
 157 touch();
 158}
159
// Re-check local storage for the ledger's data; returns true if that check
// finished the acquisition (either completed or failed).
// NOTE(review): the signature and the else-branch tryDB call (presumably
// against the node family's database) were elided by the extraction.
160bool
 162{
 164 if (!isDone())
 165 {
 166 if (mLedger)
 167 tryDB(mLedger->stateMap().family().db());
 168 else
 170 if (failed_ || complete_)
 171 {
 172 done();
 173 return true;
 174 }
 175 }
 176 return false;
 177}
178
// Destructor: recycle unprocessed account-state data (it may populate a
// different ledger) and log an abort if we never finished.
180{
 181 // Save any received AS data not processed. It could be useful
 182 // for populating a different ledger
 183 for (auto& entry : mReceivedData)
 184 {
 185 if (entry.second->type() == protocol::liAS_NODE)
 186 app_.getInboundLedgers().gotStaleData(entry.second);
 187 }
 188 if (!isDone())
 189 {
 190 JLOG(journal_.debug())
 191 << "Acquire " << hash_ << " abort "
 192 << ((timeouts_ == 0) ? std::string()
 193 : (std::string("timeouts:") +
 195 << mStats.get();
 196 }
 197}
198
// Collect up to `max` node hashes still missing from `map`; if the map has
// no root yet, the expected root hash itself is the one needed hash.
// NOTE(review): the static return-type/name lines and the declaration of the
// local result vector `ret` were elided by the extraction.
 201 uint256 const& root,
 202 SHAMap& map,
 203 int max,
 204 SHAMapSyncFilter* filter)
 205{
 207
 208 if (!root.isZero())
 209 {
 210 if (map.getHash().isZero())
 211 ret.push_back(root);
 212 else
 213 {
 214 auto mn = map.getMissingNodes(max, filter);
 215 ret.reserve(mn.size());
 216 for (auto const& n : mn)
 217 ret.push_back(n.second);
 218 }
 219 }
 220
 221 return ret;
 222}
223
// Missing transaction-map node hashes (up to `max`); signature elided above.
226{
 227 return neededHashes(mLedger->info().txHash, mLedger->txMap(), max, filter);
 228}
229
// Missing account-state-map node hashes (up to `max`); signature elided above.
232{
 233 return neededHashes(
 234 mLedger->info().accountHash, mLedger->stateMap(), max, filter);
 235}
236
237// See how much of the ledger data is stored locally
238// Data found in a fetch pack will be stored
// Tries: header from srcDB or a fetch pack, then the transaction map, then
// the account-state map; sets complete_ when everything was found locally.
239void
 241{
 242 if (!mHaveHeader)
 243 {
 244 auto makeLedger = [&, this](Blob const& data) {
 245 JLOG(journal_.trace()) << "Ledger header found in fetch pack";
 246 mLedger = std::make_shared<Ledger>(
 248 app_.config(),
 250 if (mLedger->info().hash != hash_ ||
 251 (mSeq != 0 && mSeq != mLedger->info().seq))
 252 {
 253 // We know for a fact the ledger can never be acquired
 254 JLOG(journal_.warn())
 255 << "hash " << hash_ << " seq " << std::to_string(mSeq)
 256 << " cannot be a ledger";
 257 mLedger.reset();
 258 failed_ = true;
 259 }
 260 };
 261
 262 // Try to fetch the ledger header from the DB
 263 if (auto nodeObject = srcDB.fetchNodeObject(hash_, mSeq))
 264 {
 265 JLOG(journal_.trace()) << "Ledger header found in local store";
 266
 267 makeLedger(nodeObject->getData());
 268 if (failed_)
 269 return;
 270
 271 // Store the ledger header if the source and destination differ
 272 auto& dstDB{mLedger->stateMap().family().db()};
 273 if (std::addressof(dstDB) != std::addressof(srcDB))
 274 {
 275 Blob blob{nodeObject->getData()};
 276 dstDB.store(
 277 hotLEDGER, std::move(blob), hash_, mLedger->info().seq);
 278 }
 279 }
 280 else
 281 {
 282 // Try to fetch the ledger header from a fetch pack
 283 auto data = app_.getLedgerMaster().getFetchPack(hash_);
 284 if (!data)
 285 return;
 286
 287 JLOG(journal_.trace()) << "Ledger header found in fetch pack";
 288
 289 makeLedger(*data);
 290 if (failed_)
 291 return;
 292
 293 // Store the ledger header in the ledger's database
 294 mLedger->stateMap().family().db().store(
 295 hotLEDGER, std::move(*data), hash_, mLedger->info().seq);
 296 }
 297
 298 if (mSeq == 0)
 299 mSeq = mLedger->info().seq;
 300 mLedger->stateMap().setLedgerSeq(mSeq);
 301 mLedger->txMap().setLedgerSeq(mSeq);
 302 mHaveHeader = true;
 303 }
 304
// NOTE(review): the guard condition opening this scope (presumably
// `if (!mHaveTransactions)`, line 305) was elided by the extraction.
 306 {
 307 if (mLedger->info().txHash.isZero())
 308 {
 309 JLOG(journal_.trace()) << "No TXNs to fetch";
 310 mHaveTransactions = true;
 311 }
 312 else
 313 {
 314 TransactionStateSF filter(
 315 mLedger->txMap().family().db(), app_.getLedgerMaster());
 316 if (mLedger->txMap().fetchRoot(
 317 SHAMapHash{mLedger->info().txHash}, &filter))
 318 {
 319 if (neededTxHashes(1, &filter).empty())
 320 {
 321 JLOG(journal_.trace()) << "Had full txn map locally";
 322 mHaveTransactions = true;
 323 }
 324 }
 325 }
 326 }
 327
 328 if (!mHaveState)
 329 {
 330 if (mLedger->info().accountHash.isZero())
 331 {
 332 JLOG(journal_.fatal())
 333 << "We are acquiring a ledger with a zero account hash";
 334 failed_ = true;
 335 return;
 336 }
 337 AccountStateSF filter(
 338 mLedger->stateMap().family().db(), app_.getLedgerMaster());
 339 if (mLedger->stateMap().fetchRoot(
 340 SHAMapHash{mLedger->info().accountHash}, &filter))
 341 {
 342 if (neededStateHashes(1, &filter).empty())
 343 {
 344 JLOG(journal_.trace()) << "Had full AS map locally";
 345 mHaveState = true;
 346 }
 347 }
 348 }
 349
// NOTE(review): the guard condition here (presumably checking header, tx and
// state flags, line 350) was elided by the extraction.
 351 {
 352 JLOG(journal_.debug()) << "Had everything locally";
 353 complete_ = true;
 354 XRPL_ASSERT(
 355 mLedger->info().seq < XRP_LEDGER_EARLIEST_FEES ||
 356 mLedger->read(keylet::fees()),
 357 "ripple::InboundLedger::tryDB : valid ledger fees");
 358 mLedger->setImmutable();
 359 }
 360}
361
// Timer callback: give up after too many timeouts; otherwise, when no
// progress was made, re-check locally, re-enable by-hash fetching, and add
// (and trigger) more peers.
// NOTE(review): the signature, the give-up condition (presumably comparing
// timeouts_ to ledgerTimeoutRetriesMax, line 375), the peer-count line, and
// the trigger calls around addPeers() were elided by the extraction.
364void
 366{
 367 mRecentNodes.clear();
 368
 369 if (isDone())
 370 {
 371 JLOG(journal_.info()) << "Already done " << hash_;
 372 return;
 373 }
 374
 376 {
 377 if (mSeq != 0)
 378 {
 379 JLOG(journal_.warn())
 380 << timeouts_ << " timeouts for ledger " << mSeq;
 381 }
 382 else
 383 {
 384 JLOG(journal_.warn())
 385 << timeouts_ << " timeouts for ledger " << hash_;
 386 }
 387 failed_ = true;
 388 done();
 389 return;
 390 }
 391
 392 if (!wasProgress)
 393 {
 394 checkLocal();
 395
 396 mByHash = true;
 397
 399 JLOG(journal_.debug())
 400 << "No progress(" << pc << ") for ledger " << hash_;
 401
 402 // addPeers triggers if the reason is not HISTORY
 403 // So if the reason IS HISTORY, need to trigger after we add
 404 // otherwise, we need to trigger before we add
 405 // so each peer gets triggered once
 408 addPeers();
 411 }
 412}
413
// Add peers that claim to have this ledger to the set.
// NOTE(review): the signature and the count/limit arguments to
// PeerSet::addPeers, plus the body of the on-add callback, were elided.
415void
 417{
 418 mPeerSet->addPeers(
 420 [this](auto peer) { return peer->hasLedger(hash_, mSeq); },
 421 [this](auto peer) {
 422 // For historical nodes, do not trigger too soon
 423 // since a fetch pack is probably coming
 426 });
 427}
428
// Return a weak pointer to this acquisition (signature elided above).
431{
 432 return shared_from_this();
 433}
434
// Finalize the acquisition exactly once: log the outcome, make a completed
// ledger immutable, hand it off by reason, and dispatch the completion work
// to the job queue (we hold the PeerSet lock, so we must not do it inline).
// NOTE(review): several lines were elided by the extraction, including the
// signature, the timeout-count string, the assert condition, the
// Reason-specific handoff calls, and the JobQueue::addJob call.
435void
 437{
 438 if (mSignaled)
 439 return;
 440
 441 mSignaled = true;
 442 touch();
 443
 444 JLOG(journal_.debug()) << "Acquire " << hash_ << (failed_ ? " fail " : " ")
 445 << ((timeouts_ == 0)
 446 ? std::string()
 447 : (std::string("timeouts:") +
 449 << mStats.get();
 450
 451 XRPL_ASSERT(
 453 "ripple::InboundLedger::done : complete or failed");
 454
 455 if (complete_ && !failed_ && mLedger)
 456 {
 457 XRPL_ASSERT(
 458 mLedger->info().seq < XRP_LEDGER_EARLIEST_FEES ||
 459 mLedger->read(keylet::fees()),
 460 "ripple::InboundLedger::done : valid ledger fees");
 461 mLedger->setImmutable();
 462 switch (mReason)
 463 {
 464 case Reason::HISTORY:
 466 break;
 467 default:
 469 break;
 470 }
 471 }
 472
 473 // We hold the PeerSet lock, so must dispatch
 475 jtLEDGER_DATA, "AcquisitionDone", [self = shared_from_this()]() {
 476 if (self->complete_ && !self->failed_)
 477 {
 478 self->app_.getLedgerMaster().checkAccept(self->getLedger());
 479 self->app_.getLedgerMaster().tryAdvance();
 480 }
 481 else
 482 self->app_.getInboundLedgers().logFailure(
 483 self->hash_, self->mSeq);
 484 });
 485}
486
// Request more nodes, perhaps from a specific peer: after a timeout, try
// fetch-by-hash; otherwise request the header, then missing account-state
// nodes, then missing transaction nodes; finalize when complete or failed.
// NOTE(review): the signature and the mtx_ lock acquisition line were elided
// by the extraction.
489void
 491{
 493
 494 if (isDone())
 495 {
 496 JLOG(journal_.debug())
 497 << "Trigger on ledger: " << hash_ << (complete_ ? " completed" : "")
 498 << (failed_ ? " failed" : "");
 499 return;
 500 }
 501
 502 if (auto stream = journal_.debug())
 503 {
 504 stream << "Trigger acquiring ledger " << hash_;
 505 if (peer)
 506 stream << " from " << peer;
 507
 508 if (complete_ || failed_)
 509 stream << "complete=" << complete_ << " failed=" << failed_;
 510 else
 511 stream << "header=" << mHaveHeader << " tx=" << mHaveTransactions
 512 << " as=" << mHaveState;
 513 }
 514
 515 if (!mHaveHeader)
 516 {
// NOTE(review): the local-lookup call preceding this failure check
// (line 517) was elided by the extraction.
 518 if (failed_)
 519 {
 520 JLOG(journal_.warn()) << " failed local for " << hash_;
 521 return;
 522 }
 523 }
 524
 525 protocol::TMGetLedger tmGL;
 526 tmGL.set_ledgerhash(hash_.begin(), hash_.size());
 527
 528 if (timeouts_ != 0)
 529 {
 530 // Be more aggressive if we've timed out at least once
 531 tmGL.set_querytype(protocol::qtINDIRECT);
 532
// NOTE(review): the second half of this condition (line 534, presumably
// comparing timeouts_ to ledgerBecomeAggressiveThreshold) was elided.
 533 if (!progress_ && !failed_ && mByHash &&
 535 {
 536 auto need = getNeededHashes();
 537
 538 if (!need.empty())
 539 {
 540 protocol::TMGetObjectByHash tmBH;
 541 bool typeSet = false;
 542 tmBH.set_query(true);
 543 tmBH.set_ledgerhash(hash_.begin(), hash_.size());
 544 for (auto const& p : need)
 545 {
 546 JLOG(journal_.debug()) << "Want: " << p.second;
 547
 548 if (!typeSet)
 549 {
 550 tmBH.set_type(p.first);
 551 typeSet = true;
 552 }
 553
 554 if (p.first == tmBH.type())
 555 {
 556 protocol::TMIndexedObject* io = tmBH.add_objects();
 557 io->set_hash(p.second.begin(), p.second.size());
 558 if (mSeq != 0)
 559 io->set_ledgerseq(mSeq);
 560 }
 561 }
 562
 563 auto packet =
 564 std::make_shared<Message>(tmBH, protocol::mtGET_OBJECTS);
 565 auto const& peerIds = mPeerSet->getPeerIds();
// NOTE(review): the std::for_each call opening this statement (line 566)
// was elided by the extraction.
 567 peerIds.begin(), peerIds.end(), [this, &packet](auto id) {
 568 if (auto p = app_.overlay().findPeerByShortID(id))
 569 {
 570 mByHash = false;
 571 p->send(packet);
 572 }
 573 });
 574 }
 575 else
 576 {
 577 JLOG(journal_.info())
 578 << "getNeededHashes says acquire is complete";
 579 mHaveHeader = true;
 580 mHaveTransactions = true;
 581 mHaveState = true;
 582 complete_ = true;
 583 }
 584 }
 585 }
 586
 587 // We can't do much without the header data because we don't know the
 588 // state or transaction root hashes.
 589 if (!mHaveHeader && !failed_)
 590 {
 591 tmGL.set_itype(protocol::liBASE);
 592 if (mSeq != 0)
 593 tmGL.set_ledgerseq(mSeq);
 594 JLOG(journal_.trace()) << "Sending header request to "
 595 << (peer ? "selected peer" : "all peers");
 596 mPeerSet->sendRequest(tmGL, peer);
 597 return;
 598 }
 599
 600 if (mLedger)
 601 tmGL.set_ledgerseq(mLedger->info().seq);
 602
 603 if (reason != TriggerReason::reply)
 604 {
 605 // If we're querying blind, don't query deep
 606 tmGL.set_querydepth(0);
 607 }
 608 else if (peer && peer->isHighLatency())
 609 {
 610 // If the peer has high latency, query extra deep
 611 tmGL.set_querydepth(2);
 612 }
 613 else
 614 tmGL.set_querydepth(1);
 615
 616 // Get the state data first because it's the most likely to be useful
 617 // if we wind up abandoning this fetch.
 618 if (mHaveHeader && !mHaveState && !failed_)
 619 {
 620 XRPL_ASSERT(
 621 mLedger,
 622 "ripple::InboundLedger::trigger : non-null ledger to read state "
 623 "from");
 624
 625 if (!mLedger->stateMap().isValid())
 626 {
 627 failed_ = true;
 628 }
 629 else if (mLedger->stateMap().getHash().isZero())
 630 {
 631 // we need the root node
 632 tmGL.set_itype(protocol::liAS_NODE);
 633 *tmGL.add_nodeids() = SHAMapNodeID().getRawString();
 634 JLOG(journal_.trace()) << "Sending AS root request to "
 635 << (peer ? "selected peer" : "all peers");
 636 mPeerSet->sendRequest(tmGL, peer);
 637 return;
 638 }
 639 else
 640 {
 641 AccountStateSF filter(
 642 mLedger->stateMap().family().db(), app_.getLedgerMaster());
 643
 644 // Release the lock while we process the large state map
 645 sl.unlock();
 646 auto nodes =
 647 mLedger->stateMap().getMissingNodes(missingNodesFind, &filter);
 648 sl.lock();
 649
 650 // Make sure nothing happened while we released the lock
 651 if (!failed_ && !complete_ && !mHaveState)
 652 {
 653 if (nodes.empty())
 654 {
 655 if (!mLedger->stateMap().isValid())
 656 failed_ = true;
 657 else
 658 {
 659 mHaveState = true;
 660
 661 if (mHaveTransactions)
 662 complete_ = true;
 663 }
 664 }
 665 else
 666 {
 667 filterNodes(nodes, reason);
 668
 669 if (!nodes.empty())
 670 {
 671 tmGL.set_itype(protocol::liAS_NODE);
 672 for (auto const& id : nodes)
 673 {
 674 *(tmGL.add_nodeids()) = id.first.getRawString();
 675 }
 676
 677 JLOG(journal_.trace())
 678 << "Sending AS node request (" << nodes.size()
 679 << ") to "
 680 << (peer ? "selected peer" : "all peers");
 681 mPeerSet->sendRequest(tmGL, peer);
 682 return;
 683 }
 684 else
 685 {
 686 JLOG(journal_.trace()) << "All AS nodes filtered";
 687 }
 688 }
 689 }
 690 }
 691 }
 692
 693 if (mHaveHeader && !mHaveTransactions && !failed_)
 694 {
 695 XRPL_ASSERT(
 696 mLedger,
 697 "ripple::InboundLedger::trigger : non-null ledger to read "
 698 "transactions from");
 699
 700 if (!mLedger->txMap().isValid())
 701 {
 702 failed_ = true;
 703 }
 704 else if (mLedger->txMap().getHash().isZero())
 705 {
 706 // we need the root node
 707 tmGL.set_itype(protocol::liTX_NODE);
 708 *(tmGL.add_nodeids()) = SHAMapNodeID().getRawString();
 709 JLOG(journal_.trace()) << "Sending TX root request to "
 710 << (peer ? "selected peer" : "all peers");
 711 mPeerSet->sendRequest(tmGL, peer);
 712 return;
 713 }
 714 else
 715 {
 716 TransactionStateSF filter(
 717 mLedger->txMap().family().db(), app_.getLedgerMaster());
 718
 719 auto nodes =
 720 mLedger->txMap().getMissingNodes(missingNodesFind, &filter);
 721
 722 if (nodes.empty())
 723 {
 724 if (!mLedger->txMap().isValid())
 725 failed_ = true;
 726 else
 727 {
 728 mHaveTransactions = true;
 729
 730 if (mHaveState)
 731 complete_ = true;
 732 }
 733 }
 734 else
 735 {
 736 filterNodes(nodes, reason);
 737
 738 if (!nodes.empty())
 739 {
 740 tmGL.set_itype(protocol::liTX_NODE);
 741 for (auto const& n : nodes)
 742 {
 743 *(tmGL.add_nodeids()) = n.first.getRawString();
 744 }
 745 JLOG(journal_.trace())
 746 << "Sending TX node request (" << nodes.size()
 747 << ") to " << (peer ? "selected peer" : "all peers");
 748 mPeerSet->sendRequest(tmGL, peer);
 749 return;
 750 }
 751 else
 752 {
 753 JLOG(journal_.trace()) << "All TX nodes filtered";
 754 }
 755 }
 756 }
 757 }
 758
 759 if (complete_ || failed_)
 760 {
 761 JLOG(journal_.debug())
 762 << "Done:" << (complete_ ? " complete" : "")
 763 << (failed_ ? " failed " : " ") << mLedger->info().seq;
 764 sl.unlock();
 765 done();
 766 }
 767}
768
// Drop recently-requested node hashes (unless we timed out), cap the request
// size, and remember what we are about to ask for.
// NOTE(review): the first parameter line (the mutable vector of
// (SHAMapNodeID, uint256) pairs named `nodes`) was elided by the extraction.
769void
770InboundLedger::filterNodes(
 772 TriggerReason reason)
 773{
 774 // Sort nodes so that the ones we haven't recently
 775 // requested come before the ones we have.
 776 auto dup = std::stable_partition(
 777 nodes.begin(), nodes.end(), [this](auto const& item) {
 778 return mRecentNodes.count(item.second) == 0;
 779 });
 780
 781 // If everything is a duplicate we don't want to send
 782 // any query at all except on a timeout where we need
 783 // to query everyone:
 784 if (dup == nodes.begin())
 785 {
 786 JLOG(journal_.trace()) << "filterNodes: all duplicates";
 787
 788 if (reason != TriggerReason::timeout)
 789 {
 790 nodes.clear();
 791 return;
 792 }
 793 }
 794 else
 795 {
 796 JLOG(journal_.trace()) << "filterNodes: pruning duplicates";
 797
 798 nodes.erase(dup, nodes.end());
 799 }
 800
 801 std::size_t const limit =
 802 (reason == TriggerReason::reply) ? reqNodesReply : reqNodes;
 803
 804 if (nodes.size() > limit)
 805 nodes.resize(limit);
 806
 807 for (auto const& n : nodes)
 808 mRecentNodes.insert(n.second);
}
810
814// data must not have hash prefix
815bool
816InboundLedger::takeHeader(std::string const& data)
817{
818 // Return value: true=normal, false=bad data
819 JLOG(journal_.trace()) << "got header acquiring ledger " << hash_;
820
821 if (complete_ || failed_ || mHaveHeader)
822 return true;
823
824 auto* f = &app_.getNodeFamily();
825 mLedger = std::make_shared<Ledger>(
826 deserializeHeader(makeSlice(data)), app_.config(), *f);
827 if (mLedger->info().hash != hash_ ||
828 (mSeq != 0 && mSeq != mLedger->info().seq))
829 {
830 JLOG(journal_.warn())
831 << "Acquire hash mismatch: " << mLedger->info().hash
832 << "!=" << hash_;
833 mLedger.reset();
834 return false;
835 }
836 if (mSeq == 0)
837 mSeq = mLedger->info().seq;
838 mLedger->stateMap().setLedgerSeq(mSeq);
839 mLedger->txMap().setLedgerSeq(mSeq);
840 mHaveHeader = true;
841
842 Serializer s(data.size() + 4);
843 s.add32(HashPrefix::ledgerMaster);
844 s.addRaw(data.data(), data.size());
845 f->db().store(hotLEDGER, std::move(s.modData()), hash_, mSeq);
846
847 if (mLedger->info().txHash.isZero())
848 mHaveTransactions = true;
849
850 if (mLedger->info().accountHash.isZero())
851 mHaveState = true;
852
853 mLedger->txMap().setSynching();
854 mLedger->stateMap().setSynching();
855
856 return true;
857}
858
// Add one batch of received tx/state map nodes to the appropriate SHAMap,
// updating `san` with how useful the data was; completes the acquisition if
// both maps finish synching.
// NOTE(review): the trailing-return-type line of the map-selection lambda
// (line 886) was elided by the extraction.
862void
863InboundLedger::receiveNode(protocol::TMLedgerData& packet, SHAMapAddNode& san)
 864{
 865 if (!mHaveHeader)
 866 {
 867 JLOG(journal_.warn()) << "Missing ledger header";
 868 san.incInvalid();
 869 return;
 870 }
 871 if (packet.type() == protocol::liTX_NODE)
 872 {
 873 if (mHaveTransactions || failed_)
 874 {
 875 san.incDuplicate();
 876 return;
 877 }
 878 }
 879 else if (mHaveState || failed_)
 880 {
 881 san.incDuplicate();
 882 return;
 883 }
 884
 885 auto [map, rootHash, filter] = [&]()
 887 if (packet.type() == protocol::liTX_NODE)
 888 return {
 889 mLedger->txMap(),
 890 SHAMapHash{mLedger->info().txHash},
 891 std::make_unique<TransactionStateSF>(
 892 mLedger->txMap().family().db(), app_.getLedgerMaster())};
 893 return {
 894 mLedger->stateMap(),
 895 SHAMapHash{mLedger->info().accountHash},
 896 std::make_unique<AccountStateSF>(
 897 mLedger->stateMap().family().db(), app_.getLedgerMaster())};
 898 }();
 899
 900 try
 901 {
 902 auto const f = filter.get();
 903
 904 for (auto const& node : packet.nodes())
 905 {
 906 auto const nodeID = deserializeSHAMapNodeID(node.nodeid());
 907
 908 if (!nodeID)
 909 throw std::runtime_error("data does not properly deserialize");
 910
 911 if (nodeID->isRoot())
 912 {
 913 san += map.addRootNode(rootHash, makeSlice(node.nodedata()), f);
 914 }
 915 else
 916 {
 917 san += map.addKnownNode(*nodeID, makeSlice(node.nodedata()), f);
 918 }
 919
 920 if (!san.isGood())
 921 {
 922 JLOG(journal_.warn()) << "Received bad node data";
 923 return;
 924 }
 925 }
 926 }
 927 catch (std::exception const& e)
 928 {
 929 JLOG(journal_.error()) << "Received bad node data: " << e.what();
 930 san.incInvalid();
 931 return;
 932 }
 933
 934 if (!map.isSynching())
 935 {
 936 if (packet.type() == protocol::liTX_NODE)
 937 mHaveTransactions = true;
 938 else
 939 mHaveState = true;
 940
 941 if (mHaveTransactions && mHaveState)
 942 {
 943 complete_ = true;
 944 done();
 945 }
 946 }
 947}
948
952bool
953InboundLedger::takeAsRootNode(Slice const& data, SHAMapAddNode& san)
954{
955 if (failed_ || mHaveState)
956 {
957 san.incDuplicate();
958 return true;
959 }
960
961 if (!mHaveHeader)
962 {
963 UNREACHABLE("ripple::InboundLedger::takeAsRootNode : no ledger header");
964 return false;
965 }
966
967 AccountStateSF filter(
968 mLedger->stateMap().family().db(), app_.getLedgerMaster());
969 san += mLedger->stateMap().addRootNode(
970 SHAMapHash{mLedger->info().accountHash}, data, &filter);
971 return san.isGood();
972}
973
977bool
978InboundLedger::takeTxRootNode(Slice const& data, SHAMapAddNode& san)
979{
980 if (failed_ || mHaveTransactions)
981 {
982 san.incDuplicate();
983 return true;
984 }
985
986 if (!mHaveHeader)
987 {
988 UNREACHABLE("ripple::InboundLedger::takeTxRootNode : no ledger header");
989 return false;
990 }
991
992 TransactionStateSF filter(
993 mLedger->txMap().family().db(), app_.getLedgerMaster());
994 san += mLedger->txMap().addRootNode(
995 SHAMapHash{mLedger->info().txHash}, data, &filter);
996 return san.isGood();
997}
998
// Build the list of (object type, hash) pairs we still need: the ledger
// header itself if missing, otherwise a few missing state and tx node hashes.
// NOTE(review): the return-type line, the declaration of the local result
// vector `ret`, and the push_back opening at line 1028 were elided by the
// extraction.
1000InboundLedger::getNeededHashes()
 1001{
 1003
 1004 if (!mHaveHeader)
 1005 {
 1006 ret.push_back(
 1007 std::make_pair(protocol::TMGetObjectByHash::otLEDGER, hash_));
 1008 return ret;
 1009 }
 1010
 1011 if (!mHaveState)
 1012 {
 1013 AccountStateSF filter(
 1014 mLedger->stateMap().family().db(), app_.getLedgerMaster());
 1015 for (auto const& h : neededStateHashes(4, &filter))
 1016 {
 1017 ret.push_back(
 1018 std::make_pair(protocol::TMGetObjectByHash::otSTATE_NODE, h));
 1019 }
 1020 }
 1021
 1022 if (!mHaveTransactions)
 1023 {
 1024 TransactionStateSF filter(
 1025 mLedger->txMap().family().db(), app_.getLedgerMaster());
 1026 for (auto const& h : neededTxHashes(4, &filter))
 1027 {
 1029 protocol::TMGetObjectByHash::otTRANSACTION_NODE, h));
 1030 }
 1031 }
 1032
 1033 return ret;
 1034}
1035
// Queue received ledger data for later processing; returns true iff the
// caller should dispatch a processing job (i.e. none is already running).
// NOTE(review): the parameter lines (weak peer pointer and shared
// TMLedgerData packet) were elided by the extraction.
1039bool
1040InboundLedger::gotData(
 1043{
 1044 std::lock_guard sl(mReceivedDataLock);
 1045
 1046 if (isDone())
 1047 return false;
 1048
 1049 mReceivedData.emplace_back(peer, data);
 1050
 1051 if (mReceiveDispatched)
 1052 return false;
 1053
 1054 mReceiveDispatched = true;
 1055 return true;
 1056}
1057
1061// VFALCO NOTE, it is not necessary to pass the entire Peer,
1062// we can get away with just a Resource::Consumer endpoint.
1063//
1064// TODO Change peer to Consumer
1065//
// Process one TMLedgerData packet from `peer`; returns the number of useful
// nodes added, or -1 on malformed/uncharged data (the peer is fee-charged).
// NOTE(review): the `peer` parameter line was elided by the extraction.
1066int
1067InboundLedger::processData(
 1069 protocol::TMLedgerData& packet)
 1070{
 1071 if (packet.type() == protocol::liBASE)
 1072 {
 1073 if (packet.nodes().empty())
 1074 {
 1075 JLOG(journal_.warn()) << peer->id() << ": empty header data";
 1076 peer->charge(
 1077 Resource::feeMalformedRequest, "ledger_data empty header");
 1078 return -1;
 1079 }
 1080
 1081 SHAMapAddNode san;
 1082
 1083 ScopedLockType sl(mtx_);
 1084
 1085 try
 1086 {
 1087 if (!mHaveHeader)
 1088 {
 1089 if (!takeHeader(packet.nodes(0).nodedata()))
 1090 {
 1091 JLOG(journal_.warn()) << "Got invalid header data";
 1092 peer->charge(
 1093 Resource::feeMalformedRequest,
 1094 "ledger_data invalid header");
 1095 return -1;
 1096 }
 1097
 1098 san.incUseful();
 1099 }
 1100
 1101 if (!mHaveState && (packet.nodes().size() > 1) &&
 1102 !takeAsRootNode(makeSlice(packet.nodes(1).nodedata()), san))
 1103 {
 1104 JLOG(journal_.warn()) << "Included AS root invalid";
 1105 }
 1106
 1107 if (!mHaveTransactions && (packet.nodes().size() > 2) &&
 1108 !takeTxRootNode(makeSlice(packet.nodes(2).nodedata()), san))
 1109 {
 1110 JLOG(journal_.warn()) << "Included TX root invalid";
 1111 }
 1112 }
 1113 catch (std::exception const& ex)
 1114 {
 1115 JLOG(journal_.warn())
 1116 << "Included AS/TX root invalid: " << ex.what();
 1117 using namespace std::string_literals;
 1118 peer->charge(Resource::feeInvalidData, "ledger_data "s + ex.what());
 1119 return -1;
 1120 }
 1121
 1122 if (san.isUseful())
 1123 progress_ = true;
 1124
 1125 mStats += san;
 1126 return san.getGood();
 1127 }
 1128
 1129 if ((packet.type() == protocol::liTX_NODE) ||
 1130 (packet.type() == protocol::liAS_NODE))
 1131 {
// NOTE(review): this local `type` string appears unused in the visible
// code — candidate for removal once confirmed against the repository.
 1132 std::string type = packet.type() == protocol::liTX_NODE ? "liTX_NODE: "
 1133 : "liAS_NODE: ";
 1134
 1135 if (packet.nodes().empty())
 1136 {
 1137 JLOG(journal_.info()) << peer->id() << ": response with no nodes";
 1138 peer->charge(Resource::feeMalformedRequest, "ledger_data no nodes");
 1139 return -1;
 1140 }
 1141
 1142 ScopedLockType sl(mtx_);
 1143
 1144 // Verify node IDs and data are complete
 1145 for (auto const& node : packet.nodes())
 1146 {
 1147 if (!node.has_nodeid() || !node.has_nodedata())
 1148 {
 1149 JLOG(journal_.warn()) << "Got bad node";
 1150 peer->charge(
 1151 Resource::feeMalformedRequest, "ledger_data bad node");
 1152 return -1;
 1153 }
 1154 }
 1155
 1156 SHAMapAddNode san;
 1157 receiveNode(packet, san);
 1158
 1159 JLOG(journal_.debug())
 1160 << "Ledger "
 1161 << ((packet.type() == protocol::liTX_NODE) ? "TX" : "AS")
 1162 << " node stats: " << san.get();
 1163
 1164 if (san.isUseful())
 1165 progress_ = true;
 1166
 1167 mStats += san;
 1168 return san.getGood();
 1169 }
 1170
 1171 return -1;
 1172}
1173
1174namespace detail {
1175// Track the amount of useful data that each peer returns
// NOTE(review): the struct declaration line, the `counts` member declaration
// (an unordered_map from shared_ptr<Peer> to int per the symbol index), the
// prune()/sampleN() signature lines, and the RNG/sample-vector declarations
// inside sampleN were elided by the extraction — confirm against the
// repository.
 1177{
 1178 // Map from peer to amount of useful the peer returned
 1180 // The largest amount of useful data that any peer returned
 1181 int maxCount = 0;
 1182
 1183 // Update the data count for a peer
 1184 void
 1185 update(std::shared_ptr<Peer>&& peer, int dataCount)
 1186 {
 1187 if (dataCount <= 0)
 1188 return;
 1189 maxCount = std::max(maxCount, dataCount);
 1190 auto i = counts.find(peer);
 1191 if (i == counts.end())
 1192 {
 1193 counts.emplace(std::move(peer), dataCount);
 1194 return;
 1195 }
 1196 i->second = std::max(i->second, dataCount);
 1197 }
 1198
 1199 // Prune all the peers that didn't return enough data.
 1200 void
 1202 {
 1203 // Remove all the peers that didn't return at least half as much data as
 1204 // the best peer
 1205 auto const thresh = maxCount / 2;
 1206 auto i = counts.begin();
 1207 while (i != counts.end())
 1208 {
 1209 if (i->second < thresh)
 1210 i = counts.erase(i);
 1211 else
 1212 ++i;
 1213 }
 1214 }
 1215
 1216 // call F with the `peer` parameter with a random sample of at most n values
 1217 // of the counts vector.
 1218 template <class F>
 1219 void
 1221 {
 1222 if (counts.empty())
 1223 return;
 1224
 1225 auto outFunc = [&f](auto&& v) { f(v.first); };
 1227#if _MSC_VER
 1229 s.reserve(n);
 1231 counts.begin(), counts.end(), std::back_inserter(s), n, rng);
 1232 for (auto& v : s)
 1233 {
 1234 outFunc(v);
 1235 }
 1236#else
 1238 counts.begin(),
 1239 counts.end(),
 1240 boost::make_function_output_iterator(outFunc),
 1241 n,
 1242 rng);
 1243#endif
 1244 }
 1245};
1246} // namespace detail
1247
1251void
1252InboundLedger::runData()
1253{
1254 // Maximum number of peers to request data from
1255 constexpr std::size_t maxUsefulPeers = 6;
1256
1257 decltype(mReceivedData) data;
1258
1259 // Reserve some memory so the first couple iterations don't reallocate
1260 data.reserve(8);
1261
1262 detail::PeerDataCounts dataCounts;
1263
1264 for (;;)
1265 {
1266 data.clear();
1267
1268 {
1269 std::lock_guard sl(mReceivedDataLock);
1270
1271 if (mReceivedData.empty())
1272 {
1273 mReceiveDispatched = false;
1274 break;
1275 }
1276
1277 data.swap(mReceivedData);
1278 }
1279
1280 for (auto& entry : data)
1281 {
1282 if (auto peer = entry.first.lock())
1283 {
1284 int count = processData(peer, *(entry.second));
1285 dataCounts.update(std::move(peer), count);
1286 }
1287 }
1288 }
1289
1290 // Select a random sample of the peers that gives us the most nodes that are
1291 // useful
1292 dataCounts.prune();
1293 dataCounts.sampleN(maxUsefulPeers, [&](std::shared_ptr<Peer> const& peer) {
1294 trigger(peer, TriggerReason::reply);
1295 });
1296}
1297
// Report acquisition status as JSON (hash, flags, peer count, timeouts, and
// samples of still-needed node hashes).
// NOTE(review): the Json::Value return-type line and the declarations of the
// result object `ret` and the hash arrays `hv` were elided by the extraction.
1299InboundLedger::getJson(int)
 1300{
 1302
 1303 ScopedLockType sl(mtx_);
 1304
 1305 ret[jss::hash] = to_string(hash_);
 1306
 1307 if (complete_)
 1308 ret[jss::complete] = true;
 1309
 1310 if (failed_)
 1311 ret[jss::failed] = true;
 1312
 1313 if (!complete_ && !failed_)
 1314 ret[jss::peers] = static_cast<int>(mPeerSet->getPeerIds().size());
 1315
 1316 ret[jss::have_header] = mHaveHeader;
 1317
 1318 if (mHaveHeader)
 1319 {
 1320 ret[jss::have_state] = mHaveState;
 1321 ret[jss::have_transactions] = mHaveTransactions;
 1322 }
 1323
 1324 ret[jss::timeouts] = timeouts_;
 1325
 1326 if (mHaveHeader && !mHaveState)
 1327 {
 1329 for (auto const& h : neededStateHashes(16, nullptr))
 1330 {
 1331 hv.append(to_string(h));
 1332 }
 1333 ret[jss::needed_state_hashes] = hv;
 1334 }
 1335
 1336 if (mHaveHeader && !mHaveTransactions)
 1337 {
 1339 for (auto const& h : neededTxHashes(16, nullptr))
 1340 {
 1341 hv.append(to_string(h));
 1342 }
 1343 ret[jss::needed_transaction_hashes] = hv;
 1344 }
 1345
 1346 return ret;
 1347}
1348
1349} // namespace ripple
T addressof(T... args)
T back_inserter(T... args)
T begin(T... args)
Represents a JSON value.
Definition: json_value.h:148
Value & append(const Value &value)
Append value to array at the end.
Definition: json_value.cpp:897
Stream fatal() const
Definition: Journal.h:352
Stream debug() const
Definition: Journal.h:328
Stream info() const
Definition: Journal.h:334
Stream trace() const
Severity stream access functions.
Definition: Journal.h:322
Stream warn() const
Definition: Journal.h:340
virtual Config & config()=0
virtual JobQueue & getJobQueue()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual Family & getNodeFamily()=0
virtual LedgerMaster & getLedgerMaster()=0
virtual NodeStore::Database & db()=0
std::size_t getPeerCount() const
void trigger(std::shared_ptr< Peer > const &, TriggerReason)
Request more nodes, perhaps from a specific peer.
void init(ScopedLockType &collectionLock)
std::set< uint256 > mRecentNodes
void addPeers()
Add more peers to the set, if possible.
std::shared_ptr< Ledger > mLedger
std::vector< uint256 > neededTxHashes(int max, SHAMapSyncFilter *filter) const
InboundLedger(Application &app, uint256 const &hash, std::uint32_t seq, Reason reason, clock_type &, std::unique_ptr< PeerSet > peerSet)
SHAMapAddNode mStats
void tryDB(NodeStore::Database &srcDB)
void onTimer(bool progress, ScopedLockType &peerSetLock) override
Called with a lock by the PeerSet when the timer expires.
std::vector< uint256 > neededStateHashes(int max, SHAMapSyncFilter *filter) const
std::weak_ptr< TimeoutCounter > pmDowncast() override
Return a weak pointer to this.
std::vector< std::pair< std::weak_ptr< Peer >, std::shared_ptr< protocol::TMLedgerData > > > mReceivedData
std::vector< neededHash_t > getNeededHashes()
void update(std::uint32_t seq)
std::unique_ptr< PeerSet > mPeerSet
virtual void gotStaleData(std::shared_ptr< protocol::TMLedgerData > packet)=0
virtual void onLedgerFetched()=0
Called when a complete ledger is obtained.
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition: JobQueue.h:165
void checkAccept(std::shared_ptr< Ledger const > const &ledger)
std::optional< Blob > getFetchPack(uint256 const &hash) override
Retrieves partial ledger data of the coresponding hash from peers.
bool storeLedger(std::shared_ptr< Ledger const > ledger)
Persistency layer for NodeObject.
Definition: Database.h:50
std::shared_ptr< NodeObject > fetchNodeObject(uint256 const &hash, std::uint32_t ledgerSeq=0, FetchType fetchType=FetchType::synchronous, bool duplicate=false)
Fetch a node object.
Definition: Database.cpp:240
std::string get() const
bool isZero() const
Definition: SHAMapHash.h:54
A SHAMap is both a radix tree with a fan-out of 16 and a Merkle tree.
Definition: SHAMap.h:94
SHAMapHash getHash() const
Definition: SHAMap.cpp:888
std::vector< std::pair< SHAMapNodeID, uint256 > > getMissingNodes(int maxNodes, SHAMapSyncFilter *filter)
Check for nodes in the SHAMap not available.
Definition: SHAMapSync.cpp:315
int addRaw(Blob const &vector)
Definition: Serializer.cpp:88
An immutable linear range of bytes.
Definition: Slice.h:46
This class is an "active" object.
void queueJob(ScopedLockType &)
Queue a job to call invokeOnTimer().
bool progress_
Whether forward progress has been made.
beast::Journal journal_
uint256 const hash_
The hash of the object (in practice, always a ledger) we are trying to fetch.
std::recursive_mutex mtx_
iterator begin()
Definition: base_uint.h:136
static constexpr std::size_t size()
Definition: base_uint.h:526
T count_if(T... args)
T emplace(T... args)
T empty(T... args)
T end(T... args)
T erase(T... args)
T find(T... args)
T for_each(T... args)
T make_pair(T... args)
T max(T... args)
@ arrayValue
array value (ordered list)
Definition: json_value.h:43
@ objectValue
object value (collection of name/value pairs).
Definition: json_value.h:44
Keylet const & fees() noexcept
The (fixed) index of the object containing the ledger fees.
Definition: Indexes.cpp:213
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: algorithm.h:26
LedgerHeader deserializePrefixedHeader(Slice data, bool hasHash=false)
Deserialize a ledger header (prefixed with 4 bytes) from a byte array.
@ peerCountStart
@ ledgerBecomeAggressiveThreshold
@ ledgerTimeoutRetriesMax
@ missingNodesFind
std::optional< SHAMapNodeID > deserializeSHAMapNodeID(void const *data, std::size_t size)
Return an object representing a serialized SHAMap Node ID.
auto constexpr ledgerAcquireTimeout
@ hotLEDGER
Definition: NodeObject.h:34
static constexpr std::uint32_t XRP_LEDGER_EARLIEST_FEES
The XRP Ledger mainnet's earliest ledger with a FeeSettings object.
std::enable_if_t< std::is_same< T, char >::value||std::is_same< T, unsigned char >::value, Slice > makeSlice(std::array< T, N > const &a)
Definition: Slice.h:244
Number root(Number f, unsigned d)
Definition: Number.cpp:635
@ jtLEDGER_DATA
Definition: Job.h:65
LedgerHeader deserializeHeader(Slice data, bool hasHash=false)
Deserialize a ledger header from a byte array.
static std::vector< uint256 > neededHashes(uint256 const &root, SHAMap &map, int max, SHAMapSyncFilter *filter)
T push_back(T... args)
T reserve(T... args)
T reset(T... args)
T sample(T... args)
T stable_partition(T... args)
std::unordered_map< std::shared_ptr< Peer >, int > counts
void sampleN(std::size_t n, F &&f)
void update(std::shared_ptr< Peer > &&peer, int dataCount)
T to_string(T... args)
T unlock(T... args)
T what(T... args)