Mirror of https://github.com/Xahau/xahaud.git (synced 2025-11-19 01:55:48 +00:00)
Refactor of InboundTransactions et al:
This refactor was primarily aimed at reducing the size of objects derived from TimeoutCounter by improving the packing of their data members. Other potential improvements surfaced during this process and were implemented as well.
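The packing idea, in a minimal sketch (the type and member names here are illustrative, not the real ones): replacing a run of bool members with single-bit flags in one std::uint8_t removes per-bool storage and alignment padding, which is where the size reduction in the diff below comes from.

    #include <cstdint>
    #include <memory>

    // Before: each bool occupies at least a byte, and the pointer's
    // alignment forces trailing padding.
    struct FlagsAsBools
    {
        std::shared_ptr<int> ledger;
        bool haveHeader, haveState, haveTransactions;
        bool signaled, byHash, receiveDispatched;
    };

    // After: the same six flags packed into one byte.
    struct FlagsPacked
    {
        std::shared_ptr<int> ledger;
        std::uint8_t flags = 0;  // one bit per flag (0x01 .. 0x20)
    };

    static_assert(sizeof(FlagsPacked) <= sizeof(FlagsAsBools));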
@@ -61,24 +61,10 @@ public:
    void
    update(std::uint32_t seq);

-   /** Returns true if we got all the data. */
-   bool
-   isComplete() const
-   {
-       return complete_;
-   }
-
-   /** Returns false if we failed to get the data. */
-   bool
-   isFailed() const
-   {
-       return failed_;
-   }
-
    std::shared_ptr<Ledger const>
    getLedger() const
    {
-       return mLedger;
+       return ledger_;
    }

    std::uint32_t

@@ -175,14 +161,9 @@ private:
    clock_type& m_clock;
    clock_type::time_point mLastAction;

-   std::shared_ptr<Ledger> mLedger;
-   bool mHaveHeader;
-   bool mHaveState;
-   bool mHaveTransactions;
-   bool mSignaled;
-   bool mByHash;
+   std::shared_ptr<Ledger> ledger_;
    std::uint32_t mSeq;
-   Reason const mReason;
+   Reason const reason_;

    std::set<uint256> mRecentNodes;

@@ -193,7 +174,6 @@ private:
    std::vector<
        std::pair<std::weak_ptr<Peer>, std::shared_ptr<protocol::TMLedgerData>>>
        mReceivedData;
-   bool mReceiveDispatched;
    std::unique_ptr<PeerSet> mPeerSet;
};

@@ -42,7 +42,7 @@ public:
    InboundTransactions&
    operator=(InboundTransactions const&) = delete;

-   virtual ~InboundTransactions() = 0;
+   virtual ~InboundTransactions() = default;

    /** Find and return a transaction set, or nullptr if it is missing.
     *

@@ -59,13 +59,13 @@ public:
     *
     * @param setHash The transaction set ID (digest of the SHAMap root node).
     * @param peer The peer that sent the message.
-    * @param message The LedgerData message.
+    * @param data The data we received.
     */
    virtual void
    gotData(
        uint256 const& setHash,
        std::shared_ptr<Peer> peer,
-       std::shared_ptr<protocol::TMLedgerData> message) = 0;
+       std::vector<std::pair<SHAMapNodeID, Slice>> const& data) = 0;

    /** Add a transaction set.
     *

@@ -52,7 +52,8 @@ std::uint32_t constexpr SUB_TASK_MAX_TIMEOUTS = 10;

// max number of peers that do not support the ledger replay feature
// returned by the PeerSet before switch to fallback
-auto constexpr MAX_NO_FEATURE_PEER_COUNT = 2;
+std::uint8_t constexpr MAX_NO_FEATURE_PEER_COUNT = 2;

// subtask timeout value after fallback
auto constexpr SUB_TASK_FALLBACK_TIMEOUT = std::chrono::milliseconds{1000};

@@ -74,6 +74,13 @@ enum {
// millisecond for each ledger timeout
auto constexpr ledgerAcquireTimeout = 3000ms;

+static constexpr std::uint8_t const IL_BY_HASH = 0x01;
+static constexpr std::uint8_t const IL_SIGNALED = 0x02;
+static constexpr std::uint8_t const IL_RECEIVE_DISPATCHED = 0x04;
+static constexpr std::uint8_t const IL_HAVE_HEADER = 0x08;
+static constexpr std::uint8_t const IL_HAVE_STATE = 0x10;
+static constexpr std::uint8_t const IL_HAVE_TXNS = 0x20;
+
InboundLedger::InboundLedger(
    Application& app,
    uint256 const& hash,
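The IL_* constants above are single-bit masks kept in the userdata_ byte that the base class now carries (see the TimeoutCounter changes at the end of this diff). A minimal sketch of the bit idioms the rest of the diff relies on:

    std::uint8_t flags = 0;

    flags |= IL_HAVE_HEADER;                          // set a flag
    flags &= ~IL_BY_HASH;                             // clear a flag
    bool haveHeader = (flags & IL_HAVE_HEADER) != 0;  // test one flag

    // Testing two flags at once, as tryDB() does below:
    constexpr std::uint8_t both = IL_HAVE_TXNS | IL_HAVE_STATE;
    bool haveBoth = (flags & both) == both;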
@@ -85,17 +92,12 @@ InboundLedger::InboundLedger(
        app,
        hash,
        ledgerAcquireTimeout,
-       {jtLEDGER_DATA, "InboundLedger", 5},
+       {jtLEDGER_DATA, 5, "InboundLedger"},
+       IL_BY_HASH,
        app.journal("InboundLedger"))
    , m_clock(clock)
-   , mHaveHeader(false)
-   , mHaveState(false)
-   , mHaveTransactions(false)
-   , mSignaled(false)
-   , mByHash(true)
    , mSeq(seq)
-   , mReason(reason)
-   , mReceiveDispatched(false)
+   , reason_(reason)
    , mPeerSet(std::move(peerSet))
{
    JLOG(journal_.trace()) << "Acquiring ledger " << hash_;
@@ -109,44 +111,43 @@ InboundLedger::init(ScopedLockType& collectionLock)
    collectionLock.unlock();

    tryDB(app_.getNodeFamily().db());
-   if (failed_)
+
+   if (hasFailed())
        return;

-   if (!complete_)
+   if (!hasCompleted())
    {
        auto shardStore = app_.getShardStore();
-       if (mReason == Reason::SHARD)
+       if (reason_ == Reason::SHARD)
        {
            if (!shardStore)
            {
                JLOG(journal_.error())
                    << "Acquiring shard with no shard store available";
-               failed_ = true;
+               markFailed();
                return;
            }

-           mHaveHeader = false;
-           mHaveTransactions = false;
-           mHaveState = false;
-           mLedger.reset();
+           userdata_ &= ~(IL_HAVE_HEADER | IL_HAVE_STATE | IL_HAVE_TXNS);
+           ledger_.reset();

            tryDB(app_.getShardFamily()->db());
-           if (failed_)
+
+           if (hasFailed())
                return;
        }
        else if (shardStore && mSeq >= shardStore->earliestLedgerSeq())
        {
            if (auto l = shardStore->fetchLedger(hash_, mSeq))
            {
-               mHaveHeader = true;
-               mHaveTransactions = true;
-               mHaveState = true;
-               complete_ = true;
-               mLedger = std::move(l);
+               userdata_ |= (IL_HAVE_HEADER | IL_HAVE_STATE | IL_HAVE_TXNS);
+               markComplete();
+               ledger_ = std::move(l);
            }
        }
    }
-   if (!complete_)
+
+   if (!hasCompleted())
    {
        addPeers();
        queueJob(sl);
@@ -155,17 +156,17 @@ InboundLedger::init(ScopedLockType& collectionLock)

    JLOG(journal_.debug()) << "Acquiring ledger we already have in "
                           << " local store. " << hash_;
-   assert(mLedger->read(keylet::fees()));
-   mLedger->setImmutable();
+   assert(ledger_->read(keylet::fees()));
+   ledger_->setImmutable();

-   if (mReason == Reason::HISTORY || mReason == Reason::SHARD)
+   if (reason_ == Reason::HISTORY || reason_ == Reason::SHARD)
        return;

-   app_.getLedgerMaster().storeLedger(mLedger);
+   app_.getLedgerMaster().storeLedger(ledger_);

    // Check if this could be a newer fully-validated ledger
-   if (mReason == Reason::CONSENSUS)
-       app_.getLedgerMaster().checkAccept(mLedger);
+   if (reason_ == Reason::CONSENSUS)
+       app_.getLedgerMaster().checkAccept(ledger_);
}

std::size_t
@@ -196,13 +197,13 @@ InboundLedger::checkLocal()
    ScopedLockType sl(mtx_);
    if (!isDone())
    {
-       if (mLedger)
-           tryDB(mLedger->stateMap().family().db());
-       else if (mReason == Reason::SHARD)
+       if (ledger_)
+           tryDB(ledger_->stateMap().family().db());
+       else if (reason_ == Reason::SHARD)
            tryDB(app_.getShardFamily()->db());
        else
            tryDB(app_.getNodeFamily().db());
-       if (failed_ || complete_)
+
+       if (hasFailed() || hasCompleted())
        {
            done();
            return true;
@@ -220,14 +221,12 @@ InboundLedger::~InboundLedger()
        if (entry.second->type() == protocol::liAS_NODE)
            app_.getInboundLedgers().gotStaleData(entry.second);
    }

    if (!isDone())
    {
        JLOG(journal_.debug())
-           << "Acquire " << hash_ << " abort "
-           << ((timeouts_ == 0) ? std::string()
-                                : (std::string("timeouts:") +
-                                   std::to_string(timeouts_) + " "))
-           << mStats.get();
+           << "Acquire " << hash_ << " aborted. Timeouts: " << timeouts_
+           << ", stats: " << mStats.get();
    }
}

@@ -259,14 +258,14 @@ neededHashes(
std::vector<uint256>
InboundLedger::neededTxHashes(int max, SHAMapSyncFilter* filter) const
{
-   return neededHashes(mLedger->info().txHash, mLedger->txMap(), max, filter);
+   return neededHashes(ledger_->info().txHash, ledger_->txMap(), max, filter);
}

std::vector<uint256>
InboundLedger::neededStateHashes(int max, SHAMapSyncFilter* filter) const
{
    return neededHashes(
-       mLedger->info().accountHash, mLedger->stateMap(), max, filter);
+       ledger_->info().accountHash, ledger_->stateMap(), max, filter);
}

LedgerInfo
@@ -304,24 +303,24 @@ deserializePrefixedHeader(Slice data, bool hasHash)
void
InboundLedger::tryDB(NodeStore::Database& srcDB)
{
-   if (!mHaveHeader)
+   if (!(userdata_ & IL_HAVE_HEADER))
    {
        auto makeLedger = [&, this](Blob const& data) {
            JLOG(journal_.trace()) << "Ledger header found in fetch pack";
-           mLedger = std::make_shared<Ledger>(
+           ledger_ = std::make_shared<Ledger>(
                deserializePrefixedHeader(makeSlice(data)),
                app_.config(),
-               mReason == Reason::SHARD ? *app_.getShardFamily()
+               reason_ == Reason::SHARD ? *app_.getShardFamily()
                                         : app_.getNodeFamily());
-           if (mLedger->info().hash != hash_ ||
-               (mSeq != 0 && mSeq != mLedger->info().seq))
+           if (ledger_->info().hash != hash_ ||
+               (mSeq != 0 && mSeq != ledger_->info().seq))
            {
                // We know for a fact the ledger can never be acquired
                JLOG(journal_.warn())
                    << "hash " << hash_ << " seq " << std::to_string(mSeq)
                    << " cannot be a ledger";
-               mLedger.reset();
-               failed_ = true;
+               ledger_.reset();
+               markFailed();
            }
        };

@@ -331,16 +330,16 @@ InboundLedger::tryDB(NodeStore::Database& srcDB)
            JLOG(journal_.trace()) << "Ledger header found in local store";

            makeLedger(nodeObject->getData());
-           if (failed_)
+
+           if (hasFailed())
                return;

            // Store the ledger header if the source and destination differ
-           auto& dstDB{mLedger->stateMap().family().db()};
+           auto& dstDB{ledger_->stateMap().family().db()};
            if (std::addressof(dstDB) != std::addressof(srcDB))
            {
                Blob blob{nodeObject->getData()};
                dstDB.store(
-                   hotLEDGER, std::move(blob), hash_, mLedger->info().seq);
+                   hotLEDGER, std::move(blob), hash_, ledger_->info().seq);
            }
        }
        else
@@ -353,72 +352,73 @@ InboundLedger::tryDB(NodeStore::Database& srcDB)
            JLOG(journal_.trace()) << "Ledger header found in fetch pack";

            makeLedger(*data);
-           if (failed_)
+
+           if (hasFailed())
                return;

            // Store the ledger header in the ledger's database
-           mLedger->stateMap().family().db().store(
-               hotLEDGER, std::move(*data), hash_, mLedger->info().seq);
+           ledger_->stateMap().family().db().store(
+               hotLEDGER, std::move(*data), hash_, ledger_->info().seq);
        }

        if (mSeq == 0)
-           mSeq = mLedger->info().seq;
-       mLedger->stateMap().setLedgerSeq(mSeq);
-       mLedger->txMap().setLedgerSeq(mSeq);
-       mHaveHeader = true;
+           mSeq = ledger_->info().seq;
+       ledger_->stateMap().setLedgerSeq(mSeq);
+       ledger_->txMap().setLedgerSeq(mSeq);
+       userdata_ |= IL_HAVE_HEADER;
    }

-   if (!mHaveTransactions)
+   if (!(userdata_ & IL_HAVE_TXNS))
    {
-       if (mLedger->info().txHash.isZero())
+       if (ledger_->info().txHash.isZero())
        {
            JLOG(journal_.trace()) << "No TXNs to fetch";
-           mHaveTransactions = true;
+           userdata_ |= IL_HAVE_TXNS;
        }
        else
        {
            TransactionStateSF filter(
-               mLedger->txMap().family().db(), app_.getLedgerMaster());
-           if (mLedger->txMap().fetchRoot(
-                   SHAMapHash{mLedger->info().txHash}, &filter))
+               ledger_->txMap().family().db(), app_.getLedgerMaster());
+           if (ledger_->txMap().fetchRoot(
+                   SHAMapHash{ledger_->info().txHash}, &filter))
            {
                if (neededTxHashes(1, &filter).empty())
                {
                    JLOG(journal_.trace()) << "Had full txn map locally";
-                   mHaveTransactions = true;
+                   userdata_ |= IL_HAVE_TXNS;
                }
            }
        }
    }

-   if (!mHaveState)
+   if (!(userdata_ & IL_HAVE_STATE))
    {
-       if (mLedger->info().accountHash.isZero())
+       if (ledger_->info().accountHash.isZero())
        {
            JLOG(journal_.fatal())
                << "We are acquiring a ledger with a zero account hash";
-           failed_ = true;
+           markFailed();
            return;
        }
        AccountStateSF filter(
-           mLedger->stateMap().family().db(), app_.getLedgerMaster());
-       if (mLedger->stateMap().fetchRoot(
-               SHAMapHash{mLedger->info().accountHash}, &filter))
+           ledger_->stateMap().family().db(), app_.getLedgerMaster());
+       if (ledger_->stateMap().fetchRoot(
+               SHAMapHash{ledger_->info().accountHash}, &filter))
        {
            if (neededStateHashes(1, &filter).empty())
            {
                JLOG(journal_.trace()) << "Had full AS map locally";
-               mHaveState = true;
+               userdata_ |= IL_HAVE_STATE;
            }
        }
    }

-   if (mHaveTransactions && mHaveState)
+   if ((IL_HAVE_TXNS | IL_HAVE_STATE) ==
+       (userdata_ & (IL_HAVE_TXNS | IL_HAVE_STATE)))
    {
        JLOG(journal_.debug()) << "Had everything locally";
-       complete_ = true;
-       assert(mLedger->read(keylet::fees()));
-       mLedger->setImmutable();
+       markComplete();
+       assert(ledger_->read(keylet::fees()));
+       ledger_->setImmutable();
    }
}

@@ -447,7 +447,7 @@ InboundLedger::onTimer(bool wasProgress, ScopedLockType&)
            JLOG(journal_.warn())
                << timeouts_ << " timeouts for ledger " << hash_;
        }
-       failed_ = true;
+       markFailed();
        done();
        return;
    }
@@ -456,20 +456,19 @@ InboundLedger::onTimer(bool wasProgress, ScopedLockType&)
    {
        checkLocal();

-       mByHash = true;
+       userdata_ |= IL_BY_HASH;

-       std::size_t pc = getPeerCount();
        JLOG(journal_.debug())
-           << "No progress(" << pc << ") for ledger " << hash_;
+           << "No progress(" << getPeerCount() << ") for ledger " << hash_;

        // addPeers triggers if the reason is not HISTORY
        // So if the reason IS HISTORY, need to trigger after we add
        // otherwise, we need to trigger before we add
        // so each peer gets triggered once
-       if (mReason != Reason::HISTORY)
+       if (reason_ != Reason::HISTORY)
            trigger(nullptr, TriggerReason::timeout);
        addPeers();
-       if (mReason == Reason::HISTORY)
+       if (reason_ == Reason::HISTORY)
            trigger(nullptr, TriggerReason::timeout);
    }
}
@@ -484,7 +483,7 @@ InboundLedger::addPeers()
        [this](auto peer) {
            // For historical nodes, do not trigger too soon
            // since a fetch pack is probably coming
-           if (mReason != Reason::HISTORY)
+           if (reason_ != Reason::HISTORY)
                trigger(peer, TriggerReason::added);
        });
}
@@ -498,35 +497,36 @@ InboundLedger::pmDowncast()
void
InboundLedger::done()
{
-   if (mSignaled)
+   // Nothing to do if it was already signaled.
+   if (userdata_.fetch_or(IL_SIGNALED) & IL_SIGNALED)
        return;

-   mSignaled = true;
    touch();

-   JLOG(journal_.debug()) << "Acquire " << hash_ << (failed_ ? " fail " : " ")
+   JLOG(journal_.debug()) << "Acquire " << hash_
+                          << (hasFailed() ? " fail " : " ")
                           << ((timeouts_ == 0)
                                   ? std::string()
                                   : (std::string("timeouts:") +
                                      std::to_string(timeouts_) + " "))
                           << mStats.get();

-   assert(complete_ || failed_);
+   assert(hasCompleted() || hasFailed());

-   if (complete_ && !failed_ && mLedger)
+   if (hasCompleted() && !hasFailed() && ledger_)
    {
-       assert(mLedger->read(keylet::fees()));
-       mLedger->setImmutable();
-       switch (mReason)
+       assert(ledger_->read(keylet::fees()));
+       ledger_->setImmutable();
+       switch (reason_)
        {
            case Reason::SHARD:
-               app_.getShardStore()->setStored(mLedger);
+               app_.getShardStore()->setStored(ledger_);
                [[fallthrough]];
            case Reason::HISTORY:
                app_.getInboundLedgers().onLedgerFetched();
                break;
            default:
-               app_.getLedgerMaster().storeLedger(mLedger);
+               app_.getLedgerMaster().storeLedger(ledger_);
                break;
        }
    }
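The fetch_or above is a one-shot latch: assuming userdata_ is a std::atomic<std::uint8_t> (its declaration is not part of this diff), fetch_or sets IL_SIGNALED and returns the previous value, so exactly one caller of done() observes the bit clear and runs the body. gotData() uses the same pattern with IL_RECEIVE_DISPATCHED further down. A minimal sketch:

    #include <atomic>
    #include <cstdint>

    constexpr std::uint8_t SIGNALED = 0x02;
    std::atomic<std::uint8_t> userdata{0};

    bool signalOnce()
    {
        // fetch_or returns the value held before the bit was set, so only
        // the first caller sees SIGNALED clear and wins the race.
        return (userdata.fetch_or(SIGNALED) & SIGNALED) == 0;
    }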
@@ -534,7 +534,7 @@ InboundLedger::done()
    // We hold the PeerSet lock, so must dispatch
    app_.getJobQueue().addJob(
        jtLEDGER_DATA, "AcquisitionDone", [self = shared_from_this()]() {
-           if (self->complete_ && !self->failed_)
+           if (self->hasCompleted() && !self->hasFailed())
            {
                self->app_.getLedgerMaster().checkAccept(self->getLedger());
                self->app_.getLedgerMaster().tryAdvance();
@@ -554,9 +554,9 @@ InboundLedger::trigger(std::shared_ptr<Peer> const& peer, TriggerReason reason)

    if (isDone())
    {
-       JLOG(journal_.debug())
-           << "Trigger on ledger: " << hash_ << (complete_ ? " completed" : "")
-           << (failed_ ? " failed" : "");
+       JLOG(journal_.debug()) << "Trigger on ledger: " << hash_
+                              << (hasCompleted() ? " completed" : "")
+                              << (hasFailed() ? " failed" : "");
        return;
    }

@@ -567,19 +567,30 @@ InboundLedger::trigger(std::shared_ptr<Peer> const& peer, TriggerReason reason)
    else
        stream << "Trigger acquiring ledger " << hash_;

-   if (complete_ || failed_)
-       stream << "complete=" << complete_ << " failed=" << failed_;
+   if (hasCompleted() || hasFailed())
+       stream << "complete=" << hasCompleted()
+              << " failed=" << hasFailed();
    else
-       stream << "header=" << mHaveHeader << " tx=" << mHaveTransactions
-              << " as=" << mHaveState;
+   {
+       auto [haveHeader, haveState, haveTransactions] =
+           [](std::uint8_t flags) {
+               return std::make_tuple(
+                   (flags & IL_HAVE_HEADER) == IL_HAVE_HEADER,
+                   (flags & IL_HAVE_STATE) == IL_HAVE_STATE,
+                   (flags & IL_HAVE_TXNS) == IL_HAVE_TXNS);
+           }(userdata_);
+
+       stream << "header=" << haveHeader << " tx=" << haveTransactions
+              << " as=" << haveState;
+   }
}

-   if (!mHaveHeader)
+   if (!(userdata_ & IL_HAVE_HEADER))
    {
        tryDB(
-           mReason == Reason::SHARD ? app_.getShardFamily()->db()
+           reason_ == Reason::SHARD ? app_.getShardFamily()->db()
                                     : app_.getNodeFamily().db());
-       if (failed_)
+
+       if (hasFailed())
        {
            JLOG(journal_.warn()) << " failed local for " << hash_;
            return;
@@ -594,7 +605,7 @@ InboundLedger::trigger(std::shared_ptr<Peer> const& peer, TriggerReason reason)
    // Be more aggressive if we've timed out at least once
    tmGL.set_querytype(protocol::qtINDIRECT);

-   if (!progress_ && !failed_ && mByHash &&
+   if (!hasProgressed() && !hasFailed() && (userdata_ & IL_BY_HASH) &&
        (timeouts_ > ledgerBecomeAggressiveThreshold))
    {
        auto need = getNeededHashes();
@@ -631,7 +642,7 @@ InboundLedger::trigger(std::shared_ptr<Peer> const& peer, TriggerReason reason)
            peerIds.begin(), peerIds.end(), [this, &packet](auto id) {
                if (auto p = app_.overlay().findPeerByShortID(id))
                {
-                   mByHash = false;
+                   userdata_ &= ~IL_BY_HASH;
                    p->send(packet);
                }
            });

@@ -640,17 +651,15 @@ InboundLedger::trigger(std::shared_ptr<Peer> const& peer, TriggerReason reason)
        {
            JLOG(journal_.info())
                << "getNeededHashes says acquire is complete";
-           mHaveHeader = true;
-           mHaveTransactions = true;
-           mHaveState = true;
-           complete_ = true;
+           userdata_ |= (IL_HAVE_HEADER | IL_HAVE_STATE | IL_HAVE_TXNS);
+           markComplete();
        }
    }
}

    // We can't do much without the header data because we don't know the
    // state or transaction root hashes.
-   if (!mHaveHeader && !failed_)
+   if (!(userdata_ & IL_HAVE_HEADER) && !hasFailed())
    {
        tmGL.set_itype(protocol::liBASE);
        if (mSeq != 0)

@@ -661,8 +670,8 @@ InboundLedger::trigger(std::shared_ptr<Peer> const& peer, TriggerReason reason)
        return;
    }

-   if (mLedger)
-       tmGL.set_ledgerseq(mLedger->info().seq);
+   if (ledger_)
+       tmGL.set_ledgerseq(ledger_->info().seq);

    if (reason != TriggerReason::reply)
    {
@@ -679,15 +688,16 @@ InboundLedger::trigger(std::shared_ptr<Peer> const& peer, TriggerReason reason)

    // Get the state data first because it's the most likely to be useful
    // if we wind up abandoning this fetch.
-   if (mHaveHeader && !mHaveState && !failed_)
+   if (((userdata_ & (IL_HAVE_HEADER | IL_HAVE_STATE)) == IL_HAVE_HEADER) &&
+       !hasFailed())
    {
-       assert(mLedger);
+       assert(ledger_);

-       if (!mLedger->stateMap().isValid())
+       if (!ledger_->stateMap().isValid())
        {
-           failed_ = true;
+           markFailed();
        }
-       else if (mLedger->stateMap().getHash().isZero())
+       else if (ledger_->stateMap().getHash().isZero())
        {
            // we need the root node
            tmGL.set_itype(protocol::liAS_NODE);

@@ -700,27 +710,27 @@ InboundLedger::trigger(std::shared_ptr<Peer> const& peer, TriggerReason reason)
        else
        {
            AccountStateSF filter(
-               mLedger->stateMap().family().db(), app_.getLedgerMaster());
+               ledger_->stateMap().family().db(), app_.getLedgerMaster());

            // Release the lock while we process the large state map
            sl.unlock();
            auto nodes =
-               mLedger->stateMap().getMissingNodes(missingNodesFind, &filter);
+               ledger_->stateMap().getMissingNodes(missingNodesFind, &filter);
            sl.lock();

            // Make sure nothing happened while we released the lock
-           if (!failed_ && !complete_ && !mHaveState)
+           if (!hasFailed() && !hasCompleted() && !(userdata_ & IL_HAVE_STATE))
            {
                if (nodes.empty())
                {
-                   if (!mLedger->stateMap().isValid())
-                       failed_ = true;
+                   if (!ledger_->stateMap().isValid())
+                       markFailed();
                    else
                    {
-                       mHaveState = true;
+                       userdata_ |= IL_HAVE_STATE;

-                       if (mHaveTransactions)
-                           complete_ = true;
+                       if (userdata_ & IL_HAVE_TXNS)
+                           markComplete();
                    }
                }
                else
@@ -751,15 +761,16 @@ InboundLedger::trigger(std::shared_ptr<Peer> const& peer, TriggerReason reason)
        }
    }

-   if (mHaveHeader && !mHaveTransactions && !failed_)
+   if (((userdata_ & (IL_HAVE_HEADER | IL_HAVE_TXNS)) == IL_HAVE_HEADER) &&
+       !hasFailed())
    {
-       assert(mLedger);
+       assert(ledger_);

-       if (!mLedger->txMap().isValid())
+       if (!ledger_->txMap().isValid())
        {
-           failed_ = true;
+           markFailed();
        }
-       else if (mLedger->txMap().getHash().isZero())
+       else if (ledger_->txMap().getHash().isZero())
        {
            // we need the root node
            tmGL.set_itype(protocol::liTX_NODE);

@@ -772,21 +783,21 @@ InboundLedger::trigger(std::shared_ptr<Peer> const& peer, TriggerReason reason)
        else
        {
            TransactionStateSF filter(
-               mLedger->txMap().family().db(), app_.getLedgerMaster());
+               ledger_->txMap().family().db(), app_.getLedgerMaster());

            auto nodes =
-               mLedger->txMap().getMissingNodes(missingNodesFind, &filter);
+               ledger_->txMap().getMissingNodes(missingNodesFind, &filter);

            if (nodes.empty())
            {
-               if (!mLedger->txMap().isValid())
-                   failed_ = true;
+               if (!ledger_->txMap().isValid())
+                   markFailed();
                else
                {
-                   mHaveTransactions = true;
+                   userdata_ |= IL_HAVE_TXNS;

-                   if (mHaveState)
-                       complete_ = true;
+                   if (userdata_ & IL_HAVE_STATE)
+                       markComplete();
                }
            }
            else
@@ -814,11 +825,11 @@ InboundLedger::trigger(std::shared_ptr<Peer> const& peer, TriggerReason reason)
        }
    }

-   if (complete_ || failed_)
+   if (hasCompleted() || hasFailed())
    {
        JLOG(journal_.debug())
-           << "Done:" << (complete_ ? " complete" : "")
-           << (failed_ ? " failed " : " ") << mLedger->info().seq;
+           << "Done:" << (hasCompleted() ? " complete" : "")
+           << (hasFailed() ? " failed " : " ") << ledger_->info().seq;
        sl.unlock();
        done();
    }
@@ -876,41 +887,41 @@ InboundLedger::takeHeader(std::string const& data)
    // Return value: true=normal, false=bad data
    JLOG(journal_.trace()) << "got header acquiring ledger " << hash_;

-   if (complete_ || failed_ || mHaveHeader)
+   if (hasCompleted() || hasFailed() || (userdata_ & IL_HAVE_HEADER))
        return true;

-   auto* f = mReason == Reason::SHARD ? app_.getShardFamily()
+   auto* f = reason_ == Reason::SHARD ? app_.getShardFamily()
                                       : &app_.getNodeFamily();
-   mLedger = std::make_shared<Ledger>(
+   ledger_ = std::make_shared<Ledger>(
        deserializeHeader(makeSlice(data)), app_.config(), *f);
-   if (mLedger->info().hash != hash_ ||
-       (mSeq != 0 && mSeq != mLedger->info().seq))
+   if (ledger_->info().hash != hash_ ||
+       (mSeq != 0 && mSeq != ledger_->info().seq))
    {
        JLOG(journal_.warn())
-           << "Acquire hash mismatch: " << mLedger->info().hash
+           << "Acquire hash mismatch: " << ledger_->info().hash
            << "!=" << hash_;
-       mLedger.reset();
+       ledger_.reset();
        return false;
    }
    if (mSeq == 0)
-       mSeq = mLedger->info().seq;
-   mLedger->stateMap().setLedgerSeq(mSeq);
-   mLedger->txMap().setLedgerSeq(mSeq);
-   mHaveHeader = true;
+       mSeq = ledger_->info().seq;
+   ledger_->stateMap().setLedgerSeq(mSeq);
+   ledger_->txMap().setLedgerSeq(mSeq);
+   userdata_ |= IL_HAVE_HEADER;

    Serializer s(data.size() + 4);
    s.add32(HashPrefix::ledgerMaster);
    s.addRaw(data.data(), data.size());
    f->db().store(hotLEDGER, std::move(s.modData()), hash_, mSeq);

-   if (mLedger->info().txHash.isZero())
-       mHaveTransactions = true;
+   if (ledger_->info().txHash.isZero())
+       userdata_ |= IL_HAVE_TXNS;

-   if (mLedger->info().accountHash.isZero())
-       mHaveState = true;
+   if (ledger_->info().accountHash.isZero())
+       userdata_ |= IL_HAVE_STATE;

-   mLedger->txMap().setSynching();
-   mLedger->stateMap().setSynching();
+   ledger_->txMap().setSynching();
+   ledger_->stateMap().setSynching();

    return true;
}
@@ -921,7 +932,7 @@ InboundLedger::takeHeader(std::string const& data)
void
InboundLedger::receiveNode(protocol::TMLedgerData& packet, SHAMapAddNode& san)
{
-   if (!mHaveHeader)
+   if (!(userdata_ & IL_HAVE_HEADER))
    {
        JLOG(journal_.warn()) << "Missing ledger header";
        san.incInvalid();

@@ -929,13 +940,13 @@ InboundLedger::receiveNode(protocol::TMLedgerData& packet, SHAMapAddNode& san)
    }
    if (packet.type() == protocol::liTX_NODE)
    {
-       if (mHaveTransactions || failed_)
+       if ((userdata_ & IL_HAVE_TXNS) || hasFailed())
        {
            san.incDuplicate();
            return;
        }
    }
-   else if (mHaveState || failed_)
+   else if ((userdata_ & IL_HAVE_STATE) || hasFailed())
    {
        san.incDuplicate();
        return;

@@ -945,15 +956,15 @@ InboundLedger::receiveNode(protocol::TMLedgerData& packet, SHAMapAddNode& san)
        -> std::tuple<SHAMap&, SHAMapHash, std::unique_ptr<SHAMapSyncFilter>> {
        if (packet.type() == protocol::liTX_NODE)
            return {
-               mLedger->txMap(),
-               SHAMapHash{mLedger->info().txHash},
+               ledger_->txMap(),
+               SHAMapHash{ledger_->info().txHash},
                std::make_unique<TransactionStateSF>(
-                   mLedger->txMap().family().db(), app_.getLedgerMaster())};
+                   ledger_->txMap().family().db(), app_.getLedgerMaster())};
        return {
-           mLedger->stateMap(),
-           SHAMapHash{mLedger->info().accountHash},
+           ledger_->stateMap(),
+           SHAMapHash{ledger_->info().accountHash},
            std::make_unique<AccountStateSF>(
-               mLedger->stateMap().family().db(), app_.getLedgerMaster())};
+               ledger_->stateMap().family().db(), app_.getLedgerMaster())};
    }();

    try
@@ -993,13 +1004,14 @@ InboundLedger::receiveNode(protocol::TMLedgerData& packet, SHAMapAddNode& san)
    if (!map.isSynching())
    {
        if (packet.type() == protocol::liTX_NODE)
-           mHaveTransactions = true;
+           userdata_ |= IL_HAVE_TXNS;
        else
-           mHaveState = true;
+           userdata_ |= IL_HAVE_STATE;

-       if (mHaveTransactions && mHaveState)
+       if ((IL_HAVE_STATE | IL_HAVE_TXNS) ==
+           (userdata_ & (IL_HAVE_STATE | IL_HAVE_TXNS)))
        {
-           complete_ = true;
+           markComplete();
            done();
        }
    }
@@ -1011,22 +1023,22 @@ InboundLedger::receiveNode(protocol::TMLedgerData& packet, SHAMapAddNode& san)
bool
InboundLedger::takeAsRootNode(Slice const& data, SHAMapAddNode& san)
{
-   if (failed_ || mHaveState)
+   if (hasFailed() || (userdata_ & IL_HAVE_STATE))
    {
        san.incDuplicate();
        return true;
    }

-   if (!mHaveHeader)
+   if (!(userdata_ & IL_HAVE_HEADER))
    {
        assert(false);
        return false;
    }

    AccountStateSF filter(
-       mLedger->stateMap().family().db(), app_.getLedgerMaster());
-   san += mLedger->stateMap().addRootNode(
-       SHAMapHash{mLedger->info().accountHash}, data, &filter);
+       ledger_->stateMap().family().db(), app_.getLedgerMaster());
+   san += ledger_->stateMap().addRootNode(
+       SHAMapHash{ledger_->info().accountHash}, data, &filter);
    return san.isGood();
}

@@ -1036,22 +1048,22 @@ InboundLedger::takeAsRootNode(Slice const& data, SHAMapAddNode& san)
bool
InboundLedger::takeTxRootNode(Slice const& data, SHAMapAddNode& san)
{
-   if (failed_ || mHaveTransactions)
+   if (hasFailed() || (userdata_ & IL_HAVE_TXNS))
    {
        san.incDuplicate();
        return true;
    }

-   if (!mHaveHeader)
+   if (!(userdata_ & IL_HAVE_HEADER))
    {
        assert(false);
        return false;
    }

    TransactionStateSF filter(
-       mLedger->txMap().family().db(), app_.getLedgerMaster());
-   san += mLedger->txMap().addRootNode(
-       SHAMapHash{mLedger->info().txHash}, data, &filter);
+       ledger_->txMap().family().db(), app_.getLedgerMaster());
+   san += ledger_->txMap().addRootNode(
+       SHAMapHash{ledger_->info().txHash}, data, &filter);
    return san.isGood();
}

@@ -1060,17 +1072,17 @@ InboundLedger::getNeededHashes()
{
    std::vector<neededHash_t> ret;

-   if (!mHaveHeader)
+   if (!(userdata_ & IL_HAVE_HEADER))
    {
        ret.push_back(
            std::make_pair(protocol::TMGetObjectByHash::otLEDGER, hash_));
        return ret;
    }

-   if (!mHaveState)
+   if (!(userdata_ & IL_HAVE_STATE))
    {
        AccountStateSF filter(
-           mLedger->stateMap().family().db(), app_.getLedgerMaster());
+           ledger_->stateMap().family().db(), app_.getLedgerMaster());
        for (auto const& h : neededStateHashes(4, &filter))
        {
            ret.push_back(

@@ -1078,10 +1090,10 @@ InboundLedger::getNeededHashes()
        }
    }

-   if (!mHaveTransactions)
+   if (!(userdata_ & IL_HAVE_TXNS))
    {
        TransactionStateSF filter(
-           mLedger->txMap().family().db(), app_.getLedgerMaster());
+           ledger_->txMap().family().db(), app_.getLedgerMaster());
        for (auto const& h : neededTxHashes(4, &filter))
        {
            ret.push_back(std::make_pair(
@@ -1107,10 +1119,9 @@ InboundLedger::gotData(

    mReceivedData.emplace_back(peer, data);

-   if (mReceiveDispatched)
+   if (userdata_.fetch_or(IL_RECEIVE_DISPATCHED) & IL_RECEIVE_DISPATCHED)
        return false;

-   mReceiveDispatched = true;
    return true;
}

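This is the same test-and-set latch as in done() above: the first thread to enqueue received data returns true and dispatches the processing job, while later callers see IL_RECEIVE_DISPATCHED already set and return false, so no duplicate job is queued.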
@@ -1142,7 +1153,7 @@ InboundLedger::processData(

    try
    {
-       if (!mHaveHeader)
+       if (!(userdata_ & IL_HAVE_HEADER))
        {
            if (!takeHeader(packet.nodes(0).nodedata()))
            {

@@ -1154,13 +1165,13 @@ InboundLedger::processData(
            san.incUseful();
        }

-       if (!mHaveState && (packet.nodes().size() > 1) &&
+       if (!(userdata_ & IL_HAVE_STATE) && (packet.nodes().size() > 1) &&
            !takeAsRootNode(makeSlice(packet.nodes(1).nodedata()), san))
        {
            JLOG(journal_.warn()) << "Included AS root invalid";
        }

-       if (!mHaveTransactions && (packet.nodes().size() > 2) &&
+       if (!(userdata_ & IL_HAVE_TXNS) && (packet.nodes().size() > 2) &&
            !takeTxRootNode(makeSlice(packet.nodes(2).nodedata()), san))
        {
            JLOG(journal_.warn()) << "Included TX root invalid";

@@ -1175,7 +1186,7 @@ InboundLedger::processData(
    }

    if (san.isUseful())
-       progress_ = true;
+       makeProgress();

    mStats += san;
    return san.getGood();

@@ -1216,7 +1227,7 @@ InboundLedger::processData(
        << " node stats: " << san.get();

    if (san.isUseful())
-       progress_ = true;
+       makeProgress();

    mStats += san;
    return san.getGood();
@@ -1324,7 +1335,7 @@ InboundLedger::runData()

    if (mReceivedData.empty())
    {
-       mReceiveDispatched = false;
+       userdata_ &= ~IL_RECEIVE_DISPATCHED;
        break;
    }

@@ -1358,45 +1369,49 @@ InboundLedger::getJson(int)

    ret[jss::hash] = to_string(hash_);

-   if (complete_)
+   if (hasCompleted())
        ret[jss::complete] = true;

-   if (failed_)
+   if (hasFailed())
        ret[jss::failed] = true;

-   if (!complete_ && !failed_)
+   if (!hasCompleted() && !hasFailed())
        ret[jss::peers] = static_cast<int>(mPeerSet->getPeerIds().size());

-   ret[jss::have_header] = mHaveHeader;
+   auto [haveHeader, haveState, haveTransactions] = [](std::uint8_t flags) {
+       return std::make_tuple(
+           (flags & IL_HAVE_HEADER) == IL_HAVE_HEADER,
+           (flags & IL_HAVE_STATE) == IL_HAVE_STATE,
+           (flags & IL_HAVE_TXNS) == IL_HAVE_TXNS);
+   }(userdata_);

-   if (mHaveHeader)
+   ret[jss::have_header] = haveHeader;
+
+   if (haveHeader)
    {
-       ret[jss::have_state] = mHaveState;
-       ret[jss::have_transactions] = mHaveTransactions;
+       ret[jss::have_state] = haveState;
+
+       if (haveHeader && !haveState)
+       {
+           Json::Value hv(Json::arrayValue);
+           for (auto const& h : neededStateHashes(16, nullptr))
+               hv.append(to_string(h));
+           ret[jss::needed_state_hashes] = hv;
+       }
+
+       ret[jss::have_transactions] = haveTransactions;
+
+       if (haveHeader && !haveTransactions)
+       {
+           Json::Value hv(Json::arrayValue);
+           for (auto const& h : neededTxHashes(16, nullptr))
+               hv.append(to_string(h));
+           ret[jss::needed_transaction_hashes] = hv;
+       }
    }

    ret[jss::timeouts] = timeouts_;

-   if (mHaveHeader && !mHaveState)
-   {
-       Json::Value hv(Json::arrayValue);
-       for (auto const& h : neededStateHashes(16, nullptr))
-       {
-           hv.append(to_string(h));
-       }
-       ret[jss::needed_state_hashes] = hv;
-   }
-
-   if (mHaveHeader && !mHaveTransactions)
-   {
-       Json::Value hv(Json::arrayValue);
-       for (auto const& h : neededTxHashes(16, nullptr))
-       {
-           hv.append(to_string(h));
-       }
-       ret[jss::needed_transaction_hashes] = hv;
-   }
-
    return ret;
}

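Decoding the packed byte back into named booleans is done through an immediately invoked lambda that takes the flags by value, so all three results derive from one read of userdata_ rather than three separate loads. A self-contained sketch of the same pattern (the flag values mirror the IL_* constants; the function name is illustrative):

    #include <atomic>
    #include <cstdint>
    #include <tuple>

    constexpr std::uint8_t HAVE_HEADER = 0x08;
    constexpr std::uint8_t HAVE_STATE = 0x10;
    constexpr std::uint8_t HAVE_TXNS = 0x20;

    std::tuple<bool, bool, bool>
    decode(std::atomic<std::uint8_t> const& userdata)
    {
        // One load, three tests against the same snapshot: the booleans
        // cannot disagree about which value they were derived from.
        std::uint8_t const flags = userdata.load();
        return {
            (flags & HAVE_HEADER) == HAVE_HEADER,
            (flags & HAVE_STATE) == HAVE_STATE,
            (flags & HAVE_TXNS) == HAVE_TXNS};
    }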
@@ -110,13 +110,13 @@ public:
        }
    }

-   if (inbound->isFailed())
+   if (inbound->hasFailed())
        return {};

    if (!isNew)
        inbound->update(seq);

-   if (!inbound->isComplete())
+   if (!inbound->hasCompleted())
        return {};

    if (reason == InboundLedger::Reason::HISTORY)

@@ -17,49 +17,42 @@
*/
//==============================================================================

#include <ripple/app/ledger/InboundLedgers.h>
#include <ripple/app/ledger/InboundTransactions.h>
#include <ripple/app/ledger/impl/TransactionAcquire.h>
#include <ripple/app/main/Application.h>
#include <ripple/app/misc/NetworkOPs.h>
#include <ripple/basics/Log.h>
#include <ripple/core/JobQueue.h>
#include <ripple/protocol/RippleLedgerHash.h>
#include <ripple/resource/Fees.h>
#include <cassert>
#include <memory>
#include <mutex>

namespace ripple {

-enum {
-    // Ideal number of peers to start with
-    startPeers = 2,
-
-    // How many rounds to keep a set
-    setKeepRounds = 3,
-};
-
-class InboundTransactionSet
+struct InboundTransactionSet
{
    // A transaction set we generated, acquired, or are acquiring
-public:
-   std::uint32_t mSeq;
-   TransactionAcquire::pointer mAcquire;
-   std::shared_ptr<SHAMap> mSet;
+   std::shared_ptr<TransactionAcquire> acquire;
+   std::shared_ptr<SHAMap> txset;
+   std::uint32_t seq = 0;

-   InboundTransactionSet(std::uint32_t seq, std::shared_ptr<SHAMap> const& set)
-       : mSeq(seq), mSet(set)
-   {
-       ;
-   }
-   InboundTransactionSet() : mSeq(0)
-   {
-       ;
-   }
+   InboundTransactionSet() = default;
+   InboundTransactionSet(InboundTransactionSet&& other) = default;
+
+   InboundTransactionSet&
+   operator=(InboundTransactionSet&& other) = delete;
+
+   InboundTransactionSet(InboundTransactionSet const& other) = delete;
+   InboundTransactionSet&
+   operator=(InboundTransactionSet const& other) = delete;
};

class InboundTransactionsImp : public InboundTransactions
{
+   /** The initial number of peers to query when fetching a transaction set. */
+   static constexpr int const startPeers = 2;
+
+   /** How many rounds to keep an inbound transaction set for */
+   static constexpr std::uint32_t const setKeepRounds = 3;
+
public:
    InboundTransactionsImp(
        Application& app,
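InboundTransactionSet is now a move-only aggregate with a default member initializer in place of the hand-written constructors. Default-constructibility still matters because the map's operator[] creates entries in place. A sketch of that usage with a simplified stand-in type (names here are illustrative):

    #include <cstdint>
    #include <memory>
    #include <unordered_map>

    struct Entry
    {
        std::shared_ptr<int> payload;  // stand-in for acquire/txset
        std::uint32_t seq = 0;         // initializer replaces the old ctors

        Entry() = default;
        Entry(Entry&&) = default;
        Entry(Entry const&) = delete;
        Entry& operator=(Entry const&) = delete;
    };

    std::unordered_map<int, Entry> map;

    void touch()
    {
        Entry& e = map[42];  // default-constructs in place; never copies
        e.seq = 7;
    }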
@@ -67,112 +60,78 @@ public:
        std::function<void(std::shared_ptr<SHAMap> const&, bool)> gotSet,
        std::unique_ptr<PeerSetBuilder> peerSetBuilder)
        : app_(app)
-       , m_seq(0)
-       , m_zeroSet(m_map[uint256()])
        , m_gotSet(std::move(gotSet))
-       , m_peerSetBuilder(std::move(peerSetBuilder))
        , j_(app_.journal("InboundTransactions"))
+       , peerSetBuilder_(std::move(peerSetBuilder))
    {
-       m_zeroSet.mSet = std::make_shared<SHAMap>(
+       emptyMap_ = std::make_shared<SHAMap>(
            SHAMapType::TRANSACTION, uint256(), app_.getNodeFamily());
-       m_zeroSet.mSet->setUnbacked();
-   }
-
-   TransactionAcquire::pointer
-   getAcquire(uint256 const& hash)
-   {
-       {
-           std::lock_guard sl(mLock);
-
-           auto it = m_map.find(hash);
-
-           if (it != m_map.end())
-               return it->second.mAcquire;
-       }
-       return {};
+       emptyMap_->setUnbacked();
    }

    std::shared_ptr<SHAMap>
    getSet(uint256 const& hash, bool acquire) override
    {
-       TransactionAcquire::pointer ta;
+       if (hash.isZero())
+           return emptyMap_;
+
+       std::shared_ptr<TransactionAcquire> ta;

        {
-           std::lock_guard sl(mLock);
+           std::lock_guard sl(lock_);
+
+           if (stopping_)
+               return {};

            if (auto it = m_map.find(hash); it != m_map.end())
            {
                if (acquire)
                {
-                   it->second.mSeq = m_seq;
-                   if (it->second.mAcquire)
-                   {
-                       it->second.mAcquire->stillNeed();
-                   }
+                   it->second.seq = m_seq;
+
+                   if (it->second.acquire)
+                       it->second.acquire->stillNeed();
                }
-               return it->second.mSet;
+
+               return it->second.txset;
            }

-           if (!acquire || stopping_)
-               return std::shared_ptr<SHAMap>();
+           if (acquire)
+           {
+               ta = std::make_shared<TransactionAcquire>(
+                   app_, hash, peerSetBuilder_->build());

-           ta = std::make_shared<TransactionAcquire>(
-               app_, hash, m_peerSetBuilder->build());
-
-           auto& obj = m_map[hash];
-           obj.mAcquire = ta;
-           obj.mSeq = m_seq;
+               auto& obj = m_map[hash];
+               obj.acquire = ta;
+               obj.seq = m_seq;
+           }
        }

-       ta->init(startPeers);
+       if (ta)
+           ta->init(startPeers);

        return {};
    }

-   /** We received a TMLedgerData from a peer.
-    */
+   /** We received data from a peer. */
    void
    gotData(
-       LedgerHash const& hash,
+       uint256 const& hash,
        std::shared_ptr<Peer> peer,
-       std::shared_ptr<protocol::TMLedgerData> packet_ptr) override
+       std::vector<std::pair<SHAMapNodeID, Slice>> const& data) override
    {
-       protocol::TMLedgerData& packet = *packet_ptr;
+       assert(!data.empty());

-       JLOG(j_.trace()) << "Got data (" << packet.nodes().size()
-                        << ") for acquiring ledger: " << hash;
-
-       TransactionAcquire::pointer ta = getAcquire(hash);
-
-       if (ta == nullptr)
-       {
-           peer->charge(Resource::feeUnwantedData);
+       if (hash.isZero())
            return;
-       }

-       std::vector<std::pair<SHAMapNodeID, Slice>> data;
-       data.reserve(packet.nodes().size());
+       auto ta = [this, &hash]() -> std::shared_ptr<TransactionAcquire> {
+           std::lock_guard sl(lock_);
+           if (auto it = m_map.find(hash); it != m_map.end())
+               return it->second.acquire;
+           return {};
+       }();

-       for (auto const& node : packet.nodes())
-       {
-           if (!node.has_nodeid() || !node.has_nodedata())
-           {
-               peer->charge(Resource::feeInvalidRequest);
-               return;
-           }
-
-           auto const id = deserializeSHAMapNodeID(node.nodeid());
-
-           if (!id)
-           {
-               peer->charge(Resource::feeBadData);
-               return;
-           }
-
-           data.emplace_back(std::make_pair(*id, makeSlice(node.nodedata())));
-       }
-
-       if (!ta->takeNodes(data, peer).isUseful())
+       if (!ta || !ta->takeNodes(data, peer).isUseful())
            peer->charge(Resource::feeUnwantedData);
    }

@@ -182,22 +141,28 @@ public:
        std::shared_ptr<SHAMap> const& set,
        bool fromAcquire) override
    {
+       if (hash.isZero())
+           return;
+
        bool isNew = true;

        {
-           std::lock_guard sl(mLock);
+           std::lock_guard sl(lock_);
+
+           if (stopping_)
+               return;

            auto& inboundSet = m_map[hash];

-           if (inboundSet.mSeq < m_seq)
-               inboundSet.mSeq = m_seq;
+           if (inboundSet.seq < m_seq)
+               inboundSet.seq = m_seq;

-           if (inboundSet.mSet)
+           if (inboundSet.txset)
                isNew = false;
            else
-               inboundSet.mSet = set;
+               inboundSet.txset = set;

-           inboundSet.mAcquire.reset();
+           inboundSet.acquire.reset();
        }

        if (isNew)
@@ -207,24 +172,23 @@ public:
    void
    newRound(std::uint32_t seq) override
    {
-       std::lock_guard lock(mLock);
+       assert(
+           seq <= std::numeric_limits<std::uint32_t>::max() - setKeepRounds);

-       // Protect zero set from expiration
-       m_zeroSet.mSeq = seq;
+       std::lock_guard lock(lock_);

-       if (m_seq != seq)
+       if (!stopping_ && m_seq != seq)
        {
            m_seq = seq;

            auto it = m_map.begin();

-           std::uint32_t const minSeq =
-               (seq < setKeepRounds) ? 0 : (seq - setKeepRounds);
-           std::uint32_t maxSeq = seq + setKeepRounds;
+           std::uint32_t const maxSeq = seq + setKeepRounds;
+           std::uint32_t const minSeq = seq - std::min(seq, setKeepRounds);

            while (it != m_map.end())
            {
-               if (it->second.mSeq < minSeq || it->second.mSeq > maxSeq)
+               if (it->second.seq < minSeq || it->second.seq > maxSeq)
                    it = m_map.erase(it);
                else
                    ++it;
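The rewritten bounds avoid unsigned wrap-around at both ends: the assert guards seq + setKeepRounds against overflow, and seq - std::min(seq, setKeepRounds) clamps the lower bound at zero, replacing the old ternary. A sketch with example values:

    #include <algorithm>
    #include <cstdint>

    std::uint32_t lowerBound(std::uint32_t seq, std::uint32_t keep)
    {
        return seq - std::min(seq, keep);  // subtrahend capped at seq: no wrap
    }

    // lowerBound(10, 3) == 7
    // lowerBound(2, 3)  == 0   (the old ternary form also returned 0 here)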
@@ -235,36 +199,32 @@ public:
    void
    stop() override
    {
-       std::lock_guard lock(mLock);
+       std::lock_guard lock(lock_);
        stopping_ = true;
        m_map.clear();
    }

private:
-   using MapType = hash_map<uint256, InboundTransactionSet>;
-
    Application& app_;

-   std::recursive_mutex mLock;
+   std::mutex lock_;

-   bool stopping_{false};
-   MapType m_map;
-   std::uint32_t m_seq;
+   hash_map<uint256, InboundTransactionSet> m_map;

-   // The empty transaction set whose hash is zero
-   InboundTransactionSet& m_zeroSet;
+   // The empty transaction set (whose hash is zero)
+   std::shared_ptr<SHAMap> emptyMap_;

    std::function<void(std::shared_ptr<SHAMap> const&, bool)> m_gotSet;

-   std::unique_ptr<PeerSetBuilder> m_peerSetBuilder;
+   std::unique_ptr<PeerSetBuilder> peerSetBuilder_;

    beast::Journal j_;
+   std::uint32_t m_seq = 0;
+
+   bool stopping_ = false;
};

//------------------------------------------------------------------------------

InboundTransactions::~InboundTransactions() = default;

std::unique_ptr<InboundTransactions>
make_InboundTransactions(
    Application& app,

@@ -39,8 +39,9 @@ LedgerDeltaAcquire::LedgerDeltaAcquire(
        ledgerHash,
        LedgerReplayParameters::SUB_TASK_TIMEOUT,
        {jtREPLAY_TASK,
-        "LedgerReplayDelta",
-        LedgerReplayParameters::MAX_QUEUED_TASKS},
+        LedgerReplayParameters::MAX_QUEUED_TASKS,
+        "LedgerReplayDelta"},
+       0,
        app.journal("LedgerReplayDelta"))
    , inboundLedgers_(inboundLedgers)
    , ledgerSeq_(ledgerSeq)
@@ -71,13 +72,13 @@ LedgerDeltaAcquire::trigger(std::size_t limit, ScopedLockType& sl)
    fullLedger_ = app_.getLedgerMaster().getLedgerByHash(hash_);
    if (fullLedger_)
    {
-       complete_ = true;
+       markComplete();
        JLOG(journal_.trace()) << "existing ledger " << hash_;
        notify(sl);
        return;
    }

-   if (!fallBack_)
+   if (userdata_ <= LedgerReplayParameters::MAX_NO_FEATURE_PEER_COUNT)
    {
        peerSet_->addPeers(
            limit,

@@ -93,22 +94,20 @@ LedgerDeltaAcquire::trigger(std::size_t limit, ScopedLockType& sl)
                protocol::TMReplayDeltaRequest request;
                request.set_ledgerhash(hash_.data(), hash_.size());
                peerSet_->sendRequest(request, peer);
+               return;
            }
-           else
+
+           if (++userdata_ ==
+               LedgerReplayParameters::MAX_NO_FEATURE_PEER_COUNT)
            {
-               if (++noFeaturePeerCount >=
-                   LedgerReplayParameters::MAX_NO_FEATURE_PEER_COUNT)
-               {
-                   JLOG(journal_.debug()) << "Fall back for " << hash_;
-                   timerInterval_ =
-                       LedgerReplayParameters::SUB_TASK_FALLBACK_TIMEOUT;
-                   fallBack_ = true;
-               }
+               JLOG(journal_.debug()) << "Fall back for " << hash_;
+               timerInterval_ =
+                   LedgerReplayParameters::SUB_TASK_FALLBACK_TIMEOUT;
            }
        });
    }

-   if (fallBack_)
+   if (userdata_ > LedgerReplayParameters::MAX_NO_FEATURE_PEER_COUNT)
        inboundLedgers_.acquire(
            hash_, ledgerSeq_, InboundLedger::Reason::GENERIC);
}
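With fallBack_ and noFeaturePeerCount removed, the base class's userdata_ byte doubles as the no-feature-peer counter here: as the checks in this hunk read, values up to MAX_NO_FEATURE_PEER_COUNT mean the replay protocol is still being tried, and anything above it encodes the fallen-back state, so no separate flag is needed. A sketch of the encoding:

    #include <cstdint>

    constexpr std::uint8_t MAX_NO_FEATURE_PEER_COUNT = 2;

    // 0..MAX  -> still counting peers that lack the replay feature
    // > MAX   -> fallen back to acquiring the full ledger
    bool fallenBack(std::uint8_t userdata)
    {
        return userdata > MAX_NO_FEATURE_PEER_COUNT;
    }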
@@ -119,7 +118,7 @@ LedgerDeltaAcquire::onTimer(bool progress, ScopedLockType& sl)
    JLOG(journal_.trace()) << "mTimeouts=" << timeouts_ << " for " << hash_;
    if (timeouts_ > LedgerReplayParameters::SUB_TASK_MAX_TIMEOUTS)
    {
-       failed_ = true;
+       markFailed();
        JLOG(journal_.debug()) << "too many timeouts " << hash_;
        notify(sl);
    }
@@ -152,7 +151,7 @@ LedgerDeltaAcquire::processData(
        std::make_shared<Ledger>(info, app_.config(), app_.getNodeFamily());
    if (replayTemp_)
    {
-       complete_ = true;
+       markComplete();
        orderedTxns_ = std::move(orderedTxns);
        JLOG(journal_.debug()) << "ready to replay " << hash_;
        notify(sl);

@@ -160,7 +159,7 @@ LedgerDeltaAcquire::processData(
    }
}

-   failed_ = true;
+   markFailed();
    JLOG(journal_.error())
        << "failed to create a (info only) ledger from verified data " << hash_;
    notify(sl);
@@ -196,7 +195,7 @@ LedgerDeltaAcquire::tryBuild(std::shared_ptr<Ledger const> const& parent)
    if (fullLedger_)
        return fullLedger_;

-   if (failed_ || !complete_ || !replayTemp_)
+   if (hasFailed() || !hasCompleted() || !replayTemp_)
        return {};

    assert(parent->seq() + 1 == replayTemp_->seq());

@@ -212,8 +211,7 @@ LedgerDeltaAcquire::tryBuild(std::shared_ptr<Ledger const> const& parent)
    }
    else
    {
-       failed_ = true;
-       complete_ = false;
+       markFailed();
        JLOG(journal_.error()) << "tryBuild failed " << hash_ << " with parent "
                               << parent->info().hash;
        Throw<std::runtime_error>("Cannot replay ledger");

@@ -265,7 +263,7 @@ LedgerDeltaAcquire::notify(ScopedLockType& sl)
    assert(isDone());
    std::vector<OnDeltaDataCB> toCall;
    std::swap(toCall, dataReadyCallbacks_);
-   auto const good = !failed_;
+   auto const good = !hasFailed();
    sl.unlock();

    for (auto& cb : toCall)

@@ -154,8 +154,6 @@ private:
    std::map<std::uint32_t, std::shared_ptr<STTx const>> orderedTxns_;
    std::vector<OnDeltaDataCB> dataReadyCallbacks_;
    std::set<InboundLedger::Reason> reasons_;
-   std::uint32_t noFeaturePeerCount = 0;
-   bool fallBack_ = false;

    friend class LedgerReplayTask; // for asserts only
    friend class test::LedgerReplayClient;

@@ -92,8 +92,9 @@ LedgerReplayTask::LedgerReplayTask(
        parameter.finishHash_,
        LedgerReplayParameters::TASK_TIMEOUT,
        {jtREPLAY_TASK,
-        "LedgerReplayTask",
-        LedgerReplayParameters::MAX_QUEUED_TASKS},
+        LedgerReplayParameters::MAX_QUEUED_TASKS,
+        "LedgerReplayTask"},
+       0,
        app.journal("LedgerReplayTask"))
    , inboundLedgers_(inboundLedgers)
    , replayer_(replayer)
@@ -213,12 +214,12 @@ LedgerReplayTask::tryAdvance(ScopedLockType& sl)
            return;
        }

-       complete_ = true;
+       markComplete();
        JLOG(journal_.info()) << "Completed " << hash_;
    }
    catch (std::runtime_error const&)
    {
-       failed_ = true;
+       markFailed();
    }
}

@@ -235,7 +236,7 @@ LedgerReplayTask::updateSkipList(
    if (!parameter_.update(hash, seq, sList))
    {
        JLOG(journal_.error()) << "Parameter update failed " << hash_;
-       failed_ = true;
+       markFailed();
        return;
    }
}

@@ -252,7 +253,7 @@ LedgerReplayTask::onTimer(bool progress, ScopedLockType& sl)
    JLOG(journal_.trace()) << "mTimeouts=" << timeouts_ << " for " << hash_;
    if (timeouts_ > maxTimeouts_)
    {
-       failed_ = true;
+       markFailed();
        JLOG(journal_.debug())
            << "LedgerReplayTask Failed, too many timeouts " << hash_;
    }

@@ -36,8 +36,9 @@ SkipListAcquire::SkipListAcquire(
        ledgerHash,
        LedgerReplayParameters::SUB_TASK_TIMEOUT,
        {jtREPLAY_TASK,
-        "SkipListAcquire",
-        LedgerReplayParameters::MAX_QUEUED_TASKS},
+        LedgerReplayParameters::MAX_QUEUED_TASKS,
+        "SkipListAcquire"},
+       0,
        app.journal("LedgerReplaySkipList"))
    , inboundLedgers_(inboundLedgers)
    , peerSet_(std::move(peerSet))
@@ -71,7 +72,7 @@ SkipListAcquire::trigger(std::size_t limit, ScopedLockType& sl)
        return;
    }

-   if (!fallBack_)
+   if (userdata_ <= LedgerReplayParameters::MAX_NO_FEATURE_PEER_COUNT)
    {
        peerSet_->addPeers(
            limit,

@@ -91,24 +92,22 @@ SkipListAcquire::trigger(std::size_t limit, ScopedLockType& sl)
                request.set_type(
                    protocol::TMLedgerMapType::lmACCOUNT_STATE);
                peerSet_->sendRequest(request, peer);
+               return;
            }
-           else
+
+           JLOG(journal_.trace()) << "Add a no feature peer " << peer->id()
+                                  << " for " << hash_;
+
+           if (++userdata_ ==
+               LedgerReplayParameters::MAX_NO_FEATURE_PEER_COUNT)
            {
-               JLOG(journal_.trace()) << "Add a no feature peer "
-                                      << peer->id() << " for " << hash_;
-               if (++noFeaturePeerCount_ >=
-                   LedgerReplayParameters::MAX_NO_FEATURE_PEER_COUNT)
-               {
-                   JLOG(journal_.debug()) << "Fall back for " << hash_;
-                   timerInterval_ =
-                       LedgerReplayParameters::SUB_TASK_FALLBACK_TIMEOUT;
-                   fallBack_ = true;
-               }
+               JLOG(journal_.debug()) << "Fall back for " << hash_;
+               timerInterval_ =
+                   LedgerReplayParameters::SUB_TASK_FALLBACK_TIMEOUT;
            }
        });
    }

-   if (fallBack_)
+   if (userdata_ > LedgerReplayParameters::MAX_NO_FEATURE_PEER_COUNT)
        inboundLedgers_.acquire(hash_, 0, InboundLedger::Reason::GENERIC);
}

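SkipListAcquire applies the same userdata_-as-counter encoding as LedgerDeltaAcquire above, which is why both noFeaturePeerCount_ and fallBack_ can be dropped from the headers in the member removals that follow.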
@@ -118,7 +117,7 @@ SkipListAcquire::onTimer(bool progress, ScopedLockType& sl)
    JLOG(journal_.trace()) << "mTimeouts=" << timeouts_ << " for " << hash_;
    if (timeouts_ > LedgerReplayParameters::SUB_TASK_MAX_TIMEOUTS)
    {
-       failed_ = true;
+       markFailed();
        JLOG(journal_.debug()) << "too many timeouts " << hash_;
        notify(sl);
    }
@@ -161,7 +160,7 @@ SkipListAcquire::processData(
    {
    }

-   failed_ = true;
+   markFailed();
    JLOG(journal_.error()) << "failed to retrieve Skip list from verified data "
                           << hash_;
    notify(sl);

@@ -203,7 +202,7 @@ SkipListAcquire::retrieveSkipList(
        }
    }

-   failed_ = true;
+   markFailed();
    JLOG(journal_.error()) << "failed to retrieve Skip list from a ledger "
                           << hash_;
    notify(sl);

@@ -215,7 +214,7 @@ SkipListAcquire::onSkipListAcquired(
    std::uint32_t ledgerSeq,
    ScopedLockType& sl)
{
-   complete_ = true;
+   markComplete();
    data_ = std::make_shared<SkipListData>(ledgerSeq, skipList);
    JLOG(journal_.debug()) << "Skip list acquired " << hash_;
    notify(sl);

@@ -227,7 +226,7 @@ SkipListAcquire::notify(ScopedLockType& sl)
    assert(isDone());
    std::vector<OnSkipListDataCB> toCall;
    std::swap(toCall, dataReadyCallbacks_);
-   auto const good = !failed_;
+   auto const good = !hasFailed();
    sl.unlock();

    for (auto& cb : toCall)

@@ -157,8 +157,6 @@ private:
    std::unique_ptr<PeerSet> peerSet_;
    std::vector<OnSkipListDataCB> dataReadyCallbacks_;
    std::shared_ptr<SkipListData const> data_;
-   std::uint32_t noFeaturePeerCount_ = 0;
-   bool fallBack_ = false;

    friend class test::LedgerReplayClient;
};

@@ -31,16 +31,14 @@ TimeoutCounter::TimeoutCounter(
    uint256 const& hash,
    std::chrono::milliseconds interval,
    QueueJobParameter&& jobParameter,
+   std::uint8_t userdata,
    beast::Journal journal)
    : app_(app)
    , journal_(journal)
-   , hash_(hash)
-   , timeouts_(0)
-   , complete_(false)
-   , failed_(false)
-   , progress_(false)
    , timerInterval_(interval)
    , queueJobParameter_(std::move(jobParameter))
+   , hash_(hash)
+   , userdata_(userdata)
    , timer_(app_.getIOService())
{
    assert((timerInterval_ > 10ms) && (timerInterval_ < 30s));
@@ -70,6 +68,7 @@ TimeoutCounter::queueJob(ScopedLockType& sl)
|
||||
{
|
||||
if (isDone())
|
||||
return;
|
||||
|
||||
if (queueJobParameter_.jobLimit &&
|
||||
app_.getJobQueue().getJobCountTotal(queueJobParameter_.jobType) >=
|
||||
queueJobParameter_.jobLimit)
|
||||
@@ -85,42 +84,41 @@ TimeoutCounter::queueJob(ScopedLockType& sl)
|
||||
queueJobParameter_.jobName,
|
||||
[wptr = pmDowncast()]() {
|
||||
if (auto sptr = wptr.lock(); sptr)
|
||||
sptr->invokeOnTimer();
|
||||
{
|
||||
ScopedLockType sl(sptr->mtx_);
|
||||
|
||||
if (sptr->isDone())
|
||||
return;
|
||||
|
||||
auto const progress = [&sptr]() {
|
||||
if (sptr->state_.fetch_and(~PROGRESSING) & PROGRESSING)
|
||||
return true;
|
||||
return false;
|
||||
}();
|
||||
|
||||
if (!progress)
|
||||
{
|
||||
++sptr->timeouts_;
|
||||
JLOG(sptr->journal_.debug())
|
||||
<< "Acquiring " << sptr->hash_ << ": timeout ("
|
||||
<< sptr->timeouts_ << ")";
|
||||
}
|
||||
|
||||
sptr->onTimer(progress, sl);
|
||||
|
||||
if (!sptr->isDone())
|
||||
sptr->setTimer(sl);
|
||||
}
|
||||
});
|
||||
}
|
||||
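The lambda above checks and clears the PROGRESSING bit in a single atomic step: fetch_and returns the value held before the AND was applied, so testing that prior value against PROGRESSING reports whether progress was made since the last timeout, with no window in which a concurrent makeProgress() could be lost. A standalone sketch of the test-and-clear idiom (names hypothetical):

    #include <atomic>
    #include <cstdint>

    constexpr std::uint8_t PROGRESSING = 0x04;

    // Atomically clear the flag and report whether it was set.
    bool
    testAndClearProgress(std::atomic<std::uint8_t>& state)
    {
        // fetch_and returns the value held *before* the AND was applied.
        return (state.fetch_and(static_cast<std::uint8_t>(~PROGRESSING)) &
                PROGRESSING) != 0;
    }
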

void
TimeoutCounter::invokeOnTimer()
{
ScopedLockType sl(mtx_);

if (isDone())
return;

if (!progress_)
{
++timeouts_;
JLOG(journal_.debug()) << "Timeout(" << timeouts_ << ")"
<< " acquiring " << hash_;
onTimer(false, sl);
}
else
{
progress_ = false;
onTimer(true, sl);
}

if (!isDone())
setTimer(sl);
}

void
TimeoutCounter::cancel()
{
ScopedLockType sl(mtx_);
if (!isDone())
{
failed_ = true;
markFailed();
JLOG(journal_.info()) << "Cancel " << hash_;
}
}

@@ -25,6 +25,7 @@
#include <ripple/beast/utility/Journal.h>
#include <ripple/core/Job.h>
#include <boost/asio/basic_waitable_timer.hpp>
#include <atomic>
#include <mutex>

namespace ripple {
@@ -40,11 +41,11 @@ namespace ripple {
1. The entry point is `setTimer`.

2. After `mTimerInterval`, `queueJob` is called, which schedules a job to
call `invokeOnTimer` (or loops back to setTimer if there are too many
concurrent jobs).
do some housekeeping and invoke the timer callback (or loops back to
setTimer if there are too many concurrent jobs).

3. The job queue calls `invokeOnTimer` which either breaks the loop if
`isDone` or calls `onTimer`.
3. The job queue performs housekeeping and either breaks the loop if
`isDone` or calls the virtual `onTimer`.

4. `onTimer` is the only "real" virtual method in this class. It is the
callback for when the timeout expires. Generally, its only responsibility
@@ -53,7 +54,7 @@ namespace ripple {
timeouts.

5. Once `onTimer` returns, if the object is still not `isDone`, then
`invokeOnTimer` sets another timeout by looping back to setTimer.
we set another timeout by looping back to `setTimer`.

This loop executes concurrently with another asynchronous sequence,
implemented by the subtype, that is trying to make progress and eventually
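For orientation, the loop described in steps 1-5 can be reduced to a deliberately simplified, synchronous toy; the real class drives the schedule through boost::asio timers and the job queue, but the shape is the same. A self-contained sketch (all names hypothetical):

    #include <chrono>
    #include <functional>
    #include <iostream>
    #include <thread>

    // A toy version of the loop described above: fire a callback every
    // interval until the work is marked done.
    struct ToyTimeoutCounter
    {
        std::chrono::milliseconds interval{250};
        int timeouts = 0;
        bool done = false;

        // Stands in for the virtual onTimer(progress, lock) hook.
        std::function<void(ToyTimeoutCounter&)> onTimer;

        void
        run()
        {
            while (!done)  // step 5: loop until isDone()
            {
                std::this_thread::sleep_for(interval);  // steps 1-2: timer
                ++timeouts;                              // step 3: housekeeping
                onTimer(*this);                          // step 4: subtype hook
            }
        }
    };

    int
    main()
    {
        ToyTimeoutCounter c;
        c.onTimer = [](ToyTimeoutCounter& self) {
            std::cout << "timeout " << self.timeouts << '\n';
            if (self.timeouts >= 3)  // give up after three tries
                self.done = true;
        };
        c.run();
    }
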
@@ -77,14 +78,32 @@ public:
virtual void
cancel();

bool
hasFailed() const
{
return (state_ & FAILED) == FAILED;
}

/** True if the object has completed its work. */
bool
hasCompleted() const
{
return (state_ & COMPLETED) == COMPLETED;
}

protected:
using ScopedLockType = std::unique_lock<std::recursive_mutex>;

struct QueueJobParameter
{
JobType jobType;
std::string jobName;
std::optional<std::uint32_t> jobLimit;
JobType const jobType;

// We only queue a job if fewer than this many jobs of this type
// are already queued.
std::uint32_t const jobLimit;

// The description of the job
std::string const jobName;
};

TimeoutCounter(
@@ -92,6 +111,7 @@ protected:
uint256 const& targetHash,
std::chrono::milliseconds timeoutInterval,
QueueJobParameter&& jobParameter,
std::uint8_t userdata,
beast::Journal journal);

virtual ~TimeoutCounter() = default;
@@ -100,11 +120,11 @@ protected:
void
setTimer(ScopedLockType&);

/** Queue a job to call invokeOnTimer(). */
/** Queue a job to call the timer callback. */
void
queueJob(ScopedLockType&);

/** Hook called from invokeOnTimer(). */
/** Hook called when the timer fires. */
virtual void
onTimer(bool progress, ScopedLockType&) = 0;

@@ -115,34 +135,68 @@ protected:
bool
isDone() const
{
return complete_ || failed_;
return (state_ & (COMPLETED | FAILED)) != 0;
}

/** True if the object has made progress since the last time we checked. */
bool
hasProgressed() const
{
return PROGRESSING == (state_ & PROGRESSING);
}

/** Indicate that this object has completed its work. */
void
markComplete()
{
state_ = (state_ & ~FAILED) | COMPLETED;
}

void
markFailed()
{
state_ = (state_ & ~COMPLETED) | FAILED;
}

/** Indicate that this object made progress. */
void
makeProgress()
{
state_ |= PROGRESSING;
}

// Used in this class for access to boost::asio::io_service and
// ripple::Overlay. Used in subtypes for the kitchen sink.
Application& app_;
beast::Journal journal_;
mutable std::recursive_mutex mtx_;

/** The hash of the object (in practice, always a ledger) we are trying to
* fetch. */
uint256 const hash_;
int timeouts_;
bool complete_;
bool failed_;
/** Whether forward progress has been made. */
bool progress_;
/** The minimum time to wait between calls to execute(). */
std::chrono::milliseconds timerInterval_;

QueueJobParameter queueJobParameter_;

/** The hash of the object (typically a ledger) we are trying to fetch. */
uint256 const hash_;

mutable std::recursive_mutex mtx_;

std::uint16_t timeouts_ = 0;

/** A small amount of data for derived classes to use as needed. */
std::atomic<std::uint8_t> userdata_ = 0;

private:
/** Calls onTimer() if in the right state.
* Only called by queueJob().
*/
void
invokeOnTimer();
/** Used to track the current state of the object. */
std::atomic<std::uint8_t> state_ = 0;

/** If set, the acquisition has been completed. */
static constexpr std::uint8_t const COMPLETED = 0x01;

/** If set, the acquisition has failed. */
static constexpr std::uint8_t const FAILED = 0x02;

/** If set, the acquisition has made some progress. */
static constexpr std::uint8_t const PROGRESSING = 0x04;

boost::asio::basic_waitable_timer<std::chrono::steady_clock> timer_;
};
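The three bools (complete_, failed_, progress_) collapse into one atomic byte above, which is the packing win this refactor is after. markComplete() and markFailed() are written so the COMPLETED and FAILED bits stay mutually exclusive while PROGRESSING is left alone. A compilable sketch of the same bit discipline (standalone, not the class above; in the real class these composite read-modify-write updates happen under the object's mutex):

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    static constexpr std::uint8_t COMPLETED = 0x01;
    static constexpr std::uint8_t FAILED = 0x02;
    static constexpr std::uint8_t PROGRESSING = 0x04;

    int
    main()
    {
        std::atomic<std::uint8_t> state{0};

        state |= PROGRESSING;                           // makeProgress()
        state = (state & ~FAILED) | COMPLETED;          // markComplete()
        assert((state & (COMPLETED | FAILED)) != 0);    // isDone()

        state = (state & ~COMPLETED) | FAILED;          // markFailed() overrides
        assert((state & COMPLETED) == 0);               // bits stay exclusive
        assert((state & PROGRESSING) != 0);             // progress bit untouched
    }
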

@@ -35,10 +35,11 @@ using namespace std::chrono_literals;
// Timeout interval in milliseconds
auto constexpr TX_ACQUIRE_TIMEOUT = 250ms;

enum {
NORM_TIMEOUTS = 4,
MAX_TIMEOUTS = 20,
};
// If set, we have acquired the root of the TX SHAMap
static constexpr std::uint8_t const TX_HAVE_ROOT = 0x01;

static constexpr std::uint16_t const TX_NORM_TIMEOUTS = 4;
static constexpr std::uint16_t const TX_MAX_TIMEOUTS = 20;
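TransactionAcquire no longer carries a dedicated mHaveRoot bool: as the later hunks show, it stores that flag in the spare userdata_ byte the base class now provides, using the TX_HAVE_ROOT bit. A small sketch of the pattern (standalone, names hypothetical):

    #include <atomic>
    #include <cstdint>

    static constexpr std::uint8_t TX_HAVE_ROOT = 0x01;

    // userdata_ is a spare byte provided by the base class; each derived
    // class assigns its own meanings to the bits.
    std::atomic<std::uint8_t> userdata{0};

    bool
    haveRoot()
    {
        return (userdata & TX_HAVE_ROOT) != 0;  // replaces `mHaveRoot`
    }

    void
    setHaveRoot()
    {
        userdata |= TX_HAVE_ROOT;  // replaces `mHaveRoot = true`
    }
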

TransactionAcquire::TransactionAcquire(
Application& app,
@@ -48,9 +49,9 @@ TransactionAcquire::TransactionAcquire(
app,
hash,
TX_ACQUIRE_TIMEOUT,
{jtTXN_DATA, "TransactionAcquire", {}},
{jtTXN_DATA, 0, "TransactionAcquire"},
0,
app.journal("TransactionAcquire"))
, mHaveRoot(false)
, mPeerSet(std::move(peerSet))
{
mMap = std::make_shared<SHAMap>(
@@ -61,43 +62,36 @@ TransactionAcquire::TransactionAcquire(
void
TransactionAcquire::done()
{
// We hold a PeerSet lock and so cannot do real work here

if (failed_)
if (hasFailed())
{
JLOG(journal_.debug()) << "Failed to acquire TX set " << hash_;
return;
}
else
{
JLOG(journal_.debug()) << "Acquired TX set " << hash_;
mMap->setImmutable();

uint256 const& hash(hash_);
std::shared_ptr<SHAMap> const& map(mMap);
auto const pap = &app_;
// Note that, when we're in the process of shutting down, addJob()
// may reject the request. If that happens then giveSet() will
// not be called. That's fine. According to David the giveSet() call
// just updates the consensus and related structures when we acquire
// a transaction set. No need to update them if we're shutting down.
app_.getJobQueue().addJob(
jtTXN_DATA, "completeAcquire", [pap, hash, map]() {
pap->getInboundTransactions().giveSet(hash, map, true);
});
}
JLOG(journal_.debug()) << "Acquired TX set " << hash_;
mMap->setImmutable();

// If we are in the process of shutting down, the job queue may refuse
// to queue the job; that's fine.
app_.getJobQueue().addJob(
jtTXN_DATA,
"completeAcquire",
[&app = app_, hash = hash_, map = mMap]() {
app.getInboundTransactions().giveSet(hash, map, true);
});
}

void
TransactionAcquire::onTimer(bool progress, ScopedLockType& psl)
{
if (timeouts_ > MAX_TIMEOUTS)
if (timeouts_ > TX_MAX_TIMEOUTS)
{
failed_ = true;
markFailed();
done();
return;
}

if (timeouts_ >= NORM_TIMEOUTS)
if (timeouts_ >= TX_NORM_TIMEOUTS)
trigger(nullptr);

addPeers(1);
@@ -112,18 +106,18 @@ TransactionAcquire::pmDowncast()
void
TransactionAcquire::trigger(std::shared_ptr<Peer> const& peer)
{
if (complete_)
if (hasCompleted())
{
JLOG(journal_.info()) << "trigger after complete";
return;
}
if (failed_)
if (hasFailed())
{
JLOG(journal_.info()) << "trigger after fail";
return;
}

if (!mHaveRoot)
if (!(userdata_ & TX_HAVE_ROOT))
{
JLOG(journal_.trace()) << "TransactionAcquire::trigger "
<< (peer ? "havePeer" : "noPeer") << " no root";
@@ -140,7 +134,7 @@ TransactionAcquire::trigger(std::shared_ptr<Peer> const& peer)
}
else if (!mMap->isValid())
{
failed_ = true;
markFailed();
done();
}
else
@@ -151,9 +145,9 @@ TransactionAcquire::trigger(std::shared_ptr<Peer> const& peer)
if (nodes.empty())
{
if (mMap->isValid())
complete_ = true;
markComplete();
else
failed_ = true;
markFailed();

done();
return;
@@ -181,16 +175,16 @@ TransactionAcquire::takeNodes(
{
ScopedLockType sl(mtx_);

if (complete_)
if (hasCompleted())
{
JLOG(journal_.trace()) << "TX set complete";
return SHAMapAddNode();
return {};
}

if (failed_)
if (hasFailed())
{
JLOG(journal_.trace()) << "TX set failed";
return SHAMapAddNode();
return {};
}

try
@@ -204,7 +198,7 @@ TransactionAcquire::takeNodes(
{
if (d.first.isRoot())
{
if (mHaveRoot)
if (userdata_ & TX_HAVE_ROOT)
JLOG(journal_.debug())
<< "Got root TXS node, already have it";
else if (!mMap->addRootNode(
@@ -214,7 +208,7 @@ TransactionAcquire::takeNodes(
JLOG(journal_.warn()) << "TX acquire got bad root node";
}
else
mHaveRoot = true;
userdata_ |= TX_HAVE_ROOT;
}
else if (!mMap->addKnownNode(d.first, d.second, &sf).isGood())
{
@@ -224,7 +218,7 @@ TransactionAcquire::takeNodes(
}

trigger(peer);
progress_ = true;
makeProgress();
return SHAMapAddNode::useful();
}
catch (std::exception const& ex)
@@ -260,8 +254,8 @@ TransactionAcquire::stillNeed()
{
ScopedLockType sl(mtx_);

if (timeouts_ > NORM_TIMEOUTS)
timeouts_ = NORM_TIMEOUTS;
if (timeouts_ > TX_NORM_TIMEOUTS)
timeouts_ = TX_NORM_TIMEOUTS;
}

} // namespace ripple

@@ -20,6 +20,7 @@
#ifndef RIPPLE_APP_LEDGER_TRANSACTIONACQUIRE_H_INCLUDED
#define RIPPLE_APP_LEDGER_TRANSACTIONACQUIRE_H_INCLUDED

#include <ripple/app/ledger/impl/TimeoutCounter.h>
#include <ripple/app/main/Application.h>
#include <ripple/overlay/PeerSet.h>
#include <ripple/shamap/SHAMap.h>
@@ -55,12 +56,16 @@ public:

private:
std::shared_ptr<SHAMap> mMap;
bool mHaveRoot;
std::unique_ptr<PeerSet> mPeerSet;

void
onTimer(bool progress, ScopedLockType& peerSetLock) override;

/** We have acquired the tx set we were looking for.

@note This function is called with the PeerSet lock
held, so we cannot do real work in it.
*/
void
done();

@@ -69,6 +74,7 @@ private:

void
trigger(std::shared_ptr<Peer> const&);

std::weak_ptr<TimeoutCounter>
pmDowncast() override;
};

@@ -395,7 +395,8 @@ public:
*this,
m_collectorManager->collector(),
[this](std::shared_ptr<SHAMap> const& set, bool fromAcquire) {
gotTXSet(set, fromAcquire);
if (set)
m_networkOPs->mapComplete(set, fromAcquire);
}))

, m_ledgerReplayer(std::make_unique<LedgerReplayer>(
@@ -650,13 +651,6 @@ public:
return m_acceptedLedgerCache;
}

void
gotTXSet(std::shared_ptr<SHAMap> const& set, bool fromAcquire)
{
if (set)
m_networkOPs->mapComplete(set, fromAcquire);
}

TransactionMaster&
getMasterTransaction() override
{

@@ -133,6 +133,9 @@ public:
public:
Application();

Application(Application const&) = delete;
Application(Application&&) = delete;

virtual ~Application() = default;

virtual bool

@@ -249,8 +249,8 @@ PeerImp::send(std::shared_ptr<Message> const& m)
if (detaching_)
return;

auto validator = m->getValidatorKey();
if (validator && !squelch_.expireSquelch(*validator))
if (auto validator = m->getValidatorKey();
validator && !squelch_.expireSquelch(*validator))
return;

overlay_.reportTraffic(
@@ -267,30 +267,21 @@ PeerImp::send(std::shared_ptr<Message> const& m)
// a small sendq periodically
large_sendq_ = 0;
}
else if (auto sink = journal_.debug();
sink && (sendq_size % Tuning::sendQueueLogFreq) == 0)
{
std::string const n = name();
sink << (n.empty() ? remote_address_.to_string() : n)
<< " sendq: " << sendq_size;
}

send_queue_.push(m);

if (sendq_size != 0)
return;

boost::asio::async_write(
stream_,
boost::asio::buffer(
send_queue_.front()->getBuffer(compressionEnabled_)),
bind_executor(
strand_,
std::bind(
&PeerImp::onWriteMessage,
shared_from_this(),
std::placeholders::_1,
std::placeholders::_2)));
if (sendq_size == 0)
boost::asio::async_write(
stream_,
boost::asio::buffer(
send_queue_.front()->getBuffer(compressionEnabled_)),
bind_executor(
strand_,
std::bind(
&PeerImp::onWriteMessage,
shared_from_this(),
std::placeholders::_1,
std::placeholders::_2)));
}
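The rewritten send() keeps the invariant that at most one async_write is outstanding per peer: a write is started only when the queue was empty before this push, and otherwise the completion handler for the in-flight write (onWriteMessage) pops the queue and starts the next one. A toy sketch of that queue-drain discipline (synchronous stand-ins, hypothetical names):

    #include <functional>
    #include <queue>
    #include <string>
    #include <utility>

    struct DrainingWriter
    {
        std::queue<std::string> sendQueue;
        std::function<void(std::string const&)> asyncWrite;  // stand-in

        void
        send(std::string msg)
        {
            bool const wasEmpty = sendQueue.empty();
            sendQueue.push(std::move(msg));
            // Only the push that found the queue empty starts a write;
            // otherwise the completion handler keeps the drain going.
            if (wasEmpty)
                asyncWrite(sendQueue.front());
        }

        void
        onWriteDone()
        {
            sendQueue.pop();
            if (!sendQueue.empty())
                asyncWrite(sendQueue.front());  // continue the drain
        }
    };
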

void
@@ -359,10 +350,9 @@ PeerImp::charge(Resource::Charge const& fee)
bool
PeerImp::crawl() const
{
auto const iter = headers_.find("Crawl");
if (iter == headers_.end())
return false;
return boost::iequals(iter->value(), "public");
if (auto const iter = headers_.find("Crawl"); iter != headers_.end())
return boost::iequals(iter->value(), "public");
return false;
}

bool
@@ -1985,14 +1975,38 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMLedgerData> const& m)
// Otherwise check if received data for a candidate transaction set
if (m->type() == protocol::liTS_CANDIDATE)
{
std::weak_ptr<PeerImp> weak{shared_from_this()};
std::vector<std::pair<SHAMapNodeID, Slice>> data;
data.reserve(m->nodes().size());

for (auto const& node : m->nodes())
{
if (!node.has_nodeid() || !node.has_nodedata())
{
charge(Resource::feeInvalidRequest);
return;
}

auto const id = deserializeSHAMapNodeID(node.nodeid());

if (!id)
{
charge(Resource::feeBadData);
return;
}

data.emplace_back(*id, makeSlice(node.nodedata()));
}

app_.getJobQueue().addJob(
jtTXN_DATA, "recvPeerData", [weak, ledgerHash, m]() {
if (auto peer = weak.lock())
{
peer->app_.getInboundTransactions().gotData(
ledgerHash, peer, m);
}
jtTXN_DATA,
"recvPeerData",
[w = weak_from_this(), ledgerHash, d = std::move(data), m]() {
// We capture `m` to keep its data alive, because we're
// implicitly referencing it from `d` (it holds slices!)
(void)m;

if (auto p = w.lock())
p->app_.getInboundTransactions().gotData(ledgerHash, p, d);
});
return;
}
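The comment inside the lambda above is the load-bearing detail of this hunk: each Slice in d points into buffers owned by the protobuf message, so m must be captured, even though it is never read, to keep those buffers alive until the job runs. A distilled sketch of the hazard using standard-library stand-ins (hypothetical names; std::string_view plays the role of Slice):

    #include <functional>
    #include <memory>
    #include <string>
    #include <string_view>
    #include <vector>

    std::function<void()>
    makeJob(std::shared_ptr<std::string> message)
    {
        // The views reference message's storage; they own nothing.
        std::vector<std::string_view> views{std::string_view{*message}};

        // Capturing `message` extends the storage's lifetime to match
        // the views. Dropping it from the capture list would leave
        // `views` dangling once the last outside reference goes away.
        return [views, message]() {
            for (auto v : views)
                (void)v;  // safe: `message` is still alive here
        };
    }
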

@@ -20,6 +20,7 @@
#ifndef RIPPLE_SHAMAP_SHAMAPADDNODE_H_INCLUDED
#define RIPPLE_SHAMAP_SHAMAPADDNODE_H_INCLUDED

#include <cstdint>
#include <string>

namespace ripple {
@@ -28,102 +29,97 @@ namespace ripple {
class SHAMapAddNode
{
private:
int mGood;
int mBad;
int mDuplicate;
std::uint32_t good_;
std::uint32_t bad_;
std::uint32_t duplicate_;

constexpr SHAMapAddNode(int good, int bad, int duplicate)
: good_(good), bad_(bad), duplicate_(duplicate)
{
}

public:
SHAMapAddNode();
constexpr SHAMapAddNode() : SHAMapAddNode(0, 0, 0)
{
}

void
incInvalid();
void
incUseful();
void
incDuplicate();
void
reset();
int

[[nodiscard]] std::uint32_t
getGood() const;
bool

[[nodiscard]] bool
isGood() const;
bool

[[nodiscard]] bool
isInvalid() const;
bool

[[nodiscard]] bool
isUseful() const;
std::string

[[nodiscard]] std::string
get() const;

SHAMapAddNode&
operator+=(SHAMapAddNode const& n);

static SHAMapAddNode
[[nodiscard]] static SHAMapAddNode
duplicate();
static SHAMapAddNode

[[nodiscard]] static SHAMapAddNode
useful();
static SHAMapAddNode

[[nodiscard]] static SHAMapAddNode
invalid();

private:
SHAMapAddNode(int good, int bad, int duplicate);
};

inline SHAMapAddNode::SHAMapAddNode() : mGood(0), mBad(0), mDuplicate(0)
{
}

inline SHAMapAddNode::SHAMapAddNode(int good, int bad, int duplicate)
: mGood(good), mBad(bad), mDuplicate(duplicate)
{
}

inline void
SHAMapAddNode::incInvalid()
{
++mBad;
++bad_;
}

inline void
SHAMapAddNode::incUseful()
{
++mGood;
++good_;
}

inline void
SHAMapAddNode::incDuplicate()
{
++mDuplicate;
++duplicate_;
}

inline void
SHAMapAddNode::reset()
{
mGood = mBad = mDuplicate = 0;
}

inline int
inline std::uint32_t
SHAMapAddNode::getGood() const
{
return mGood;
return good_;
}

inline bool
SHAMapAddNode::isInvalid() const
{
return mBad > 0;
return bad_ != 0;
}

inline bool
SHAMapAddNode::isUseful() const
{
return mGood > 0;
return good_ != 0;
}

inline SHAMapAddNode&
SHAMapAddNode::operator+=(SHAMapAddNode const& n)
{
mGood += n.mGood;
mBad += n.mBad;
mDuplicate += n.mDuplicate;
good_ += n.good_;
bad_ += n.bad_;
duplicate_ += n.duplicate_;

return *this;
}
@@ -131,53 +127,33 @@ SHAMapAddNode::operator+=(SHAMapAddNode const& n)
inline bool
SHAMapAddNode::isGood() const
{
return (mGood + mDuplicate) > mBad;
return (good_ + duplicate_) > bad_;
}

inline SHAMapAddNode
SHAMapAddNode::duplicate()
{
return SHAMapAddNode(0, 0, 1);
return {0, 0, 1};
}

inline SHAMapAddNode
SHAMapAddNode::useful()
{
return SHAMapAddNode(1, 0, 0);
return {1, 0, 0};
}

inline SHAMapAddNode
SHAMapAddNode::invalid()
{
return SHAMapAddNode(0, 1, 0);
return {0, 1, 0};
}

inline std::string
SHAMapAddNode::get() const
{
std::string ret;
if (mGood > 0)
{
ret.append("good:");
ret.append(std::to_string(mGood));
}
if (mBad > 0)
{
if (!ret.empty())
ret.append(" ");
ret.append("bad:");
ret.append(std::to_string(mBad));
}
if (mDuplicate > 0)
{
if (!ret.empty())
ret.append(" ");
ret.append("dupe:");
ret.append(std::to_string(mDuplicate));
}
if (ret.empty())
ret = "no nodes processed";
return ret;
return "{ good: " + std::to_string(good_) +
", bad: " + std::to_string(bad_) +
", dup: " + std::to_string(duplicate_) + " }";
}

} // namespace ripple
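SHAMapAddNode is an accumulator: each ingest step returns one, callers fold them together with operator+=, and isGood() treats duplicates as acceptable as long as good plus duplicate outweighs bad. A small usage sketch against the inline API above (the folding loop and its inputs are hypothetical; assumes the header shown in this diff is available):

    #include <ripple/shamap/SHAMapAddNode.h>

    // Fold per-node results into one verdict, as takeNodes() does.
    ripple::SHAMapAddNode
    ingestAll(int usefulNodes, int duplicateNodes)
    {
        ripple::SHAMapAddNode total;
        for (int i = 0; i < usefulNodes; ++i)
            total += ripple::SHAMapAddNode::useful();
        for (int i = 0; i < duplicateNodes; ++i)
            total += ripple::SHAMapAddNode::duplicate();
        // isGood() is (good_ + duplicate_) > bad_, so an all-duplicate
        // batch still counts as good.
        return total;
    }
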

@@ -722,9 +722,9 @@ public:
TaskStatus
taskStatus(std::shared_ptr<T> const& t)
{
if (t->failed_)
if (t->hasFailed())
return TaskStatus::Failed;
if (t->complete_)
if (t->hasCompleted())
return TaskStatus::Completed;
return TaskStatus::NotDone;
}