Compare commits


2 Commits

Author           SHA1        Message                                                                                               Date
Pratik Mankawde  75d06cc5aa  Merge branch 'develop' into pratik/Charge-higher-fees-for-and-reject-heavy-TMGetObjectByHash-queries  2025-12-04 17:03:42 +00:00
Pratik Mankawde  0eb0495632  first draft                                                                                           2025-12-04 16:54:33 +00:00
13 changed files with 93 additions and 407 deletions

File: BUILD.md

@@ -141,37 +141,26 @@ Alternatively, you can pull the patched recipes into the repository and use them
 locally:
 ```bash
-# Extract the version number from the lockfile.
-function extract_version {
-  version=$(cat conan.lock | sed -nE "s@.+${1}/(.+)#.+@\1@p" | head -n1)
-  echo ${version}
-}
-# Define which recipes to export.
-recipes=(ed25519 grpc secp256k1 snappy soci)
-# Selectively check out the recipes from our CCI fork.
 cd external
 mkdir -p conan-center-index
 cd conan-center-index
 git init
 git remote add origin git@github.com:XRPLF/conan-center-index.git
 git sparse-checkout init
-for recipe in ${recipes[@]}; do
-  echo "Checking out ${recipe}..."
-  git sparse-checkout add recipes/${recipe}/all
-done
+git sparse-checkout set recipes/ed25519
+git sparse-checkout add recipes/grpc
+git sparse-checkout add recipes/secp256k1
+git sparse-checkout add recipes/snappy
+git sparse-checkout add recipes/soci
 git fetch origin master
 git checkout master
-rm -rf .git
 cd ../..
-# Export the recipes into the local cache.
-for recipe in ${recipes[@]}; do
-  version=$(extract_version ${recipe})
-  echo "Exporting ${recipe}/${version}..."
-  conan export --version $(extract_version ${recipe}) \
-    external/conan-center-index/recipes/${recipe}/all
-done
+conan export --version 2015.03 external/conan-center-index/recipes/ed25519/all
+conan export --version 1.72.0 external/conan-center-index/recipes/grpc/all
+conan export --version 0.7.0 external/conan-center-index/recipes/secp256k1/all
+conan export --version 1.1.10 external/conan-center-index/recipes/snappy/all
+conan export --version 4.0.3 external/conan-center-index/recipes/soci/all
 ```
 In the case we switch to a newer version of a dependency that still requires a
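If you want to sanity-check that the exports landed, Conan can list the recipes now in the local cache; a minimal sketch, assuming Conan 2 syntax and the recipe names used in the commands above (output format varies by Conan version):

```bash
# List each exported recipe in the local Conan cache.
for recipe in ed25519 grpc secp256k1 snappy soci; do
    conan list "${recipe}/*"
done
```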

File: rippled-example.cfg

@@ -1051,11 +1051,10 @@
 # The online delete process checks periodically
 # that rippled is still in sync with the network,
 # and that the validated ledger is less than
-# 'age_threshold_seconds' old, and that all
-# recent ledgers are available. If not, then continue
+# 'age_threshold_seconds' old. If not, then continue
 # sleeping for this number of seconds and
 # checking until healthy.
-# Default is 1.
+# Default is 5.
 #
 # Notes:
 # The 'node_db' entry configures the primary, persistent storage.
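For orientation, the settings these comments describe live in the [node_db] stanza of the server's config file; a minimal sketch with illustrative values (the path and sizes are examples, not recommendations):

```
[node_db]
type=NuDB
path=/var/lib/rippled/db/nudb
# Keep roughly this many recent ledgers online before rotating old data out.
online_delete=512
# Pause deletion if the validated ledger is older than this many seconds.
age_threshold_seconds=60
# With the default restored to 5, the deletion thread re-checks health
# every 5 seconds while the node is catching up.
recovery_wait_seconds=5
```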

File: features.macro

@@ -67,6 +67,7 @@ XRPL_FEATURE(Clawback, Supported::yes, VoteBehavior::DefaultNo
 XRPL_FIX    (UniversalNumber, Supported::yes, VoteBehavior::DefaultNo)
 XRPL_FEATURE(XRPFees, Supported::yes, VoteBehavior::DefaultNo)
 XRPL_FIX    (RemoveNFTokenAutoTrustLine, Supported::yes, VoteBehavior::DefaultYes)
+XRPL_FIX    (TMGetObjectByHashLimit, Supported::yes, VoteBehavior::DefaultYes)
 // The following amendments are obsolete, but must remain supported
 // because they could potentially get enabled.
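The new entry follows the usual amendment pattern: the macro generates a fixTMGetObjectByHashLimit constant, and code paths consult the validated ledger rules before switching behavior. A minimal sketch of that gate, mirroring how the PeerImp::onMessage changes later in this diff use it (the helper name is hypothetical, and the header paths assume the current xrpl include layout):

```cpp
#include <xrpl/protocol/Feature.h>
#include <xrpl/protocol/Rules.h>

// Hypothetical helper: returns true once validators have enabled the fix.
static bool
useGetObjectByHashLimits(ripple::Rules const& rules)
{
    return rules.enabled(ripple::fixTMGetObjectByHashLimit);
}
```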

File: LedgerMaster_test.cpp

@@ -2,7 +2,6 @@
 #include <test/jtx/Env.h>
 #include <xrpld/app/ledger/LedgerMaster.h>
-#include <xrpld/app/misc/SHAMapStore.h>
 namespace ripple {
 namespace test {
@@ -101,88 +100,6 @@ class LedgerMaster_test : public beast::unit_test::suite
}
}
void
testCompleteLedgerRange(FeatureBitset features)
{
// Note that this test is intentionally very similar to
// SHAMapStore_test::testLedgerGaps, but has a different
// focus.
testcase("Complete Ledger operations");
using namespace test::jtx;
auto const deleteInterval = 8;
Env env{*this, envconfig([](auto cfg) {
return online_delete(std::move(cfg), deleteInterval);
})};
auto const alice = Account("alice");
env.fund(XRP(1000), alice);
env.close();
auto& lm = env.app().getLedgerMaster();
LedgerIndex minSeq = 2;
LedgerIndex maxSeq = env.closed()->info().seq;
auto& store = env.app().getSHAMapStore();
store.rendezvous();
LedgerIndex lastRotated = store.getLastRotated();
BEAST_EXPECTS(maxSeq == 3, to_string(maxSeq));
BEAST_EXPECTS(
lm.getCompleteLedgers() == "2-3", lm.getCompleteLedgers());
BEAST_EXPECTS(lastRotated == 3, to_string(lastRotated));
BEAST_EXPECT(lm.missingFromCompleteLedgerRange(minSeq, maxSeq) == 0);
BEAST_EXPECT(
lm.missingFromCompleteLedgerRange(minSeq + 1, maxSeq - 1) == 0);
BEAST_EXPECT(
lm.missingFromCompleteLedgerRange(minSeq - 1, maxSeq + 1) == 2);
BEAST_EXPECT(
lm.missingFromCompleteLedgerRange(minSeq - 2, maxSeq - 2) == 2);
BEAST_EXPECT(
lm.missingFromCompleteLedgerRange(minSeq + 2, maxSeq + 2) == 2);
// Close enough ledgers to rotate a few times
for (int i = 0; i < 24; ++i)
{
for (int t = 0; t < 3; ++t)
{
env(noop(alice));
}
env.close();
store.rendezvous();
++maxSeq;
if (maxSeq == lastRotated + deleteInterval)
{
minSeq = lastRotated;
lastRotated = maxSeq;
}
BEAST_EXPECTS(
env.closed()->info().seq == maxSeq,
to_string(env.closed()->info().seq));
BEAST_EXPECTS(
store.getLastRotated() == lastRotated,
to_string(store.getLastRotated()));
std::stringstream expectedRange;
expectedRange << minSeq << "-" << maxSeq;
BEAST_EXPECTS(
lm.getCompleteLedgers() == expectedRange.str(),
lm.getCompleteLedgers());
BEAST_EXPECT(
lm.missingFromCompleteLedgerRange(minSeq, maxSeq) == 0);
BEAST_EXPECT(
lm.missingFromCompleteLedgerRange(minSeq + 1, maxSeq - 1) == 0);
BEAST_EXPECT(
lm.missingFromCompleteLedgerRange(minSeq - 1, maxSeq + 1) == 2);
BEAST_EXPECT(
lm.missingFromCompleteLedgerRange(minSeq - 2, maxSeq - 2) == 2);
BEAST_EXPECT(
lm.missingFromCompleteLedgerRange(minSeq + 2, maxSeq + 2) == 2);
}
}
public:
void
run() override
@@ -196,7 +113,6 @@ public:
     testWithFeats(FeatureBitset features)
     {
         testTxnIdFromIndex(features);
-        testCompleteLedgerRange(features);
     }
 };

File: SHAMapStore_test.cpp

@@ -1,7 +1,6 @@
 #include <test/jtx.h>
 #include <test/jtx/envconfig.h>
-#include <xrpld/app/ledger/LedgerMaster.h>
 #include <xrpld/app/main/Application.h>
 #include <xrpld/app/main/NodeStoreScheduler.h>
 #include <xrpld/app/misc/SHAMapStore.h>
@@ -11,8 +10,6 @@
 #include <xrpl/nodestore/detail/DatabaseRotatingImp.h>
 #include <xrpl/protocol/jss.h>
-#include <thread>
 namespace ripple {
 namespace test {
@@ -23,8 +20,10 @@ class SHAMapStore_test : public beast::unit_test::suite
     static auto
     onlineDelete(std::unique_ptr<Config> cfg)
     {
-        using namespace jtx;
-        return online_delete(std::move(cfg), deleteInterval);
+        cfg->LEDGER_HISTORY = deleteInterval;
+        auto& section = cfg->section(ConfigSection::nodeDatabase());
+        section.set("online_delete", std::to_string(deleteInterval));
+        return cfg;
     }
     static auto
@@ -627,184 +626,6 @@ public:
BEAST_EXPECT(dbr->getName() == "3");
}
void
testLedgerGaps()
{
// Note that this test is intentionally very similar to
// LedgerMaster_test::testCompleteLedgerRange, but has a different
// focus.
testcase("Wait for ledger gaps to fill in");
using namespace test::jtx;
Env env{*this, envconfig(onlineDelete)};
std::map<LedgerIndex, uint256> hashes;
auto failureMessage = [&](char const* label,
auto expected,
auto actual) {
std::stringstream ss;
ss << label << ": Expected: " << expected << ", Got: " << actual;
return ss.str();
};
auto const alice = Account("alice");
env.fund(XRP(1000), alice);
env.close();
auto& lm = env.app().getLedgerMaster();
LedgerIndex minSeq = 2;
LedgerIndex maxSeq = env.closed()->info().seq;
auto& store = env.app().getSHAMapStore();
store.rendezvous();
LedgerIndex lastRotated = store.getLastRotated();
BEAST_EXPECTS(maxSeq == 3, to_string(maxSeq));
BEAST_EXPECTS(
lm.getCompleteLedgers() == "2-3", lm.getCompleteLedgers());
BEAST_EXPECTS(lastRotated == 3, to_string(lastRotated));
BEAST_EXPECT(lm.missingFromCompleteLedgerRange(minSeq, maxSeq) == 0);
BEAST_EXPECT(
lm.missingFromCompleteLedgerRange(minSeq + 1, maxSeq - 1) == 0);
BEAST_EXPECT(
lm.missingFromCompleteLedgerRange(minSeq - 1, maxSeq + 1) == 2);
BEAST_EXPECT(
lm.missingFromCompleteLedgerRange(minSeq - 2, maxSeq - 2) == 2);
BEAST_EXPECT(
lm.missingFromCompleteLedgerRange(minSeq + 2, maxSeq + 2) == 2);
// Close enough ledgers to rotate a few times
while (maxSeq < 20)
{
for (int t = 0; t < 3; ++t)
{
env(noop(alice));
}
env.close();
store.rendezvous();
++maxSeq;
if (maxSeq + 1 == lastRotated + deleteInterval)
{
using namespace std::chrono_literals;
// The next ledger will trigger a rotation. Delete the
// current ledger from LedgerMaster.
std::this_thread::sleep_for(100ms);
LedgerIndex const deleteSeq = maxSeq;
while (!lm.haveLedger(deleteSeq))
{
std::this_thread::sleep_for(100ms);
}
lm.clearLedger(deleteSeq);
auto expectedRange =
[](auto minSeq, auto deleteSeq, auto maxSeq) {
std::stringstream expectedRange;
expectedRange << minSeq << "-" << (deleteSeq - 1);
if (deleteSeq + 1 == maxSeq)
expectedRange << "," << maxSeq;
else if (deleteSeq < maxSeq)
expectedRange << "," << (deleteSeq + 1) << "-"
<< maxSeq;
return expectedRange.str();
};
BEAST_EXPECTS(
lm.getCompleteLedgers() ==
expectedRange(minSeq, deleteSeq, maxSeq),
failureMessage(
"Complete ledgers",
expectedRange(minSeq, deleteSeq, maxSeq),
lm.getCompleteLedgers()));
BEAST_EXPECT(
lm.missingFromCompleteLedgerRange(minSeq, maxSeq) == 1);
// Close another ledger, which will trigger a rotation, but the
// rotation will be stuck until the missing ledger is filled in.
env.close();
// DO NOT CALL rendezvous()! You'll end up with a deadlock.
++maxSeq;
// Nothing has changed
BEAST_EXPECTS(
store.getLastRotated() == lastRotated,
failureMessage(
"lastRotated", lastRotated, store.getLastRotated()));
BEAST_EXPECTS(
lm.getCompleteLedgers() ==
expectedRange(minSeq, deleteSeq, maxSeq),
failureMessage(
"Complete ledgers",
expectedRange(minSeq, deleteSeq, maxSeq),
lm.getCompleteLedgers()));
// Close 5 more ledgers, waiting one second in between to
// simulate the ledger making progress while online delete waits
// for the missing ledger to be filled in.
// This ensures the healthWait check has time to run and
// detect the gap.
for (int l = 0; l < 5; ++l)
{
env.close();
// DO NOT CALL rendezvous()! You'll end up with a deadlock.
++maxSeq;
// Nothing has changed
BEAST_EXPECTS(
store.getLastRotated() == lastRotated,
failureMessage(
"lastRotated",
lastRotated,
store.getLastRotated()));
BEAST_EXPECTS(
lm.getCompleteLedgers() ==
expectedRange(minSeq, deleteSeq, maxSeq),
failureMessage(
"Complete Ledgers",
expectedRange(minSeq, deleteSeq, maxSeq),
lm.getCompleteLedgers()));
std::this_thread::sleep_for(1s);
}
// Put the missing ledger back in LedgerMaster
lm.setLedgerRangePresent(deleteSeq, deleteSeq);
// Wait for the rotation to finish
store.rendezvous();
minSeq = lastRotated;
lastRotated = deleteSeq + 1;
}
BEAST_EXPECT(maxSeq != lastRotated + deleteInterval);
BEAST_EXPECTS(
env.closed()->info().seq == maxSeq,
failureMessage("maxSeq", maxSeq, env.closed()->info().seq));
BEAST_EXPECTS(
store.getLastRotated() == lastRotated,
failureMessage(
"lastRotated", lastRotated, store.getLastRotated()));
std::stringstream expectedRange;
expectedRange << minSeq << "-" << maxSeq;
BEAST_EXPECTS(
lm.getCompleteLedgers() == expectedRange.str(),
failureMessage(
"CompleteLedgers",
expectedRange.str(),
lm.getCompleteLedgers()));
BEAST_EXPECT(
lm.missingFromCompleteLedgerRange(minSeq, maxSeq) == 0);
BEAST_EXPECT(
lm.missingFromCompleteLedgerRange(minSeq + 1, maxSeq - 1) == 0);
BEAST_EXPECT(
lm.missingFromCompleteLedgerRange(minSeq - 1, maxSeq + 1) == 2);
BEAST_EXPECT(
lm.missingFromCompleteLedgerRange(minSeq - 2, maxSeq - 2) == 2);
BEAST_EXPECT(
lm.missingFromCompleteLedgerRange(minSeq + 2, maxSeq + 2) == 2);
}
}
void
run() override
{
@@ -812,7 +633,6 @@ public:
         testAutomatic();
         testCanDelete();
         testRotate();
-        testLedgerGaps();
     }
 };

File: envconfig.h

@@ -58,17 +58,6 @@ envconfig(F&& modfunc, Args&&... args)
     return modfunc(envconfig(), std::forward<Args>(args)...);
 }
-/// @brief adjust config to enable online_delete
-///
-/// @param cfg config instance to be modified
-///
-/// @param deleteInterval how many new ledgers should be available before
-/// rotating. Defaults to 8, because the standalone minimum is 8.
-///
-/// @return unique_ptr to Config instance
-std::unique_ptr<Config>
-online_delete(std::unique_ptr<Config> cfg, std::uint32_t deleteInterval = 8);
 /// @brief adjust config so no admin ports are enabled
 ///
 /// this is intended for use with envconfig, as in

File: envconfig.cpp

@@ -53,15 +53,6 @@ setupConfigForUnitTests(Config& cfg)
 namespace jtx {
-std::unique_ptr<Config>
-online_delete(std::unique_ptr<Config> cfg, std::uint32_t deleteInterval)
-{
-    cfg->LEDGER_HISTORY = deleteInterval;
-    auto& section = cfg->section(ConfigSection::nodeDatabase());
-    section.set("online_delete", std::to_string(deleteInterval));
-    return cfg;
-}
 std::unique_ptr<Config>
 no_admin(std::unique_ptr<Config> cfg)
 {

File: LedgerMaster.h

@@ -108,10 +108,7 @@ public:
     failedSave(std::uint32_t seq, uint256 const& hash);
     std::string
-    getCompleteLedgers() const;
-    std::size_t
-    missingFromCompleteLedgerRange(LedgerIndex first, LedgerIndex last) const;
+    getCompleteLedgers();
     /** Apply held transactions to the open ledger
         This is normally called as we close the ledger.
@@ -328,7 +325,7 @@ private:
     // A set of transactions to replay during the next close
     std::unique_ptr<LedgerReplay> replayData;
-    std::recursive_mutex mutable mCompleteLock;
+    std::recursive_mutex mCompleteLock;
     RangeSet<std::uint32_t> mCompleteLedgers;
     // Publish thread is running.

File: LedgerMaster.cpp

@@ -1571,36 +1571,12 @@ LedgerMaster::getPublishedLedger()
 }
 std::string
-LedgerMaster::getCompleteLedgers() const
+LedgerMaster::getCompleteLedgers()
 {
     std::lock_guard sl(mCompleteLock);
     return to_string(mCompleteLedgers);
 }
-std::size_t
-LedgerMaster::missingFromCompleteLedgerRange(
-    LedgerIndex first,
-    LedgerIndex last) const
-{
-    // Make a copy of the range to avoid holding the lock
-    auto const range = [&] {
-        std::lock_guard sl(mCompleteLock);
-        return mCompleteLedgers;
-    }();
-    std::size_t missing = 0;
-    for (LedgerIndex idx = first; idx <= last; ++idx)
-    {
-        if (!boost::icl::contains(range, idx))
-        {
-            ++missing;
-        }
-    }
-    return missing;
-}
 std::optional<NetClock::time_point>
 LedgerMaster::getCloseTimeBySeq(LedgerIndex ledgerIndex)
 {

File: SHAMapStoreImp.cpp

@@ -289,18 +289,6 @@ SHAMapStoreImp::run()
             validatedSeq >= lastRotated + deleteInterval_ &&
             canDelete_ >= lastRotated - 1 && healthWait() == keepGoing;
-        JLOG(journal_.debug())
-            << "run: Setting lastGoodValidatedLedger_ to " << validatedSeq;
-        {
-            // Note that this is set after the healthWait() check, so that we
-            // don't start the rotation until the validated ledger is fully
-            // processed. It is not guaranteed to be done at this point. It also
-            // allows the testLedgerGaps unit test to work.
-            std::unique_lock<std::mutex> lock(mutex_);
-            lastGoodValidatedLedger_ = validatedSeq;
-        }
         // will delete up to (not including) lastRotated
         if (readyToRotate)
         {
@@ -309,9 +297,7 @@ SHAMapStoreImp::run()
                 << lastRotated << " deleteInterval " << deleteInterval_
                 << " canDelete_ " << canDelete_ << " state "
                 << app_.getOPs().strOperatingMode(false) << " age "
-                << ledgerMaster_->getValidatedLedgerAge().count()
-                << "s. Complete ledgers: "
-                << ledgerMaster_->getCompleteLedgers();
+                << ledgerMaster_->getValidatedLedgerAge().count() << 's';
             clearPrior(lastRotated);
             if (healthWait() == stopping)
@@ -374,10 +360,7 @@ SHAMapStoreImp::run()
                 clearCaches(validatedSeq);
             });
-            JLOG(journal_.warn())
-                << "finished rotation. validatedSeq: " << validatedSeq
-                << ", lastRotated: " << lastRotated << ". Complete ledgers: "
-                << ledgerMaster_->getCompleteLedgers();
+            JLOG(journal_.warn()) << "finished rotation " << validatedSeq;
         }
     }
 }
@@ -632,47 +615,22 @@ SHAMapStoreImp::clearPrior(LedgerIndex lastRotated)
 SHAMapStoreImp::HealthResult
 SHAMapStoreImp::healthWait()
 {
-    auto index = ledgerMaster_->getValidLedgerIndex();
     auto age = ledgerMaster_->getValidatedLedgerAge();
     OperatingMode mode = netOPs_->getOperatingMode();
     std::unique_lock lock(mutex_);
-    auto numMissing = ledgerMaster_->missingFromCompleteLedgerRange(
-        lastGoodValidatedLedger_, index);
-    while (
-        !stop_ &&
-        (mode != OperatingMode::FULL || age > ageThreshold_ || numMissing > 0))
+    while (!stop_ && (mode != OperatingMode::FULL || age > ageThreshold_))
     {
-        // this value shouldn't change, so grab it while we have the
-        // lock
-        auto const lastGood = lastGoodValidatedLedger_;
         lock.unlock();
-        auto const stream = mode != OperatingMode::FULL || age > ageThreshold_
-            ? journal_.warn()
-            : journal_.info();
-        JLOG(stream) << "Waiting " << recoveryWaitTime_.count()
-                     << "s for node to stabilize. state: "
-                     << app_.getOPs().strOperatingMode(mode, false) << ". age "
-                     << age.count() << "s. Missing ledgers: " << numMissing
-                     << ". Expect: " << lastGood << "-" << index
-                     << ". Complete ledgers: "
-                     << ledgerMaster_->getCompleteLedgers();
+        JLOG(journal_.warn()) << "Waiting " << recoveryWaitTime_.count()
+                              << "s for node to stabilize. state: "
+                              << app_.getOPs().strOperatingMode(mode, false)
+                              << ". age " << age.count() << 's';
         std::this_thread::sleep_for(recoveryWaitTime_);
-        index = ledgerMaster_->getValidLedgerIndex();
         age = ledgerMaster_->getValidatedLedgerAge();
         mode = netOPs_->getOperatingMode();
-        numMissing =
-            ledgerMaster_->missingFromCompleteLedgerRange(lastGood, index);
         lock.lock();
     }
-    JLOG(journal_.debug()) << "healthWait: Setting lastGoodValidatedLedger_ to "
-                           << index;
-    lastGoodValidatedLedger_ = index;
     return stop_ ? stopping : keepGoing;
 }

File: SHAMapStoreImp.h

@@ -71,11 +71,6 @@ private:
     std::thread thread_;
     bool stop_ = false;
     bool healthy_ = true;
-    // Used to prevent ledger gaps from forming during online deletion. Keeps
-    // track of the last validated ledger that was processed without gaps. There
-    // are no guarantees about gaps while online delete is not running. For
-    // that, use advisory_delete and check for gaps externally.
-    LedgerIndex lastGoodValidatedLedger_ = 0;
     mutable std::condition_variable cond_;
     mutable std::condition_variable rendezvous_;
     mutable std::mutex mutex_;
@@ -89,11 +84,11 @@ private:
     std::uint32_t deleteBatch_ = 100;
     std::chrono::milliseconds backOff_{100};
     std::chrono::seconds ageThreshold_{60};
-    /// If the node is out of sync, or any recent ledgers are not
-    /// available during an online_delete healthWait() call, sleep
-    /// the thread for this time, and continue checking until recovery.
+    /// If the node is out of sync during an online_delete healthWait()
+    /// call, sleep the thread for this time, and continue checking until
+    /// recovery.
     /// See also: "recovery_wait_seconds" in rippled-example.cfg
-    std::chrono::seconds recoveryWaitTime_{1};
+    std::chrono::seconds recoveryWaitTime_{5};
     // these do not exist upon SHAMapStore creation, but do exist
     // as of run() or before
@@ -217,8 +212,6 @@ private:
     enum HealthResult { stopping, keepGoing };
     [[nodiscard]] HealthResult
     healthWait();
-    bool
-    hasCompleteRange(LedgerIndex first, LedgerIndex last);
 public:
     void

File: PeerImp.cpp

@@ -18,6 +18,7 @@
 #include <xrpl/basics/base64.h>
 #include <xrpl/basics/random.h>
 #include <xrpl/basics/safe_cast.h>
+#include <xrpl/protocol/Feature.h>
 #include <xrpl/protocol/TxFlags.h>
 #include <xrpl/protocol/digest.h>
@@ -2589,9 +2590,51 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMGetObjectByHash> const& m)
         reply.set_ledgerhash(packet.ledgerhash());
     }
-    fee_.update(
-        Resource::feeModerateBurdenPeer,
-        " received a get object by hash request");
+    // Get validated rules to check if the fix is enabled
+    auto const rules = app_.getLedgerMaster().getValidatedRules();
+    // Charge resource fee based on request size when fix is enabled
+    if (rules.enabled(fixTMGetObjectByHashLimit))
+    {
+        // Enforce per-request object cap
+        if (packet.objects_size() > Tuning::maxGetObjectByHash)
+        {
+            fee_.update(Resource::feeMalformedRequest, "too many objects");
+            return;
+        }
+        // Charge heavier fee for large requests (>256 objects)
+        if (packet.objects_size() > 256)
+        {
+            fee_.update(
+                Resource::feeHeavyBurdenPeer,
+                "large get object by hash request");
+        }
+        else if (packet.objects_size() > 64)
+        {
+            fee_.update(
+                Resource::feeModerateBurdenPeer,
+                "moderate get object by hash request");
+        }
+        else
+        {
+            fee_.update(
+                Resource::feeTrivialPeer,
+                "small get object by hash request");
+        }
+    }
+    else
+    {
+        // Legacy behavior: charge moderate fee for all requests
+        fee_.update(
+            Resource::feeModerateBurdenPeer,
+            "received a get object by hash request");
+    }
+    // Track reply bytes and stop when over budget (16 MiB) when fix is enabled
+    std::size_t replyBudgetBytes =
+        rules.enabled(fixTMGetObjectByHashLimit) ? megabytes(16) : 0;
+    std::size_t replyBytes = 0;
     // This is a very minimal implementation
     for (int i = 0; i < packet.objects_size(); ++i)
@@ -2606,17 +2649,28 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMGetObjectByHash> const& m)
             auto nodeObject{app_.getNodeStore().fetchNodeObject(hash, seq)};
             if (nodeObject)
             {
+                auto const dataSz = nodeObject->getData().size();
+                // Check if adding this object would exceed the reply budget
+                // (only when fix is enabled)
+                if (replyBudgetBytes > 0 &&
+                    replyBytes + dataSz + 64 > replyBudgetBytes)
+                    break;
                 protocol::TMIndexedObject& newObj = *reply.add_objects();
                 newObj.set_hash(hash.begin(), hash.size());
                 newObj.set_data(
                     &nodeObject->getData().front(),
-                    nodeObject->getData().size());
+                    dataSz);
                 if (obj.has_nodeid())
                     newObj.set_index(obj.nodeid());
                 if (obj.has_ledgerseq())
                     newObj.set_ledgerseq(obj.ledgerseq());
+                // Track reply bytes when fix is enabled
+                if (replyBudgetBytes > 0)
+                    replyBytes += dataSz + 64; // include modest overhead estimate
                 // VFALCO NOTE "seq" in the message is obsolete
             }
         }
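Taken together with the maxGetObjectByHash = 1024 cap added in the last hunk below, the new handling can be summarized as: reject requests above the 1024-object cap, tier the fee at 64 and 256 objects, and stop adding objects once roughly 16 MiB of reply data has been queued. A condensed, illustrative restatement of those thresholds (the function and enum names here are mine, not the actual PeerImp code, which also keeps the pre-amendment path):

```cpp
// Illustrative only: mirrors the thresholds introduced in this diff.
enum class RequestCost { Reject, Heavy, Moderate, Trivial };

RequestCost
classifyGetObjectByHash(int objectCount, int maxObjects = 1024)
{
    if (objectCount > maxObjects)
        return RequestCost::Reject;    // feeMalformedRequest; request is dropped
    if (objectCount > 256)
        return RequestCost::Heavy;     // feeHeavyBurdenPeer
    if (objectCount > 64)
        return RequestCost::Moderate;  // feeModerateBurdenPeer
    return RequestCost::Trivial;       // feeTrivialPeer
}

// Reply assembly then stops once the running byte count (object size plus a
// 64-byte overhead estimate per object) would exceed the 16 MiB budget.
```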

File: Tuning.h

@@ -22,6 +22,9 @@ enum {
     /** The hard cap on the number of ledger entries in a single reply. */
     hardMaxReplyNodes = 12288,
+    /** Hard cap on TMGetObjectByHash objects per request (non-TRANSACTIONS). */
+    maxGetObjectByHash = 1024,
     /** How many timer intervals a sendq has to stay large before we disconnect
     */
     sendqIntervals = 4,