Compare commits

..

4 Commits

Author SHA1 Message Date
Vito
c5d7ebe93d restores missing linebreak 2026-02-05 10:24:14 +01:00
Ed Hennis
d0b5ca9dab Merge branch 'develop' into tapanito/lending-fix-amendment 2026-02-04 18:21:55 -04:00
Vito
5e51893e9b fixes a typo 2026-02-04 11:31:58 +01:00
Vito
3422c11d02 adds lending v1.1 fix amendment 2026-02-04 11:30:41 +01:00
12 changed files with 227 additions and 51 deletions

View File

@@ -940,7 +940,23 @@
#
# path Location to store the database
#
# Optional keys for NuDB and RocksDB:
# Optional keys
#
# cache_size Size of cache for database records. Default is 16384.
# Setting this value to 0 will use the default value.
#
# cache_age Length of time in minutes to keep database records
# cached. Default is 5 minutes. Setting this value to
# 0 will use the default value.
#
# Note: if neither cache_size nor cache_age is
# specified, the cache for database records will not
# be created. If only one of cache_size or cache_age
# is specified, the cache will be created using the
# default value for the unspecified parameter.
#
# Note: the cache will not be created if online_delete
# is specified.
#
# fast_load Boolean. If set, load the last persisted ledger
# from disk upon process start before syncing to
@@ -948,6 +964,8 @@
# if sufficient IOPS capacity is available.
# Default 0.
#
# Optional keys for NuDB or RocksDB:
#
# earliest_seq The default is 32570 to match the XRP ledger
# network's earliest allowed sequence. Alternate
# networks may set this value. Minimum value of 1.

View File

@@ -133,6 +133,10 @@ public:
std::uint32_t ledgerSeq,
std::function<void(std::shared_ptr<NodeObject> const&)>&& callback);
/** Remove expired entries from the positive and negative caches. */
virtual void
sweep() = 0;
/** Gather statistics pertaining to read and write activities.
*
* @param obj Json object reference into which to place counters.

View File

@@ -23,6 +23,32 @@ public:
beast::Journal j)
: Database(scheduler, readThreads, config, j), backend_(std::move(backend))
{
std::optional<int> cacheSize, cacheAge;
if (config.exists("cache_size"))
{
cacheSize = get<int>(config, "cache_size");
if (cacheSize.value() < 0)
{
Throw<std::runtime_error>("Specified negative value for cache_size");
}
}
if (config.exists("cache_age"))
{
cacheAge = get<int>(config, "cache_age");
if (cacheAge.value() < 0)
{
Throw<std::runtime_error>("Specified negative value for cache_age");
}
}
if (cacheSize != 0 || cacheAge != 0)
{
cache_ = std::make_shared<TaggedCache<uint256, NodeObject>>(
"DatabaseNodeImp", cacheSize.value_or(0), std::chrono::minutes(cacheAge.value_or(0)), stopwatch(), j);
}
XRPL_ASSERT(
backend_,
"xrpl::NodeStore::DatabaseNodeImp::DatabaseNodeImp : non-null "
@@ -77,7 +103,13 @@ public:
std::uint32_t ledgerSeq,
std::function<void(std::shared_ptr<NodeObject> const&)>&& callback) override;
void
sweep() override;
private:
// Cache for database objects. This cache is not always initialized. Check
// for null before using.
std::shared_ptr<TaggedCache<uint256, NodeObject>> cache_;
// Persistent key/value storage
std::shared_ptr<Backend> backend_;

View File

@@ -55,6 +55,9 @@ public:
void
sync() override;
void
sweep() override;
private:
std::shared_ptr<Backend> writableBackend_;
std::shared_ptr<Backend> archiveBackend_;

View File

@@ -16,6 +16,7 @@
// Add new amendments to the top of this list.
// Keep it sorted in reverse chronological order.
XRPL_FIX (LendingProtocolV1_1, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (ExpiredNFTokenOfferRemoval, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (BatchInnerSigs, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(LendingProtocol, Supported::yes, VoteBehavior::DefaultNo)

View File

@@ -10,6 +10,11 @@ DatabaseNodeImp::store(NodeObjectType type, Blob&& data, uint256 const& hash, st
auto obj = NodeObject::createObject(type, std::move(data), hash);
backend_->store(obj);
if (cache_)
{
// After the store, replace a negative cache entry if there is one
cache_->canonicalize(hash, obj, [](std::shared_ptr<NodeObject> const& n) { return n->getType() == hotDUMMY; });
}
}
void
@@ -18,36 +23,77 @@ DatabaseNodeImp::asyncFetch(
std::uint32_t ledgerSeq,
std::function<void(std::shared_ptr<NodeObject> const&)>&& callback)
{
if (cache_)
{
std::shared_ptr<NodeObject> obj = cache_->fetch(hash);
if (obj)
{
callback(obj->getType() == hotDUMMY ? nullptr : obj);
return;
}
}
Database::asyncFetch(hash, ledgerSeq, std::move(callback));
}
void
DatabaseNodeImp::sweep()
{
    // Expire stale entries (including hotDUMMY negative-cache markers) from
    // the object cache. The cache is optional — it is only constructed when
    // cache_size/cache_age were configured — so guard against null.
    if (cache_)
        cache_->sweep();
}
std::shared_ptr<NodeObject>
DatabaseNodeImp::fetchNodeObject(uint256 const& hash, std::uint32_t, FetchReport& fetchReport, bool duplicate)
{
std::shared_ptr<NodeObject> nodeObject = nullptr;
Status status;
std::shared_ptr<NodeObject> nodeObject = cache_ ? cache_->fetch(hash) : nullptr;
try
if (!nodeObject)
{
status = backend_->fetch(hash.data(), &nodeObject);
}
catch (std::exception const& e)
{
JLOG(j_.fatal()) << "fetchNodeObject " << hash << ": Exception fetching from backend: " << e.what();
Rethrow();
}
JLOG(j_.trace()) << "fetchNodeObject " << hash << ": record not " << (cache_ ? "cached" : "found");
switch (status)
Status status;
try
{
status = backend_->fetch(hash.data(), &nodeObject);
}
catch (std::exception const& e)
{
JLOG(j_.fatal()) << "fetchNodeObject " << hash << ": Exception fetching from backend: " << e.what();
Rethrow();
}
switch (status)
{
case ok:
if (cache_)
{
if (nodeObject)
cache_->canonicalize_replace_client(hash, nodeObject);
else
{
auto notFound = NodeObject::createObject(hotDUMMY, {}, hash);
cache_->canonicalize_replace_client(hash, notFound);
if (notFound->getType() != hotDUMMY)
nodeObject = notFound;
}
}
break;
case notFound:
break;
case dataCorrupt:
JLOG(j_.fatal()) << "fetchNodeObject " << hash << ": nodestore data is corrupted";
break;
default:
JLOG(j_.warn()) << "fetchNodeObject " << hash << ": backend returns unknown result " << status;
break;
}
}
else
{
case ok:
case notFound:
break;
case dataCorrupt:
JLOG(j_.fatal()) << "fetchNodeObject " << hash << ": nodestore data is corrupted";
break;
default:
JLOG(j_.warn()) << "fetchNodeObject " << hash << ": backend returns unknown result " << status;
break;
JLOG(j_.trace()) << "fetchNodeObject " << hash << ": record found in cache";
if (nodeObject->getType() == hotDUMMY)
nodeObject.reset();
}
if (nodeObject)
@@ -59,36 +105,66 @@ DatabaseNodeImp::fetchNodeObject(uint256 const& hash, std::uint32_t, FetchReport
std::vector<std::shared_ptr<NodeObject>>
DatabaseNodeImp::fetchBatch(std::vector<uint256> const& hashes)
{
std::vector<std::shared_ptr<NodeObject>> results{hashes.size()};
using namespace std::chrono;
auto const before = steady_clock::now();
std::vector<uint256 const*> batch{};
batch.reserve(hashes.size());
std::unordered_map<uint256 const*, size_t> indexMap;
std::vector<uint256 const*> cacheMisses;
uint64_t hits = 0;
uint64_t fetches = 0;
for (size_t i = 0; i < hashes.size(); ++i)
{
auto const& hash = hashes[i];
batch.push_back(&hash);
}
// Get the node objects that match the hashes from the backend. To protect
// against the backends returning fewer or more results than expected, the
// container is resized to the number of hashes.
auto results = backend_->fetchBatch(batch).first;
XRPL_ASSERT(
results.size() == hashes.size() || results.empty(),
"number of output objects either matches number of input hashes or is empty");
results.resize(hashes.size());
for (size_t i = 0; i < results.size(); ++i)
{
if (!results[i])
// See if the object already exists in the cache
auto nObj = cache_ ? cache_->fetch(hash) : nullptr;
++fetches;
if (!nObj)
{
JLOG(j_.error()) << "fetchBatch - "
<< "record not found in db. hash = " << strHex(hashes[i]);
// Try the database
indexMap[&hash] = i;
cacheMisses.push_back(&hash);
}
else
{
results[i] = nObj->getType() == hotDUMMY ? nullptr : nObj;
// It was in the cache.
++hits;
}
}
JLOG(j_.debug()) << "fetchBatch - cache hits = " << (hashes.size() - cacheMisses.size())
<< " - cache misses = " << cacheMisses.size();
auto dbResults = backend_->fetchBatch(cacheMisses).first;
for (size_t i = 0; i < dbResults.size(); ++i)
{
auto nObj = std::move(dbResults[i]);
size_t index = indexMap[cacheMisses[i]];
auto const& hash = hashes[index];
if (nObj)
{
// Ensure all threads get the same object
if (cache_)
cache_->canonicalize_replace_client(hash, nObj);
}
else
{
JLOG(j_.error()) << "fetchBatch - "
<< "record not found in db or cache. hash = " << strHex(hash);
if (cache_)
{
auto notFound = NodeObject::createObject(hotDUMMY, {}, hash);
cache_->canonicalize_replace_client(hash, notFound);
if (notFound->getType() != hotDUMMY)
nObj = std::move(notFound);
}
}
results[index] = std::move(nObj);
}
auto fetchDurationUs = std::chrono::duration_cast<std::chrono::microseconds>(steady_clock::now() - before).count();
updateFetchMetrics(hashes.size(), 0, fetchDurationUs);
updateFetchMetrics(fetches, hits, fetchDurationUs);
return results;
}

View File

@@ -93,6 +93,12 @@ DatabaseRotatingImp::store(NodeObjectType type, Blob&& data, uint256 const& hash
storeStats(1, nObj->getData().size());
}
void
DatabaseRotatingImp::sweep()
{
// nothing to do
}
std::shared_ptr<NodeObject>
DatabaseRotatingImp::fetchNodeObject(uint256 const& hash, std::uint32_t, FetchReport& fetchReport, bool duplicate)
{

View File

@@ -1701,10 +1701,21 @@ class LoanBroker_test : public beast::unit_test::suite
testRIPD4274MPT();
}
void
testFixAmendmentEnabled()
{
    using namespace jtx;
    testcase("testFixAmendmentEnabled");
    // A default-constructed Env enables all supported amendments; verify
    // the fixLendingProtocolV1_1 amendment is among them.
    Env env{*this};
    BEAST_EXPECT(env.enabled(fixLendingProtocolV1_1));
}
public:
void
run() override
{
testFixAmendmentEnabled();
testLoanBrokerSetDebtMaximum();
testLoanBrokerCoverDepositNullVault();

View File

@@ -490,8 +490,19 @@ public:
Env env(*this, envconfig(onlineDelete));
/////////////////////////////////////////////////////////////
// Create NodeStore with two backends to allow online deletion of data.
// Normally, SHAMapStoreImp handles all these details.
// Create the backend. Normally, SHAMapStoreImp handles all these
// details
auto nscfg = env.app().config().section(ConfigSection::nodeDatabase());
// Provide default values:
if (!nscfg.exists("cache_size"))
nscfg.set(
"cache_size", std::to_string(env.app().config().getValueFor(SizedItem::treeCacheSize, std::nullopt)));
if (!nscfg.exists("cache_age"))
nscfg.set(
"cache_age", std::to_string(env.app().config().getValueFor(SizedItem::treeCacheAge, std::nullopt)));
NodeStoreScheduler scheduler(env.app().getJobQueue());
std::string const writableDb = "write";
@@ -499,8 +510,9 @@ public:
auto writableBackend = makeBackendRotating(env, scheduler, writableDb);
auto archiveBackend = makeBackendRotating(env, scheduler, archiveDb);
// Create NodeStore with two backends to allow online deletion of
// data
constexpr int readThreads = 4;
auto nscfg = env.app().config().section(ConfigSection::nodeDatabase());
auto dbr = std::make_unique<NodeStore::DatabaseRotatingImp>(
scheduler,
readThreads,

View File

@@ -908,6 +908,10 @@ public:
JLOG(m_journal.debug()) << "MasterTransaction sweep. Size before: " << oldMasterTxSize
<< "; size after: " << masterTxCache.size();
}
{
// Does not appear to have an associated cache.
getNodeStore().sweep();
}
{
std::size_t const oldLedgerMasterCacheSize = getLedgerMaster().getFetchPackCacheSize();

View File

@@ -130,6 +130,14 @@ std::unique_ptr<NodeStore::Database>
SHAMapStoreImp::makeNodeStore(int readThreads)
{
auto nscfg = app_.config().section(ConfigSection::nodeDatabase());
// Provide default values:
if (!nscfg.exists("cache_size"))
nscfg.set("cache_size", std::to_string(app_.config().getValueFor(SizedItem::treeCacheSize, std::nullopt)));
if (!nscfg.exists("cache_age"))
nscfg.set("cache_age", std::to_string(app_.config().getValueFor(SizedItem::treeCacheAge, std::nullopt)));
std::unique_ptr<NodeStore::Database> db;
if (deleteInterval_)
@@ -218,6 +226,8 @@ SHAMapStoreImp::run()
LedgerIndex lastRotated = state_db_.getState().lastRotated;
netOPs_ = &app_.getOPs();
ledgerMaster_ = &app_.getLedgerMaster();
fullBelowCache_ = &(*app_.getNodeFamily().getFullBelowCache());
treeNodeCache_ = &(*app_.getNodeFamily().getTreeNodeCache());
if (advisoryDelete_)
canDelete_ = state_db_.getCanDelete();
@@ -480,19 +490,16 @@ void
SHAMapStoreImp::clearCaches(LedgerIndex validatedSeq)
{
ledgerMaster_->clearLedgerCachePrior(validatedSeq);
// Also clear the FullBelowCache so its generation counter is bumped.
// This prevents stale "full below" markers from persisting across
// backend rotation/online deletion and interfering with SHAMap sync.
app_.getNodeFamily().getFullBelowCache()->clear();
fullBelowCache_->clear();
}
void
SHAMapStoreImp::freshenCaches()
{
if (freshenCache(*app_.getNodeFamily().getTreeNodeCache()))
if (freshenCache(*treeNodeCache_))
return;
if (freshenCache(app_.getMasterTransaction().getCache()))
return;
freshenCache(app_.getMasterTransaction().getCache());
}
void

View File

@@ -93,6 +93,8 @@ private:
// as of run() or before
NetworkOPs* netOPs_ = nullptr;
LedgerMaster* ledgerMaster_ = nullptr;
FullBelowCache* fullBelowCache_ = nullptr;
TreeNodeCache* treeNodeCache_ = nullptr;
static constexpr auto nodeStoreName_ = "NodeStore";