Compare commits

..

21 Commits

Author       SHA1        Message                                                          Date
Bart         803cd100f3  refactor: Use uint256 directly as key instead of void pointer   2026-02-02 11:39:53 -05:00
Bart         9639b79155  Copilot feedback                                                2026-02-02 10:59:45 -05:00
Bart         eabd485927  Merge branch 'develop' into bthomee/disable-cache               2026-01-29 09:28:48 +00:00
Bart         f3ea3e9646  Merge branch 'develop' into bthomee/disable-cache               2025-11-22 11:06:58 -05:00
Bart Thomee  4306d9ccc3  Restore freshening caches of tree node cache                    2025-09-17 12:17:35 -04:00
Bart Thomee  1a4d9732ca  Merge branch 'develop' into bthomee/disable-cache               2025-09-17 11:43:06 -04:00
Bart         aad6edb6b1  Merge branch 'develop' into bthomee/disable-cache               2025-08-13 08:03:40 -04:00
Bart         a4a1c4eecf  Merge branch 'develop' into bthomee/disable-cache               2025-07-03 15:43:50 -04:00
Bart         fca6a8768f  Merge branch 'develop' into bthomee/disable-cache               2025-06-02 12:02:43 -04:00
Bart         d96c4164b9  Merge branch 'develop' into bthomee/disable-cache               2025-05-22 09:18:07 -04:00
Bart Thomee  965fc75e8a  Reserve vector size                                             2025-05-20 10:07:12 -04:00
Bart Thomee  2fa1c711d3  Removed unused config values                                    2025-05-20 09:50:13 -04:00
Bart Thomee  4650e7d2c6  Removed unused caches from SHAMapStoreImp                       2025-05-20 09:49:55 -04:00
Bart Thomee  a213127852  Remove cache from SHAMapStoreImp                                2025-05-19 16:59:43 -04:00
Bart Thomee  6e7537dada  Remove cache from DatabaseNodeImp                               2025-05-19 16:51:32 -04:00
Bart Thomee  0777f7c64b  Merge branch 'develop' into bthomee/disable-cache               2025-05-19 16:37:11 -04:00
Bart Thomee  39bfcaf95c  Merge branch 'develop' into bthomee/disable-cache               2025-05-17 18:26:07 -04:00
Bart Thomee  61c9a19868  Merge branch 'develop' into bthomee/disable-cache               2025-05-07 11:02:43 -04:00
Bart Thomee  d01851bc5a  Only disable the database cache                                 2025-04-01 13:24:18 -04:00
Bart Thomee  d1703842e7  Fully disable cache                                             2025-04-01 11:41:20 -04:00
Bart Thomee  8d31b1739d  TEST: Disable tagged cache to measure performance               2025-03-28 13:21:19 -04:00
17 changed files with 64 additions and 281 deletions

View File

@@ -940,23 +940,7 @@
#
# path Location to store the database
#
-#   Optional keys
-#
-#   cache_size      Size of cache for database records. Default is 16384.
-#                   Setting this value to 0 will use the default value.
-#
-#   cache_age       Length of time in minutes to keep database records
-#                   cached. Default is 5 minutes. Setting this value to
-#                   0 will use the default value.
-#
-#                   Note: if neither cache_size nor cache_age is
-#                   specified, the cache for database records will not
-#                   be created. If only one of cache_size or cache_age
-#                   is specified, the cache will be created using the
-#                   default value for the unspecified parameter.
-#
-#                   Note: the cache will not be created if online_delete
-#                   is specified.
+#   Optional keys for NuDB and RocksDB:
#
# fast_load Boolean. If set, load the last persisted ledger
# from disk upon process start before syncing to
@@ -964,8 +948,6 @@
# if sufficient IOPS capacity is available.
# Default 0.
#
-#   Optional keys for NuDB or RocksDB:
-#
# earliest_seq The default is 32570 to match the XRP ledger
# network's earliest allowed sequence. Alternate
# networks may set this value. Minimum value of 1.
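
For orientation, a [node_db] stanza under the revised documentation carries only the remaining keys. A hypothetical sketch; the type and path values are illustrative assumptions, not defaults taken from this diff:

# Hypothetical example stanza; cache_size and cache_age are no longer read here.
[node_db]
type=NuDB
path=/var/lib/rippled/db/nudb
fast_load=1
earliest_seq=32570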

View File

@@ -77,16 +77,16 @@ public:
If the object is not found or an error is encountered, the
result will indicate the condition.
@note This will be called concurrently.
-    @param key A pointer to the key data.
+    @param hash The hash of the object.
@param pObject [out] The created object if successful.
@return The result of the operation.
*/
virtual Status
-    fetch(void const* key, std::shared_ptr<NodeObject>* pObject) = 0;
+    fetch(uint256 const& hash, std::shared_ptr<NodeObject>* pObject) = 0;
/** Fetch a batch synchronously. */
virtual std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
-    fetchBatch(std::vector<uint256 const*> const& hashes) = 0;
+    fetchBatch(std::vector<uint256> const& hashes) = 0;
/** Store a single object.
Depending on the implementation this may happen immediately
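
For orientation, a minimal call-site sketch of the reworked interface; the backend and hash variables and the surrounding logic are assumed for illustration, and only the two signatures come from this diff:

// Single fetch: the key is now the object's uint256 hash itself.
std::shared_ptr<NodeObject> obj;
if (backend.fetch(hash, &obj) == ok && obj)
{
    // use *obj
}
// Batch fetch: hashes are passed by value, and results come back
// positionally together with an overall Status.
auto [objects, status] = backend.fetchBatch(std::vector<uint256>{hash});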

View File

@@ -24,32 +24,6 @@ public:
beast::Journal j)
: Database(scheduler, readThreads, config, j), backend_(std::move(backend))
{
-        std::optional<int> cacheSize, cacheAge;
-        if (config.exists("cache_size"))
-        {
-            cacheSize = get<int>(config, "cache_size");
-            if (cacheSize.value() < 0)
-            {
-                Throw<std::runtime_error>("Specified negative value for cache_size");
-            }
-        }
-        if (config.exists("cache_age"))
-        {
-            cacheAge = get<int>(config, "cache_age");
-            if (cacheAge.value() < 0)
-            {
-                Throw<std::runtime_error>("Specified negative value for cache_age");
-            }
-        }
-        if (cacheSize != 0 || cacheAge != 0)
-        {
-            cache_ = std::make_shared<TaggedCache<uint256, NodeObject>>(
-                "DatabaseNodeImp", cacheSize.value_or(0), std::chrono::minutes(cacheAge.value_or(0)), stopwatch(), j);
-        }
XRPL_ASSERT(
backend_,
"xrpl::NodeStore::DatabaseNodeImp::DatabaseNodeImp : non-null "
@@ -108,9 +82,6 @@ public:
sweep() override;
private:
-    // Cache for database objects. This cache is not always initialized. Check
-    // for null before using.
-    std::shared_ptr<TaggedCache<uint256, NodeObject>> cache_;
// Persistent key/value storage
std::shared_ptr<Backend> backend_;
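
A subtlety in the removed constructor logic: cacheSize and cacheAge are std::optional<int>, and an empty optional compares unequal to any value, so `cacheSize != 0 || cacheAge != 0` held whenever either key was absent; the cache was therefore built by default unless both keys were explicitly set to 0. A standalone sketch of that comparison behavior (illustrative only):

#include <cassert>
#include <optional>

int main()
{
    std::optional<int> cacheSize;  // "cache_size" absent from the config
    assert(cacheSize != 0);        // empty optional != any value, so this holds
    cacheSize = 0;                 // explicitly configured as 0
    assert(cacheSize == 0);        // now it compares equal
}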

View File

@@ -244,7 +244,6 @@ ApplyView::emptyDirDelete(Keylet const& directory)
bool
ApplyView::dirRemove(Keylet const& directory, std::uint64_t page, uint256 const& key, bool keepRoot)
{
-    keepRoot = false;
auto node = peek(keylet::page(directory, page));
if (!node)
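
With the overriding assignment removed, the keepRoot argument now takes effect as the caller intends. A hypothetical call site; the view and keylet names are assumed, not taken from this diff:

// Remove a directory entry but preserve the directory's root page.
view.dirRemove(keylet::ownerDir(owner), page, key, /*keepRoot=*/true);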

View File

@@ -10,11 +10,6 @@ DatabaseNodeImp::store(NodeObjectType type, Blob&& data, uint256 const& hash, st
auto obj = NodeObject::createObject(type, std::move(data), hash);
backend_->store(obj);
-    if (cache_)
-    {
-        // After the store, replace a negative cache entry if there is one
-        cache_->canonicalize(hash, obj, [](std::shared_ptr<NodeObject> const& n) { return n->getType() == hotDUMMY; });
-    }
}
void
@@ -23,77 +18,41 @@ DatabaseNodeImp::asyncFetch(
std::uint32_t ledgerSeq,
std::function<void(std::shared_ptr<NodeObject> const&)>&& callback)
{
-    if (cache_)
-    {
-        std::shared_ptr<NodeObject> obj = cache_->fetch(hash);
-        if (obj)
-        {
-            callback(obj->getType() == hotDUMMY ? nullptr : obj);
-            return;
-        }
-    }
Database::asyncFetch(hash, ledgerSeq, std::move(callback));
}
void
DatabaseNodeImp::sweep()
{
-    if (cache_)
-        cache_->sweep();
}
std::shared_ptr<NodeObject>
DatabaseNodeImp::fetchNodeObject(uint256 const& hash, std::uint32_t, FetchReport& fetchReport, bool duplicate)
{
-    std::shared_ptr<NodeObject> nodeObject = cache_ ? cache_->fetch(hash) : nullptr;
+    std::shared_ptr<NodeObject> nodeObject = nullptr;
+    Status status;
-    if (!nodeObject)
+    try
     {
-        JLOG(j_.trace()) << "fetchNodeObject " << hash << ": record not " << (cache_ ? "cached" : "found");
-        Status status;
-        try
-        {
-            status = backend_->fetch(hash.data(), &nodeObject);
-        }
-        catch (std::exception const& e)
-        {
-            JLOG(j_.fatal()) << "fetchNodeObject " << hash << ": Exception fetching from backend: " << e.what();
-            Rethrow();
-        }
-        switch (status)
-        {
-            case ok:
-                if (cache_)
-                {
-                    if (nodeObject)
-                        cache_->canonicalize_replace_client(hash, nodeObject);
-                    else
-                    {
-                        auto notFound = NodeObject::createObject(hotDUMMY, {}, hash);
-                        cache_->canonicalize_replace_client(hash, notFound);
-                        if (notFound->getType() != hotDUMMY)
-                            nodeObject = notFound;
-                    }
-                }
-                break;
-            case notFound:
-                break;
-            case dataCorrupt:
-                JLOG(j_.fatal()) << "fetchNodeObject " << hash << ": nodestore data is corrupted";
-                break;
-            default:
-                JLOG(j_.warn()) << "fetchNodeObject " << hash << ": backend returns unknown result " << status;
-                break;
-        }
+        status = backend_->fetch(hash, &nodeObject);
     }
-    else
+    catch (std::exception const& e)
     {
-        JLOG(j_.trace()) << "fetchNodeObject " << hash << ": record found in cache";
-        if (nodeObject->getType() == hotDUMMY)
-            nodeObject.reset();
+        JLOG(j_.fatal()) << "fetchNodeObject " << hash << ": Exception fetching from backend: " << e.what();
+        Rethrow();
     }
+    switch (status)
+    {
+        case ok:
+        case notFound:
+            break;
+        case dataCorrupt:
+            JLOG(j_.fatal()) << "fetchNodeObject " << hash << ": nodestore data is corrupted";
+            break;
+        default:
+            JLOG(j_.warn()) << "fetchNodeObject " << hash << ": backend returns unknown result " << status;
+            break;
+    }
if (nodeObject)
@@ -105,66 +64,22 @@ DatabaseNodeImp::fetchNodeObject(uint256 const& hash, std::uint32_t, FetchReport
std::vector<std::shared_ptr<NodeObject>>
DatabaseNodeImp::fetchBatch(std::vector<uint256> const& hashes)
{
-    std::vector<std::shared_ptr<NodeObject>> results{hashes.size()};
     using namespace std::chrono;
     auto const before = steady_clock::now();
-    std::unordered_map<uint256 const*, size_t> indexMap;
-    std::vector<uint256 const*> cacheMisses;
-    uint64_t hits = 0;
-    uint64_t fetches = 0;
-    for (size_t i = 0; i < hashes.size(); ++i)
+    std::vector<std::shared_ptr<NodeObject>> results{hashes.size()};
+    results = backend_->fetchBatch(hashes).first;
+    for (size_t i = 0; i < results.size(); ++i)
     {
-        auto const& hash = hashes[i];
-        // See if the object already exists in the cache
-        auto nObj = cache_ ? cache_->fetch(hash) : nullptr;
-        ++fetches;
-        if (!nObj)
-        {
-            // Try the database
-            indexMap[&hash] = i;
-            cacheMisses.push_back(&hash);
-        }
-        else
-        {
-            results[i] = nObj->getType() == hotDUMMY ? nullptr : nObj;
-            // It was in the cache.
-            ++hits;
-        }
-    }
-    JLOG(j_.debug()) << "fetchBatch - cache hits = " << (hashes.size() - cacheMisses.size())
-                     << " - cache misses = " << cacheMisses.size();
-    auto dbResults = backend_->fetchBatch(cacheMisses).first;
-    for (size_t i = 0; i < dbResults.size(); ++i)
-    {
-        auto nObj = std::move(dbResults[i]);
-        size_t index = indexMap[cacheMisses[i]];
-        auto const& hash = hashes[index];
-        if (nObj)
-        {
-            // Ensure all threads get the same object
-            if (cache_)
-                cache_->canonicalize_replace_client(hash, nObj);
-        }
-        else
+        if (!results[i])
         {
             JLOG(j_.error()) << "fetchBatch - "
-                             << "record not found in db or cache. hash = " << strHex(hash);
-            if (cache_)
-            {
-                auto notFound = NodeObject::createObject(hotDUMMY, {}, hash);
-                cache_->canonicalize_replace_client(hash, notFound);
-                if (notFound->getType() != hotDUMMY)
-                    nObj = std::move(notFound);
-            }
+                             << "record not found in db. hash = " << strHex(hashes[i]);
         }
-        results[index] = std::move(nObj);
     }
     auto fetchDurationUs = std::chrono::duration_cast<std::chrono::microseconds>(steady_clock::now() - before).count();
-    updateFetchMetrics(fetches, hits, fetchDurationUs);
+    updateFetchMetrics(hashes.size(), 0, fetchDurationUs);
return results;
}
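
The simplified batch path keeps the positional contract: the i-th result corresponds to the i-th hash, and a null entry marks a record the backend could not find. A hedged usage sketch, where db, collectHashes, and onMissing are assumed names:

std::vector<uint256> wanted = collectHashes();
auto found = db.fetchBatch(wanted);
for (size_t i = 0; i < found.size(); ++i)
{
    if (!found[i])
        onMissing(wanted[i]);  // hypothetical miss handler
}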

View File

@@ -107,7 +107,7 @@ DatabaseRotatingImp::fetchNodeObject(uint256 const& hash, std::uint32_t, FetchRe
std::shared_ptr<NodeObject> nodeObject;
try
{
-        status = backend->fetch(hash.data(), &nodeObject);
+        status = backend->fetch(hash, &nodeObject);
}
catch (std::exception const& e)
{

View File

@@ -115,10 +115,9 @@ public:
//--------------------------------------------------------------------------
Status
-    fetch(void const* key, std::shared_ptr<NodeObject>* pObject) override
+    fetch(uint256 const& hash, std::shared_ptr<NodeObject>* pObject) override
{
XRPL_ASSERT(db_, "xrpl::NodeStore::MemoryBackend::fetch : non-null database");
-        uint256 const hash(uint256::fromVoid(key));
std::lock_guard _(db_->mutex);
@@ -133,14 +132,14 @@ public:
}
std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
-    fetchBatch(std::vector<uint256 const*> const& hashes) override
+    fetchBatch(std::vector<uint256> const& hashes) override
{
std::vector<std::shared_ptr<NodeObject>> results;
results.reserve(hashes.size());
for (auto const& h : hashes)
{
std::shared_ptr<NodeObject> nObj;
-            Status status = fetch(h->begin(), &nObj);
+            Status status = fetch(h, &nObj);
if (status != ok)
results.push_back({});
else

View File

@@ -177,17 +177,17 @@ public:
}
Status
-    fetch(void const* key, std::shared_ptr<NodeObject>* pno) override
+    fetch(uint256 const& hash, std::shared_ptr<NodeObject>* pno) override
{
Status status;
pno->reset();
nudb::error_code ec;
db_.fetch(
-            key,
-            [key, pno, &status](void const* data, std::size_t size) {
+            hash.data(),
+            [hash, pno, &status](void const* data, std::size_t size) {
nudb::detail::buffer bf;
auto const result = nodeobject_decompress(data, size, bf);
-                DecodedBlob decoded(key, result.first, result.second);
+                DecodedBlob decoded(hash.data(), result.first, result.second);
if (!decoded.wasOk())
{
status = dataCorrupt;
@@ -205,14 +205,14 @@ public:
}
std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
-    fetchBatch(std::vector<uint256 const*> const& hashes) override
+    fetchBatch(std::vector<uint256> const& hashes) override
{
std::vector<std::shared_ptr<NodeObject>> results;
results.reserve(hashes.size());
for (auto const& h : hashes)
{
std::shared_ptr<NodeObject> nObj;
-            Status status = fetch(h->begin(), &nObj);
+            Status status = fetch(h, &nObj);
if (status != ok)
results.push_back({});
else

View File

@@ -36,13 +36,13 @@ public:
}
Status
-    fetch(void const*, std::shared_ptr<NodeObject>*) override
+    fetch(uint256 const&, std::shared_ptr<NodeObject>*) override
{
return notFound;
}
std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
-    fetchBatch(std::vector<uint256 const*> const& hashes) override
+    fetchBatch(std::vector<uint256> const& hashes) override
{
return {};
}

View File

@@ -237,7 +237,7 @@ public:
//--------------------------------------------------------------------------
Status
-    fetch(void const* key, std::shared_ptr<NodeObject>* pObject) override
+    fetch(uint256 const& hash, std::shared_ptr<NodeObject>* pObject) override
{
XRPL_ASSERT(m_db, "xrpl::NodeStore::RocksDBBackend::fetch : non-null database");
pObject->reset();
@@ -245,7 +245,7 @@ public:
Status status(ok);
rocksdb::ReadOptions const options;
-        rocksdb::Slice const slice(static_cast<char const*>(key), m_keyBytes);
+        rocksdb::Slice const slice(reinterpret_cast<char const*>(hash.data()), m_keyBytes);
std::string string;
@@ -253,7 +253,7 @@ public:
if (getStatus.ok())
{
-            DecodedBlob decoded(key, string.data(), string.size());
+            DecodedBlob decoded(hash.data(), string.data(), string.size());
if (decoded.wasOk())
{
@@ -288,14 +288,14 @@ public:
}
std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
-    fetchBatch(std::vector<uint256 const*> const& hashes) override
+    fetchBatch(std::vector<uint256> const& hashes) override
{
std::vector<std::shared_ptr<NodeObject>> results;
results.reserve(hashes.size());
for (auto const& h : hashes)
{
std::shared_ptr<NodeObject> nObj;
-            Status status = fetch(h->begin(), &nObj);
+            Status status = fetch(h, &nObj);
if (status != ok)
results.push_back({});
else

View File

@@ -490,19 +490,8 @@ public:
Env env(*this, envconfig(onlineDelete));
/////////////////////////////////////////////////////////////
-    // Create the backend. Normally, SHAMapStoreImp handles all these
-    // details
-    auto nscfg = env.app().config().section(ConfigSection::nodeDatabase());
-    // Provide default values:
-    if (!nscfg.exists("cache_size"))
-        nscfg.set(
-            "cache_size", std::to_string(env.app().config().getValueFor(SizedItem::treeCacheSize, std::nullopt)));
-    if (!nscfg.exists("cache_age"))
-        nscfg.set(
-            "cache_age", std::to_string(env.app().config().getValueFor(SizedItem::treeCacheAge, std::nullopt)));
+    // Create NodeStore with two backends to allow online deletion of data.
+    // Normally, SHAMapStoreImp handles all these details.
NodeStoreScheduler scheduler(env.app().getJobQueue());
std::string const writableDb = "write";
@@ -510,9 +499,8 @@ public:
auto writableBackend = makeBackendRotating(env, scheduler, writableDb);
auto archiveBackend = makeBackendRotating(env, scheduler, archiveDb);
-    // Create NodeStore with two backends to allow online deletion of
-    // data
     constexpr int readThreads = 4;
+    auto nscfg = env.app().config().section(ConfigSection::nodeDatabase());
auto dbr = std::make_unique<NodeStore::DatabaseRotatingImp>(
scheduler,
readThreads,

View File

@@ -138,7 +138,7 @@ public:
{
std::shared_ptr<NodeObject> object;
-            Status const status = backend.fetch(batch[i]->getHash().cbegin(), &object);
+            Status const status = backend.fetch(batch[i]->getHash(), &object);
BEAST_EXPECT(status == ok);
@@ -158,7 +158,7 @@ public:
{
std::shared_ptr<NodeObject> object;
-            Status const status = backend.fetch(batch[i]->getHash().cbegin(), &object);
+            Status const status = backend.fetch(batch[i]->getHash(), &object);
BEAST_EXPECT(status == notFound);
}

View File

@@ -313,7 +313,7 @@ public:
std::shared_ptr<NodeObject> obj;
std::shared_ptr<NodeObject> result;
obj = seq1_.obj(dist_(gen_));
-            backend_.fetch(obj->getHash().data(), &result);
+            backend_.fetch(obj->getHash(), &result);
suite_.expect(result && isSame(result, obj));
}
catch (std::exception const& e)
@@ -371,9 +371,9 @@ public:
{
try
{
-                auto const key = seq2_.key(i);
+                auto const hash = seq2_.key(i);
                 std::shared_ptr<NodeObject> result;
-                backend_.fetch(key.data(), &result);
+                backend_.fetch(hash, &result);
suite_.expect(!result);
}
catch (std::exception const& e)
@@ -438,9 +438,9 @@ public:
{
if (rand_(gen_) < missingNodePercent)
{
-                auto const key = seq2_.key(dist_(gen_));
+                auto const hash = seq2_.key(dist_(gen_));
                 std::shared_ptr<NodeObject> result;
-                backend_.fetch(key.data(), &result);
+                backend_.fetch(hash, &result);
suite_.expect(!result);
}
else
@@ -448,7 +448,7 @@ public:
std::shared_ptr<NodeObject> obj;
std::shared_ptr<NodeObject> result;
obj = seq1_.obj(dist_(gen_));
-                backend_.fetch(obj->getHash().data(), &result);
+                backend_.fetch(obj->getHash(), &result);
suite_.expect(result && isSame(result, obj));
}
}
@@ -525,7 +525,7 @@ public:
auto const j = older_(gen_);
obj = seq1_.obj(j);
std::shared_ptr<NodeObject> result1;
-            backend_.fetch(obj->getHash().data(), &result);
+            backend_.fetch(obj->getHash(), &result);
suite_.expect(result != nullptr);
suite_.expect(isSame(result, obj));
}
@@ -543,7 +543,7 @@ public:
std::shared_ptr<NodeObject> result;
auto const j = recent_(gen_);
obj = seq1_.obj(j);
-                backend_.fetch(obj->getHash().data(), &result);
+                backend_.fetch(obj->getHash(), &result);
suite_.expect(!result || isSame(result, obj));
break;
}

View File

@@ -130,14 +130,6 @@ std::unique_ptr<NodeStore::Database>
SHAMapStoreImp::makeNodeStore(int readThreads)
{
auto nscfg = app_.config().section(ConfigSection::nodeDatabase());
-    // Provide default values:
-    if (!nscfg.exists("cache_size"))
-        nscfg.set("cache_size", std::to_string(app_.config().getValueFor(SizedItem::treeCacheSize, std::nullopt)));
-    if (!nscfg.exists("cache_age"))
-        nscfg.set("cache_age", std::to_string(app_.config().getValueFor(SizedItem::treeCacheAge, std::nullopt)));
std::unique_ptr<NodeStore::Database> db;
if (deleteInterval_)
@@ -226,8 +218,6 @@ SHAMapStoreImp::run()
LedgerIndex lastRotated = state_db_.getState().lastRotated;
netOPs_ = &app_.getOPs();
ledgerMaster_ = &app_.getLedgerMaster();
-    fullBelowCache_ = &(*app_.getNodeFamily().getFullBelowCache());
-    treeNodeCache_ = &(*app_.getNodeFamily().getTreeNodeCache());
if (advisoryDelete_)
canDelete_ = state_db_.getCanDelete();
@@ -490,16 +480,12 @@ void
SHAMapStoreImp::clearCaches(LedgerIndex validatedSeq)
{
ledgerMaster_->clearLedgerCachePrior(validatedSeq);
-    fullBelowCache_->clear();
}
void
SHAMapStoreImp::freshenCaches()
{
-    if (freshenCache(*treeNodeCache_))
-        return;
-    if (freshenCache(app_.getMasterTransaction().getCache()))
-        return;
+    freshenCache(*app_.getNodeFamily().getTreeNodeCache()) && freshenCache(app_.getMasterTransaction().getCache());
}
void

View File

@@ -94,8 +94,6 @@ private:
// as of run() or before
NetworkOPs* netOPs_ = nullptr;
LedgerMaster* ledgerMaster_ = nullptr;
-    FullBelowCache* fullBelowCache_ = nullptr;
-    TreeNodeCache* treeNodeCache_ = nullptr;
static constexpr auto nodeStoreName_ = "NodeStore";

View File

@@ -993,7 +993,7 @@ ValidNewAccountRoot::finalize(
JLOG(j.fatal()) << "Invariant failed: account root created illegally";
return false;
}
} // namespace xrpl
//------------------------------------------------------------------------------
@@ -3254,42 +3254,4 @@ ValidVault::finalize(STTx const& tx, TER const ret, XRPAmount const fee, ReadVie
return true;
}
//------------------------------------------------------------------------------
-void
-NoEmptyDirectory::visitEntry(
-    bool isDelete,
-    std::shared_ptr<SLE const> const& before,
-    std::shared_ptr<SLE const> const& after)
-{
-    if (isDelete)
-        return;
-    if (before && before->getType() != ltDIR_NODE)
-        return;
-    if (after && after->getType() != ltDIR_NODE)
-        return;
-    if (!after->isFieldPresent(sfOwner))
-        // Not an account dir
-        return;
-    bad_ = after->at(sfIndexes).empty();
-}
-bool
-NoEmptyDirectory::finalize(
-    STTx const& tx,
-    TER const result,
-    XRPAmount const,
-    ReadView const& view,
-    beast::Journal const& j)
-{
-    if (bad_)
-    {
-        JLOG(j.fatal()) << "Invariant failed: empty owner directory.";
-        return false;
-    }
-    return true;
-}
} // namespace xrpl

View File

@@ -677,22 +677,6 @@ public:
finalize(STTx const&, TER const, XRPAmount const, ReadView const&, beast::Journal const&);
};
-/**
- * @brief Invariants: An account's directory should never be empty
- *
- */
-class NoEmptyDirectory
-{
-    bool bad_ = false;
-public:
-    void
-    visitEntry(bool, std::shared_ptr<SLE const> const&, std::shared_ptr<SLE const> const&);
-    bool
-    finalize(STTx const&, TER const, XRPAmount const, ReadView const&, beast::Journal const&);
-};
// additional invariant checks can be declared above and then added to this
// tuple
using InvariantChecks = std::tuple<
@@ -719,8 +703,7 @@ using InvariantChecks = std::tuple<
ValidPseudoAccounts,
ValidLoanBroker,
ValidLoan,
-    ValidVault,
-    NoEmptyDirectory>;
+    ValidVault>;
/**
* @brief get a tuple of all invariant checks