Compare commits


26 Commits

Author SHA1 Message Date
Bart  6780d42a21  Use std::bit_cast  2026-02-12 11:17:18 -05:00
Bart  360843f447  Merge branch 'develop' into bthomee/key  2026-02-12 09:48:45 -05:00
Bart  578a9853f2  Apply suggestion from @Copilot (Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>)  2026-02-11 10:13:22 -05:00
Bart  a3f5a05f95  Remove unnecessary newline  2026-02-11 09:16:42 -05:00
Bart  02db2943f3  Merge branch 'develop' into bthomee/key  2026-02-11 09:13:48 -05:00
Bart  803cd100f3  refactor: Use uint256 directly as key instead of void pointer  2026-02-02 11:39:53 -05:00
Bart  9639b79155  Copilot feedback  2026-02-02 10:59:45 -05:00
Bart  eabd485927  Merge branch 'develop' into bthomee/disable-cache  2026-01-29 09:28:48 +00:00
Bart  f3ea3e9646  Merge branch 'develop' into bthomee/disable-cache  2025-11-22 11:06:58 -05:00
Bart Thomee  4306d9ccc3  Restore freshening caches of tree node cache  2025-09-17 12:17:35 -04:00
Bart Thomee  1a4d9732ca  Merge branch 'develop' into bthomee/disable-cache  2025-09-17 11:43:06 -04:00
Bart  aad6edb6b1  Merge branch 'develop' into bthomee/disable-cache  2025-08-13 08:03:40 -04:00
Bart  a4a1c4eecf  Merge branch 'develop' into bthomee/disable-cache  2025-07-03 15:43:50 -04:00
Bart  fca6a8768f  Merge branch 'develop' into bthomee/disable-cache  2025-06-02 12:02:43 -04:00
Bart  d96c4164b9  Merge branch 'develop' into bthomee/disable-cache  2025-05-22 09:18:07 -04:00
Bart Thomee  965fc75e8a  Reserve vector size  2025-05-20 10:07:12 -04:00
Bart Thomee  2fa1c711d3  Removed unused config values  2025-05-20 09:50:13 -04:00
Bart Thomee  4650e7d2c6  Removed unused caches from SHAMapStoreImp  2025-05-20 09:49:55 -04:00
Bart Thomee  a213127852  Remove cache from SHAMapStoreImp  2025-05-19 16:59:43 -04:00
Bart Thomee  6e7537dada  Remove cache from DatabaseNodeImp  2025-05-19 16:51:32 -04:00
Bart Thomee  0777f7c64b  Merge branch 'develop' into bthomee/disable-cache  2025-05-19 16:37:11 -04:00
Bart Thomee  39bfcaf95c  Merge branch 'develop' into bthomee/disable-cache  2025-05-17 18:26:07 -04:00
Bart Thomee  61c9a19868  Merge branch 'develop' into bthomee/disable-cache  2025-05-07 11:02:43 -04:00
Bart Thomee  d01851bc5a  Only disable the database cache  2025-04-01 13:24:18 -04:00
Bart Thomee  d1703842e7  Fully disable cache  2025-04-01 11:41:20 -04:00
Bart Thomee  8d31b1739d  TEST: Disable tagged cache to measure performance  2025-03-28 13:21:19 -04:00
9 changed files with 32 additions and 42 deletions

View File

@@ -76,16 +76,16 @@ public:
If the object is not found or an error is encountered, the
result will indicate the condition.
@note This will be called concurrently.
- @param key A pointer to the key data.
+ @param hash The hash of the object.
@param pObject [out] The created object if successful.
@return The result of the operation.
*/
virtual Status
- fetch(void const* key, std::shared_ptr<NodeObject>* pObject) = 0;
+ fetch(uint256 const& hash, std::shared_ptr<NodeObject>* pObject) = 0;
/** Fetch a batch synchronously. */
virtual std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
- fetchBatch(std::vector<uint256 const*> const& hashes) = 0;
+ fetchBatch(std::vector<uint256> const& hashes) = 0;
/** Store a single object.
Depending on the implementation this may happen immediately
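
This interface change is the core of the diff: Backend::fetch now receives the key as a strongly typed uint256 hash instead of a void pointer to raw key bytes, and fetchBatch takes the hashes by value type. A minimal sketch of a lookup written against the new contract, assuming the repo's uint256, NodeObject, and Status types (fetchSketch and its std::map store are illustrative only, not part of this change):

#include <map>
#include <memory>

// Hypothetical in-memory lookup showing the new fetch() contract: the key is a
// typed uint256 hash, not a void pointer into raw bytes.
Status
fetchSketch(
    std::map<uint256, std::shared_ptr<NodeObject>> const& store,  // illustrative store
    uint256 const& hash,
    std::shared_ptr<NodeObject>* pObject)
{
    pObject->reset();
    auto const it = store.find(hash);
    if (it == store.end())
        return notFound;  // the result indicates the condition, per the doc comment
    *pObject = it->second;
    return ok;
}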

View File

@@ -29,7 +29,7 @@ DatabaseNodeImp::fetchNodeObject(uint256 const& hash, std::uint32_t, FetchReport
try
{
- status = backend_->fetch(hash.data(), &nodeObject);
+ status = backend_->fetch(hash, &nodeObject);
}
catch (std::exception const& e)
{
@@ -62,18 +62,10 @@ DatabaseNodeImp::fetchBatch(std::vector<uint256> const& hashes)
using namespace std::chrono;
auto const before = steady_clock::now();
- std::vector<uint256 const*> batch{};
- batch.reserve(hashes.size());
- for (size_t i = 0; i < hashes.size(); ++i)
- {
- auto const& hash = hashes[i];
- batch.push_back(&hash);
- }
// Get the node objects that match the hashes from the backend. To protect
// against the backends returning fewer or more results than expected, the
// container is resized to the number of hashes.
- auto results = backend_->fetchBatch(batch).first;
+ auto results = backend_->fetchBatch(hashes).first;
XRPL_ASSERT(
results.size() == hashes.size() || results.empty(),
"number of output objects either matches number of input hashes or is empty");

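With the pointer indirection gone, callers hand the hashes vector to the backend unchanged. A minimal sketch of the resulting call pattern, under the same assumptions as above (fetchAll is a hypothetical helper; the resize guard paraphrases the comment in the hunk):

// Hypothetical helper: fetch a batch of node objects keyed by uint256 hashes.
std::vector<std::shared_ptr<NodeObject>>
fetchAll(Backend& backend, std::vector<uint256> const& hashes)
{
    // No pointer batch anymore; the hashes are forwarded directly.
    auto results = backend.fetchBatch(hashes).first;

    // Backends may return fewer or more entries than requested, so normalize
    // the container to one slot per requested hash.
    results.resize(hashes.size());
    return results;
}
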
View File

@@ -101,7 +101,7 @@ DatabaseRotatingImp::fetchNodeObject(uint256 const& hash, std::uint32_t, FetchRe
std::shared_ptr<NodeObject> nodeObject;
try
{
- status = backend->fetch(hash.data(), &nodeObject);
+ status = backend->fetch(hash, &nodeObject);
}
catch (std::exception const& e)
{

View File

@@ -115,10 +115,9 @@ public:
//--------------------------------------------------------------------------
Status
- fetch(void const* key, std::shared_ptr<NodeObject>* pObject) override
+ fetch(uint256 const& hash, std::shared_ptr<NodeObject>* pObject) override
{
XRPL_ASSERT(db_, "xrpl::NodeStore::MemoryBackend::fetch : non-null database");
- uint256 const hash(uint256::fromVoid(key));
std::lock_guard _(db_->mutex);
@@ -133,14 +132,14 @@ public:
}
std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
- fetchBatch(std::vector<uint256 const*> const& hashes) override
+ fetchBatch(std::vector<uint256> const& hashes) override
{
std::vector<std::shared_ptr<NodeObject>> results;
results.reserve(hashes.size());
for (auto const& h : hashes)
{
std::shared_ptr<NodeObject> nObj;
- Status status = fetch(h->begin(), &nObj);
+ Status status = fetch(h, &nObj);
if (status != ok)
results.push_back({});
else
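
One detail specific to this backend: the old signature forced the implementation to rebuild the typed key from raw bytes via uint256::fromVoid, a step the new signature eliminates. A schematic before/after, assuming only the types visible in the diff (fetchOld, fetchNew, and lookup are hypothetical):

// Old contract: the caller erased the type, so the backend reconstructed it.
Status
fetchOld(void const* key, std::shared_ptr<NodeObject>* pObject)
{
    uint256 const hash(uint256::fromVoid(key));  // rebuild the key from raw bytes
    return lookup(hash, pObject);                // hypothetical lookup helper
}

// New contract: the typed key flows through unchanged.
Status
fetchNew(uint256 const& hash, std::shared_ptr<NodeObject>* pObject)
{
    return lookup(hash, pObject);  // hypothetical lookup helper
}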

View File

@@ -177,17 +177,17 @@ public:
}
Status
- fetch(void const* key, std::shared_ptr<NodeObject>* pno) override
+ fetch(uint256 const& hash, std::shared_ptr<NodeObject>* pno) override
{
Status status;
pno->reset();
nudb::error_code ec;
db_.fetch(
- key,
- [key, pno, &status](void const* data, std::size_t size) {
+ hash.data(),
+ [hash, pno, &status](void const* data, std::size_t size) {
nudb::detail::buffer bf;
auto const result = nodeobject_decompress(data, size, bf);
- DecodedBlob decoded(key, result.first, result.second);
+ DecodedBlob decoded(hash.data(), result.first, result.second);
if (!decoded.wasOk())
{
status = dataCorrupt;
@@ -205,14 +205,14 @@ public:
}
std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
- fetchBatch(std::vector<uint256 const*> const& hashes) override
+ fetchBatch(std::vector<uint256> const& hashes) override
{
std::vector<std::shared_ptr<NodeObject>> results;
results.reserve(hashes.size());
for (auto const& h : hashes)
{
std::shared_ptr<NodeObject> nObj;
- Status status = fetch(h->begin(), &nObj);
+ Status status = fetch(h, &nObj);
if (status != ok)
results.push_back({});
else

View File

@@ -36,13 +36,13 @@ public:
}
Status
- fetch(void const*, std::shared_ptr<NodeObject>*) override
+ fetch(uint256 const&, std::shared_ptr<NodeObject>*) override
{
return notFound;
}
std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
- fetchBatch(std::vector<uint256 const*> const& hashes) override
+ fetchBatch(std::vector<uint256> const& hashes) override
{
return {};
}

View File

@@ -237,7 +237,7 @@ public:
//--------------------------------------------------------------------------
Status
- fetch(void const* key, std::shared_ptr<NodeObject>* pObject) override
+ fetch(uint256 const& hash, std::shared_ptr<NodeObject>* pObject) override
{
XRPL_ASSERT(m_db, "xrpl::NodeStore::RocksDBBackend::fetch : non-null database");
pObject->reset();
@@ -245,7 +245,7 @@ public:
Status status(ok);
rocksdb::ReadOptions const options;
- rocksdb::Slice const slice(static_cast<char const*>(key), m_keyBytes);
+ rocksdb::Slice const slice(std::bit_cast<char const*>(hash.data()), m_keyBytes);
std::string string;
@@ -253,7 +253,7 @@ public:
if (getStatus.ok())
{
- DecodedBlob decoded(key, string.data(), string.size());
+ DecodedBlob decoded(hash.data(), string.data(), string.size());
if (decoded.wasOk())
{
@@ -288,14 +288,14 @@ public:
}
std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
- fetchBatch(std::vector<uint256 const*> const& hashes) override
+ fetchBatch(std::vector<uint256> const& hashes) override
{
std::vector<std::shared_ptr<NodeObject>> results;
results.reserve(hashes.size());
for (auto const& h : hashes)
{
std::shared_ptr<NodeObject> nObj;
- Status status = fetch(h->begin(), &nObj);
+ Status status = fetch(h, &nObj);
if (status != ok)
results.push_back({});
else
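
The std::bit_cast here replaces the earlier static_cast because the argument is no longer a void const*: a typed byte pointer like the one returned by hash.data() cannot be static_cast directly to char const*, but it can be bit_cast, since both pointer types are trivially copyable and the same size. A self-contained illustration of the idiom, with a hypothetical key_bytes buffer standing in for the hash data:

#include <bit>      // std::bit_cast (C++20)
#include <cstdint>
#include <string>

int main()
{
    // Hypothetical 32-byte key buffer standing in for uint256::data().
    std::uint8_t key_bytes[32] = {};

    // Reinterpret the pointer value without reinterpret_cast: both pointer
    // types are trivially copyable and have the same size.
    auto const* p = std::bit_cast<char const*>(&key_bytes[0]);

    // Build a byte view over the key, as the rocksdb::Slice in the diff does.
    std::string const slice(p, sizeof(key_bytes));
    return slice.size() == sizeof(key_bytes) ? 0 : 1;
}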

View File

@@ -137,7 +137,7 @@ public:
{
std::shared_ptr<NodeObject> object;
- Status const status = backend.fetch(batch[i]->getHash().cbegin(), &object);
+ Status const status = backend.fetch(batch[i]->getHash(), &object);
BEAST_EXPECT(status == ok);
@@ -157,7 +157,7 @@ public:
{
std::shared_ptr<NodeObject> object;
- Status const status = backend.fetch(batch[i]->getHash().cbegin(), &object);
+ Status const status = backend.fetch(batch[i]->getHash(), &object);
BEAST_EXPECT(status == notFound);
}

View File

@@ -313,7 +313,7 @@ public:
std::shared_ptr<NodeObject> obj;
std::shared_ptr<NodeObject> result;
obj = seq1_.obj(dist_(gen_));
- backend_.fetch(obj->getHash().data(), &result);
+ backend_.fetch(obj->getHash(), &result);
suite_.expect(result && isSame(result, obj));
}
catch (std::exception const& e)
@@ -371,9 +371,9 @@ public:
{
try
{
- auto const key = seq2_.key(i);
+ auto const hash = seq2_.key(i);
std::shared_ptr<NodeObject> result;
- backend_.fetch(key.data(), &result);
+ backend_.fetch(hash, &result);
suite_.expect(!result);
}
catch (std::exception const& e)
@@ -438,9 +438,9 @@ public:
{
if (rand_(gen_) < missingNodePercent)
{
- auto const key = seq2_.key(dist_(gen_));
+ auto const hash = seq2_.key(dist_(gen_));
std::shared_ptr<NodeObject> result;
- backend_.fetch(key.data(), &result);
+ backend_.fetch(hash, &result);
suite_.expect(!result);
}
else
@@ -448,7 +448,7 @@ public:
std::shared_ptr<NodeObject> obj;
std::shared_ptr<NodeObject> result;
obj = seq1_.obj(dist_(gen_));
- backend_.fetch(obj->getHash().data(), &result);
+ backend_.fetch(obj->getHash(), &result);
suite_.expect(result && isSame(result, obj));
}
}
@@ -524,8 +524,7 @@ public:
std::shared_ptr<NodeObject> result;
auto const j = older_(gen_);
obj = seq1_.obj(j);
- std::shared_ptr<NodeObject> result1;
- backend_.fetch(obj->getHash().data(), &result);
+ backend_.fetch(obj->getHash(), &result);
suite_.expect(result != nullptr);
suite_.expect(isSame(result, obj));
}
@@ -543,7 +542,7 @@ public:
std::shared_ptr<NodeObject> result;
auto const j = recent_(gen_);
obj = seq1_.obj(j);
- backend_.fetch(obj->getHash().data(), &result);
+ backend_.fetch(obj->getHash(), &result);
suite_.expect(!result || isSame(result, obj));
break;
}