Compare commits

...

1 Commit

Author   SHA1         Message                                                          Date
Bart     803cd100f3   refactor: Use uint256 directly as key instead of void pointer   2026-02-02 11:39:53 -05:00
9 changed files with 32 additions and 41 deletions

View File

@@ -77,16 +77,16 @@ public:
If the object is not found or an error is encountered, the
result will indicate the condition.
@note This will be called concurrently.
@param key A pointer to the key data.
@param hash The hash of the object.
@param pObject [out] The created object if successful.
@return The result of the operation.
*/
virtual Status
fetch(void const* key, std::shared_ptr<NodeObject>* pObject) = 0;
fetch(uint256 const& hash, std::shared_ptr<NodeObject>* pObject) = 0;
/** Fetch a batch synchronously. */
virtual std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
fetchBatch(std::vector<uint256 const*> const& hashes) = 0;
fetchBatch(std::vector<uint256> const& hashes) = 0;
/** Store a single object.
Depending on the implementation this may happen immediately
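
The interface above is the core of the change: the key now arrives as a strongly typed uint256 reference instead of an untyped void const*, and the batch variant takes the hashes by value-type vector. As a rough, self-contained illustration of the same shape (Hash256, NodeObject, Status and Backend below are simplified stand-ins for this sketch, not the rippled definitions):

#include <array>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>

// Stand-in types for illustration only; the real project uses uint256,
// NodeObject and Status from the rippled code base.
using Hash256 = std::array<std::uint8_t, 32>;
struct NodeObject { Hash256 hash; std::vector<std::uint8_t> data; };
enum Status { ok, notFound, dataCorrupt };

class Backend
{
public:
    virtual ~Backend() = default;

    // The key is a typed, fixed-size hash; implementations that need raw
    // bytes call hash.data() themselves instead of receiving void const*.
    virtual Status
    fetch(Hash256 const& hash, std::shared_ptr<NodeObject>* pObject) = 0;

    // The batch variant takes the hashes directly, so callers no longer
    // build a parallel vector of pointers.
    virtual std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
    fetchBatch(std::vector<Hash256> const& hashes) = 0;
};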

View File

@@ -34,7 +34,7 @@ DatabaseNodeImp::fetchNodeObject(uint256 const& hash, std::uint32_t, FetchReport
try
{
status = backend_->fetch(hash.data(), &nodeObject);
status = backend_->fetch(hash, &nodeObject);
}
catch (std::exception const& e)
{
@@ -67,16 +67,8 @@ DatabaseNodeImp::fetchBatch(std::vector<uint256> const& hashes)
using namespace std::chrono;
auto const before = steady_clock::now();
std::vector<uint256 const*> batch{};
batch.reserve(hashes.size());
for (size_t i = 0; i < hashes.size(); ++i)
{
auto const& hash = hashes[i];
batch.push_back(&hash);
}
std::vector<std::shared_ptr<NodeObject>> results{hashes.size()};
results = backend_->fetchBatch(batch).first;
results = backend_->fetchBatch(hashes).first;
for (size_t i = 0; i < results.size(); ++i)
{
if (!results[i])
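
With fetchBatch taking the hash vector directly, the call site no longer builds a parallel vector of pointers. A minimal sketch of the simplified shape, again with stand-in types rather than the real rippled classes (fetchAll is a hypothetical helper):

#include <array>
#include <cstdint>
#include <memory>
#include <vector>

using Hash256 = std::array<std::uint8_t, 32>;   // stand-in for uint256
struct NodeObject { Hash256 hash; std::vector<std::uint8_t> data; };

// Works with any backend exposing the new
// fetchBatch(std::vector<Hash256> const&) signature.
template <class Backend>
std::vector<std::shared_ptr<NodeObject>>
fetchAll(Backend& backend, std::vector<Hash256> const& hashes)
{
    // The hashes go to the backend as-is; before this change the caller
    // first had to build a std::vector<uint256 const*> of element addresses.
    return backend.fetchBatch(hashes).first;
}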

View File

@@ -107,7 +107,7 @@ DatabaseRotatingImp::fetchNodeObject(uint256 const& hash, std::uint32_t, FetchRe
std::shared_ptr<NodeObject> nodeObject;
try
{
status = backend->fetch(hash.data(), &nodeObject);
status = backend->fetch(hash, &nodeObject);
}
catch (std::exception const& e)
{

View File

@@ -115,10 +115,9 @@ public:
//--------------------------------------------------------------------------
Status
fetch(void const* key, std::shared_ptr<NodeObject>* pObject) override
fetch(uint256 const& hash, std::shared_ptr<NodeObject>* pObject) override
{
XRPL_ASSERT(db_, "xrpl::NodeStore::MemoryBackend::fetch : non-null database");
uint256 const hash(uint256::fromVoid(key));
std::lock_guard _(db_->mutex);
@@ -133,14 +132,14 @@ public:
}
std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
fetchBatch(std::vector<uint256 const*> const& hashes) override
fetchBatch(std::vector<uint256> const& hashes) override
{
std::vector<std::shared_ptr<NodeObject>> results;
results.reserve(hashes.size());
for (auto const& h : hashes)
{
std::shared_ptr<NodeObject> nObj;
Status status = fetch(h->begin(), &nObj);
Status status = fetch(h, &nObj);
if (status != ok)
results.push_back({});
else
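
Because the key is already a uint256, the in-memory backend can use it as the map key as-is, without the old uint256::fromVoid reconstruction. A small self-contained sketch of that lookup pattern (Hash256, Item and MemoryStore are illustrative stand-ins, not the rippled MemoryDB layout):

#include <array>
#include <cstdint>
#include <map>
#include <memory>
#include <mutex>
#include <vector>

using Hash256 = std::array<std::uint8_t, 32>;   // stand-in for uint256
struct Item { std::vector<std::uint8_t> blob; };

struct MemoryStore
{
    std::mutex mutex;
    std::map<Hash256, std::shared_ptr<Item>> table;   // typed hash as key

    // The typed key participates in the map's ordering directly; nothing
    // like uint256::fromVoid is needed to rebuild it from a raw pointer.
    std::shared_ptr<Item>
    find(Hash256 const& hash)
    {
        std::lock_guard<std::mutex> lock(mutex);
        auto const it = table.find(hash);
        return it == table.end() ? nullptr : it->second;
    }
};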

View File

@@ -177,17 +177,17 @@ public:
}
Status
fetch(void const* key, std::shared_ptr<NodeObject>* pno) override
fetch(uint256 const& hash, std::shared_ptr<NodeObject>* pno) override
{
Status status;
pno->reset();
nudb::error_code ec;
db_.fetch(
key,
[key, pno, &status](void const* data, std::size_t size) {
hash.data(),
[hash, pno, &status](void const* data, std::size_t size) {
nudb::detail::buffer bf;
auto const result = nodeobject_decompress(data, size, bf);
DecodedBlob decoded(key, result.first, result.second);
DecodedBlob decoded(hash.data(), result.first, result.second);
if (!decoded.wasOk())
{
status = dataCorrupt;
@@ -205,14 +205,14 @@ public:
}
std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
fetchBatch(std::vector<uint256 const*> const& hashes) override
fetchBatch(std::vector<uint256> const& hashes) override
{
std::vector<std::shared_ptr<NodeObject>> results;
results.reserve(hashes.size());
for (auto const& h : hashes)
{
std::shared_ptr<NodeObject> nObj;
Status status = fetch(h->begin(), &nObj);
Status status = fetch(h, &nObj);
if (status != ok)
results.push_back({});
else
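
The batch fetch in the Memory, NuDB and RocksDB backends is the same loop of single fetches over the typed hashes, with misses recorded as empty entries so the output stays aligned with the input. A generic sketch of that loop (fetchBatchVia is a hypothetical helper; the types are stand-ins of the same kind as in the earlier sketches):

#include <array>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>

using Hash256 = std::array<std::uint8_t, 32>;   // stand-in for uint256
struct NodeObject { Hash256 hash; std::vector<std::uint8_t> data; };
enum Status { ok, notFound };

template <class FetchOne>
std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
fetchBatchVia(FetchOne&& fetchOne, std::vector<Hash256> const& hashes)
{
    std::vector<std::shared_ptr<NodeObject>> results;
    results.reserve(hashes.size());
    for (auto const& h : hashes)
    {
        // Each hash is passed through unchanged; a failed fetch leaves a
        // null entry in the corresponding slot of the result vector.
        std::shared_ptr<NodeObject> nObj;
        Status const status = fetchOne(h, &nObj);
        results.push_back(status == ok ? nObj : nullptr);
    }
    return {std::move(results), ok};
}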

View File

@@ -36,13 +36,13 @@ public:
}
Status
fetch(void const*, std::shared_ptr<NodeObject>*) override
fetch(uint256 const&, std::shared_ptr<NodeObject>*) override
{
return notFound;
}
std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
fetchBatch(std::vector<uint256 const*> const& hashes) override
fetchBatch(std::vector<uint256> const& hashes) override
{
return {};
}

View File

@@ -237,7 +237,7 @@ public:
//--------------------------------------------------------------------------
Status
fetch(void const* key, std::shared_ptr<NodeObject>* pObject) override
fetch(uint256 const& hash, std::shared_ptr<NodeObject>* pObject) override
{
XRPL_ASSERT(m_db, "xrpl::NodeStore::RocksDBBackend::fetch : non-null database");
pObject->reset();
@@ -245,7 +245,7 @@ public:
Status status(ok);
rocksdb::ReadOptions const options;
rocksdb::Slice const slice(static_cast<char const*>(key), m_keyBytes);
rocksdb::Slice const slice(reinterpret_cast<char const*>(hash.data()), m_keyBytes);
std::string string;
@@ -253,7 +253,7 @@ public:
if (getStatus.ok())
{
DecodedBlob decoded(key, string.data(), string.size());
DecodedBlob decoded(hash.data(), string.data(), string.size());
if (decoded.wasOk())
{
@@ -288,14 +288,14 @@ public:
}
std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
fetchBatch(std::vector<uint256 const*> const& hashes) override
fetchBatch(std::vector<uint256> const& hashes) override
{
std::vector<std::shared_ptr<NodeObject>> results;
results.reserve(hashes.size());
for (auto const& h : hashes)
{
std::shared_ptr<NodeObject> nObj;
Status status = fetch(h->begin(), &nObj);
Status status = fetch(h, &nObj);
if (status != ok)
results.push_back({});
else
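
RocksDB's Slice, like NuDB's void-pointer key, still wants raw bytes, so the typed hash is converted only at that boundary via hash.data(). A tiny sketch of that conversion, using std::string_view as a stand-in for rocksdb::Slice (keyBytes is a hypothetical helper):

#include <array>
#include <cstdint>
#include <string_view>

using Hash256 = std::array<std::uint8_t, 32>;   // stand-in for uint256

// Byte-oriented store APIs want a (char const*, length) pair; keeping the key
// typed until this point confines the reinterpret_cast to one boundary helper.
std::string_view
keyBytes(Hash256 const& hash)
{
    return {reinterpret_cast<char const*>(hash.data()), hash.size()};
}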

View File

@@ -138,7 +138,7 @@ public:
{
std::shared_ptr<NodeObject> object;
Status const status = backend.fetch(batch[i]->getHash().cbegin(), &object);
Status const status = backend.fetch(batch[i]->getHash(), &object);
BEAST_EXPECT(status == ok);
@@ -158,7 +158,7 @@ public:
{
std::shared_ptr<NodeObject> object;
Status const status = backend.fetch(batch[i]->getHash().cbegin(), &object);
Status const status = backend.fetch(batch[i]->getHash(), &object);
BEAST_EXPECT(status == notFound);
}

View File

@@ -313,7 +313,7 @@ public:
std::shared_ptr<NodeObject> obj;
std::shared_ptr<NodeObject> result;
obj = seq1_.obj(dist_(gen_));
backend_.fetch(obj->getHash().data(), &result);
backend_.fetch(obj->getHash(), &result);
suite_.expect(result && isSame(result, obj));
}
catch (std::exception const& e)
@@ -371,9 +371,9 @@ public:
{
try
{
auto const key = seq2_.key(i);
auto const hash = seq2_.key(i);
std::shared_ptr<NodeObject> result;
backend_.fetch(key.data(), &result);
backend_.fetch(hash, &result);
suite_.expect(!result);
}
catch (std::exception const& e)
@@ -438,9 +438,9 @@ public:
{
if (rand_(gen_) < missingNodePercent)
{
auto const key = seq2_.key(dist_(gen_));
auto const hash = seq2_.key(dist_(gen_));
std::shared_ptr<NodeObject> result;
backend_.fetch(key.data(), &result);
backend_.fetch(hash, &result);
suite_.expect(!result);
}
else
@@ -448,7 +448,7 @@ public:
std::shared_ptr<NodeObject> obj;
std::shared_ptr<NodeObject> result;
obj = seq1_.obj(dist_(gen_));
backend_.fetch(obj->getHash().data(), &result);
backend_.fetch(obj->getHash(), &result);
suite_.expect(result && isSame(result, obj));
}
}
@@ -525,7 +525,7 @@ public:
auto const j = older_(gen_);
obj = seq1_.obj(j);
std::shared_ptr<NodeObject> result1;
backend_.fetch(obj->getHash().data(), &result);
backend_.fetch(obj->getHash(), &result);
suite_.expect(result != nullptr);
suite_.expect(isSame(result, obj));
}
@@ -543,7 +543,7 @@ public:
std::shared_ptr<NodeObject> result;
auto const j = recent_(gen_);
obj = seq1_.obj(j);
backend_.fetch(obj->getHash().data(), &result);
backend_.fetch(obj->getHash(), &result);
suite_.expect(!result || isSame(result, obj));
break;
}