Mirror of https://github.com/XRPLF/rippled.git (synced 2026-02-06 23:15:32 +00:00)

Compare commits: develop ... bthomee/ke (21 commits)
| SHA1 |
|---|
| 803cd100f3 |
| 9639b79155 |
| eabd485927 |
| f3ea3e9646 |
| 4306d9ccc3 |
| 1a4d9732ca |
| aad6edb6b1 |
| a4a1c4eecf |
| fca6a8768f |
| d96c4164b9 |
| 965fc75e8a |
| 2fa1c711d3 |
| 4650e7d2c6 |
| a213127852 |
| 6e7537dada |
| 0777f7c64b |
| 39bfcaf95c |
| 61c9a19868 |
| d01851bc5a |
| d1703842e7 |
| 8d31b1739d |
@@ -940,23 +940,7 @@
 #
 # path Location to store the database
 #
-# Optional keys
-#
-# cache_size Size of cache for database records. Default is 16384.
-# Setting this value to 0 will use the default value.
-#
-# cache_age Length of time in minutes to keep database records
-# cached. Default is 5 minutes. Setting this value to
-# 0 will use the default value.
-#
-# Note: if neither cache_size nor cache_age is
-# specified, the cache for database records will not
-# be created. If only one of cache_size or cache_age
-# is specified, the cache will be created using the
-# default value for the unspecified parameter.
-#
-# Note: the cache will not be created if online_delete
-# is specified.
+# Optional keys for NuDB and RocksDB:
 #
 # fast_load Boolean. If set, load the last persisted ledger
 # from disk upon process start before syncing to
@@ -964,8 +948,6 @@
 # if sufficient IOPS capacity is available.
 # Default 0.
 #
-# Optional keys for NuDB or RocksDB:
-#
 # earliest_seq The default is 32570 to match the XRP ledger
 # network's earliest allowed sequence. Alternate
 # networks may set this value. Minimum value of 1.
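The deleted cache_size and cache_age documentation tracks the removal of the node-object cache from DatabaseNodeImp later in this compare; the remaining optional [node_db] keys, such as earliest_seq, are still read from the config section. Below is a minimal illustrative sketch, not part of the diff, of how such an optional key can be read with the same Section helpers (exists, get<int>, Throw) used by the removed cache_size/cache_age parsing further down; the helper name readEarliestSeq is hypothetical, the default value comes from the comment above, and the surrounding rippled headers and types are assumed.

// Hypothetical helper, for illustration only: reads an optional [node_db]
// key the same way the removed cache_size/cache_age parsing below did.
int
readEarliestSeq(Section const& config)
{
    int seq = 32570;  // default named in the comment above
    if (config.exists("earliest_seq"))
    {
        seq = get<int>(config, "earliest_seq");
        if (seq < 1)
            Throw<std::runtime_error>("earliest_seq must be at least 1");
    }
    return seq;
}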
@@ -77,16 +77,16 @@ public:
         If the object is not found or an error is encountered, the
         result will indicate the condition.
         @note This will be called concurrently.
-        @param key A pointer to the key data.
+        @param hash The hash of the object.
         @param pObject [out] The created object if successful.
         @return The result of the operation.
     */
     virtual Status
-    fetch(void const* key, std::shared_ptr<NodeObject>* pObject) = 0;
+    fetch(uint256 const& hash, std::shared_ptr<NodeObject>* pObject) = 0;

     /** Fetch a batch synchronously. */
     virtual std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
-    fetchBatch(std::vector<uint256 const*> const& hashes) = 0;
+    fetchBatch(std::vector<uint256> const& hashes) = 0;

     /** Store a single object.
         Depending on the implementation this may happen immediately
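With the interface change above, backends receive the key as a uint256 reference instead of a raw pointer, and batch fetches take the hashes by value. The following is an illustrative sketch, not part of the diff, of what conforming overrides look like under those assumptions: the std::map-backed store and the mapFetch/mapFetchBatch names are hypothetical, only the two signatures mirror the declarations above, and the NodeStore types (uint256, NodeObject, Status, ok, notFound) are assumed to come from the surrounding headers.

#include <map>
#include <memory>
#include <utility>
#include <vector>

// Hypothetical in-memory store, for illustration only.
using NodeMap = std::map<uint256, std::shared_ptr<NodeObject>>;

// Mirrors Backend::fetch(uint256 const&, std::shared_ptr<NodeObject>*).
Status
mapFetch(NodeMap const& map, uint256 const& hash, std::shared_ptr<NodeObject>* pObject)
{
    pObject->reset();
    auto const it = map.find(hash);
    if (it == map.end())
        return notFound;
    *pObject = it->second;
    return ok;
}

// Mirrors Backend::fetchBatch(std::vector<uint256> const&): missing entries
// come back as null pointers, as in the backends changed in this compare.
std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
mapFetchBatch(NodeMap const& map, std::vector<uint256> const& hashes)
{
    std::vector<std::shared_ptr<NodeObject>> results;
    results.reserve(hashes.size());
    for (auto const& h : hashes)
    {
        std::shared_ptr<NodeObject> nObj;
        if (mapFetch(map, h, &nObj) != ok)
            results.push_back({});
        else
            results.push_back(nObj);
    }
    return {results, ok};
}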
@@ -24,32 +24,6 @@ public:
         beast::Journal j)
         : Database(scheduler, readThreads, config, j), backend_(std::move(backend))
     {
-        std::optional<int> cacheSize, cacheAge;
-
-        if (config.exists("cache_size"))
-        {
-            cacheSize = get<int>(config, "cache_size");
-            if (cacheSize.value() < 0)
-            {
-                Throw<std::runtime_error>("Specified negative value for cache_size");
-            }
-        }
-
-        if (config.exists("cache_age"))
-        {
-            cacheAge = get<int>(config, "cache_age");
-            if (cacheAge.value() < 0)
-            {
-                Throw<std::runtime_error>("Specified negative value for cache_age");
-            }
-        }
-
-        if (cacheSize != 0 || cacheAge != 0)
-        {
-            cache_ = std::make_shared<TaggedCache<uint256, NodeObject>>(
-                "DatabaseNodeImp", cacheSize.value_or(0), std::chrono::minutes(cacheAge.value_or(0)), stopwatch(), j);
-        }
-
         XRPL_ASSERT(
             backend_,
             "xrpl::NodeStore::DatabaseNodeImp::DatabaseNodeImp : non-null "
@@ -108,9 +82,6 @@ public:
     sweep() override;

 private:
-    // Cache for database objects. This cache is not always initialized. Check
-    // for null before using.
-    std::shared_ptr<TaggedCache<uint256, NodeObject>> cache_;
     // Persistent key/value storage
     std::shared_ptr<Backend> backend_;

@@ -10,11 +10,6 @@ DatabaseNodeImp::store(NodeObjectType type, Blob&& data, uint256 const& hash, st

     auto obj = NodeObject::createObject(type, std::move(data), hash);
     backend_->store(obj);
-    if (cache_)
-    {
-        // After the store, replace a negative cache entry if there is one
-        cache_->canonicalize(hash, obj, [](std::shared_ptr<NodeObject> const& n) { return n->getType() == hotDUMMY; });
-    }
 }

 void
@@ -23,77 +18,41 @@ DatabaseNodeImp::asyncFetch(
     std::uint32_t ledgerSeq,
     std::function<void(std::shared_ptr<NodeObject> const&)>&& callback)
 {
-    if (cache_)
-    {
-        std::shared_ptr<NodeObject> obj = cache_->fetch(hash);
-        if (obj)
-        {
-            callback(obj->getType() == hotDUMMY ? nullptr : obj);
-            return;
-        }
-    }
     Database::asyncFetch(hash, ledgerSeq, std::move(callback));
 }

 void
 DatabaseNodeImp::sweep()
 {
-    if (cache_)
-        cache_->sweep();
 }

 std::shared_ptr<NodeObject>
 DatabaseNodeImp::fetchNodeObject(uint256 const& hash, std::uint32_t, FetchReport& fetchReport, bool duplicate)
 {
-    std::shared_ptr<NodeObject> nodeObject = cache_ ? cache_->fetch(hash) : nullptr;
+    std::shared_ptr<NodeObject> nodeObject = nullptr;
+    Status status;

-    if (!nodeObject)
+    try
     {
-        JLOG(j_.trace()) << "fetchNodeObject " << hash << ": record not " << (cache_ ? "cached" : "found");
-
-        Status status;
-
-        try
-        {
-            status = backend_->fetch(hash.data(), &nodeObject);
-        }
-        catch (std::exception const& e)
-        {
-            JLOG(j_.fatal()) << "fetchNodeObject " << hash << ": Exception fetching from backend: " << e.what();
-            Rethrow();
-        }
-
-        switch (status)
-        {
-            case ok:
-                if (cache_)
-                {
-                    if (nodeObject)
-                        cache_->canonicalize_replace_client(hash, nodeObject);
-                    else
-                    {
-                        auto notFound = NodeObject::createObject(hotDUMMY, {}, hash);
-                        cache_->canonicalize_replace_client(hash, notFound);
-                        if (notFound->getType() != hotDUMMY)
-                            nodeObject = notFound;
-                    }
-                }
-                break;
-            case notFound:
-                break;
-            case dataCorrupt:
-                JLOG(j_.fatal()) << "fetchNodeObject " << hash << ": nodestore data is corrupted";
-                break;
-            default:
-                JLOG(j_.warn()) << "fetchNodeObject " << hash << ": backend returns unknown result " << status;
-                break;
-        }
+        status = backend_->fetch(hash, &nodeObject);
     }
-    else
+    catch (std::exception const& e)
     {
-        JLOG(j_.trace()) << "fetchNodeObject " << hash << ": record found in cache";
-        if (nodeObject->getType() == hotDUMMY)
-            nodeObject.reset();
+        JLOG(j_.fatal()) << "fetchNodeObject " << hash << ": Exception fetching from backend: " << e.what();
+        Rethrow();
+    }
+
+    switch (status)
+    {
+        case ok:
+        case notFound:
+            break;
+        case dataCorrupt:
+            JLOG(j_.fatal()) << "fetchNodeObject " << hash << ": nodestore data is corrupted";
+            break;
+        default:
+            JLOG(j_.warn()) << "fetchNodeObject " << hash << ": backend returns unknown result " << status;
+            break;
     }

     if (nodeObject)
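With the node-object cache gone, asyncFetch above forwards directly to Database::asyncFetch and fetchNodeObject queries the backend itself. A brief illustrative usage sketch of the callback-based asyncFetch shown in this hunk follows; it is not part of the diff, and db, hash and ledgerSeq are assumed to already exist.

// Illustrative only: requesting a node object through the asynchronous API
// shown above. A null argument to the callback means the record was not found.
db.asyncFetch(
    hash,
    ledgerSeq,
    [](std::shared_ptr<NodeObject> const& obj) {
        if (!obj)
            return;  // not present in the backend
        // use obj here, e.g. obj->getType()
    });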
@@ -105,66 +64,22 @@ DatabaseNodeImp::fetchNodeObject(uint256 const& hash, std::uint32_t, FetchReport
 std::vector<std::shared_ptr<NodeObject>>
 DatabaseNodeImp::fetchBatch(std::vector<uint256> const& hashes)
 {
-    std::vector<std::shared_ptr<NodeObject>> results{hashes.size()};
     using namespace std::chrono;
     auto const before = steady_clock::now();
-    std::unordered_map<uint256 const*, size_t> indexMap;
-    std::vector<uint256 const*> cacheMisses;
-    uint64_t hits = 0;
-    uint64_t fetches = 0;
-    for (size_t i = 0; i < hashes.size(); ++i)
+    std::vector<std::shared_ptr<NodeObject>> results{hashes.size()};
+    results = backend_->fetchBatch(hashes).first;
+    for (size_t i = 0; i < results.size(); ++i)
     {
-        auto const& hash = hashes[i];
-        // See if the object already exists in the cache
-        auto nObj = cache_ ? cache_->fetch(hash) : nullptr;
-        ++fetches;
-        if (!nObj)
-        {
-            // Try the database
-            indexMap[&hash] = i;
-            cacheMisses.push_back(&hash);
-        }
-        else
-        {
-            results[i] = nObj->getType() == hotDUMMY ? nullptr : nObj;
-            // It was in the cache.
-            ++hits;
-        }
-    }
-
-    JLOG(j_.debug()) << "fetchBatch - cache hits = " << (hashes.size() - cacheMisses.size())
-                     << " - cache misses = " << cacheMisses.size();
-    auto dbResults = backend_->fetchBatch(cacheMisses).first;
-
-    for (size_t i = 0; i < dbResults.size(); ++i)
-    {
-        auto nObj = std::move(dbResults[i]);
-        size_t index = indexMap[cacheMisses[i]];
-        auto const& hash = hashes[index];
-
-        if (nObj)
-        {
-            // Ensure all threads get the same object
-            if (cache_)
-                cache_->canonicalize_replace_client(hash, nObj);
-        }
-        else
+        if (!results[i])
         {
             JLOG(j_.error()) << "fetchBatch - "
-                << "record not found in db or cache. hash = " << strHex(hash);
-            if (cache_)
-            {
-                auto notFound = NodeObject::createObject(hotDUMMY, {}, hash);
-                cache_->canonicalize_replace_client(hash, notFound);
-                if (notFound->getType() != hotDUMMY)
-                    nObj = std::move(notFound);
-            }
+                << "record not found in db. hash = " << strHex(hashes[i]);
         }
-        results[index] = std::move(nObj);
     }

     auto fetchDurationUs = std::chrono::duration_cast<std::chrono::microseconds>(steady_clock::now() - before).count();
-    updateFetchMetrics(fetches, hits, fetchDurationUs);
+    updateFetchMetrics(hashes.size(), 0, fetchDurationUs);
     return results;
 }

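The rewritten fetchBatch above forwards the whole hash vector to the backend in a single call and logs any records that come back missing. A short illustrative usage sketch follows; it is not part of the diff, db and the hash values are assumed to exist, and results line up index-for-index with the input hashes as in the loop above.

// Illustrative only: batch lookup with the new std::vector<uint256> interface.
std::vector<uint256> hashes{hashA, hashB, hashC};  // hypothetical hashes
auto results = db.fetchBatch(hashes);
for (size_t i = 0; i < results.size(); ++i)
{
    if (!results[i])
        continue;  // missing in the backend; fetchBatch already logged it
    // results[i] is the object whose hash is hashes[i]
}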
@@ -107,7 +107,7 @@ DatabaseRotatingImp::fetchNodeObject(uint256 const& hash, std::uint32_t, FetchRe
     std::shared_ptr<NodeObject> nodeObject;
     try
     {
-        status = backend->fetch(hash.data(), &nodeObject);
+        status = backend->fetch(hash, &nodeObject);
     }
     catch (std::exception const& e)
     {
@@ -115,10 +115,9 @@ public:
     //--------------------------------------------------------------------------

     Status
-    fetch(void const* key, std::shared_ptr<NodeObject>* pObject) override
+    fetch(uint256 const& hash, std::shared_ptr<NodeObject>* pObject) override
     {
         XRPL_ASSERT(db_, "xrpl::NodeStore::MemoryBackend::fetch : non-null database");
-        uint256 const hash(uint256::fromVoid(key));

         std::lock_guard _(db_->mutex);

@@ -133,14 +132,14 @@ public:
     }

     std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
-    fetchBatch(std::vector<uint256 const*> const& hashes) override
+    fetchBatch(std::vector<uint256> const& hashes) override
     {
         std::vector<std::shared_ptr<NodeObject>> results;
         results.reserve(hashes.size());
         for (auto const& h : hashes)
         {
             std::shared_ptr<NodeObject> nObj;
-            Status status = fetch(h->begin(), &nObj);
+            Status status = fetch(h, &nObj);
             if (status != ok)
                 results.push_back({});
             else
@@ -177,17 +177,17 @@ public:
     }

     Status
-    fetch(void const* key, std::shared_ptr<NodeObject>* pno) override
+    fetch(uint256 const& hash, std::shared_ptr<NodeObject>* pno) override
     {
         Status status;
         pno->reset();
         nudb::error_code ec;
         db_.fetch(
-            key,
-            [key, pno, &status](void const* data, std::size_t size) {
+            hash.data(),
+            [hash, pno, &status](void const* data, std::size_t size) {
                 nudb::detail::buffer bf;
                 auto const result = nodeobject_decompress(data, size, bf);
-                DecodedBlob decoded(key, result.first, result.second);
+                DecodedBlob decoded(hash.data(), result.first, result.second);
                 if (!decoded.wasOk())
                 {
                     status = dataCorrupt;
@@ -205,14 +205,14 @@ public:
     }

     std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
-    fetchBatch(std::vector<uint256 const*> const& hashes) override
+    fetchBatch(std::vector<uint256> const& hashes) override
     {
         std::vector<std::shared_ptr<NodeObject>> results;
         results.reserve(hashes.size());
         for (auto const& h : hashes)
         {
             std::shared_ptr<NodeObject> nObj;
-            Status status = fetch(h->begin(), &nObj);
+            Status status = fetch(h, &nObj);
             if (status != ok)
                 results.push_back({});
             else
@@ -36,13 +36,13 @@ public:
     }

     Status
-    fetch(void const*, std::shared_ptr<NodeObject>*) override
+    fetch(uint256 const&, std::shared_ptr<NodeObject>*) override
     {
         return notFound;
     }

     std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
-    fetchBatch(std::vector<uint256 const*> const& hashes) override
+    fetchBatch(std::vector<uint256> const& hashes) override
     {
         return {};
     }
@@ -237,7 +237,7 @@ public:
     //--------------------------------------------------------------------------

     Status
-    fetch(void const* key, std::shared_ptr<NodeObject>* pObject) override
+    fetch(uint256 const& hash, std::shared_ptr<NodeObject>* pObject) override
     {
         XRPL_ASSERT(m_db, "xrpl::NodeStore::RocksDBBackend::fetch : non-null database");
         pObject->reset();
@@ -245,7 +245,7 @@ public:
         Status status(ok);

         rocksdb::ReadOptions const options;
-        rocksdb::Slice const slice(static_cast<char const*>(key), m_keyBytes);
+        rocksdb::Slice const slice(reinterpret_cast<char const*>(hash.data()), m_keyBytes);

         std::string string;

@@ -253,7 +253,7 @@ public:

         if (getStatus.ok())
         {
-            DecodedBlob decoded(key, string.data(), string.size());
+            DecodedBlob decoded(hash.data(), string.data(), string.size());

             if (decoded.wasOk())
             {
@@ -288,14 +288,14 @@ public:
     }

     std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
-    fetchBatch(std::vector<uint256 const*> const& hashes) override
+    fetchBatch(std::vector<uint256> const& hashes) override
     {
         std::vector<std::shared_ptr<NodeObject>> results;
         results.reserve(hashes.size());
         for (auto const& h : hashes)
         {
             std::shared_ptr<NodeObject> nObj;
-            Status status = fetch(h->begin(), &nObj);
+            Status status = fetch(h, &nObj);
             if (status != ok)
                 results.push_back({});
             else
@@ -490,19 +490,8 @@ public:
         Env env(*this, envconfig(onlineDelete));

         /////////////////////////////////////////////////////////////
-        // Create the backend. Normally, SHAMapStoreImp handles all these
-        // details
-        auto nscfg = env.app().config().section(ConfigSection::nodeDatabase());
-
-        // Provide default values:
-        if (!nscfg.exists("cache_size"))
-            nscfg.set(
-                "cache_size", std::to_string(env.app().config().getValueFor(SizedItem::treeCacheSize, std::nullopt)));
-
-        if (!nscfg.exists("cache_age"))
-            nscfg.set(
-                "cache_age", std::to_string(env.app().config().getValueFor(SizedItem::treeCacheAge, std::nullopt)));
-
+        // Create NodeStore with two backends to allow online deletion of data.
+        // Normally, SHAMapStoreImp handles all these details.
         NodeStoreScheduler scheduler(env.app().getJobQueue());

         std::string const writableDb = "write";
@@ -510,9 +499,8 @@ public:
         auto writableBackend = makeBackendRotating(env, scheduler, writableDb);
         auto archiveBackend = makeBackendRotating(env, scheduler, archiveDb);

-        // Create NodeStore with two backends to allow online deletion of
-        // data
         constexpr int readThreads = 4;
+        auto nscfg = env.app().config().section(ConfigSection::nodeDatabase());
         auto dbr = std::make_unique<NodeStore::DatabaseRotatingImp>(
             scheduler,
             readThreads,
@@ -138,7 +138,7 @@ public:
         {
             std::shared_ptr<NodeObject> object;

-            Status const status = backend.fetch(batch[i]->getHash().cbegin(), &object);
+            Status const status = backend.fetch(batch[i]->getHash(), &object);

             BEAST_EXPECT(status == ok);

@@ -158,7 +158,7 @@ public:
         {
             std::shared_ptr<NodeObject> object;

-            Status const status = backend.fetch(batch[i]->getHash().cbegin(), &object);
+            Status const status = backend.fetch(batch[i]->getHash(), &object);

             BEAST_EXPECT(status == notFound);
         }
@@ -313,7 +313,7 @@ public:
             std::shared_ptr<NodeObject> obj;
             std::shared_ptr<NodeObject> result;
             obj = seq1_.obj(dist_(gen_));
-            backend_.fetch(obj->getHash().data(), &result);
+            backend_.fetch(obj->getHash(), &result);
             suite_.expect(result && isSame(result, obj));
         }
         catch (std::exception const& e)
@@ -371,9 +371,9 @@ public:
         {
             try
             {
-                auto const key = seq2_.key(i);
+                auto const hash = seq2_.key(i);
                 std::shared_ptr<NodeObject> result;
-                backend_.fetch(key.data(), &result);
+                backend_.fetch(hash, &result);
                 suite_.expect(!result);
             }
             catch (std::exception const& e)
@@ -438,9 +438,9 @@ public:
         {
             if (rand_(gen_) < missingNodePercent)
             {
-                auto const key = seq2_.key(dist_(gen_));
+                auto const hash = seq2_.key(dist_(gen_));
                 std::shared_ptr<NodeObject> result;
-                backend_.fetch(key.data(), &result);
+                backend_.fetch(hash, &result);
                 suite_.expect(!result);
             }
             else
@@ -448,7 +448,7 @@ public:
                 std::shared_ptr<NodeObject> obj;
                 std::shared_ptr<NodeObject> result;
                 obj = seq1_.obj(dist_(gen_));
-                backend_.fetch(obj->getHash().data(), &result);
+                backend_.fetch(obj->getHash(), &result);
                 suite_.expect(result && isSame(result, obj));
             }
         }
@@ -525,7 +525,7 @@ public:
             auto const j = older_(gen_);
             obj = seq1_.obj(j);
             std::shared_ptr<NodeObject> result1;
-            backend_.fetch(obj->getHash().data(), &result);
+            backend_.fetch(obj->getHash(), &result);
             suite_.expect(result != nullptr);
             suite_.expect(isSame(result, obj));
         }
@@ -543,7 +543,7 @@ public:
             std::shared_ptr<NodeObject> result;
             auto const j = recent_(gen_);
             obj = seq1_.obj(j);
-            backend_.fetch(obj->getHash().data(), &result);
+            backend_.fetch(obj->getHash(), &result);
             suite_.expect(!result || isSame(result, obj));
             break;
         }
@@ -130,14 +130,6 @@ std::unique_ptr<NodeStore::Database>
 SHAMapStoreImp::makeNodeStore(int readThreads)
 {
     auto nscfg = app_.config().section(ConfigSection::nodeDatabase());
-
-    // Provide default values:
-    if (!nscfg.exists("cache_size"))
-        nscfg.set("cache_size", std::to_string(app_.config().getValueFor(SizedItem::treeCacheSize, std::nullopt)));
-
-    if (!nscfg.exists("cache_age"))
-        nscfg.set("cache_age", std::to_string(app_.config().getValueFor(SizedItem::treeCacheAge, std::nullopt)));
-
     std::unique_ptr<NodeStore::Database> db;

     if (deleteInterval_)
@@ -226,8 +218,6 @@ SHAMapStoreImp::run()
     LedgerIndex lastRotated = state_db_.getState().lastRotated;
     netOPs_ = &app_.getOPs();
     ledgerMaster_ = &app_.getLedgerMaster();
-    fullBelowCache_ = &(*app_.getNodeFamily().getFullBelowCache());
-    treeNodeCache_ = &(*app_.getNodeFamily().getTreeNodeCache());

     if (advisoryDelete_)
         canDelete_ = state_db_.getCanDelete();
@@ -490,16 +480,12 @@ void
 SHAMapStoreImp::clearCaches(LedgerIndex validatedSeq)
 {
     ledgerMaster_->clearLedgerCachePrior(validatedSeq);
-    fullBelowCache_->clear();
 }

 void
 SHAMapStoreImp::freshenCaches()
 {
-    if (freshenCache(*treeNodeCache_))
-        return;
-    if (freshenCache(app_.getMasterTransaction().getCache()))
-        return;
+    freshenCache(*app_.getNodeFamily().getTreeNodeCache()) && freshenCache(app_.getMasterTransaction().getCache());
 }

 void
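The new one-line freshenCaches() body above relies on && short-circuiting: the second freshenCache call runs only when the first returns true. An equivalent spelled-out form of that new body is sketched below for illustration; it is not part of the diff.

// Illustrative only: an if-statement equivalent of the new one-liner above.
if (freshenCache(*app_.getNodeFamily().getTreeNodeCache()))
    freshenCache(app_.getMasterTransaction().getCache());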
@@ -94,8 +94,6 @@ private:
     // as of run() or before
     NetworkOPs* netOPs_ = nullptr;
     LedgerMaster* ledgerMaster_ = nullptr;
-    FullBelowCache* fullBelowCache_ = nullptr;
-    TreeNodeCache* treeNodeCache_ = nullptr;

     static constexpr auto nodeStoreName_ = "NodeStore";
