Mirror of https://github.com/XRPLF/rippled.git, synced 2025-11-04 19:25:51 +00:00

Compare commits: zhang/grot...bthomee/di

17 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 4306d9ccc3 |  |
|  | 1a4d9732ca |  |
|  | aad6edb6b1 |  |
|  | a4a1c4eecf |  |
|  | fca6a8768f |  |
|  | d96c4164b9 |  |
|  | 965fc75e8a |  |
|  | 2fa1c711d3 |  |
|  | 4650e7d2c6 |  |
|  | a213127852 |  |
|  | 6e7537dada |  |
|  | 0777f7c64b |  |
|  | 39bfcaf95c |  |
|  | 61c9a19868 |  |
|  | d01851bc5a |  |
|  | d1703842e7 |  |
|  | 8d31b1739d |  |
```diff
@@ -940,23 +940,7 @@
 #
 # path              Location to store the database
 #
 # Optional keys
 #
-# cache_size        Size of cache for database records. Default is 16384.
-#                   Setting this value to 0 will use the default value.
-#
-# cache_age         Length of time in minutes to keep database records
-#                   cached. Default is 5 minutes. Setting this value to
-#                   0 will use the default value.
-#
-#                   Note: if neither cache_size nor cache_age is
-#                   specified, the cache for database records will not
-#                   be created. If only one of cache_size or cache_age
-#                   is specified, the cache will be created using the
-#                   default value for the unspecified parameter.
-#
-#                   Note: the cache will not be created if online_delete
-#                   is specified.
 # Optional keys for NuDB and RocksDB:
 #
 # fast_load         Boolean. If set, load the last persisted ledger
 #                   from disk upon process start before syncing to
```
```diff
@@ -964,8 +948,6 @@
 #                   if sufficient IOPS capacity is available.
 #                   Default 0.
 #
-# Optional keys for NuDB or RocksDB:
-#
 # earliest_seq      The default is 32570 to match the XRP ledger
 #                   network's earliest allowed sequence. Alternate
 #                   networks may set this value. Minimum value of 1.
```
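For orientation, the `[node_db]` keys that remain documented above are the ones an operator typically sets. A minimal stanza consistent with that documentation; the values here are illustrative, not recommendations:

```
[node_db]
type=NuDB
path=/var/lib/rippled/db/nudb
online_delete=512
advisory_delete=0
```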
```diff
@@ -558,23 +558,8 @@ public:
         Env env(*this, envconfig(onlineDelete));
 
         /////////////////////////////////////////////////////////////
-        // Create the backend. Normally, SHAMapStoreImp handles all these
-        // details
-        auto nscfg = env.app().config().section(ConfigSection::nodeDatabase());
-
-        // Provide default values:
-        if (!nscfg.exists("cache_size"))
-            nscfg.set(
-                "cache_size",
-                std::to_string(env.app().config().getValueFor(
-                    SizedItem::treeCacheSize, std::nullopt)));
-
-        if (!nscfg.exists("cache_age"))
-            nscfg.set(
-                "cache_age",
-                std::to_string(env.app().config().getValueFor(
-                    SizedItem::treeCacheAge, std::nullopt)));
-
+        // Create NodeStore with two backends to allow online deletion of data.
+        // Normally, SHAMapStoreImp handles all these details.
         NodeStoreScheduler scheduler(env.app().getJobQueue());
 
         std::string const writableDb = "write";
@@ -582,9 +567,8 @@ public:
         auto writableBackend = makeBackendRotating(env, scheduler, writableDb);
         auto archiveBackend = makeBackendRotating(env, scheduler, archiveDb);
 
-        // Create NodeStore with two backends to allow online deletion of
-        // data
         constexpr int readThreads = 4;
+        auto nscfg = env.app().config().section(ConfigSection::nodeDatabase());
         auto dbr = std::make_unique<NodeStore::DatabaseRotatingImp>(
             scheduler,
             readThreads,
```
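The test builds a `DatabaseRotatingImp` over two backends: a writable store for new writes and an archive holding older data, so online deletion can discard old state wholesale on rotation. A minimal sketch of that rotation idea, using a plain `std::map` in place of a real NodeStore backend (all names below are hypothetical stand-ins, not rippled APIs):

```cpp
#include <map>
#include <memory>
#include <mutex>
#include <optional>
#include <string>

// Hypothetical stand-in for a NodeStore backend: a simple key/value map.
using Backend = std::map<std::string, std::string>;

// Writes go to the writable backend; reads fall back to the archive;
// rotation discards the archive and starts a fresh writable backend.
class RotatingStore
{
    std::mutex mutex_;
    std::unique_ptr<Backend> writable_;
    std::unique_ptr<Backend> archive_;

public:
    RotatingStore()
        : writable_(std::make_unique<Backend>())
        , archive_(std::make_unique<Backend>())
    {
    }

    void store(std::string const& key, std::string const& value)
    {
        std::lock_guard lock(mutex_);
        (*writable_)[key] = value;
    }

    std::optional<std::string> fetch(std::string const& key)
    {
        std::lock_guard lock(mutex_);
        if (auto it = writable_->find(key); it != writable_->end())
            return it->second;
        if (auto it = archive_->find(key); it != archive_->end())
            return it->second;  // still present in the older generation
        return std::nullopt;
    }

    // The current writable backend becomes the archive; the old archive,
    // and everything stored only there, is dropped.
    void rotate()
    {
        std::lock_guard lock(mutex_);
        archive_ = std::move(writable_);
        writable_ = std::make_unique<Backend>();
    }
};
```

Rotation makes deletion cheap: dropping old state is a single backend swap rather than erasing individual records.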
```diff
@@ -162,20 +162,6 @@ std::unique_ptr<NodeStore::Database>
 SHAMapStoreImp::makeNodeStore(int readThreads)
 {
     auto nscfg = app_.config().section(ConfigSection::nodeDatabase());
 
-    // Provide default values:
-    if (!nscfg.exists("cache_size"))
-        nscfg.set(
-            "cache_size",
-            std::to_string(app_.config().getValueFor(
-                SizedItem::treeCacheSize, std::nullopt)));
-
-    if (!nscfg.exists("cache_age"))
-        nscfg.set(
-            "cache_age",
-            std::to_string(app_.config().getValueFor(
-                SizedItem::treeCacheAge, std::nullopt)));
-
     std::unique_ptr<NodeStore::Database> db;
 
     if (deleteInterval_)
```
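The deleted lines seeded `cache_size` and `cache_age` into the `[node_db]` section whenever the operator had not set them, so downstream code could assume both keys exist. A minimal sketch of that seed-if-absent pattern over a plain map standing in for rippled's `Section` (the helper name is hypothetical):

```cpp
#include <map>
#include <string>

// Plain map standing in for rippled's config Section.
using Section = std::map<std::string, std::string>;

// Seed a default only when the operator did not set the key, mirroring the
// removed "if (!nscfg.exists(...)) nscfg.set(...)" pattern.
void
setDefault(Section& section, std::string const& key, std::string const& value)
{
    section.emplace(key, value);  // no-op when the key already exists
}

int
main()
{
    Section nodeDb{{"cache_size", "8192"}};     // operator-provided
    setDefault(nodeDb, "cache_size", "16384");  // kept at 8192
    setDefault(nodeDb, "cache_age", "5");       // seeded with the default
}
```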
```diff
@@ -269,8 +255,6 @@ SHAMapStoreImp::run()
     LedgerIndex lastRotated = state_db_.getState().lastRotated;
     netOPs_ = &app_.getOPs();
     ledgerMaster_ = &app_.getLedgerMaster();
-    fullBelowCache_ = &(*app_.getNodeFamily().getFullBelowCache());
-    treeNodeCache_ = &(*app_.getNodeFamily().getTreeNodeCache());
 
     if (advisoryDelete_)
         canDelete_ = state_db_.getCanDelete();
```
```diff
@@ -563,16 +547,13 @@ void
 SHAMapStoreImp::clearCaches(LedgerIndex validatedSeq)
 {
     ledgerMaster_->clearLedgerCachePrior(validatedSeq);
-    fullBelowCache_->clear();
 }
 
 void
 SHAMapStoreImp::freshenCaches()
 {
-    if (freshenCache(*treeNodeCache_))
-        return;
-    if (freshenCache(app_.getMasterTransaction().getCache()))
-        return;
+    freshenCache(*app_.getNodeFamily().getTreeNodeCache());
+    freshenCache(app_.getMasterTransaction().getCache());
 }
 
 void
```
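`freshenCache` itself is not shown in this diff; in `SHAMapStoreImp` it walks a cache and re-fetches each cached key through the rotating database, which has the side effect of copying still-live objects into the new writable backend, and in the old code a `true` return signaled that the walk was interrupted (hence the early returns above). A rough sketch of that walk under those assumptions, with simplified stand-in types:

```cpp
#include <functional>
#include <string>
#include <vector>

// Simplified stand-in for a cache that can enumerate its keys.
struct Cache
{
    std::vector<std::string> keys;
};

// Re-fetch every cached key through the database. refetch() is assumed to
// copy the object into the current writable backend as a side effect, and
// to return false when the walk should stop (e.g. shutdown). Returns true
// when interrupted, matching the old early-return contract above.
bool
freshenCache(
    Cache const& cache,
    std::function<bool(std::string const&)> const& refetch)
{
    for (auto const& key : cache.keys)
        if (!refetch(key))
            return true;  // interrupted; caller may skip further caches
    return false;
}
```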
```diff
@@ -112,8 +112,6 @@ private:
     // as of run() or before
     NetworkOPs* netOPs_ = nullptr;
     LedgerMaster* ledgerMaster_ = nullptr;
-    FullBelowCache* fullBelowCache_ = nullptr;
-    TreeNodeCache* treeNodeCache_ = nullptr;
 
     static constexpr auto nodeStoreName_ = "NodeStore";
 
```
```diff
@@ -33,14 +33,6 @@ DatabaseNodeImp::store(
 
     auto obj = NodeObject::createObject(type, std::move(data), hash);
     backend_->store(obj);
-    if (cache_)
-    {
-        // After the store, replace a negative cache entry if there is one
-        cache_->canonicalize(
-            hash, obj, [](std::shared_ptr<NodeObject> const& n) {
-                return n->getType() == hotDUMMY;
-            });
-    }
 }
 
 void
```
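The removed block is the write half of a negative-caching scheme: a cached `hotDUMMY` object records "this hash is known to be absent", and a store must overwrite that marker with the real object. A small sketch of that replace-if-dummy step over a plain map, with a predicate deciding when an existing entry may be replaced (all names hypothetical, not the rippled `TaggedCache` API):

```cpp
#include <functional>
#include <map>
#include <memory>
#include <string>

struct Object
{
    bool isDummy;      // true marks a cached "known absent" entry
    std::string data;
};

using Cache = std::map<std::string, std::shared_ptr<Object>>;

// Insert obj under key; if an entry already exists, replace it only when
// the predicate says the existing entry is replaceable (here: a dummy).
void
canonicalize(
    Cache& cache,
    std::string const& key,
    std::shared_ptr<Object> const& obj,
    std::function<bool(std::shared_ptr<Object> const&)> const& replace)
{
    auto [it, inserted] = cache.try_emplace(key, obj);
    if (!inserted && replace(it->second))
        it->second = obj;
}

// Usage mirroring the removed store() logic: after writing to the backend,
// overwrite a negative cache entry if one is present.
void
afterStore(Cache& cache, std::string const& key, std::shared_ptr<Object> obj)
{
    canonicalize(cache, key, obj, [](std::shared_ptr<Object> const& n) {
        return n->isDummy;
    });
}
```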
```diff
@@ -49,23 +41,12 @@ DatabaseNodeImp::asyncFetch(
     std::uint32_t ledgerSeq,
     std::function<void(std::shared_ptr<NodeObject> const&)>&& callback)
 {
-    if (cache_)
-    {
-        std::shared_ptr<NodeObject> obj = cache_->fetch(hash);
-        if (obj)
-        {
-            callback(obj->getType() == hotDUMMY ? nullptr : obj);
-            return;
-        }
-    }
     Database::asyncFetch(hash, ledgerSeq, std::move(callback));
 }
 
 void
 DatabaseNodeImp::sweep()
 {
-    if (cache_)
-        cache_->sweep();
 }
 
 std::shared_ptr<NodeObject>
```
```diff
@@ -75,64 +56,33 @@ DatabaseNodeImp::fetchNodeObject(
     FetchReport& fetchReport,
     bool duplicate)
 {
-    std::shared_ptr<NodeObject> nodeObject =
-        cache_ ? cache_->fetch(hash) : nullptr;
+    std::shared_ptr<NodeObject> nodeObject = nullptr;
+    Status status;
 
-    if (!nodeObject)
+    try
     {
-        JLOG(j_.trace()) << "fetchNodeObject " << hash << ": record not "
-                         << (cache_ ? "cached" : "found");
-
-        Status status;
-
-        try
-        {
-            status = backend_->fetch(hash.data(), &nodeObject);
-        }
-        catch (std::exception const& e)
-        {
-            JLOG(j_.fatal())
-                << "fetchNodeObject " << hash
-                << ": Exception fetching from backend: " << e.what();
-            Rethrow();
-        }
-
-        switch (status)
-        {
-            case ok:
-                if (cache_)
-                {
-                    if (nodeObject)
-                        cache_->canonicalize_replace_client(hash, nodeObject);
-                    else
-                    {
-                        auto notFound =
-                            NodeObject::createObject(hotDUMMY, {}, hash);
-                        cache_->canonicalize_replace_client(hash, notFound);
-                        if (notFound->getType() != hotDUMMY)
-                            nodeObject = notFound;
-                    }
-                }
-                break;
-            case notFound:
-                break;
-            case dataCorrupt:
-                JLOG(j_.fatal()) << "fetchNodeObject " << hash
-                                 << ": nodestore data is corrupted";
-                break;
-            default:
-                JLOG(j_.warn())
-                    << "fetchNodeObject " << hash
-                    << ": backend returns unknown result " << status;
-                break;
-        }
+        status = backend_->fetch(hash.data(), &nodeObject);
     }
-    else
+    catch (std::exception const& e)
     {
-        JLOG(j_.trace()) << "fetchNodeObject " << hash
-                         << ": record found in cache";
-        if (nodeObject->getType() == hotDUMMY)
-            nodeObject.reset();
+        JLOG(j_.fatal()) << "fetchNodeObject " << hash
+                         << ": Exception fetching from backend: " << e.what();
+        Rethrow();
     }
 
+    switch (status)
+    {
+        case ok:
+        case notFound:
+            break;
+        case dataCorrupt:
+            JLOG(j_.fatal()) << "fetchNodeObject " << hash
+                             << ": nodestore data is corrupted";
+            break;
+        default:
+            JLOG(j_.warn()) << "fetchNodeObject " << hash
+                            << ": backend returns unknown result " << status;
+            break;
+    }
+
     if (nodeObject)
```
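Before this change, the read path consulted the cache first: a `hotDUMMY` hit meant "the backend is known not to have this" and was answered as null without touching the backend, while a miss went to the backend and cached either the real object or a fresh dummy. A compact sketch of that read-through with negative entries (hypothetical types; the real code additionally canonicalizes so concurrent readers share one instance):

```cpp
#include <map>
#include <memory>
#include <optional>
#include <string>

struct Object
{
    bool isDummy;
    std::string data;
};

using Cache = std::map<std::string, std::shared_ptr<Object>>;

// Stand-in for backend_->fetch(): pretend only "alpha" exists on disk.
std::optional<std::string>
backendFetch(std::string const& key)
{
    if (key == "alpha")
        return "payload";
    return std::nullopt;
}

std::shared_ptr<Object>
fetchThroughCache(Cache& cache, std::string const& key)
{
    if (auto it = cache.find(key); it != cache.end())
        return it->second->isDummy ? nullptr : it->second;  // negative hit

    if (auto data = backendFetch(key))
    {
        auto obj = std::make_shared<Object>(Object{false, *data});
        cache.emplace(key, obj);
        return obj;
    }

    // Remember the miss so repeated lookups skip the backend.
    cache.emplace(key, std::make_shared<Object>(Object{true, ""}));
    return nullptr;
}
```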
```diff
@@ -144,71 +94,33 @@ DatabaseNodeImp::fetchNodeObject(
 std::vector<std::shared_ptr<NodeObject>>
 DatabaseNodeImp::fetchBatch(std::vector<uint256> const& hashes)
 {
-    std::vector<std::shared_ptr<NodeObject>> results{hashes.size()};
     using namespace std::chrono;
     auto const before = steady_clock::now();
-    std::unordered_map<uint256 const*, size_t> indexMap;
-    std::vector<uint256 const*> cacheMisses;
-    uint64_t hits = 0;
-    uint64_t fetches = 0;
-
+    std::vector<uint256 const*> batch{hashes.size()};
     for (size_t i = 0; i < hashes.size(); ++i)
     {
         auto const& hash = hashes[i];
-        // See if the object already exists in the cache
-        auto nObj = cache_ ? cache_->fetch(hash) : nullptr;
-        ++fetches;
-        if (!nObj)
-        {
-            // Try the database
-            indexMap[&hash] = i;
-            cacheMisses.push_back(&hash);
-        }
-        else
-        {
-            results[i] = nObj->getType() == hotDUMMY ? nullptr : nObj;
-            // It was in the cache.
-            ++hits;
-        }
+        batch.push_back(&hash);
     }
 
-    JLOG(j_.debug()) << "fetchBatch - cache hits = "
-                     << (hashes.size() - cacheMisses.size())
-                     << " - cache misses = " << cacheMisses.size();
-    auto dbResults = backend_->fetchBatch(cacheMisses).first;
-
-    for (size_t i = 0; i < dbResults.size(); ++i)
+    std::vector<std::shared_ptr<NodeObject>> results{hashes.size()};
+    results = backend_->fetchBatch(batch).first;
+    for (size_t i = 0; i < results.size(); ++i)
     {
-        auto nObj = std::move(dbResults[i]);
-        size_t index = indexMap[cacheMisses[i]];
-        auto const& hash = hashes[index];
-
-        if (nObj)
-        {
-            // Ensure all threads get the same object
-            if (cache_)
-                cache_->canonicalize_replace_client(hash, nObj);
-        }
-        else
+        if (!results[i])
         {
             JLOG(j_.error())
                 << "fetchBatch - "
-                << "record not found in db or cache. hash = " << strHex(hash);
-            if (cache_)
-            {
-                auto notFound = NodeObject::createObject(hotDUMMY, {}, hash);
-                cache_->canonicalize_replace_client(hash, notFound);
-                if (notFound->getType() != hotDUMMY)
-                    nObj = std::move(notFound);
-            }
+                << "record not found in db. hash = " << strHex(hashes[i]);
         }
-        results[index] = std::move(nObj);
     }
 
     auto fetchDurationUs =
         std::chrono::duration_cast<std::chrono::microseconds>(
             steady_clock::now() - before)
             .count();
-    updateFetchMetrics(fetches, hits, fetchDurationUs);
+    updateFetchMetrics(hashes.size(), 0, fetchDurationUs);
     return results;
 }
```
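The removed bookkeeping (`indexMap`, `cacheMisses`) implements a common batch pattern: answer what you can from a lookup table, batch the misses into one backend call, then scatter the fetched results back into their original slots. A minimal sketch of that partition-and-merge, using a parallel index vector instead of the pointer-keyed map in the original (all types are stand-ins):

```cpp
#include <cstddef>
#include <functional>
#include <optional>
#include <string>
#include <unordered_map>
#include <vector>

using Key = std::string;                     // stand-in for uint256
using Result = std::optional<std::string>;   // stand-in for a NodeObject ptr

std::vector<Result>
fetchBatch(
    std::vector<Key> const& keys,
    std::unordered_map<Key, std::string> const& cached,
    std::function<std::vector<Result>(std::vector<Key> const&)> const&
        backendBatch)
{
    std::vector<Result> results(keys.size());
    std::vector<Key> misses;
    std::vector<std::size_t> missIndex;  // original position of each miss

    for (std::size_t i = 0; i < keys.size(); ++i)
    {
        if (auto it = cached.find(keys[i]); it != cached.end())
            results[i] = it->second;  // hit: answered immediately
        else
        {
            misses.push_back(keys[i]);
            missIndex.push_back(i);
        }
    }

    auto fetched = backendBatch(misses);  // one round trip for all misses
    for (std::size_t i = 0; i < fetched.size(); ++i)
        results[missIndex[i]] = std::move(fetched[i]);

    return results;
}
```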
```diff
@@ -45,38 +45,6 @@ public:
         : Database(scheduler, readThreads, config, j)
         , backend_(std::move(backend))
     {
-        std::optional<int> cacheSize, cacheAge;
-
-        if (config.exists("cache_size"))
-        {
-            cacheSize = get<int>(config, "cache_size");
-            if (cacheSize.value() < 0)
-            {
-                Throw<std::runtime_error>(
-                    "Specified negative value for cache_size");
-            }
-        }
-
-        if (config.exists("cache_age"))
-        {
-            cacheAge = get<int>(config, "cache_age");
-            if (cacheAge.value() < 0)
-            {
-                Throw<std::runtime_error>(
-                    "Specified negative value for cache_age");
-            }
-        }
-
-        if (cacheSize != 0 || cacheAge != 0)
-        {
-            cache_ = std::make_shared<TaggedCache<uint256, NodeObject>>(
-                "DatabaseNodeImp",
-                cacheSize.value_or(0),
-                std::chrono::minutes(cacheAge.value_or(0)),
-                stopwatch(),
-                j);
-        }
-
         XRPL_ASSERT(
             backend_,
             "ripple::NodeStore::DatabaseNodeImp::DatabaseNodeImp : non-null "
```
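Note the subtlety in the removed condition: `cacheSize` and `cacheAge` are `std::optional<int>`, so an unset key compares unequal to 0 and still enables the cache; only an explicit 0 in both keys disabled it. A condensed sketch of that decision (hypothetical helper, plain types):

```cpp
#include <chrono>
#include <optional>
#include <stdexcept>

struct CacheConfig
{
    int size;
    std::chrono::minutes age;
};

// Decide whether to build a cache from optional config keys, following the
// removed constructor logic: negative values are rejected, and the cache is
// skipped only when both keys are explicitly 0.
std::optional<CacheConfig>
makeCacheConfig(std::optional<int> cacheSize, std::optional<int> cacheAge)
{
    if (cacheSize && *cacheSize < 0)
        throw std::runtime_error("Specified negative value for cache_size");
    if (cacheAge && *cacheAge < 0)
        throw std::runtime_error("Specified negative value for cache_age");

    if (cacheSize != 0 || cacheAge != 0)  // nullopt != 0, so "unset" enables
        return CacheConfig{
            cacheSize.value_or(0),
            std::chrono::minutes(cacheAge.value_or(0))};

    return std::nullopt;
}
```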
```diff
@@ -137,9 +105,6 @@ public:
     sweep() override;
 
 private:
-    // Cache for database objects. This cache is not always initialized. Check
-    // for null before using.
-    std::shared_ptr<TaggedCache<uint256, NodeObject>> cache_;
     // Persistent key/value storage
     std::shared_ptr<Backend> backend_;
 
```