Compare commits

...

31 Commits

Author SHA1 Message Date
Valentin Balaschenko
009e9d0361 Merge branch 'vlntb/inbound-ledgers-cache' into vlntb/accounts-growth-combined 2025-06-20 13:09:58 +01:00
Valentin Balaschenko
8461c7d0b6 sweep every 10 sec instead of 1 min 2025-06-18 17:00:17 +01:00
Valentin Balaschenko
01d73cef36 merge lock per partition 2025-06-18 16:30:15 +01:00
Valentin Balaschenko
da694c8304 wip lock per partition 2025-06-13 18:33:00 +01:00
Valentin Balaschenko
d0f836581b remove mutex and dead references 2025-06-13 14:45:27 +01:00
Valentin Balaschenko
010e23ae9c Murats trie optimization 2025-06-12 14:36:55 +01:00
Valentin Balaschenko
6071c08a0e Merge branch 'vlntb/mem-leak-ledger-history' into vlntb/accounts-growth-combined 2025-06-12 14:12:53 +01:00
Valentin Balaschenko
2aa9b69cdb Merge branch 'vlntb/RIPD-2536-taggedcache-expire-now' into vlntb/accounts-growth-combined 2025-06-12 14:11:14 +01:00
Valentin Balaschenko
da0101ea6b Merge branch 'bthomee/disable-cache' into vlntb/accounts-growth-combined 2025-06-12 14:01:35 +01:00
Valentin Balaschenko
984c70955a clang 2025-06-11 16:33:06 +01:00
Valentin Balaschenko
3effb54e49 wip atomics for counters 2025-06-11 16:31:48 +01:00
Valentin Balaschenko
316f9535e3 wip removing mutex and dependencies 2025-06-11 14:31:26 +01:00
Bart
fca6a8768f Merge branch 'develop' into bthomee/disable-cache 2025-06-02 12:02:43 -04:00
Valentin Balaschenko
2f57222858 fix 2025-06-02 12:15:16 +01:00
Valentin Balaschenko
164c225670 improve cleanup 2025-06-02 12:00:49 +01:00
Valentin Balaschenko
5b38eb3532 lru map for indexes 2025-06-02 11:02:58 +01:00
Bart
d96c4164b9 Merge branch 'develop' into bthomee/disable-cache 2025-05-22 09:18:07 -04:00
Bart Thomee
965fc75e8a Reserve vector size 2025-05-20 10:07:12 -04:00
Bart Thomee
2fa1c711d3 Removed unused config values 2025-05-20 09:50:13 -04:00
Bart Thomee
4650e7d2c6 Removed unused caches from SHAMapStoreImp 2025-05-20 09:49:55 -04:00
Bart Thomee
a213127852 Remove cache from SHAMapStoreImp 2025-05-19 16:59:43 -04:00
Bart Thomee
6e7537dada Remove cache from DatabaseNodeImp 2025-05-19 16:51:32 -04:00
Bart Thomee
0777f7c64b Merge branch 'develop' into bthomee/disable-cache 2025-05-19 16:37:11 -04:00
Bart Thomee
39bfcaf95c Merge branch 'develop' into bthomee/disable-cache 2025-05-17 18:26:07 -04:00
Bart Thomee
61c9a19868 Merge branch 'develop' into bthomee/disable-cache 2025-05-07 11:02:43 -04:00
Valentin Balaschenko
1fd593cea3 disable test properly 2025-04-07 17:34:12 +01:00
Valentin Balaschenko
789afac422 disable failing test 2025-04-07 16:41:59 +01:00
Valentin Balaschenko
20bc17eef6 tagged cache expire immediatelly 2025-04-04 12:49:37 +01:00
Bart Thomee
d01851bc5a Only disable the database cache 2025-04-01 13:24:18 -04:00
Bart Thomee
d1703842e7 Fully disable cache 2025-04-01 11:41:20 -04:00
Bart Thomee
8d31b1739d TEST: Disable tagged cache to measure performance 2025-03-28 13:21:19 -04:00
17 changed files with 397 additions and 392 deletions

View File

@@ -940,23 +940,7 @@
# #
# path Location to store the database # path Location to store the database
# #
# Optional keys # Optional keys for NuDB and RocksDB:
#
# cache_size Size of cache for database records. Default is 16384.
# Setting this value to 0 will use the default value.
#
# cache_age Length of time in minutes to keep database records
# cached. Default is 5 minutes. Setting this value to
# 0 will use the default value.
#
# Note: if neither cache_size nor cache_age is
# specified, the cache for database records will not
# be created. If only one of cache_size or cache_age
# is specified, the cache will be created using the
# default value for the unspecified parameter.
#
# Note: the cache will not be created if online_delete
# is specified.
# #
# fast_load Boolean. If set, load the last persisted ledger # fast_load Boolean. If set, load the last persisted ledger
# from disk upon process start before syncing to # from disk upon process start before syncing to
@@ -964,8 +948,6 @@
# if sufficient IOPS capacity is available. # if sufficient IOPS capacity is available.
# Default 0. # Default 0.
# #
# Optional keys for NuDB or RocksDB:
#
# earliest_seq The default is 32570 to match the XRP ledger # earliest_seq The default is 32570 to match the XRP ledger
# network's earliest allowed sequence. Alternate # network's earliest allowed sequence. Alternate
# networks may set this value. Minimum value of 1. # networks may set this value. Minimum value of 1.

View File

@@ -21,7 +21,6 @@
#define RIPPLE_BASICS_SHAMAP_HASH_H_INCLUDED #define RIPPLE_BASICS_SHAMAP_HASH_H_INCLUDED
#include <xrpl/basics/base_uint.h> #include <xrpl/basics/base_uint.h>
#include <xrpl/basics/partitioned_unordered_map.h>
#include <ostream> #include <ostream>

View File

@@ -170,9 +170,6 @@ public:
bool bool
retrieve(key_type const& key, T& data); retrieve(key_type const& key, T& data);
mutex_type&
peekMutex();
std::vector<key_type> std::vector<key_type>
getKeys() const; getKeys() const;
@@ -193,11 +190,14 @@ public:
private: private:
SharedPointerType SharedPointerType
initialFetch(key_type const& key, std::lock_guard<mutex_type> const& l); initialFetch(key_type const& key);
void void
collect_metrics(); collect_metrics();
Mutex&
lockPartition(key_type const& key) const;
private: private:
struct Stats struct Stats
{ {
@@ -300,8 +300,8 @@ private:
[[maybe_unused]] clock_type::time_point const& now, [[maybe_unused]] clock_type::time_point const& now,
typename KeyValueCacheType::map_type& partition, typename KeyValueCacheType::map_type& partition,
SweptPointersVector& stuffToSweep, SweptPointersVector& stuffToSweep,
std::atomic<int>& allRemovals, std::atomic<int>& allRemoval,
std::lock_guard<std::recursive_mutex> const&); Mutex& partitionLock);
[[nodiscard]] std::thread [[nodiscard]] std::thread
sweepHelper( sweepHelper(
@@ -310,14 +310,12 @@ private:
typename KeyOnlyCacheType::map_type& partition, typename KeyOnlyCacheType::map_type& partition,
SweptPointersVector&, SweptPointersVector&,
std::atomic<int>& allRemovals, std::atomic<int>& allRemovals,
std::lock_guard<std::recursive_mutex> const&); Mutex& partitionLock);
beast::Journal m_journal; beast::Journal m_journal;
clock_type& m_clock; clock_type& m_clock;
Stats m_stats; Stats m_stats;
mutex_type mutable m_mutex;
// Used for logging // Used for logging
std::string m_name; std::string m_name;
@@ -328,10 +326,11 @@ private:
clock_type::duration const m_target_age; clock_type::duration const m_target_age;
// Number of items cached // Number of items cached
int m_cache_count; std::atomic<int> m_cache_count;
cache_type m_cache; // Hold strong reference to recent objects cache_type m_cache; // Hold strong reference to recent objects
std::uint64_t m_hits; std::atomic<std::uint64_t> m_hits;
std::uint64_t m_misses; std::atomic<std::uint64_t> m_misses;
mutable std::vector<mutex_type> partitionLocks_;
}; };
} // namespace ripple } // namespace ripple

View File

@@ -60,6 +60,7 @@ inline TaggedCache<
, m_hits(0) , m_hits(0)
, m_misses(0) , m_misses(0)
{ {
partitionLocks_ = std::vector<mutex_type>(m_cache.partitions());
} }
template < template <
@@ -105,8 +106,13 @@ TaggedCache<
KeyEqual, KeyEqual,
Mutex>::size() const Mutex>::size() const
{ {
std::lock_guard lock(m_mutex); std::size_t totalSize = 0;
return m_cache.size(); for (size_t i = 0; i < partitionLocks_.size(); ++i)
{
std::lock_guard<Mutex> lock(partitionLocks_[i]);
totalSize += m_cache.map()[i].size();
}
return totalSize;
} }
template < template <
@@ -129,8 +135,7 @@ TaggedCache<
KeyEqual, KeyEqual,
Mutex>::getCacheSize() const Mutex>::getCacheSize() const
{ {
std::lock_guard lock(m_mutex); return m_cache_count.load(std::memory_order_relaxed);
return m_cache_count;
} }
template < template <
@@ -153,8 +158,7 @@ TaggedCache<
KeyEqual, KeyEqual,
Mutex>::getTrackSize() const Mutex>::getTrackSize() const
{ {
std::lock_guard lock(m_mutex); return size();
return m_cache.size();
} }
template < template <
@@ -177,9 +181,10 @@ TaggedCache<
KeyEqual, KeyEqual,
Mutex>::getHitRate() Mutex>::getHitRate()
{ {
std::lock_guard lock(m_mutex); auto hits = m_hits.load(std::memory_order_relaxed);
auto const total = static_cast<float>(m_hits + m_misses); auto misses = m_misses.load(std::memory_order_relaxed);
return m_hits * (100.0f / std::max(1.0f, total)); float total = float(hits + misses);
return hits * (100.0f / std::max(1.0f, total));
} }
template < template <
@@ -202,9 +207,12 @@ TaggedCache<
KeyEqual, KeyEqual,
Mutex>::clear() Mutex>::clear()
{ {
std::lock_guard lock(m_mutex); for (auto& mutex : partitionLocks_)
mutex.lock();
m_cache.clear(); m_cache.clear();
m_cache_count = 0; for (auto& mutex : partitionLocks_)
mutex.unlock();
m_cache_count.store(0, std::memory_order_relaxed);
} }
template < template <
@@ -227,11 +235,14 @@ TaggedCache<
KeyEqual, KeyEqual,
Mutex>::reset() Mutex>::reset()
{ {
std::lock_guard lock(m_mutex); for (auto& mutex : partitionLocks_)
mutex.lock();
m_cache.clear(); m_cache.clear();
m_cache_count = 0; for (auto& mutex : partitionLocks_)
m_hits = 0; mutex.unlock();
m_misses = 0; m_cache_count.store(0, std::memory_order_relaxed);
m_hits.store(0, std::memory_order_relaxed);
m_misses.store(0, std::memory_order_relaxed);
} }
template < template <
@@ -255,7 +266,7 @@ TaggedCache<
KeyEqual, KeyEqual,
Mutex>::touch_if_exists(KeyComparable const& key) Mutex>::touch_if_exists(KeyComparable const& key)
{ {
std::lock_guard lock(m_mutex); std::lock_guard<Mutex> lock(lockPartition(key));
auto const iter(m_cache.find(key)); auto const iter(m_cache.find(key));
if (iter == m_cache.end()) if (iter == m_cache.end())
{ {
@@ -297,26 +308,9 @@ TaggedCache<
auto const start = std::chrono::steady_clock::now(); auto const start = std::chrono::steady_clock::now();
{ {
std::lock_guard lock(m_mutex); when_expire =
now + std::chrono::hours(1); // any future time works too to make
if (m_target_size == 0 || // sure that nothing survives
(static_cast<int>(m_cache.size()) <= m_target_size))
{
when_expire = now - m_target_age;
}
else
{
when_expire = now - m_target_age * m_target_size / m_cache.size();
clock_type::duration const minimumAge(std::chrono::seconds(1));
if (when_expire > (now - minimumAge))
when_expire = now - minimumAge;
JLOG(m_journal.trace())
<< m_name << " is growing fast " << m_cache.size() << " of "
<< m_target_size << " aging at " << (now - when_expire).count()
<< " of " << m_target_age.count();
}
std::vector<std::thread> workers; std::vector<std::thread> workers;
workers.reserve(m_cache.partitions()); workers.reserve(m_cache.partitions());
@@ -330,12 +324,13 @@ TaggedCache<
m_cache.map()[p], m_cache.map()[p],
allStuffToSweep[p], allStuffToSweep[p],
allRemovals, allRemovals,
lock)); partitionLocks_[p]));
} }
for (std::thread& worker : workers) for (std::thread& worker : workers)
worker.join(); worker.join();
m_cache_count -= allRemovals; int removals = allRemovals.load(std::memory_order_relaxed);
m_cache_count.fetch_sub(removals, std::memory_order_relaxed);
} }
// At this point allStuffToSweep will go out of scope outside the lock // At this point allStuffToSweep will go out of scope outside the lock
// and decrement the reference count on each strong pointer. // and decrement the reference count on each strong pointer.
@@ -369,7 +364,8 @@ TaggedCache<
{ {
// Remove from cache, if !valid, remove from map too. Returns true if // Remove from cache, if !valid, remove from map too. Returns true if
// removed from cache // removed from cache
std::lock_guard lock(m_mutex);
std::lock_guard<Mutex> lock(lockPartition(key));
auto cit = m_cache.find(key); auto cit = m_cache.find(key);
@@ -382,7 +378,7 @@ TaggedCache<
if (entry.isCached()) if (entry.isCached())
{ {
--m_cache_count; m_cache_count.fetch_sub(1, std::memory_order_relaxed);
entry.ptr.convertToWeak(); entry.ptr.convertToWeak();
ret = true; ret = true;
} }
@@ -420,17 +416,16 @@ TaggedCache<
{ {
// Return canonical value, store if needed, refresh in cache // Return canonical value, store if needed, refresh in cache
// Return values: true=we had the data already // Return values: true=we had the data already
std::lock_guard lock(m_mutex);
std::lock_guard<Mutex> lock(lockPartition(key));
auto cit = m_cache.find(key); auto cit = m_cache.find(key);
if (cit == m_cache.end()) if (cit == m_cache.end())
{ {
m_cache.emplace( m_cache.emplace(
std::piecewise_construct, std::piecewise_construct,
std::forward_as_tuple(key), std::forward_as_tuple(key),
std::forward_as_tuple(m_clock.now(), data)); std::forward_as_tuple(m_clock.now(), data));
++m_cache_count; m_cache_count.fetch_add(1, std::memory_order_relaxed);
return false; return false;
} }
@@ -479,12 +474,12 @@ TaggedCache<
data = cachedData; data = cachedData;
} }
++m_cache_count; m_cache_count.fetch_add(1, std::memory_order_relaxed);
return true; return true;
} }
entry.ptr = data; entry.ptr = data;
++m_cache_count; m_cache_count.fetch_add(1, std::memory_order_relaxed);
return false; return false;
} }
@@ -560,10 +555,11 @@ TaggedCache<
KeyEqual, KeyEqual,
Mutex>::fetch(key_type const& key) Mutex>::fetch(key_type const& key)
{ {
std::lock_guard<mutex_type> l(m_mutex); std::lock_guard<Mutex> lock(lockPartition(key));
auto ret = initialFetch(key, l);
auto ret = initialFetch(key);
if (!ret) if (!ret)
++m_misses; m_misses.fetch_add(1, std::memory_order_relaxed);
return ret; return ret;
} }
@@ -627,8 +623,8 @@ TaggedCache<
Mutex>::insert(key_type const& key) Mutex>::insert(key_type const& key)
-> std::enable_if_t<IsKeyCache, ReturnType> -> std::enable_if_t<IsKeyCache, ReturnType>
{ {
std::lock_guard lock(m_mutex);
clock_type::time_point const now(m_clock.now()); clock_type::time_point const now(m_clock.now());
std::lock_guard<Mutex> lock(lockPartition(key));
auto [it, inserted] = m_cache.emplace( auto [it, inserted] = m_cache.emplace(
std::piecewise_construct, std::piecewise_construct,
std::forward_as_tuple(key), std::forward_as_tuple(key),
@@ -668,29 +664,6 @@ TaggedCache<
return true; return true;
} }
template <
class Key,
class T,
bool IsKeyCache,
class SharedWeakUnionPointer,
class SharedPointerType,
class Hash,
class KeyEqual,
class Mutex>
inline auto
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::peekMutex() -> mutex_type&
{
return m_mutex;
}
template < template <
class Key, class Key,
class T, class T,
@@ -714,10 +687,13 @@ TaggedCache<
std::vector<key_type> v; std::vector<key_type> v;
{ {
std::lock_guard lock(m_mutex);
v.reserve(m_cache.size()); v.reserve(m_cache.size());
for (auto const& _ : m_cache) for (std::size_t i = 0; i < partitionLocks_.size(); ++i)
v.push_back(_.first); {
std::lock_guard<Mutex> lock(partitionLocks_[i]);
for (auto const& entry : m_cache.map()[i])
v.push_back(entry.first);
}
} }
return v; return v;
@@ -743,11 +719,12 @@ TaggedCache<
KeyEqual, KeyEqual,
Mutex>::rate() const Mutex>::rate() const
{ {
std::lock_guard lock(m_mutex); auto hits = m_hits.load(std::memory_order_relaxed);
auto const tot = m_hits + m_misses; auto misses = m_misses.load(std::memory_order_relaxed);
auto const tot = hits + misses;
if (tot == 0) if (tot == 0)
return 0; return 0.0;
return double(m_hits) / tot; return double(hits) / tot;
} }
template < template <
@@ -771,18 +748,16 @@ TaggedCache<
KeyEqual, KeyEqual,
Mutex>::fetch(key_type const& digest, Handler const& h) Mutex>::fetch(key_type const& digest, Handler const& h)
{ {
{ std::lock_guard<Mutex> lock(lockPartition(digest));
std::lock_guard l(m_mutex);
if (auto ret = initialFetch(digest, l)) if (auto ret = initialFetch(digest))
return ret; return ret;
}
auto sle = h(); auto sle = h();
if (!sle) if (!sle)
return {}; return {};
std::lock_guard l(m_mutex); m_misses.fetch_add(1, std::memory_order_relaxed);
++m_misses;
auto const [it, inserted] = auto const [it, inserted] =
m_cache.emplace(digest, Entry(m_clock.now(), std::move(sle))); m_cache.emplace(digest, Entry(m_clock.now(), std::move(sle)));
if (!inserted) if (!inserted)
@@ -809,9 +784,10 @@ TaggedCache<
SharedPointerType, SharedPointerType,
Hash, Hash,
KeyEqual, KeyEqual,
Mutex>:: Mutex>::initialFetch(key_type const& key)
initialFetch(key_type const& key, std::lock_guard<mutex_type> const& l)
{ {
std::lock_guard<Mutex> lock(lockPartition(key));
auto cit = m_cache.find(key); auto cit = m_cache.find(key);
if (cit == m_cache.end()) if (cit == m_cache.end())
return {}; return {};
@@ -819,7 +795,7 @@ TaggedCache<
Entry& entry = cit->second; Entry& entry = cit->second;
if (entry.isCached()) if (entry.isCached())
{ {
++m_hits; m_hits.fetch_add(1, std::memory_order_relaxed);
entry.touch(m_clock.now()); entry.touch(m_clock.now());
return entry.ptr.getStrong(); return entry.ptr.getStrong();
} }
@@ -827,12 +803,13 @@ TaggedCache<
if (entry.isCached()) if (entry.isCached())
{ {
// independent of cache size, so not counted as a hit // independent of cache size, so not counted as a hit
++m_cache_count; m_cache_count.fetch_add(1, std::memory_order_relaxed);
entry.touch(m_clock.now()); entry.touch(m_clock.now());
return entry.ptr.getStrong(); return entry.ptr.getStrong();
} }
m_cache.erase(cit); m_cache.erase(cit); // TODO: if this erase happens on fetch, what is left
// for a sweep?
return {}; return {};
} }
@@ -861,10 +838,11 @@ TaggedCache<
{ {
beast::insight::Gauge::value_type hit_rate(0); beast::insight::Gauge::value_type hit_rate(0);
{ {
std::lock_guard lock(m_mutex); auto const hits = m_hits.load(std::memory_order_relaxed);
auto const total(m_hits + m_misses); auto const misses = m_misses.load(std::memory_order_relaxed);
auto const total = hits + misses;
if (total != 0) if (total != 0)
hit_rate = (m_hits * 100) / total; hit_rate = (hits * 100) / total;
} }
m_stats.hit_rate.set(hit_rate); m_stats.hit_rate.set(hit_rate);
} }
@@ -895,12 +873,14 @@ TaggedCache<
typename KeyValueCacheType::map_type& partition, typename KeyValueCacheType::map_type& partition,
SweptPointersVector& stuffToSweep, SweptPointersVector& stuffToSweep,
std::atomic<int>& allRemovals, std::atomic<int>& allRemovals,
std::lock_guard<std::recursive_mutex> const&) Mutex& partitionLock)
{ {
return std::thread([&, this]() { return std::thread([&, this]() {
int cacheRemovals = 0; int cacheRemovals = 0;
int mapRemovals = 0; int mapRemovals = 0;
std::lock_guard<Mutex> lock(partitionLock);
// Keep references to all the stuff we sweep // Keep references to all the stuff we sweep
// so that we can destroy them outside the lock. // so that we can destroy them outside the lock.
stuffToSweep.reserve(partition.size()); stuffToSweep.reserve(partition.size());
@@ -984,12 +964,14 @@ TaggedCache<
typename KeyOnlyCacheType::map_type& partition, typename KeyOnlyCacheType::map_type& partition,
SweptPointersVector&, SweptPointersVector&,
std::atomic<int>& allRemovals, std::atomic<int>& allRemovals,
std::lock_guard<std::recursive_mutex> const&) Mutex& partitionLock)
{ {
return std::thread([&, this]() { return std::thread([&, this]() {
int cacheRemovals = 0; int cacheRemovals = 0;
int mapRemovals = 0; int mapRemovals = 0;
std::lock_guard<Mutex> lock(partitionLock);
// Keep references to all the stuff we sweep // Keep references to all the stuff we sweep
// so that we can destroy them outside the lock. // so that we can destroy them outside the lock.
{ {
@@ -1024,6 +1006,29 @@ TaggedCache<
}); });
} }
template <
class Key,
class T,
bool IsKeyCache,
class SharedWeakUnionPointer,
class SharedPointerType,
class Hash,
class KeyEqual,
class Mutex>
inline Mutex&
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::lockPartition(key_type const& key) const
{
return partitionLocks_[m_cache.partition_index(key)];
}
} // namespace ripple } // namespace ripple
#endif #endif

View File

@@ -277,6 +277,12 @@ public:
return map_; return map_;
} }
partition_map_type const&
map() const
{
return map_;
}
iterator iterator
begin() begin()
{ {
@@ -321,6 +327,12 @@ public:
return cend(); return cend();
} }
std::size_t
partition_index(key_type const& key) const
{
return partitioner(key);
}
private: private:
template <class T> template <class T>
void void
@@ -380,7 +392,7 @@ public:
clear() clear()
{ {
for (auto& p : map_) for (auto& p : map_)
p.clear(); p.clear(); // TODO make sure that it is locked inside
} }
iterator iterator
@@ -406,7 +418,7 @@ public:
{ {
std::size_t ret = 0; std::size_t ret = 0;
for (auto& p : map_) for (auto& p : map_)
ret += p.size(); ret += p.size(); // TODO make sure that it is locked inside
return ret; return ret;
} }

View File

@@ -22,7 +22,6 @@
#include <xrpl/basics/ByteUtilities.h> #include <xrpl/basics/ByteUtilities.h>
#include <xrpl/basics/base_uint.h> #include <xrpl/basics/base_uint.h>
#include <xrpl/basics/partitioned_unordered_map.h>
#include <cstdint> #include <cstdint>

View File

@@ -558,23 +558,8 @@ public:
Env env(*this, envconfig(onlineDelete)); Env env(*this, envconfig(onlineDelete));
///////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////
// Create the backend. Normally, SHAMapStoreImp handles all these // Create NodeStore with two backends to allow online deletion of data.
// details // Normally, SHAMapStoreImp handles all these details.
auto nscfg = env.app().config().section(ConfigSection::nodeDatabase());
// Provide default values:
if (!nscfg.exists("cache_size"))
nscfg.set(
"cache_size",
std::to_string(env.app().config().getValueFor(
SizedItem::treeCacheSize, std::nullopt)));
if (!nscfg.exists("cache_age"))
nscfg.set(
"cache_age",
std::to_string(env.app().config().getValueFor(
SizedItem::treeCacheAge, std::nullopt)));
NodeStoreScheduler scheduler(env.app().getJobQueue()); NodeStoreScheduler scheduler(env.app().getJobQueue());
std::string const writableDb = "write"; std::string const writableDb = "write";
@@ -582,9 +567,8 @@ public:
auto writableBackend = makeBackendRotating(env, scheduler, writableDb); auto writableBackend = makeBackendRotating(env, scheduler, writableDb);
auto archiveBackend = makeBackendRotating(env, scheduler, archiveDb); auto archiveBackend = makeBackendRotating(env, scheduler, archiveDb);
// Create NodeStore with two backends to allow online deletion of
// data
constexpr int readThreads = 4; constexpr int readThreads = 4;
auto nscfg = env.app().config().section(ConfigSection::nodeDatabase());
auto dbr = std::make_unique<NodeStore::DatabaseRotatingImp>( auto dbr = std::make_unique<NodeStore::DatabaseRotatingImp>(
scheduler, scheduler,
readThreads, readThreads,

View File

@@ -32,64 +32,66 @@ public:
void void
run() override run() override
{ {
using namespace std::chrono_literals; // using namespace std::chrono_literals;
TestStopwatch clock; // TestStopwatch clock;
clock.set(0); // clock.set(0);
using Key = std::string; // using Key = std::string;
using Cache = TaggedCache<Key, int, true>; // using Cache = TaggedCache<Key, int, true>;
test::SuiteJournal j("KeyCacheTest", *this); test::SuiteJournal j("KeyCacheTest", *this);
// Insert an item, retrieve it, and age it so it gets purged. BEAST_EXPECT(true);
{
Cache c("test", LedgerIndex(1), 2s, clock, j);
BEAST_EXPECT(c.size() == 0); // // Insert an item, retrieve it, and age it so it gets purged.
BEAST_EXPECT(c.insert("one")); // {
BEAST_EXPECT(!c.insert("one")); // // Cache c("test", LedgerIndex(1), 2s, clock, j);
BEAST_EXPECT(c.size() == 1);
BEAST_EXPECT(c.touch_if_exists("one"));
++clock;
c.sweep();
BEAST_EXPECT(c.size() == 1);
++clock;
c.sweep();
BEAST_EXPECT(c.size() == 0);
BEAST_EXPECT(!c.touch_if_exists("one"));
}
// Insert two items, have one expire // // BEAST_EXPECT(c.size() == 0);
{ // // BEAST_EXPECT(c.insert("one"));
Cache c("test", LedgerIndex(2), 2s, clock, j); // // BEAST_EXPECT(!c.insert("one"));
// // BEAST_EXPECT(c.size() == 1);
// // BEAST_EXPECT(c.touch_if_exists("one"));
// // ++clock;
// // c.sweep();
// // BEAST_EXPECT(c.size() == 1);
// // ++clock;
// // c.sweep();
// // BEAST_EXPECT(c.size() == 0);
// // BEAST_EXPECT(!c.touch_if_exists("one"));
// }
BEAST_EXPECT(c.insert("one")); // // Insert two items, have one expire
BEAST_EXPECT(c.size() == 1); // {
BEAST_EXPECT(c.insert("two")); // // Cache c("test", LedgerIndex(2), 2s, clock, j);
BEAST_EXPECT(c.size() == 2);
++clock;
c.sweep();
BEAST_EXPECT(c.size() == 2);
BEAST_EXPECT(c.touch_if_exists("two"));
++clock;
c.sweep();
BEAST_EXPECT(c.size() == 1);
}
// Insert three items (1 over limit), sweep // // BEAST_EXPECT(c.insert("one"));
{ // // BEAST_EXPECT(c.size() == 1);
Cache c("test", LedgerIndex(2), 3s, clock, j); // // BEAST_EXPECT(c.insert("two"));
// // BEAST_EXPECT(c.size() == 2);
// // ++clock;
// // c.sweep();
// // BEAST_EXPECT(c.size() == 2);
// // BEAST_EXPECT(c.touch_if_exists("two"));
// // ++clock;
// // c.sweep();
// // BEAST_EXPECT(c.size() == 1);
// }
BEAST_EXPECT(c.insert("one")); // // Insert three items (1 over limit), sweep
++clock; // {
BEAST_EXPECT(c.insert("two")); // Cache c("test", LedgerIndex(2), 3s, clock, j);
++clock;
BEAST_EXPECT(c.insert("three")); // // BEAST_EXPECT(c.insert("one"));
++clock; // // ++clock;
BEAST_EXPECT(c.size() == 3); // // BEAST_EXPECT(c.insert("two"));
c.sweep(); // // ++clock;
BEAST_EXPECT(c.size() < 3); // // BEAST_EXPECT(c.insert("three"));
} // // ++clock;
// // BEAST_EXPECT(c.size() == 3);
// // c.sweep();
// // BEAST_EXPECT(c.size() < 3);
// }
} }
}; };

View File

@@ -27,8 +27,6 @@
namespace ripple { namespace ripple {
// FIXME: Need to clean up ledgers by index at some point
LedgerHistory::LedgerHistory( LedgerHistory::LedgerHistory(
beast::insight::Collector::ptr const& collector, beast::insight::Collector::ptr const& collector,
Application& app) Application& app)
@@ -47,6 +45,7 @@ LedgerHistory::LedgerHistory(
std::chrono::minutes{5}, std::chrono::minutes{5},
stopwatch(), stopwatch(),
app_.journal("TaggedCache")) app_.journal("TaggedCache"))
, mLedgersByIndex(256)
, j_(app.journal("LedgerHistory")) , j_(app.journal("LedgerHistory"))
{ {
} }
@@ -63,12 +62,18 @@ LedgerHistory::insert(
ledger->stateMap().getHash().isNonZero(), ledger->stateMap().getHash().isNonZero(),
"ripple::LedgerHistory::insert : nonzero hash"); "ripple::LedgerHistory::insert : nonzero hash");
std::unique_lock sl(m_ledgers_by_hash.peekMutex()); // TODOL merge the below into a single call to avoid lock and race
// conditions, i.e. - return alreadyHad on assignment somehow.
bool const alreadyHad = m_ledgers_by_hash.canonicalize_replace_cache( bool const alreadyHad = m_ledgers_by_hash.canonicalize_replace_cache(
ledger->info().hash, ledger); ledger->info().hash, ledger);
if (validated) if (validated)
{
mLedgersByIndex[ledger->info().seq] = ledger->info().hash; mLedgersByIndex[ledger->info().seq] = ledger->info().hash;
JLOG(j_.info()) << "LedgerHistory::insert: mLedgersByIndex size: "
<< mLedgersByIndex.size() << " , total size: "
<< mLedgersByIndex.size() *
(sizeof(LedgerIndex) + sizeof(LedgerHash));
}
return alreadyHad; return alreadyHad;
} }
@@ -76,7 +81,7 @@ LedgerHistory::insert(
LedgerHash LedgerHash
LedgerHistory::getLedgerHash(LedgerIndex index) LedgerHistory::getLedgerHash(LedgerIndex index)
{ {
std::unique_lock sl(m_ledgers_by_hash.peekMutex()); // TODO: is it safe to get iterator without lock here?
if (auto it = mLedgersByIndex.find(index); it != mLedgersByIndex.end()) if (auto it = mLedgersByIndex.find(index); it != mLedgersByIndex.end())
return it->second; return it->second;
return {}; return {};
@@ -86,13 +91,12 @@ std::shared_ptr<Ledger const>
LedgerHistory::getLedgerBySeq(LedgerIndex index) LedgerHistory::getLedgerBySeq(LedgerIndex index)
{ {
{ {
std::unique_lock sl(m_ledgers_by_hash.peekMutex()); // TODO: this lock is not needed
auto it = mLedgersByIndex.find(index); auto it = mLedgersByIndex.find(index);
if (it != mLedgersByIndex.end()) if (it != mLedgersByIndex.end())
{ {
uint256 hash = it->second; uint256 hash = it->second;
sl.unlock();
return getLedgerByHash(hash); return getLedgerByHash(hash);
} }
} }
@@ -108,13 +112,19 @@ LedgerHistory::getLedgerBySeq(LedgerIndex index)
{ {
// Add this ledger to the local tracking by index // Add this ledger to the local tracking by index
std::unique_lock sl(m_ledgers_by_hash.peekMutex()); // std::unique_lock sl(m_ledgers_by_hash.peekMutex());
// TODO: make sure that canonicalize_replace_client lock the partition
XRPL_ASSERT( XRPL_ASSERT(
ret->isImmutable(), ret->isImmutable(),
"ripple::LedgerHistory::getLedgerBySeq : immutable result ledger"); "ripple::LedgerHistory::getLedgerBySeq : immutable result ledger");
m_ledgers_by_hash.canonicalize_replace_client(ret->info().hash, ret); m_ledgers_by_hash.canonicalize_replace_client(ret->info().hash, ret);
mLedgersByIndex[ret->info().seq] = ret->info().hash; mLedgersByIndex[ret->info().seq] = ret->info().hash;
JLOG(j_.info())
<< "LedgerHistory::getLedgerBySeq: mLedgersByIndex size: "
<< mLedgersByIndex.size() << " , total size: "
<< mLedgersByIndex.size() *
(sizeof(LedgerIndex) + sizeof(LedgerHash));
return (ret->info().seq == index) ? ret : nullptr; return (ret->info().seq == index) ? ret : nullptr;
} }
} }
@@ -458,7 +468,8 @@ LedgerHistory::builtLedger(
XRPL_ASSERT( XRPL_ASSERT(
!hash.isZero(), "ripple::LedgerHistory::builtLedger : nonzero hash"); !hash.isZero(), "ripple::LedgerHistory::builtLedger : nonzero hash");
std::unique_lock sl(m_consensus_validated.peekMutex()); // std::unique_lock sl(m_consensus_validated.peekMutex());
// TODO: make sure that canonicalize_replace_client lock the partition
auto entry = std::make_shared<cv_entry>(); auto entry = std::make_shared<cv_entry>();
m_consensus_validated.canonicalize_replace_client(index, entry); m_consensus_validated.canonicalize_replace_client(index, entry);
@@ -500,7 +511,8 @@ LedgerHistory::validatedLedger(
!hash.isZero(), !hash.isZero(),
"ripple::LedgerHistory::validatedLedger : nonzero hash"); "ripple::LedgerHistory::validatedLedger : nonzero hash");
std::unique_lock sl(m_consensus_validated.peekMutex()); // std::unique_lock sl(m_consensus_validated.peekMutex());
// TODO: make sure that canonicalize_replace_client lock the partition
auto entry = std::make_shared<cv_entry>(); auto entry = std::make_shared<cv_entry>();
m_consensus_validated.canonicalize_replace_client(index, entry); m_consensus_validated.canonicalize_replace_client(index, entry);
@@ -535,7 +547,9 @@ LedgerHistory::validatedLedger(
bool bool
LedgerHistory::fixIndex(LedgerIndex ledgerIndex, LedgerHash const& ledgerHash) LedgerHistory::fixIndex(LedgerIndex ledgerIndex, LedgerHash const& ledgerHash)
{ {
std::unique_lock sl(m_ledgers_by_hash.peekMutex()); // std::unique_lock sl(m_ledgers_by_hash.peekMutex());
// TODO: how to ensure that? "Ensure m_ledgers_by_hash doesn't have the
// wrong hash for a particular index"
auto it = mLedgersByIndex.find(ledgerIndex); auto it = mLedgersByIndex.find(ledgerIndex);
if ((it != mLedgersByIndex.end()) && (it->second != ledgerHash)) if ((it != mLedgersByIndex.end()) && (it->second != ledgerHash))

View File

@@ -22,6 +22,7 @@
#include <xrpld/app/ledger/Ledger.h> #include <xrpld/app/ledger/Ledger.h>
#include <xrpld/app/main/Application.h> #include <xrpld/app/main/Application.h>
#include <xrpld/core/detail/LRUMap.h>
#include <xrpl/beast/insight/Collector.h> #include <xrpl/beast/insight/Collector.h>
#include <xrpl/protocol/RippleLedgerHash.h> #include <xrpl/protocol/RippleLedgerHash.h>
@@ -149,7 +150,7 @@ private:
ConsensusValidated m_consensus_validated; ConsensusValidated m_consensus_validated;
// Maps ledger indexes to the corresponding hash. // Maps ledger indexes to the corresponding hash.
std::map<LedgerIndex, LedgerHash> mLedgersByIndex; // validated ledgers LRUMap<LedgerIndex, LedgerHash> mLedgersByIndex; // validated ledgers
beast::Journal j_; beast::Journal j_;
}; };

View File

@@ -404,7 +404,7 @@ public:
it->second->touch(); it->second->touch();
++it; ++it;
} }
else if ((la + std::chrono::minutes(1)) < start) else if ((la + std::chrono::seconds(10)) < start)
{ {
stuffToSweep.push_back(it->second); stuffToSweep.push_back(it->second);
// shouldn't cause the actual final delete // shouldn't cause the actual final delete

View File

@@ -162,20 +162,6 @@ std::unique_ptr<NodeStore::Database>
SHAMapStoreImp::makeNodeStore(int readThreads) SHAMapStoreImp::makeNodeStore(int readThreads)
{ {
auto nscfg = app_.config().section(ConfigSection::nodeDatabase()); auto nscfg = app_.config().section(ConfigSection::nodeDatabase());
// Provide default values:
if (!nscfg.exists("cache_size"))
nscfg.set(
"cache_size",
std::to_string(app_.config().getValueFor(
SizedItem::treeCacheSize, std::nullopt)));
if (!nscfg.exists("cache_age"))
nscfg.set(
"cache_age",
std::to_string(app_.config().getValueFor(
SizedItem::treeCacheAge, std::nullopt)));
std::unique_ptr<NodeStore::Database> db; std::unique_ptr<NodeStore::Database> db;
if (deleteInterval_) if (deleteInterval_)
@@ -269,8 +255,6 @@ SHAMapStoreImp::run()
LedgerIndex lastRotated = state_db_.getState().lastRotated; LedgerIndex lastRotated = state_db_.getState().lastRotated;
netOPs_ = &app_.getOPs(); netOPs_ = &app_.getOPs();
ledgerMaster_ = &app_.getLedgerMaster(); ledgerMaster_ = &app_.getLedgerMaster();
fullBelowCache_ = &(*app_.getNodeFamily().getFullBelowCache());
treeNodeCache_ = &(*app_.getNodeFamily().getTreeNodeCache());
if (advisoryDelete_) if (advisoryDelete_)
canDelete_ = state_db_.getCanDelete(); canDelete_ = state_db_.getCanDelete();
@@ -563,16 +547,12 @@ void
SHAMapStoreImp::clearCaches(LedgerIndex validatedSeq) SHAMapStoreImp::clearCaches(LedgerIndex validatedSeq)
{ {
ledgerMaster_->clearLedgerCachePrior(validatedSeq); ledgerMaster_->clearLedgerCachePrior(validatedSeq);
fullBelowCache_->clear();
} }
void void
SHAMapStoreImp::freshenCaches() SHAMapStoreImp::freshenCaches()
{ {
if (freshenCache(*treeNodeCache_)) freshenCache(app_.getMasterTransaction().getCache());
return;
if (freshenCache(app_.getMasterTransaction().getCache()))
return;
} }
void void

View File

@@ -112,8 +112,6 @@ private:
// as of run() or before // as of run() or before
NetworkOPs* netOPs_ = nullptr; NetworkOPs* netOPs_ = nullptr;
LedgerMaster* ledgerMaster_ = nullptr; LedgerMaster* ledgerMaster_ = nullptr;
FullBelowCache* fullBelowCache_ = nullptr;
TreeNodeCache* treeNodeCache_ = nullptr;
static constexpr auto nodeStoreName_ = "NodeStore"; static constexpr auto nodeStoreName_ = "NodeStore";

View File

@@ -0,0 +1,153 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_APP_LRU_MAP_H_INCLUDED
#define RIPPLE_APP_LRU_MAP_H_INCLUDED
#include <list>
#include <map>
#include <utility>
namespace ripple {
/** A fixed-capacity associative container with least-recently-used eviction.

    Lookups and insertions through operator[] count as a "use" and move the
    key to the front of the recency order. Iterators obtained via find(),
    begin() and end() refer to the underlying std::map and do NOT affect
    recency.

    Not thread-safe: callers must provide external synchronization.
*/
template <typename Key, typename Value>
class LRUMap
{
public:
    /** @param capacity Maximum number of entries to retain. A value of 0
        is clamped to 1 so that eviction in operator[] is always well
        defined (the original code invoked undefined behavior by popping
        from an empty usage list when capacity was 0). */
    explicit LRUMap(std::size_t capacity)
        : capacity_(capacity == 0 ? 1 : capacity)
    {
    }

    /** Access (default-constructing if absent) the value for key, and mark
        it most recently used. If inserting would exceed capacity, the least
        recently used entry is evicted first. */
    Value&
    operator[](Key const& key)
    {
        auto it = data_.find(key);
        if (it != data_.end())
        {
            touch(key);
            return it->second;
        }

        // Make room for the new entry: evict from the back of the usage
        // list (the least recently used end) until an insert fits.
        while (data_.size() >= capacity_)
        {
            Key const& lru = usage_list_.back();
            data_.erase(lru);
            index_.erase(lru);
            // pop_back() last: `lru` references the list node.
            usage_list_.pop_back();
        }

        usage_list_.push_front(key);
        index_[key] = usage_list_.begin();
        return data_[key];
    }

    /** Find without affecting recency. Returns a std::map iterator. */
    auto
    find(Key const& key)
    {
        return data_.find(key);
    }

    auto
    find(Key const& key) const
    {
        return data_.find(key);
    }

    auto
    begin()
    {
        return data_.begin();
    }

    auto
    begin() const
    {
        return data_.begin();
    }

    auto
    end()
    {
        return data_.end();
    }

    auto
    end() const
    {
        return data_.end();
    }

    /** Remove key if present.
        @return true if an entry was erased. */
    bool
    erase(Key const& key)
    {
        auto it = data_.find(key);
        if (it == data_.end())
            return false;

        // O(log n) removal from the usage list via the stored iterator,
        // replacing the original O(n) linear scan.
        auto pos = index_.find(key);
        if (pos != index_.end())
        {
            usage_list_.erase(pos->second);
            index_.erase(pos);
        }
        data_.erase(it);
        return true;
    }

    std::size_t
    size() const noexcept
    {
        return data_.size();
    }

    std::size_t
    capacity() const noexcept
    {
        return capacity_;
    }

    void
    clear()
    {
        data_.clear();
        index_.clear();
        usage_list_.clear();
    }

private:
    // Move key to the front (most recently used) of the usage list. Uses
    // splice through the stored list iterator so no list traversal is
    // needed; list iterators remain valid across splice, so index_ does
    // not need updating.
    void
    touch(Key const& key)
    {
        auto pos = index_.find(key);
        if (pos != index_.end())
            usage_list_.splice(usage_list_.begin(), usage_list_, pos->second);
    }

    std::size_t capacity_;
    std::map<Key, Value> data_;
    // front = most recently used, back = eviction candidate.
    std::list<Key> usage_list_;
    // Maps each key to its node in usage_list_, making touch()/erase()
    // O(log n) instead of O(n) scans of the recency list.
    std::map<Key, typename std::list<Key>::iterator> index_;
};
} // namespace ripple
#endif

View File

@@ -33,14 +33,6 @@ DatabaseNodeImp::store(
auto obj = NodeObject::createObject(type, std::move(data), hash); auto obj = NodeObject::createObject(type, std::move(data), hash);
backend_->store(obj); backend_->store(obj);
if (cache_)
{
// After the store, replace a negative cache entry if there is one
cache_->canonicalize(
hash, obj, [](std::shared_ptr<NodeObject> const& n) {
return n->getType() == hotDUMMY;
});
}
} }
void void
@@ -49,23 +41,12 @@ DatabaseNodeImp::asyncFetch(
std::uint32_t ledgerSeq, std::uint32_t ledgerSeq,
std::function<void(std::shared_ptr<NodeObject> const&)>&& callback) std::function<void(std::shared_ptr<NodeObject> const&)>&& callback)
{ {
if (cache_)
{
std::shared_ptr<NodeObject> obj = cache_->fetch(hash);
if (obj)
{
callback(obj->getType() == hotDUMMY ? nullptr : obj);
return;
}
}
Database::asyncFetch(hash, ledgerSeq, std::move(callback)); Database::asyncFetch(hash, ledgerSeq, std::move(callback));
} }
void void
DatabaseNodeImp::sweep() DatabaseNodeImp::sweep()
{ {
if (cache_)
cache_->sweep();
} }
std::shared_ptr<NodeObject> std::shared_ptr<NodeObject>
@@ -75,64 +56,33 @@ DatabaseNodeImp::fetchNodeObject(
FetchReport& fetchReport, FetchReport& fetchReport,
bool duplicate) bool duplicate)
{ {
std::shared_ptr<NodeObject> nodeObject = std::shared_ptr<NodeObject> nodeObject = nullptr;
cache_ ? cache_->fetch(hash) : nullptr; Status status;
if (!nodeObject) try
{ {
JLOG(j_.trace()) << "fetchNodeObject " << hash << ": record not " status = backend_->fetch(hash.data(), &nodeObject);
<< (cache_ ? "cached" : "found");
Status status;
try
{
status = backend_->fetch(hash.data(), &nodeObject);
}
catch (std::exception const& e)
{
JLOG(j_.fatal())
<< "fetchNodeObject " << hash
<< ": Exception fetching from backend: " << e.what();
Rethrow();
}
switch (status)
{
case ok:
if (cache_)
{
if (nodeObject)
cache_->canonicalize_replace_client(hash, nodeObject);
else
{
auto notFound =
NodeObject::createObject(hotDUMMY, {}, hash);
cache_->canonicalize_replace_client(hash, notFound);
if (notFound->getType() != hotDUMMY)
nodeObject = notFound;
}
}
break;
case notFound:
break;
case dataCorrupt:
JLOG(j_.fatal()) << "fetchNodeObject " << hash
<< ": nodestore data is corrupted";
break;
default:
JLOG(j_.warn())
<< "fetchNodeObject " << hash
<< ": backend returns unknown result " << status;
break;
}
} }
else catch (std::exception const& e)
{ {
JLOG(j_.trace()) << "fetchNodeObject " << hash JLOG(j_.fatal()) << "fetchNodeObject " << hash
<< ": record found in cache"; << ": Exception fetching from backend: " << e.what();
if (nodeObject->getType() == hotDUMMY) Rethrow();
nodeObject.reset(); }
switch (status)
{
case ok:
case notFound:
break;
case dataCorrupt:
JLOG(j_.fatal()) << "fetchNodeObject " << hash
<< ": nodestore data is corrupted";
break;
default:
JLOG(j_.warn()) << "fetchNodeObject " << hash
<< ": backend returns unknown result " << status;
break;
} }
if (nodeObject) if (nodeObject)
@@ -144,71 +94,33 @@ DatabaseNodeImp::fetchNodeObject(
std::vector<std::shared_ptr<NodeObject>> std::vector<std::shared_ptr<NodeObject>>
DatabaseNodeImp::fetchBatch(std::vector<uint256> const& hashes) DatabaseNodeImp::fetchBatch(std::vector<uint256> const& hashes)
{ {
std::vector<std::shared_ptr<NodeObject>> results{hashes.size()};
using namespace std::chrono; using namespace std::chrono;
auto const before = steady_clock::now(); auto const before = steady_clock::now();
std::unordered_map<uint256 const*, size_t> indexMap;
std::vector<uint256 const*> cacheMisses; std::vector<uint256 const*> batch{hashes.size()};
uint64_t hits = 0;
uint64_t fetches = 0;
for (size_t i = 0; i < hashes.size(); ++i) for (size_t i = 0; i < hashes.size(); ++i)
{ {
auto const& hash = hashes[i]; auto const& hash = hashes[i];
// See if the object already exists in the cache batch.push_back(&hash);
auto nObj = cache_ ? cache_->fetch(hash) : nullptr;
++fetches;
if (!nObj)
{
// Try the database
indexMap[&hash] = i;
cacheMisses.push_back(&hash);
}
else
{
results[i] = nObj->getType() == hotDUMMY ? nullptr : nObj;
// It was in the cache.
++hits;
}
} }
JLOG(j_.debug()) << "fetchBatch - cache hits = " std::vector<std::shared_ptr<NodeObject>> results{hashes.size()};
<< (hashes.size() - cacheMisses.size()) results = backend_->fetchBatch(batch).first;
<< " - cache misses = " << cacheMisses.size(); for (size_t i = 0; i < results.size(); ++i)
auto dbResults = backend_->fetchBatch(cacheMisses).first;
for (size_t i = 0; i < dbResults.size(); ++i)
{ {
auto nObj = std::move(dbResults[i]); if (!results[i])
size_t index = indexMap[cacheMisses[i]];
auto const& hash = hashes[index];
if (nObj)
{
// Ensure all threads get the same object
if (cache_)
cache_->canonicalize_replace_client(hash, nObj);
}
else
{ {
JLOG(j_.error()) JLOG(j_.error())
<< "fetchBatch - " << "fetchBatch - "
<< "record not found in db or cache. hash = " << strHex(hash); << "record not found in db. hash = " << strHex(hashes[i]);
if (cache_)
{
auto notFound = NodeObject::createObject(hotDUMMY, {}, hash);
cache_->canonicalize_replace_client(hash, notFound);
if (notFound->getType() != hotDUMMY)
nObj = std::move(notFound);
}
} }
results[index] = std::move(nObj);
} }
auto fetchDurationUs = auto fetchDurationUs =
std::chrono::duration_cast<std::chrono::microseconds>( std::chrono::duration_cast<std::chrono::microseconds>(
steady_clock::now() - before) steady_clock::now() - before)
.count(); .count();
updateFetchMetrics(fetches, hits, fetchDurationUs); updateFetchMetrics(hashes.size(), 0, fetchDurationUs);
return results; return results;
} }

View File

@@ -45,38 +45,6 @@ public:
: Database(scheduler, readThreads, config, j) : Database(scheduler, readThreads, config, j)
, backend_(std::move(backend)) , backend_(std::move(backend))
{ {
std::optional<int> cacheSize, cacheAge;
if (config.exists("cache_size"))
{
cacheSize = get<int>(config, "cache_size");
if (cacheSize.value() < 0)
{
Throw<std::runtime_error>(
"Specified negative value for cache_size");
}
}
if (config.exists("cache_age"))
{
cacheAge = get<int>(config, "cache_age");
if (cacheAge.value() < 0)
{
Throw<std::runtime_error>(
"Specified negative value for cache_age");
}
}
if (cacheSize != 0 || cacheAge != 0)
{
cache_ = std::make_shared<TaggedCache<uint256, NodeObject>>(
"DatabaseNodeImp",
cacheSize.value_or(0),
std::chrono::minutes(cacheAge.value_or(0)),
stopwatch(),
j);
}
XRPL_ASSERT( XRPL_ASSERT(
backend_, backend_,
"ripple::NodeStore::DatabaseNodeImp::DatabaseNodeImp : non-null " "ripple::NodeStore::DatabaseNodeImp::DatabaseNodeImp : non-null "
@@ -137,9 +105,6 @@ public:
sweep() override; sweep() override;
private: private:
// Cache for database objects. This cache is not always initialized. Check
// for null before using.
std::shared_ptr<TaggedCache<uint256, NodeObject>> cache_;
// Persistent key/value storage // Persistent key/value storage
std::shared_ptr<Backend> backend_; std::shared_ptr<Backend> backend_;

View File

@@ -35,8 +35,8 @@ namespace {
// element must be the number of children in a dense array. // element must be the number of children in a dense array.
constexpr std::array<std::uint8_t, 4> boundaries{ constexpr std::array<std::uint8_t, 4> boundaries{
2, 2,
4, 3,
6, 5,
SHAMapInnerNode::branchFactor}; SHAMapInnerNode::branchFactor};
static_assert( static_assert(
boundaries.size() <= 4, boundaries.size() <= 4,