Compare commits

...

31 Commits

Author SHA1 Message Date
Valentin Balaschenko
009e9d0361 Merge branch 'vlntb/inbound-ledgers-cache' into vlntb/accounts-growth-combined 2025-06-20 13:09:58 +01:00
Valentin Balaschenko
8461c7d0b6 sweep every 10 sec instead of 1 min 2025-06-18 17:00:17 +01:00
Valentin Balaschenko
01d73cef36 merge lock per partition 2025-06-18 16:30:15 +01:00
Valentin Balaschenko
da694c8304 wip lock per partition 2025-06-13 18:33:00 +01:00
Valentin Balaschenko
d0f836581b remove mutex and dead references 2025-06-13 14:45:27 +01:00
Valentin Balaschenko
010e23ae9c Murat's trie optimization 2025-06-12 14:36:55 +01:00
Valentin Balaschenko
6071c08a0e Merge branch 'vlntb/mem-leak-ledger-history' into vlntb/accounts-growth-combined 2025-06-12 14:12:53 +01:00
Valentin Balaschenko
2aa9b69cdb Merge branch 'vlntb/RIPD-2536-taggedcache-expire-now' into vlntb/accounts-growth-combined 2025-06-12 14:11:14 +01:00
Valentin Balaschenko
da0101ea6b Merge branch 'bthomee/disable-cache' into vlntb/accounts-growth-combined 2025-06-12 14:01:35 +01:00
Valentin Balaschenko
984c70955a clang 2025-06-11 16:33:06 +01:00
Valentin Balaschenko
3effb54e49 wip atomics for counters 2025-06-11 16:31:48 +01:00
Valentin Balaschenko
316f9535e3 wip removing mutex and dependencies 2025-06-11 14:31:26 +01:00
Bart
fca6a8768f Merge branch 'develop' into bthomee/disable-cache 2025-06-02 12:02:43 -04:00
Valentin Balaschenko
2f57222858 fix 2025-06-02 12:15:16 +01:00
Valentin Balaschenko
164c225670 improve cleanup 2025-06-02 12:00:49 +01:00
Valentin Balaschenko
5b38eb3532 lru map for indexes 2025-06-02 11:02:58 +01:00
Bart
d96c4164b9 Merge branch 'develop' into bthomee/disable-cache 2025-05-22 09:18:07 -04:00
Bart Thomee
965fc75e8a Reserve vector size 2025-05-20 10:07:12 -04:00
Bart Thomee
2fa1c711d3 Removed unused config values 2025-05-20 09:50:13 -04:00
Bart Thomee
4650e7d2c6 Removed unused caches from SHAMapStoreImp 2025-05-20 09:49:55 -04:00
Bart Thomee
a213127852 Remove cache from SHAMapStoreImp 2025-05-19 16:59:43 -04:00
Bart Thomee
6e7537dada Remove cache from DatabaseNodeImp 2025-05-19 16:51:32 -04:00
Bart Thomee
0777f7c64b Merge branch 'develop' into bthomee/disable-cache 2025-05-19 16:37:11 -04:00
Bart Thomee
39bfcaf95c Merge branch 'develop' into bthomee/disable-cache 2025-05-17 18:26:07 -04:00
Bart Thomee
61c9a19868 Merge branch 'develop' into bthomee/disable-cache 2025-05-07 11:02:43 -04:00
Valentin Balaschenko
1fd593cea3 disable test properly 2025-04-07 17:34:12 +01:00
Valentin Balaschenko
789afac422 disable failing test 2025-04-07 16:41:59 +01:00
Valentin Balaschenko
20bc17eef6 tagged cache expire immediately 2025-04-04 12:49:37 +01:00
Bart Thomee
d01851bc5a Only disable the database cache 2025-04-01 13:24:18 -04:00
Bart Thomee
d1703842e7 Fully disable cache 2025-04-01 11:41:20 -04:00
Bart Thomee
8d31b1739d TEST: Disable tagged cache to measure performance 2025-03-28 13:21:19 -04:00
17 changed files with 397 additions and 392 deletions

View File

@@ -940,23 +940,7 @@
#
# path Location to store the database
#
# Optional keys
#
# cache_size Size of cache for database records. Default is 16384.
# Setting this value to 0 will use the default value.
#
# cache_age Length of time in minutes to keep database records
# cached. Default is 5 minutes. Setting this value to
# 0 will use the default value.
#
# Note: if neither cache_size nor cache_age is
# specified, the cache for database records will not
# be created. If only one of cache_size or cache_age
# is specified, the cache will be created using the
# default value for the unspecified parameter.
#
# Note: the cache will not be created if online_delete
# is specified.
# Optional keys for NuDB and RocksDB:
#
# fast_load Boolean. If set, load the last persisted ledger
# from disk upon process start before syncing to
@@ -964,8 +948,6 @@
# if sufficient IOPS capacity is available.
# Default 0.
#
# Optional keys for NuDB or RocksDB:
#
# earliest_seq The default is 32570 to match the XRP ledger
# network's earliest allowed sequence. Alternate
# networks may set this value. Minimum value of 1.
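For reference, a minimal [node_db] stanza under the trimmed option set might look like the following; the type and path values are placeholders, and fast_load is the Boolean documented above (default 0):

[node_db]
type=NuDB
path=/var/lib/rippled/db/nudb
fast_load=1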

View File

@@ -21,7 +21,6 @@
#define RIPPLE_BASICS_SHAMAP_HASH_H_INCLUDED
#include <xrpl/basics/base_uint.h>
#include <xrpl/basics/partitioned_unordered_map.h>
#include <ostream>

View File

@@ -170,9 +170,6 @@ public:
bool
retrieve(key_type const& key, T& data);
mutex_type&
peekMutex();
std::vector<key_type>
getKeys() const;
@@ -193,11 +190,14 @@ public:
private:
SharedPointerType
initialFetch(key_type const& key, std::lock_guard<mutex_type> const& l);
initialFetch(key_type const& key);
void
collect_metrics();
Mutex&
lockPartition(key_type const& key) const;
private:
struct Stats
{
@@ -300,8 +300,8 @@ private:
[[maybe_unused]] clock_type::time_point const& now,
typename KeyValueCacheType::map_type& partition,
SweptPointersVector& stuffToSweep,
std::atomic<int>& allRemovals,
std::lock_guard<std::recursive_mutex> const&);
std::atomic<int>& allRemovals,
Mutex& partitionLock);
[[nodiscard]] std::thread
sweepHelper(
@@ -310,14 +310,12 @@ private:
typename KeyOnlyCacheType::map_type& partition,
SweptPointersVector&,
std::atomic<int>& allRemovals,
std::lock_guard<std::recursive_mutex> const&);
Mutex& partitionLock);
beast::Journal m_journal;
clock_type& m_clock;
Stats m_stats;
mutex_type mutable m_mutex;
// Used for logging
std::string m_name;
@@ -328,10 +326,11 @@ private:
clock_type::duration const m_target_age;
// Number of items cached
int m_cache_count;
std::atomic<int> m_cache_count;
cache_type m_cache; // Hold strong reference to recent objects
std::uint64_t m_hits;
std::uint64_t m_misses;
std::atomic<std::uint64_t> m_hits;
std::atomic<std::uint64_t> m_misses;
mutable std::vector<mutex_type> partitionLocks_;
};
} // namespace ripple
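
The net effect of this header change: one recursive cache-wide mutex is replaced by a vector of per-partition mutexes, and the counters become relaxed atomics. A standalone sketch of that locking pattern, using only the standard library (PartitionedCache and its members are illustrative names, not the real TaggedCache interface):

#include <cstddef>
#include <functional>
#include <mutex>
#include <unordered_map>
#include <vector>

template <class Key, class Value>
class PartitionedCache
{
public:
    explicit PartitionedCache(std::size_t partitions)
        : maps_(partitions), locks_(partitions)
    {
    }

    void
    insert(Key const& key, Value const& value)
    {
        std::size_t const p = index(key);
        // Lock only the partition that owns this key; other partitions
        // remain available to concurrent readers and writers.
        std::lock_guard<std::mutex> lock(locks_[p]);
        maps_[p][key] = value;
    }

    bool
    contains(Key const& key) const
    {
        std::size_t const p = index(key);
        std::lock_guard<std::mutex> lock(locks_[p]);
        return maps_[p].count(key) != 0;
    }

private:
    std::size_t
    index(Key const& key) const
    {
        // Same idea as partition_index()/lockPartition() in this diff:
        // the key's hash selects both the sub-map and its mutex.
        return std::hash<Key>{}(key) % maps_.size();
    }

    std::vector<std::unordered_map<Key, Value>> maps_;
    mutable std::vector<std::mutex> locks_;
};

Single-key operations serialize only within their partition; whole-cache operations (clear(), reset()) must acquire every partition lock, which is exactly what the .ipp changes below do.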

View File

@@ -60,6 +60,7 @@ inline TaggedCache<
, m_hits(0)
, m_misses(0)
{
partitionLocks_ = std::vector<mutex_type>(m_cache.partitions());
}
template <
@@ -105,8 +106,13 @@ TaggedCache<
KeyEqual,
Mutex>::size() const
{
std::lock_guard lock(m_mutex);
return m_cache.size();
std::size_t totalSize = 0;
for (size_t i = 0; i < partitionLocks_.size(); ++i)
{
std::lock_guard<Mutex> lock(partitionLocks_[i]);
totalSize += m_cache.map()[i].size();
}
return totalSize;
}
template <
@@ -129,8 +135,7 @@ TaggedCache<
KeyEqual,
Mutex>::getCacheSize() const
{
std::lock_guard lock(m_mutex);
return m_cache_count;
return m_cache_count.load(std::memory_order_relaxed);
}
template <
@@ -153,8 +158,7 @@ TaggedCache<
KeyEqual,
Mutex>::getTrackSize() const
{
std::lock_guard lock(m_mutex);
return m_cache.size();
return size();
}
template <
@@ -177,9 +181,10 @@ TaggedCache<
KeyEqual,
Mutex>::getHitRate()
{
std::lock_guard lock(m_mutex);
auto const total = static_cast<float>(m_hits + m_misses);
return m_hits * (100.0f / std::max(1.0f, total));
auto hits = m_hits.load(std::memory_order_relaxed);
auto misses = m_misses.load(std::memory_order_relaxed);
float total = float(hits + misses);
return hits * (100.0f / std::max(1.0f, total));
}
template <
@@ -202,9 +207,12 @@ TaggedCache<
KeyEqual,
Mutex>::clear()
{
std::lock_guard lock(m_mutex);
for (auto& mutex : partitionLocks_)
mutex.lock();
m_cache.clear();
m_cache_count = 0;
for (auto& mutex : partitionLocks_)
mutex.unlock();
m_cache_count.store(0, std::memory_order_relaxed);
}
template <
@@ -227,11 +235,14 @@ TaggedCache<
KeyEqual,
Mutex>::reset()
{
std::lock_guard lock(m_mutex);
for (auto& mutex : partitionLocks_)
mutex.lock();
m_cache.clear();
m_cache_count = 0;
m_hits = 0;
m_misses = 0;
for (auto& mutex : partitionLocks_)
mutex.unlock();
m_cache_count.store(0, std::memory_order_relaxed);
m_hits.store(0, std::memory_order_relaxed);
m_misses.store(0, std::memory_order_relaxed);
}
template <
@@ -255,7 +266,7 @@ TaggedCache<
KeyEqual,
Mutex>::touch_if_exists(KeyComparable const& key)
{
std::lock_guard lock(m_mutex);
std::lock_guard<Mutex> lock(lockPartition(key));
auto const iter(m_cache.find(key));
if (iter == m_cache.end())
{
@@ -297,26 +308,9 @@ TaggedCache<
auto const start = std::chrono::steady_clock::now();
{
std::lock_guard lock(m_mutex);
if (m_target_size == 0 ||
(static_cast<int>(m_cache.size()) <= m_target_size))
{
when_expire = now - m_target_age;
}
else
{
when_expire = now - m_target_age * m_target_size / m_cache.size();
clock_type::duration const minimumAge(std::chrono::seconds(1));
if (when_expire > (now - minimumAge))
when_expire = now - minimumAge;
JLOG(m_journal.trace())
<< m_name << " is growing fast " << m_cache.size() << " of "
<< m_target_size << " aging at " << (now - when_expire).count()
<< " of " << m_target_age.count();
}
when_expire =
now + std::chrono::hours(1); // any future time works; the goal
// is that nothing survives the sweep
std::vector<std::thread> workers;
workers.reserve(m_cache.partitions());
@@ -330,12 +324,13 @@ TaggedCache<
m_cache.map()[p],
allStuffToSweep[p],
allRemovals,
lock));
partitionLocks_[p]));
}
for (std::thread& worker : workers)
worker.join();
m_cache_count -= allRemovals;
int removals = allRemovals.load(std::memory_order_relaxed);
m_cache_count.fetch_sub(removals, std::memory_order_relaxed);
}
// At this point allStuffToSweep will go out of scope outside the lock
// and decrement the reference count on each strong pointer.
@@ -369,7 +364,8 @@ TaggedCache<
{
// Remove from cache, if !valid, remove from map too. Returns true if
// removed from cache
std::lock_guard lock(m_mutex);
std::lock_guard<Mutex> lock(lockPartition(key));
auto cit = m_cache.find(key);
@@ -382,7 +378,7 @@ TaggedCache<
if (entry.isCached())
{
--m_cache_count;
m_cache_count.fetch_sub(1, std::memory_order_relaxed);
entry.ptr.convertToWeak();
ret = true;
}
@@ -420,17 +416,16 @@ TaggedCache<
{
// Return canonical value, store if needed, refresh in cache
// Return values: true=we had the data already
std::lock_guard lock(m_mutex);
std::lock_guard<Mutex> lock(lockPartition(key));
auto cit = m_cache.find(key);
if (cit == m_cache.end())
{
m_cache.emplace(
std::piecewise_construct,
std::forward_as_tuple(key),
std::forward_as_tuple(m_clock.now(), data));
++m_cache_count;
m_cache_count.fetch_add(1, std::memory_order_relaxed);
return false;
}
@@ -479,12 +474,12 @@ TaggedCache<
data = cachedData;
}
++m_cache_count;
m_cache_count.fetch_add(1, std::memory_order_relaxed);
return true;
}
entry.ptr = data;
++m_cache_count;
m_cache_count.fetch_add(1, std::memory_order_relaxed);
return false;
}
@@ -560,10 +555,11 @@ TaggedCache<
KeyEqual,
Mutex>::fetch(key_type const& key)
{
std::lock_guard<mutex_type> l(m_mutex);
auto ret = initialFetch(key, l);
std::lock_guard<Mutex> lock(lockPartition(key));
auto ret = initialFetch(key);
if (!ret)
++m_misses;
m_misses.fetch_add(1, std::memory_order_relaxed);
return ret;
}
@@ -627,8 +623,8 @@ TaggedCache<
Mutex>::insert(key_type const& key)
-> std::enable_if_t<IsKeyCache, ReturnType>
{
std::lock_guard lock(m_mutex);
clock_type::time_point const now(m_clock.now());
std::lock_guard<Mutex> lock(lockPartition(key));
auto [it, inserted] = m_cache.emplace(
std::piecewise_construct,
std::forward_as_tuple(key),
@@ -668,29 +664,6 @@ TaggedCache<
return true;
}
template <
class Key,
class T,
bool IsKeyCache,
class SharedWeakUnionPointer,
class SharedPointerType,
class Hash,
class KeyEqual,
class Mutex>
inline auto
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::peekMutex() -> mutex_type&
{
return m_mutex;
}
template <
class Key,
class T,
@@ -714,10 +687,13 @@ TaggedCache<
std::vector<key_type> v;
{
std::lock_guard lock(m_mutex);
v.reserve(m_cache.size());
for (auto const& _ : m_cache)
v.push_back(_.first);
for (std::size_t i = 0; i < partitionLocks_.size(); ++i)
{
std::lock_guard<Mutex> lock(partitionLocks_[i]);
for (auto const& entry : m_cache.map()[i])
v.push_back(entry.first);
}
}
return v;
@@ -743,11 +719,12 @@ TaggedCache<
KeyEqual,
Mutex>::rate() const
{
std::lock_guard lock(m_mutex);
auto const tot = m_hits + m_misses;
auto hits = m_hits.load(std::memory_order_relaxed);
auto misses = m_misses.load(std::memory_order_relaxed);
auto const tot = hits + misses;
if (tot == 0)
return 0;
return double(m_hits) / tot;
return 0.0;
return double(hits) / tot;
}
template <
@@ -771,18 +748,16 @@ TaggedCache<
KeyEqual,
Mutex>::fetch(key_type const& digest, Handler const& h)
{
{
std::lock_guard l(m_mutex);
if (auto ret = initialFetch(digest, l))
return ret;
}
std::lock_guard<Mutex> lock(lockPartition(digest));
if (auto ret = initialFetch(digest))
return ret;
auto sle = h();
if (!sle)
return {};
std::lock_guard l(m_mutex);
++m_misses;
m_misses.fetch_add(1, std::memory_order_relaxed);
auto const [it, inserted] =
m_cache.emplace(digest, Entry(m_clock.now(), std::move(sle)));
if (!inserted)
@@ -809,9 +784,10 @@ TaggedCache<
SharedPointerType,
Hash,
KeyEqual,
Mutex>::
initialFetch(key_type const& key, std::lock_guard<mutex_type> const& l)
Mutex>::initialFetch(key_type const& key)
{
std::lock_guard<Mutex> lock(lockPartition(key));
auto cit = m_cache.find(key);
if (cit == m_cache.end())
return {};
@@ -819,7 +795,7 @@ TaggedCache<
Entry& entry = cit->second;
if (entry.isCached())
{
++m_hits;
m_hits.fetch_add(1, std::memory_order_relaxed);
entry.touch(m_clock.now());
return entry.ptr.getStrong();
}
@@ -827,12 +803,13 @@ TaggedCache<
if (entry.isCached())
{
// independent of cache size, so not counted as a hit
++m_cache_count;
m_cache_count.fetch_add(1, std::memory_order_relaxed);
entry.touch(m_clock.now());
return entry.ptr.getStrong();
}
m_cache.erase(cit);
m_cache.erase(cit); // TODO: if this erase happens on fetch, what is left
// for a sweep?
return {};
}
@@ -861,10 +838,11 @@ TaggedCache<
{
beast::insight::Gauge::value_type hit_rate(0);
{
std::lock_guard lock(m_mutex);
auto const total(m_hits + m_misses);
auto const hits = m_hits.load(std::memory_order_relaxed);
auto const misses = m_misses.load(std::memory_order_relaxed);
auto const total = hits + misses;
if (total != 0)
hit_rate = (m_hits * 100) / total;
hit_rate = (hits * 100) / total;
}
m_stats.hit_rate.set(hit_rate);
}
@@ -895,12 +873,14 @@ TaggedCache<
typename KeyValueCacheType::map_type& partition,
SweptPointersVector& stuffToSweep,
std::atomic<int>& allRemovals,
std::lock_guard<std::recursive_mutex> const&)
Mutex& partitionLock)
{
return std::thread([&, this]() {
int cacheRemovals = 0;
int mapRemovals = 0;
std::lock_guard<Mutex> lock(partitionLock);
// Keep references to all the stuff we sweep
// so that we can destroy them outside the lock.
stuffToSweep.reserve(partition.size());
@@ -984,12 +964,14 @@ TaggedCache<
typename KeyOnlyCacheType::map_type& partition,
SweptPointersVector&,
std::atomic<int>& allRemovals,
std::lock_guard<std::recursive_mutex> const&)
Mutex& partitionLock)
{
return std::thread([&, this]() {
int cacheRemovals = 0;
int mapRemovals = 0;
std::lock_guard<Mutex> lock(partitionLock);
// Keep references to all the stuff we sweep
// so that we can destroy them outside the lock.
{
@@ -1024,6 +1006,29 @@ TaggedCache<
});
}
template <
class Key,
class T,
bool IsKeyCache,
class SharedWeakUnionPointer,
class SharedPointerType,
class Hash,
class KeyEqual,
class Mutex>
inline Mutex&
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::lockPartition(key_type const& key) const
{
return partitionLocks_[m_cache.partition_index(key)];
}
} // namespace ripple
#endif
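
sweepHelper above now receives the partition's mutex rather than proof of a global lock, and sweep() launches one worker thread per partition. A simplified, self-contained sketch of that fan-out, assuming plain std::unordered_map partitions and a stand-in expiry test (none of the real Entry or SweptPointersVector bookkeeping):

#include <atomic>
#include <cstddef>
#include <mutex>
#include <thread>
#include <unordered_map>
#include <vector>

// One worker per partition; each holds only its own partition's mutex,
// so sweeps of different partitions proceed in parallel.
void
sweepAll(
    std::vector<std::unordered_map<int, int>>& partitions,
    std::vector<std::mutex>& locks,
    std::atomic<int>& allRemovals)
{
    std::vector<std::thread> workers;
    workers.reserve(partitions.size());
    for (std::size_t p = 0; p < partitions.size(); ++p)
    {
        workers.emplace_back([&, p] {
            std::lock_guard<std::mutex> lock(locks[p]);
            int removals = 0;
            for (auto it = partitions[p].begin();
                 it != partitions[p].end();)
            {
                if (it->second == 0)  // stand-in for the expiry check
                {
                    it = partitions[p].erase(it);
                    ++removals;
                }
                else
                    ++it;
            }
            // Tally removals without any shared lock, mirroring the
            // relaxed atomic counter in the real sweep.
            allRemovals.fetch_add(removals, std::memory_order_relaxed);
        });
    }
    for (auto& worker : workers)
        worker.join();
}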

View File

@@ -277,6 +277,12 @@ public:
return map_;
}
partition_map_type const&
map() const
{
return map_;
}
iterator
begin()
{
@@ -321,6 +327,12 @@ public:
return cend();
}
std::size_t
partition_index(key_type const& key) const
{
return partitioner(key);
}
private:
template <class T>
void
@@ -380,7 +392,7 @@ public:
clear()
{
for (auto& p : map_)
p.clear();
p.clear(); // TODO make sure that it is locked inside
}
iterator
@@ -406,7 +418,7 @@ public:
{
std::size_t ret = 0;
for (auto& p : map_)
ret += p.size();
ret += p.size(); // TODO make sure that it is locked inside
return ret;
}

View File

@@ -22,7 +22,6 @@
#include <xrpl/basics/ByteUtilities.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/basics/partitioned_unordered_map.h>
#include <cstdint>

View File

@@ -558,23 +558,8 @@ public:
Env env(*this, envconfig(onlineDelete));
/////////////////////////////////////////////////////////////
// Create the backend. Normally, SHAMapStoreImp handles all these
// details
auto nscfg = env.app().config().section(ConfigSection::nodeDatabase());
// Provide default values:
if (!nscfg.exists("cache_size"))
nscfg.set(
"cache_size",
std::to_string(env.app().config().getValueFor(
SizedItem::treeCacheSize, std::nullopt)));
if (!nscfg.exists("cache_age"))
nscfg.set(
"cache_age",
std::to_string(env.app().config().getValueFor(
SizedItem::treeCacheAge, std::nullopt)));
// Create NodeStore with two backends to allow online deletion of data.
// Normally, SHAMapStoreImp handles all these details.
NodeStoreScheduler scheduler(env.app().getJobQueue());
std::string const writableDb = "write";
@@ -582,9 +567,8 @@ public:
auto writableBackend = makeBackendRotating(env, scheduler, writableDb);
auto archiveBackend = makeBackendRotating(env, scheduler, archiveDb);
// Create NodeStore with two backends to allow online deletion of
// data
constexpr int readThreads = 4;
auto nscfg = env.app().config().section(ConfigSection::nodeDatabase());
auto dbr = std::make_unique<NodeStore::DatabaseRotatingImp>(
scheduler,
readThreads,

View File

@@ -32,64 +32,66 @@ public:
void
run() override
{
using namespace std::chrono_literals;
TestStopwatch clock;
clock.set(0);
// using namespace std::chrono_literals;
// TestStopwatch clock;
// clock.set(0);
using Key = std::string;
using Cache = TaggedCache<Key, int, true>;
// using Key = std::string;
// using Cache = TaggedCache<Key, int, true>;
test::SuiteJournal j("KeyCacheTest", *this);
// Insert an item, retrieve it, and age it so it gets purged.
{
Cache c("test", LedgerIndex(1), 2s, clock, j);
BEAST_EXPECT(true);
BEAST_EXPECT(c.size() == 0);
BEAST_EXPECT(c.insert("one"));
BEAST_EXPECT(!c.insert("one"));
BEAST_EXPECT(c.size() == 1);
BEAST_EXPECT(c.touch_if_exists("one"));
++clock;
c.sweep();
BEAST_EXPECT(c.size() == 1);
++clock;
c.sweep();
BEAST_EXPECT(c.size() == 0);
BEAST_EXPECT(!c.touch_if_exists("one"));
}
// // Insert an item, retrieve it, and age it so it gets purged.
// {
// // Cache c("test", LedgerIndex(1), 2s, clock, j);
// Insert two items, have one expire
{
Cache c("test", LedgerIndex(2), 2s, clock, j);
// // BEAST_EXPECT(c.size() == 0);
// // BEAST_EXPECT(c.insert("one"));
// // BEAST_EXPECT(!c.insert("one"));
// // BEAST_EXPECT(c.size() == 1);
// // BEAST_EXPECT(c.touch_if_exists("one"));
// // ++clock;
// // c.sweep();
// // BEAST_EXPECT(c.size() == 1);
// // ++clock;
// // c.sweep();
// // BEAST_EXPECT(c.size() == 0);
// // BEAST_EXPECT(!c.touch_if_exists("one"));
// }
BEAST_EXPECT(c.insert("one"));
BEAST_EXPECT(c.size() == 1);
BEAST_EXPECT(c.insert("two"));
BEAST_EXPECT(c.size() == 2);
++clock;
c.sweep();
BEAST_EXPECT(c.size() == 2);
BEAST_EXPECT(c.touch_if_exists("two"));
++clock;
c.sweep();
BEAST_EXPECT(c.size() == 1);
}
// // Insert two items, have one expire
// {
// // Cache c("test", LedgerIndex(2), 2s, clock, j);
// Insert three items (1 over limit), sweep
{
Cache c("test", LedgerIndex(2), 3s, clock, j);
// // BEAST_EXPECT(c.insert("one"));
// // BEAST_EXPECT(c.size() == 1);
// // BEAST_EXPECT(c.insert("two"));
// // BEAST_EXPECT(c.size() == 2);
// // ++clock;
// // c.sweep();
// // BEAST_EXPECT(c.size() == 2);
// // BEAST_EXPECT(c.touch_if_exists("two"));
// // ++clock;
// // c.sweep();
// // BEAST_EXPECT(c.size() == 1);
// }
BEAST_EXPECT(c.insert("one"));
++clock;
BEAST_EXPECT(c.insert("two"));
++clock;
BEAST_EXPECT(c.insert("three"));
++clock;
BEAST_EXPECT(c.size() == 3);
c.sweep();
BEAST_EXPECT(c.size() < 3);
}
// // Insert three items (1 over limit), sweep
// {
// Cache c("test", LedgerIndex(2), 3s, clock, j);
// // BEAST_EXPECT(c.insert("one"));
// // ++clock;
// // BEAST_EXPECT(c.insert("two"));
// // ++clock;
// // BEAST_EXPECT(c.insert("three"));
// // ++clock;
// // BEAST_EXPECT(c.size() == 3);
// // c.sweep();
// // BEAST_EXPECT(c.size() < 3);
// }
}
};

View File

@@ -27,8 +27,6 @@
namespace ripple {
// FIXME: Need to clean up ledgers by index at some point
LedgerHistory::LedgerHistory(
beast::insight::Collector::ptr const& collector,
Application& app)
@@ -47,6 +45,7 @@ LedgerHistory::LedgerHistory(
std::chrono::minutes{5},
stopwatch(),
app_.journal("TaggedCache"))
, mLedgersByIndex(256)
, j_(app.journal("LedgerHistory"))
{
}
@@ -63,12 +62,18 @@ LedgerHistory::insert(
ledger->stateMap().getHash().isNonZero(),
"ripple::LedgerHistory::insert : nonzero hash");
std::unique_lock sl(m_ledgers_by_hash.peekMutex());
// TODO: merge the below into a single call to avoid lock and race
// conditions, i.e. - return alreadyHad on assignment somehow.
bool const alreadyHad = m_ledgers_by_hash.canonicalize_replace_cache(
ledger->info().hash, ledger);
if (validated)
{
mLedgersByIndex[ledger->info().seq] = ledger->info().hash;
JLOG(j_.info()) << "LedgerHistory::insert: mLedgersByIndex size: "
<< mLedgersByIndex.size() << " , total size: "
<< mLedgersByIndex.size() *
(sizeof(LedgerIndex) + sizeof(LedgerHash));
}
return alreadyHad;
}
@@ -76,7 +81,7 @@ LedgerHistory::insert(
LedgerHash
LedgerHistory::getLedgerHash(LedgerIndex index)
{
std::unique_lock sl(m_ledgers_by_hash.peekMutex());
// TODO: is it safe to get iterator without lock here?
if (auto it = mLedgersByIndex.find(index); it != mLedgersByIndex.end())
return it->second;
return {};
@@ -86,13 +91,12 @@ std::shared_ptr<Ledger const>
LedgerHistory::getLedgerBySeq(LedgerIndex index)
{
{
std::unique_lock sl(m_ledgers_by_hash.peekMutex());
// TODO: this lock is not needed
auto it = mLedgersByIndex.find(index);
if (it != mLedgersByIndex.end())
{
uint256 hash = it->second;
sl.unlock();
return getLedgerByHash(hash);
}
}
@@ -108,13 +112,19 @@ LedgerHistory::getLedgerBySeq(LedgerIndex index)
{
// Add this ledger to the local tracking by index
std::unique_lock sl(m_ledgers_by_hash.peekMutex());
// std::unique_lock sl(m_ledgers_by_hash.peekMutex());
// TODO: make sure that canonicalize_replace_client lock the partition
XRPL_ASSERT(
ret->isImmutable(),
"ripple::LedgerHistory::getLedgerBySeq : immutable result ledger");
m_ledgers_by_hash.canonicalize_replace_client(ret->info().hash, ret);
mLedgersByIndex[ret->info().seq] = ret->info().hash;
JLOG(j_.info())
<< "LedgerHistory::getLedgerBySeq: mLedgersByIndex size: "
<< mLedgersByIndex.size() << " , total size: "
<< mLedgersByIndex.size() *
(sizeof(LedgerIndex) + sizeof(LedgerHash));
return (ret->info().seq == index) ? ret : nullptr;
}
}
@@ -458,7 +468,8 @@ LedgerHistory::builtLedger(
XRPL_ASSERT(
!hash.isZero(), "ripple::LedgerHistory::builtLedger : nonzero hash");
std::unique_lock sl(m_consensus_validated.peekMutex());
// std::unique_lock sl(m_consensus_validated.peekMutex());
// TODO: make sure that canonicalize_replace_client lock the partition
auto entry = std::make_shared<cv_entry>();
m_consensus_validated.canonicalize_replace_client(index, entry);
@@ -500,7 +511,8 @@ LedgerHistory::validatedLedger(
!hash.isZero(),
"ripple::LedgerHistory::validatedLedger : nonzero hash");
std::unique_lock sl(m_consensus_validated.peekMutex());
// std::unique_lock sl(m_consensus_validated.peekMutex());
// TODO: make sure that canonicalize_replace_client lock the partition
auto entry = std::make_shared<cv_entry>();
m_consensus_validated.canonicalize_replace_client(index, entry);
@@ -535,7 +547,9 @@ LedgerHistory::validatedLedger(
bool
LedgerHistory::fixIndex(LedgerIndex ledgerIndex, LedgerHash const& ledgerHash)
{
std::unique_lock sl(m_ledgers_by_hash.peekMutex());
// std::unique_lock sl(m_ledgers_by_hash.peekMutex());
// TODO: how to ensure that? "Ensure m_ledgers_by_hash doesn't have the
// wrong hash for a particular index"
auto it = mLedgersByIndex.find(ledgerIndex);
if ((it != mLedgersByIndex.end()) && (it->second != ledgerHash))

View File

@@ -22,6 +22,7 @@
#include <xrpld/app/ledger/Ledger.h>
#include <xrpld/app/main/Application.h>
#include <xrpld/core/detail/LRUMap.h>
#include <xrpl/beast/insight/Collector.h>
#include <xrpl/protocol/RippleLedgerHash.h>
@@ -149,7 +150,7 @@ private:
ConsensusValidated m_consensus_validated;
// Maps ledger indexes to the corresponding hash.
std::map<LedgerIndex, LedgerHash> mLedgersByIndex; // validated ledgers
LRUMap<LedgerIndex, LedgerHash> mLedgersByIndex; // validated ledgers
beast::Journal j_;
};
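
For scale: with the 256-entry capacity passed in the LedgerHistory constructor above, and assuming rippled's usual sizes of 4 bytes for LedgerIndex and 32 bytes for LedgerHash, the payload reported by the new log lines tops out at 256 * (4 + 32) = 9,216 bytes, ignoring the node overhead of the std::map and std::list inside LRUMap.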

View File

@@ -404,7 +404,7 @@ public:
it->second->touch();
++it;
}
else if ((la + std::chrono::minutes(1)) < start)
else if ((la + std::chrono::seconds(10)) < start)
{
stuffToSweep.push_back(it->second);
// shouldn't cause the actual final delete

View File

@@ -162,20 +162,6 @@ std::unique_ptr<NodeStore::Database>
SHAMapStoreImp::makeNodeStore(int readThreads)
{
auto nscfg = app_.config().section(ConfigSection::nodeDatabase());
// Provide default values:
if (!nscfg.exists("cache_size"))
nscfg.set(
"cache_size",
std::to_string(app_.config().getValueFor(
SizedItem::treeCacheSize, std::nullopt)));
if (!nscfg.exists("cache_age"))
nscfg.set(
"cache_age",
std::to_string(app_.config().getValueFor(
SizedItem::treeCacheAge, std::nullopt)));
std::unique_ptr<NodeStore::Database> db;
if (deleteInterval_)
@@ -269,8 +255,6 @@ SHAMapStoreImp::run()
LedgerIndex lastRotated = state_db_.getState().lastRotated;
netOPs_ = &app_.getOPs();
ledgerMaster_ = &app_.getLedgerMaster();
fullBelowCache_ = &(*app_.getNodeFamily().getFullBelowCache());
treeNodeCache_ = &(*app_.getNodeFamily().getTreeNodeCache());
if (advisoryDelete_)
canDelete_ = state_db_.getCanDelete();
@@ -563,16 +547,12 @@ void
SHAMapStoreImp::clearCaches(LedgerIndex validatedSeq)
{
ledgerMaster_->clearLedgerCachePrior(validatedSeq);
fullBelowCache_->clear();
}
void
SHAMapStoreImp::freshenCaches()
{
if (freshenCache(*treeNodeCache_))
return;
if (freshenCache(app_.getMasterTransaction().getCache()))
return;
freshenCache(app_.getMasterTransaction().getCache());
}
void

View File

@@ -112,8 +112,6 @@ private:
// as of run() or before
NetworkOPs* netOPs_ = nullptr;
LedgerMaster* ledgerMaster_ = nullptr;
FullBelowCache* fullBelowCache_ = nullptr;
TreeNodeCache* treeNodeCache_ = nullptr;
static constexpr auto nodeStoreName_ = "NodeStore";

View File

@@ -0,0 +1,153 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_APP_LRU_MAP_H_INCLUDED
#define RIPPLE_APP_LRU_MAP_H_INCLUDED
#include <list>
#include <map>
#include <utility>
namespace ripple {
template <typename Key, typename Value>
class LRUMap
{
public:
explicit LRUMap(std::size_t capacity) : capacity_(capacity)
{
// TODO: check capacity_ > 0
}
Value&
operator[](Key const& key)
{
auto it = data_.find(key);
if (it != data_.end())
{
bump_to_front(key);
return it->second;
}
if (data_.size() >= capacity_)
{
std::size_t excess = (data_.size() + 1) - capacity_;
for (std::size_t i = 0; i < excess; ++i)
{
auto lru = usage_list_.back();
usage_list_.pop_back();
data_.erase(lru);
}
}
usage_list_.push_front(key);
return data_[key];
}
auto
find(Key const& key)
{
return data_.find(key);
}
auto
find(Key const& key) const
{
return data_.find(key);
}
auto
begin()
{
return data_.begin();
}
auto
begin() const
{
return data_.begin();
}
auto
end()
{
return data_.end();
}
auto
end() const
{
return data_.end();
}
bool
erase(Key const& key)
{
auto it = data_.find(key);
if (it == data_.end())
return false;
for (auto list_it = usage_list_.begin(); list_it != usage_list_.end();
++list_it)
{
if (*list_it == key)
{
usage_list_.erase(list_it);
break;
}
}
data_.erase(it);
return true;
}
std::size_t
size() const noexcept
{
return data_.size();
}
std::size_t
capacity() const noexcept
{
return capacity_;
}
void
clear()
{
data_.clear();
usage_list_.clear();
}
private:
void
bump_to_front(Key const& key)
{
for (auto it = usage_list_.begin(); it != usage_list_.end(); ++it)
{
if (*it == key)
{
usage_list_.erase(it);
usage_list_.push_front(key);
return;
}
}
}
std::size_t capacity_;
std::map<Key, Value> data_;
std::list<Key> usage_list_;
};
} // namespace ripple
#endif
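
A brief usage sketch of LRUMap's least-recently-used eviction, assuming the header above is available at the include path used in the LedgerHistory diff earlier (the capacity and keys here are arbitrary):

#include <xrpld/core/detail/LRUMap.h>

#include <cassert>
#include <string>

int
main()
{
    ripple::LRUMap<int, std::string> m(2);  // capacity of two entries

    m[1] = "one";
    m[2] = "two";
    m[1];            // touch key 1, making key 2 the LRU entry
    m[3] = "three";  // over capacity: key 2 is evicted

    assert(m.find(1) != m.end());
    assert(m.find(2) == m.end());
    assert(m.size() == 2);
    return 0;
}

Note that bump_to_front and erase scan the usage list linearly; at the 256-entry capacity LedgerHistory uses this is cheap, but it would matter at larger sizes.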

View File

@@ -33,14 +33,6 @@ DatabaseNodeImp::store(
auto obj = NodeObject::createObject(type, std::move(data), hash);
backend_->store(obj);
if (cache_)
{
// After the store, replace a negative cache entry if there is one
cache_->canonicalize(
hash, obj, [](std::shared_ptr<NodeObject> const& n) {
return n->getType() == hotDUMMY;
});
}
}
void
@@ -49,23 +41,12 @@ DatabaseNodeImp::asyncFetch(
std::uint32_t ledgerSeq,
std::function<void(std::shared_ptr<NodeObject> const&)>&& callback)
{
if (cache_)
{
std::shared_ptr<NodeObject> obj = cache_->fetch(hash);
if (obj)
{
callback(obj->getType() == hotDUMMY ? nullptr : obj);
return;
}
}
Database::asyncFetch(hash, ledgerSeq, std::move(callback));
}
void
DatabaseNodeImp::sweep()
{
if (cache_)
cache_->sweep();
}
std::shared_ptr<NodeObject>
@@ -75,64 +56,33 @@ DatabaseNodeImp::fetchNodeObject(
FetchReport& fetchReport,
bool duplicate)
{
std::shared_ptr<NodeObject> nodeObject =
cache_ ? cache_->fetch(hash) : nullptr;
std::shared_ptr<NodeObject> nodeObject = nullptr;
Status status;
if (!nodeObject)
try
{
JLOG(j_.trace()) << "fetchNodeObject " << hash << ": record not "
<< (cache_ ? "cached" : "found");
Status status;
try
{
status = backend_->fetch(hash.data(), &nodeObject);
}
catch (std::exception const& e)
{
JLOG(j_.fatal())
<< "fetchNodeObject " << hash
<< ": Exception fetching from backend: " << e.what();
Rethrow();
}
switch (status)
{
case ok:
if (cache_)
{
if (nodeObject)
cache_->canonicalize_replace_client(hash, nodeObject);
else
{
auto notFound =
NodeObject::createObject(hotDUMMY, {}, hash);
cache_->canonicalize_replace_client(hash, notFound);
if (notFound->getType() != hotDUMMY)
nodeObject = notFound;
}
}
break;
case notFound:
break;
case dataCorrupt:
JLOG(j_.fatal()) << "fetchNodeObject " << hash
<< ": nodestore data is corrupted";
break;
default:
JLOG(j_.warn())
<< "fetchNodeObject " << hash
<< ": backend returns unknown result " << status;
break;
}
status = backend_->fetch(hash.data(), &nodeObject);
}
else
catch (std::exception const& e)
{
JLOG(j_.trace()) << "fetchNodeObject " << hash
<< ": record found in cache";
if (nodeObject->getType() == hotDUMMY)
nodeObject.reset();
JLOG(j_.fatal()) << "fetchNodeObject " << hash
<< ": Exception fetching from backend: " << e.what();
Rethrow();
}
switch (status)
{
case ok:
case notFound:
break;
case dataCorrupt:
JLOG(j_.fatal()) << "fetchNodeObject " << hash
<< ": nodestore data is corrupted";
break;
default:
JLOG(j_.warn()) << "fetchNodeObject " << hash
<< ": backend returns unknown result " << status;
break;
}
if (nodeObject)
@@ -144,71 +94,33 @@ DatabaseNodeImp::fetchNodeObject(
std::vector<std::shared_ptr<NodeObject>>
DatabaseNodeImp::fetchBatch(std::vector<uint256> const& hashes)
{
std::vector<std::shared_ptr<NodeObject>> results{hashes.size()};
using namespace std::chrono;
auto const before = steady_clock::now();
std::unordered_map<uint256 const*, size_t> indexMap;
std::vector<uint256 const*> cacheMisses;
uint64_t hits = 0;
uint64_t fetches = 0;
std::vector<uint256 const*> batch;
batch.reserve(hashes.size());
for (size_t i = 0; i < hashes.size(); ++i)
{
auto const& hash = hashes[i];
// See if the object already exists in the cache
auto nObj = cache_ ? cache_->fetch(hash) : nullptr;
++fetches;
if (!nObj)
{
// Try the database
indexMap[&hash] = i;
cacheMisses.push_back(&hash);
}
else
{
results[i] = nObj->getType() == hotDUMMY ? nullptr : nObj;
// It was in the cache.
++hits;
}
batch.push_back(&hash);
}
JLOG(j_.debug()) << "fetchBatch - cache hits = "
<< (hashes.size() - cacheMisses.size())
<< " - cache misses = " << cacheMisses.size();
auto dbResults = backend_->fetchBatch(cacheMisses).first;
for (size_t i = 0; i < dbResults.size(); ++i)
std::vector<std::shared_ptr<NodeObject>> results{hashes.size()};
results = backend_->fetchBatch(batch).first;
for (size_t i = 0; i < results.size(); ++i)
{
auto nObj = std::move(dbResults[i]);
size_t index = indexMap[cacheMisses[i]];
auto const& hash = hashes[index];
if (nObj)
{
// Ensure all threads get the same object
if (cache_)
cache_->canonicalize_replace_client(hash, nObj);
}
else
if (!results[i])
{
JLOG(j_.error())
<< "fetchBatch - "
<< "record not found in db or cache. hash = " << strHex(hash);
if (cache_)
{
auto notFound = NodeObject::createObject(hotDUMMY, {}, hash);
cache_->canonicalize_replace_client(hash, notFound);
if (notFound->getType() != hotDUMMY)
nObj = std::move(notFound);
}
<< "record not found in db. hash = " << strHex(hashes[i]);
}
results[index] = std::move(nObj);
}
auto fetchDurationUs =
std::chrono::duration_cast<std::chrono::microseconds>(
steady_clock::now() - before)
.count();
updateFetchMetrics(fetches, hits, fetchDurationUs);
updateFetchMetrics(hashes.size(), 0, fetchDurationUs);
return results;
}

View File

@@ -45,38 +45,6 @@ public:
: Database(scheduler, readThreads, config, j)
, backend_(std::move(backend))
{
std::optional<int> cacheSize, cacheAge;
if (config.exists("cache_size"))
{
cacheSize = get<int>(config, "cache_size");
if (cacheSize.value() < 0)
{
Throw<std::runtime_error>(
"Specified negative value for cache_size");
}
}
if (config.exists("cache_age"))
{
cacheAge = get<int>(config, "cache_age");
if (cacheAge.value() < 0)
{
Throw<std::runtime_error>(
"Specified negative value for cache_age");
}
}
if (cacheSize != 0 || cacheAge != 0)
{
cache_ = std::make_shared<TaggedCache<uint256, NodeObject>>(
"DatabaseNodeImp",
cacheSize.value_or(0),
std::chrono::minutes(cacheAge.value_or(0)),
stopwatch(),
j);
}
XRPL_ASSERT(
backend_,
"ripple::NodeStore::DatabaseNodeImp::DatabaseNodeImp : non-null "
@@ -137,9 +105,6 @@ public:
sweep() override;
private:
// Cache for database objects. This cache is not always initialized. Check
// for null before using.
std::shared_ptr<TaggedCache<uint256, NodeObject>> cache_;
// Persistent key/value storage
std::shared_ptr<Backend> backend_;

View File

@@ -35,8 +35,8 @@ namespace {
// element must be the number of children in a dense array.
constexpr std::array<std::uint8_t, 4> boundaries{
2,
4,
6,
3,
5,
SHAMapInnerNode::branchFactor};
static_assert(
boundaries.size() <= 4,