Compare commits

..

1 Commits

Author SHA1 Message Date
Valentin Balaschenko
607d0f7e02 remove temp vector 2025-07-25 14:03:31 +01:00
17 changed files with 392 additions and 415 deletions

View File

@@ -940,7 +940,23 @@
# #
# path Location to store the database # path Location to store the database
# #
# Optional keys for NuDB and RocksDB: # Optional keys
#
# cache_size Size of cache for database records. Default is 16384.
# Setting this value to 0 will use the default value.
#
# cache_age Length of time in minutes to keep database records
# cached. Default is 5 minutes. Setting this value to
# 0 will use the default value.
#
# Note: if neither cache_size nor cache_age is
# specified, the cache for database records will not
# be created. If only one of cache_size or cache_age
# is specified, the cache will be created using the
# default value for the unspecified parameter.
#
# Note: the cache will not be created if online_delete
# is specified.
# #
# fast_load Boolean. If set, load the last persisted ledger # fast_load Boolean. If set, load the last persisted ledger
# from disk upon process start before syncing to # from disk upon process start before syncing to
@@ -948,6 +964,8 @@
# if sufficient IOPS capacity is available. # if sufficient IOPS capacity is available.
# Default 0. # Default 0.
# #
# Optional keys for NuDB or RocksDB:
#
# earliest_seq The default is 32570 to match the XRP ledger # earliest_seq The default is 32570 to match the XRP ledger
# network's earliest allowed sequence. Alternate # network's earliest allowed sequence. Alternate
# networks may set this value. Minimum value of 1. # networks may set this value. Minimum value of 1.

View File

@@ -21,6 +21,7 @@
#define RIPPLE_BASICS_SHAMAP_HASH_H_INCLUDED #define RIPPLE_BASICS_SHAMAP_HASH_H_INCLUDED
#include <xrpl/basics/base_uint.h> #include <xrpl/basics/base_uint.h>
#include <xrpl/basics/partitioned_unordered_map.h>
#include <ostream> #include <ostream>

View File

@@ -170,6 +170,9 @@ public:
bool bool
retrieve(key_type const& key, T& data); retrieve(key_type const& key, T& data);
mutex_type&
peekMutex();
std::vector<key_type> std::vector<key_type>
getKeys() const; getKeys() const;
@@ -190,14 +193,11 @@ public:
private: private:
SharedPointerType SharedPointerType
initialFetch(key_type const& key); initialFetch(key_type const& key, std::lock_guard<mutex_type> const& l);
void void
collect_metrics(); collect_metrics();
Mutex&
lockPartition(key_type const& key) const;
private: private:
struct Stats struct Stats
{ {
@@ -300,8 +300,8 @@ private:
[[maybe_unused]] clock_type::time_point const& now, [[maybe_unused]] clock_type::time_point const& now,
typename KeyValueCacheType::map_type& partition, typename KeyValueCacheType::map_type& partition,
SweptPointersVector& stuffToSweep, SweptPointersVector& stuffToSweep,
std::atomic<int>& allRemoval, std::atomic<int>& allRemovals,
Mutex& partitionLock); std::lock_guard<std::recursive_mutex> const&);
[[nodiscard]] std::thread [[nodiscard]] std::thread
sweepHelper( sweepHelper(
@@ -310,12 +310,14 @@ private:
typename KeyOnlyCacheType::map_type& partition, typename KeyOnlyCacheType::map_type& partition,
SweptPointersVector&, SweptPointersVector&,
std::atomic<int>& allRemovals, std::atomic<int>& allRemovals,
Mutex& partitionLock); std::lock_guard<std::recursive_mutex> const&);
beast::Journal m_journal; beast::Journal m_journal;
clock_type& m_clock; clock_type& m_clock;
Stats m_stats; Stats m_stats;
mutex_type mutable m_mutex;
// Used for logging // Used for logging
std::string m_name; std::string m_name;
@@ -326,11 +328,10 @@ private:
clock_type::duration const m_target_age; clock_type::duration const m_target_age;
// Number of items cached // Number of items cached
std::atomic<int> m_cache_count; int m_cache_count;
cache_type m_cache; // Hold strong reference to recent objects cache_type m_cache; // Hold strong reference to recent objects
std::atomic<std::uint64_t> m_hits; std::uint64_t m_hits;
std::atomic<std::uint64_t> m_misses; std::uint64_t m_misses;
mutable std::vector<mutex_type> partitionLocks_;
}; };
} // namespace ripple } // namespace ripple

View File

@@ -60,7 +60,6 @@ inline TaggedCache<
, m_hits(0) , m_hits(0)
, m_misses(0) , m_misses(0)
{ {
partitionLocks_ = std::vector<mutex_type>(m_cache.partitions());
} }
template < template <
@@ -106,13 +105,8 @@ TaggedCache<
KeyEqual, KeyEqual,
Mutex>::size() const Mutex>::size() const
{ {
std::size_t totalSize = 0; std::lock_guard lock(m_mutex);
for (size_t i = 0; i < partitionLocks_.size(); ++i) return m_cache.size();
{
std::lock_guard<Mutex> lock(partitionLocks_[i]);
totalSize += m_cache.map()[i].size();
}
return totalSize;
} }
template < template <
@@ -135,7 +129,8 @@ TaggedCache<
KeyEqual, KeyEqual,
Mutex>::getCacheSize() const Mutex>::getCacheSize() const
{ {
return m_cache_count.load(std::memory_order_relaxed); std::lock_guard lock(m_mutex);
return m_cache_count;
} }
template < template <
@@ -158,7 +153,8 @@ TaggedCache<
KeyEqual, KeyEqual,
Mutex>::getTrackSize() const Mutex>::getTrackSize() const
{ {
return size(); std::lock_guard lock(m_mutex);
return m_cache.size();
} }
template < template <
@@ -181,10 +177,9 @@ TaggedCache<
KeyEqual, KeyEqual,
Mutex>::getHitRate() Mutex>::getHitRate()
{ {
auto hits = m_hits.load(std::memory_order_relaxed); std::lock_guard lock(m_mutex);
auto misses = m_misses.load(std::memory_order_relaxed); auto const total = static_cast<float>(m_hits + m_misses);
float total = float(hits + misses); return m_hits * (100.0f / std::max(1.0f, total));
return hits * (100.0f / std::max(1.0f, total));
} }
template < template <
@@ -207,12 +202,9 @@ TaggedCache<
KeyEqual, KeyEqual,
Mutex>::clear() Mutex>::clear()
{ {
for (auto& mutex : partitionLocks_) std::lock_guard lock(m_mutex);
mutex.lock();
m_cache.clear(); m_cache.clear();
for (auto& mutex : partitionLocks_) m_cache_count = 0;
mutex.unlock();
m_cache_count.store(0, std::memory_order_relaxed);
} }
template < template <
@@ -235,14 +227,11 @@ TaggedCache<
KeyEqual, KeyEqual,
Mutex>::reset() Mutex>::reset()
{ {
for (auto& mutex : partitionLocks_) std::lock_guard lock(m_mutex);
mutex.lock();
m_cache.clear(); m_cache.clear();
for (auto& mutex : partitionLocks_) m_cache_count = 0;
mutex.unlock(); m_hits = 0;
m_cache_count.store(0, std::memory_order_relaxed); m_misses = 0;
m_hits.store(0, std::memory_order_relaxed);
m_misses.store(0, std::memory_order_relaxed);
} }
template < template <
@@ -266,7 +255,7 @@ TaggedCache<
KeyEqual, KeyEqual,
Mutex>::touch_if_exists(KeyComparable const& key) Mutex>::touch_if_exists(KeyComparable const& key)
{ {
std::lock_guard<Mutex> lock(lockPartition(key)); std::lock_guard lock(m_mutex);
auto const iter(m_cache.find(key)); auto const iter(m_cache.find(key));
if (iter == m_cache.end()) if (iter == m_cache.end())
{ {
@@ -308,9 +297,26 @@ TaggedCache<
auto const start = std::chrono::steady_clock::now(); auto const start = std::chrono::steady_clock::now();
{ {
when_expire = std::lock_guard lock(m_mutex);
now + std::chrono::hours(1); // any future time works too to make
// sure that nothing survives if (m_target_size == 0 ||
(static_cast<int>(m_cache.size()) <= m_target_size))
{
when_expire = now - m_target_age;
}
else
{
when_expire = now - m_target_age * m_target_size / m_cache.size();
clock_type::duration const minimumAge(std::chrono::seconds(1));
if (when_expire > (now - minimumAge))
when_expire = now - minimumAge;
JLOG(m_journal.trace())
<< m_name << " is growing fast " << m_cache.size() << " of "
<< m_target_size << " aging at " << (now - when_expire).count()
<< " of " << m_target_age.count();
}
std::vector<std::thread> workers; std::vector<std::thread> workers;
workers.reserve(m_cache.partitions()); workers.reserve(m_cache.partitions());
@@ -324,13 +330,12 @@ TaggedCache<
m_cache.map()[p], m_cache.map()[p],
allStuffToSweep[p], allStuffToSweep[p],
allRemovals, allRemovals,
partitionLocks_[p])); lock));
} }
for (std::thread& worker : workers) for (std::thread& worker : workers)
worker.join(); worker.join();
int removals = allRemovals.load(std::memory_order_relaxed); m_cache_count -= allRemovals;
m_cache_count.fetch_sub(removals, std::memory_order_relaxed);
} }
// At this point allStuffToSweep will go out of scope outside the lock // At this point allStuffToSweep will go out of scope outside the lock
// and decrement the reference count on each strong pointer. // and decrement the reference count on each strong pointer.
@@ -364,8 +369,7 @@ TaggedCache<
{ {
// Remove from cache, if !valid, remove from map too. Returns true if // Remove from cache, if !valid, remove from map too. Returns true if
// removed from cache // removed from cache
std::lock_guard lock(m_mutex);
std::lock_guard<Mutex> lock(lockPartition(key));
auto cit = m_cache.find(key); auto cit = m_cache.find(key);
@@ -378,7 +382,7 @@ TaggedCache<
if (entry.isCached()) if (entry.isCached())
{ {
m_cache_count.fetch_sub(1, std::memory_order_relaxed); --m_cache_count;
entry.ptr.convertToWeak(); entry.ptr.convertToWeak();
ret = true; ret = true;
} }
@@ -416,16 +420,17 @@ TaggedCache<
{ {
// Return canonical value, store if needed, refresh in cache // Return canonical value, store if needed, refresh in cache
// Return values: true=we had the data already // Return values: true=we had the data already
std::lock_guard lock(m_mutex);
std::lock_guard<Mutex> lock(lockPartition(key));
auto cit = m_cache.find(key); auto cit = m_cache.find(key);
if (cit == m_cache.end()) if (cit == m_cache.end())
{ {
m_cache.emplace( m_cache.emplace(
std::piecewise_construct, std::piecewise_construct,
std::forward_as_tuple(key), std::forward_as_tuple(key),
std::forward_as_tuple(m_clock.now(), data)); std::forward_as_tuple(m_clock.now(), data));
m_cache_count.fetch_add(1, std::memory_order_relaxed); ++m_cache_count;
return false; return false;
} }
@@ -474,12 +479,12 @@ TaggedCache<
data = cachedData; data = cachedData;
} }
m_cache_count.fetch_add(1, std::memory_order_relaxed); ++m_cache_count;
return true; return true;
} }
entry.ptr = data; entry.ptr = data;
m_cache_count.fetch_add(1, std::memory_order_relaxed); ++m_cache_count;
return false; return false;
} }
@@ -555,11 +560,10 @@ TaggedCache<
KeyEqual, KeyEqual,
Mutex>::fetch(key_type const& key) Mutex>::fetch(key_type const& key)
{ {
std::lock_guard<Mutex> lock(lockPartition(key)); std::lock_guard<mutex_type> l(m_mutex);
auto ret = initialFetch(key, l);
auto ret = initialFetch(key);
if (!ret) if (!ret)
m_misses.fetch_add(1, std::memory_order_relaxed); ++m_misses;
return ret; return ret;
} }
@@ -623,8 +627,8 @@ TaggedCache<
Mutex>::insert(key_type const& key) Mutex>::insert(key_type const& key)
-> std::enable_if_t<IsKeyCache, ReturnType> -> std::enable_if_t<IsKeyCache, ReturnType>
{ {
std::lock_guard lock(m_mutex);
clock_type::time_point const now(m_clock.now()); clock_type::time_point const now(m_clock.now());
std::lock_guard<Mutex> lock(lockPartition(key));
auto [it, inserted] = m_cache.emplace( auto [it, inserted] = m_cache.emplace(
std::piecewise_construct, std::piecewise_construct,
std::forward_as_tuple(key), std::forward_as_tuple(key),
@@ -664,6 +668,29 @@ TaggedCache<
return true; return true;
} }
template <
class Key,
class T,
bool IsKeyCache,
class SharedWeakUnionPointer,
class SharedPointerType,
class Hash,
class KeyEqual,
class Mutex>
inline auto
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::peekMutex() -> mutex_type&
{
return m_mutex;
}
template < template <
class Key, class Key,
class T, class T,
@@ -687,13 +714,10 @@ TaggedCache<
std::vector<key_type> v; std::vector<key_type> v;
{ {
std::lock_guard lock(m_mutex);
v.reserve(m_cache.size()); v.reserve(m_cache.size());
for (std::size_t i = 0; i < partitionLocks_.size(); ++i) for (auto const& _ : m_cache)
{ v.push_back(_.first);
std::lock_guard<Mutex> lock(partitionLocks_[i]);
for (auto const& entry : m_cache.map()[i])
v.push_back(entry.first);
}
} }
return v; return v;
@@ -719,12 +743,11 @@ TaggedCache<
KeyEqual, KeyEqual,
Mutex>::rate() const Mutex>::rate() const
{ {
auto hits = m_hits.load(std::memory_order_relaxed); std::lock_guard lock(m_mutex);
auto misses = m_misses.load(std::memory_order_relaxed); auto const tot = m_hits + m_misses;
auto const tot = hits + misses;
if (tot == 0) if (tot == 0)
return 0.0; return 0;
return double(hits) / tot; return double(m_hits) / tot;
} }
template < template <
@@ -748,16 +771,18 @@ TaggedCache<
KeyEqual, KeyEqual,
Mutex>::fetch(key_type const& digest, Handler const& h) Mutex>::fetch(key_type const& digest, Handler const& h)
{ {
std::lock_guard<Mutex> lock(lockPartition(digest)); {
std::lock_guard l(m_mutex);
if (auto ret = initialFetch(digest)) if (auto ret = initialFetch(digest, l))
return ret; return ret;
}
auto sle = h(); auto sle = h();
if (!sle) if (!sle)
return {}; return {};
m_misses.fetch_add(1, std::memory_order_relaxed); std::lock_guard l(m_mutex);
++m_misses;
auto const [it, inserted] = auto const [it, inserted] =
m_cache.emplace(digest, Entry(m_clock.now(), std::move(sle))); m_cache.emplace(digest, Entry(m_clock.now(), std::move(sle)));
if (!inserted) if (!inserted)
@@ -784,10 +809,9 @@ TaggedCache<
SharedPointerType, SharedPointerType,
Hash, Hash,
KeyEqual, KeyEqual,
Mutex>::initialFetch(key_type const& key) Mutex>::
initialFetch(key_type const& key, std::lock_guard<mutex_type> const& l)
{ {
std::lock_guard<Mutex> lock(lockPartition(key));
auto cit = m_cache.find(key); auto cit = m_cache.find(key);
if (cit == m_cache.end()) if (cit == m_cache.end())
return {}; return {};
@@ -795,7 +819,7 @@ TaggedCache<
Entry& entry = cit->second; Entry& entry = cit->second;
if (entry.isCached()) if (entry.isCached())
{ {
m_hits.fetch_add(1, std::memory_order_relaxed); ++m_hits;
entry.touch(m_clock.now()); entry.touch(m_clock.now());
return entry.ptr.getStrong(); return entry.ptr.getStrong();
} }
@@ -803,13 +827,12 @@ TaggedCache<
if (entry.isCached()) if (entry.isCached())
{ {
// independent of cache size, so not counted as a hit // independent of cache size, so not counted as a hit
m_cache_count.fetch_add(1, std::memory_order_relaxed); ++m_cache_count;
entry.touch(m_clock.now()); entry.touch(m_clock.now());
return entry.ptr.getStrong(); return entry.ptr.getStrong();
} }
m_cache.erase(cit); // TODO: if this erase happens on fetch, what is left m_cache.erase(cit);
// for a sweep?
return {}; return {};
} }
@@ -838,11 +861,10 @@ TaggedCache<
{ {
beast::insight::Gauge::value_type hit_rate(0); beast::insight::Gauge::value_type hit_rate(0);
{ {
auto const hits = m_hits.load(std::memory_order_relaxed); std::lock_guard lock(m_mutex);
auto const misses = m_misses.load(std::memory_order_relaxed); auto const total(m_hits + m_misses);
auto const total = hits + misses;
if (total != 0) if (total != 0)
hit_rate = (hits * 100) / total; hit_rate = (m_hits * 100) / total;
} }
m_stats.hit_rate.set(hit_rate); m_stats.hit_rate.set(hit_rate);
} }
@@ -873,14 +895,12 @@ TaggedCache<
typename KeyValueCacheType::map_type& partition, typename KeyValueCacheType::map_type& partition,
SweptPointersVector& stuffToSweep, SweptPointersVector& stuffToSweep,
std::atomic<int>& allRemovals, std::atomic<int>& allRemovals,
Mutex& partitionLock) std::lock_guard<std::recursive_mutex> const&)
{ {
return std::thread([&, this]() { return std::thread([&, this]() {
int cacheRemovals = 0; int cacheRemovals = 0;
int mapRemovals = 0; int mapRemovals = 0;
std::lock_guard<Mutex> lock(partitionLock);
// Keep references to all the stuff we sweep // Keep references to all the stuff we sweep
// so that we can destroy them outside the lock. // so that we can destroy them outside the lock.
stuffToSweep.reserve(partition.size()); stuffToSweep.reserve(partition.size());
@@ -964,14 +984,12 @@ TaggedCache<
typename KeyOnlyCacheType::map_type& partition, typename KeyOnlyCacheType::map_type& partition,
SweptPointersVector&, SweptPointersVector&,
std::atomic<int>& allRemovals, std::atomic<int>& allRemovals,
Mutex& partitionLock) std::lock_guard<std::recursive_mutex> const&)
{ {
return std::thread([&, this]() { return std::thread([&, this]() {
int cacheRemovals = 0; int cacheRemovals = 0;
int mapRemovals = 0; int mapRemovals = 0;
std::lock_guard<Mutex> lock(partitionLock);
// Keep references to all the stuff we sweep // Keep references to all the stuff we sweep
// so that we can destroy them outside the lock. // so that we can destroy them outside the lock.
{ {
@@ -1006,29 +1024,6 @@ TaggedCache<
}); });
} }
template <
class Key,
class T,
bool IsKeyCache,
class SharedWeakUnionPointer,
class SharedPointerType,
class Hash,
class KeyEqual,
class Mutex>
inline Mutex&
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::lockPartition(key_type const& key) const
{
return partitionLocks_[m_cache.partition_index(key)];
}
} // namespace ripple } // namespace ripple
#endif #endif

View File

@@ -277,12 +277,6 @@ public:
return map_; return map_;
} }
partition_map_type const&
map() const
{
return map_;
}
iterator iterator
begin() begin()
{ {
@@ -327,12 +321,6 @@ public:
return cend(); return cend();
} }
std::size_t
partition_index(key_type const& key) const
{
return partitioner(key);
}
private: private:
template <class T> template <class T>
void void
@@ -392,7 +380,7 @@ public:
clear() clear()
{ {
for (auto& p : map_) for (auto& p : map_)
p.clear(); // TODO make sure that it is locked inside p.clear();
} }
iterator iterator
@@ -418,7 +406,7 @@ public:
{ {
std::size_t ret = 0; std::size_t ret = 0;
for (auto& p : map_) for (auto& p : map_)
ret += p.size(); // TODO make sure that it is locked inside ret += p.size();
return ret; return ret;
} }

View File

@@ -22,6 +22,7 @@
#include <xrpl/basics/ByteUtilities.h> #include <xrpl/basics/ByteUtilities.h>
#include <xrpl/basics/base_uint.h> #include <xrpl/basics/base_uint.h>
#include <xrpl/basics/partitioned_unordered_map.h>
#include <cstdint> #include <cstdint>

View File

@@ -558,8 +558,23 @@ public:
Env env(*this, envconfig(onlineDelete)); Env env(*this, envconfig(onlineDelete));
///////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////
// Create NodeStore with two backends to allow online deletion of data. // Create the backend. Normally, SHAMapStoreImp handles all these
// Normally, SHAMapStoreImp handles all these details. // details
auto nscfg = env.app().config().section(ConfigSection::nodeDatabase());
// Provide default values:
if (!nscfg.exists("cache_size"))
nscfg.set(
"cache_size",
std::to_string(env.app().config().getValueFor(
SizedItem::treeCacheSize, std::nullopt)));
if (!nscfg.exists("cache_age"))
nscfg.set(
"cache_age",
std::to_string(env.app().config().getValueFor(
SizedItem::treeCacheAge, std::nullopt)));
NodeStoreScheduler scheduler(env.app().getJobQueue()); NodeStoreScheduler scheduler(env.app().getJobQueue());
std::string const writableDb = "write"; std::string const writableDb = "write";
@@ -567,8 +582,9 @@ public:
auto writableBackend = makeBackendRotating(env, scheduler, writableDb); auto writableBackend = makeBackendRotating(env, scheduler, writableDb);
auto archiveBackend = makeBackendRotating(env, scheduler, archiveDb); auto archiveBackend = makeBackendRotating(env, scheduler, archiveDb);
// Create NodeStore with two backends to allow online deletion of
// data
constexpr int readThreads = 4; constexpr int readThreads = 4;
auto nscfg = env.app().config().section(ConfigSection::nodeDatabase());
auto dbr = std::make_unique<NodeStore::DatabaseRotatingImp>( auto dbr = std::make_unique<NodeStore::DatabaseRotatingImp>(
scheduler, scheduler,
readThreads, readThreads,

View File

@@ -32,66 +32,64 @@ public:
void void
run() override run() override
{ {
// using namespace std::chrono_literals; using namespace std::chrono_literals;
// TestStopwatch clock; TestStopwatch clock;
// clock.set(0); clock.set(0);
// using Key = std::string; using Key = std::string;
// using Cache = TaggedCache<Key, int, true>; using Cache = TaggedCache<Key, int, true>;
test::SuiteJournal j("KeyCacheTest", *this); test::SuiteJournal j("KeyCacheTest", *this);
BEAST_EXPECT(true); // Insert an item, retrieve it, and age it so it gets purged.
{
Cache c("test", LedgerIndex(1), 2s, clock, j);
// // Insert an item, retrieve it, and age it so it gets purged. BEAST_EXPECT(c.size() == 0);
// { BEAST_EXPECT(c.insert("one"));
// // Cache c("test", LedgerIndex(1), 2s, clock, j); BEAST_EXPECT(!c.insert("one"));
BEAST_EXPECT(c.size() == 1);
BEAST_EXPECT(c.touch_if_exists("one"));
++clock;
c.sweep();
BEAST_EXPECT(c.size() == 1);
++clock;
c.sweep();
BEAST_EXPECT(c.size() == 0);
BEAST_EXPECT(!c.touch_if_exists("one"));
}
// // BEAST_EXPECT(c.size() == 0); // Insert two items, have one expire
// // BEAST_EXPECT(c.insert("one")); {
// // BEAST_EXPECT(!c.insert("one")); Cache c("test", LedgerIndex(2), 2s, clock, j);
// // BEAST_EXPECT(c.size() == 1);
// // BEAST_EXPECT(c.touch_if_exists("one"));
// // ++clock;
// // c.sweep();
// // BEAST_EXPECT(c.size() == 1);
// // ++clock;
// // c.sweep();
// // BEAST_EXPECT(c.size() == 0);
// // BEAST_EXPECT(!c.touch_if_exists("one"));
// }
// // Insert two items, have one expire BEAST_EXPECT(c.insert("one"));
// { BEAST_EXPECT(c.size() == 1);
// // Cache c("test", LedgerIndex(2), 2s, clock, j); BEAST_EXPECT(c.insert("two"));
BEAST_EXPECT(c.size() == 2);
++clock;
c.sweep();
BEAST_EXPECT(c.size() == 2);
BEAST_EXPECT(c.touch_if_exists("two"));
++clock;
c.sweep();
BEAST_EXPECT(c.size() == 1);
}
// // BEAST_EXPECT(c.insert("one")); // Insert three items (1 over limit), sweep
// // BEAST_EXPECT(c.size() == 1); {
// // BEAST_EXPECT(c.insert("two")); Cache c("test", LedgerIndex(2), 3s, clock, j);
// // BEAST_EXPECT(c.size() == 2);
// // ++clock;
// // c.sweep();
// // BEAST_EXPECT(c.size() == 2);
// // BEAST_EXPECT(c.touch_if_exists("two"));
// // ++clock;
// // c.sweep();
// // BEAST_EXPECT(c.size() == 1);
// }
// // Insert three items (1 over limit), sweep BEAST_EXPECT(c.insert("one"));
// { ++clock;
// Cache c("test", LedgerIndex(2), 3s, clock, j); BEAST_EXPECT(c.insert("two"));
++clock;
// // BEAST_EXPECT(c.insert("one")); BEAST_EXPECT(c.insert("three"));
// // ++clock; ++clock;
// // BEAST_EXPECT(c.insert("two")); BEAST_EXPECT(c.size() == 3);
// // ++clock; c.sweep();
// // BEAST_EXPECT(c.insert("three")); BEAST_EXPECT(c.size() < 3);
// // ++clock; }
// // BEAST_EXPECT(c.size() == 3);
// // c.sweep();
// // BEAST_EXPECT(c.size() < 3);
// }
} }
}; };

View File

@@ -27,6 +27,8 @@
namespace ripple { namespace ripple {
// FIXME: Need to clean up ledgers by index at some point
LedgerHistory::LedgerHistory( LedgerHistory::LedgerHistory(
beast::insight::Collector::ptr const& collector, beast::insight::Collector::ptr const& collector,
Application& app) Application& app)
@@ -45,7 +47,6 @@ LedgerHistory::LedgerHistory(
std::chrono::minutes{5}, std::chrono::minutes{5},
stopwatch(), stopwatch(),
app_.journal("TaggedCache")) app_.journal("TaggedCache"))
, mLedgersByIndex(256)
, j_(app.journal("LedgerHistory")) , j_(app.journal("LedgerHistory"))
{ {
} }
@@ -62,18 +63,12 @@ LedgerHistory::insert(
ledger->stateMap().getHash().isNonZero(), ledger->stateMap().getHash().isNonZero(),
"ripple::LedgerHistory::insert : nonzero hash"); "ripple::LedgerHistory::insert : nonzero hash");
// TODOL merge the below into a single call to avoid lock and race std::unique_lock sl(m_ledgers_by_hash.peekMutex());
// conditions, i.e. - return alreadyHad on assignment somehow.
bool const alreadyHad = m_ledgers_by_hash.canonicalize_replace_cache( bool const alreadyHad = m_ledgers_by_hash.canonicalize_replace_cache(
ledger->info().hash, ledger); ledger->info().hash, ledger);
if (validated) if (validated)
{
mLedgersByIndex[ledger->info().seq] = ledger->info().hash; mLedgersByIndex[ledger->info().seq] = ledger->info().hash;
JLOG(j_.info()) << "LedgerHistory::insert: mLedgersByIndex size: "
<< mLedgersByIndex.size() << " , total size: "
<< mLedgersByIndex.size() *
(sizeof(LedgerIndex) + sizeof(LedgerHash));
}
return alreadyHad; return alreadyHad;
} }
@@ -81,7 +76,7 @@ LedgerHistory::insert(
LedgerHash LedgerHash
LedgerHistory::getLedgerHash(LedgerIndex index) LedgerHistory::getLedgerHash(LedgerIndex index)
{ {
// TODO: is it safe to get iterator without lock here? std::unique_lock sl(m_ledgers_by_hash.peekMutex());
if (auto it = mLedgersByIndex.find(index); it != mLedgersByIndex.end()) if (auto it = mLedgersByIndex.find(index); it != mLedgersByIndex.end())
return it->second; return it->second;
return {}; return {};
@@ -91,12 +86,13 @@ std::shared_ptr<Ledger const>
LedgerHistory::getLedgerBySeq(LedgerIndex index) LedgerHistory::getLedgerBySeq(LedgerIndex index)
{ {
{ {
// TODO: this lock is not needed std::unique_lock sl(m_ledgers_by_hash.peekMutex());
auto it = mLedgersByIndex.find(index); auto it = mLedgersByIndex.find(index);
if (it != mLedgersByIndex.end()) if (it != mLedgersByIndex.end())
{ {
uint256 hash = it->second; uint256 hash = it->second;
sl.unlock();
return getLedgerByHash(hash); return getLedgerByHash(hash);
} }
} }
@@ -112,19 +108,13 @@ LedgerHistory::getLedgerBySeq(LedgerIndex index)
{ {
// Add this ledger to the local tracking by index // Add this ledger to the local tracking by index
// std::unique_lock sl(m_ledgers_by_hash.peekMutex()); std::unique_lock sl(m_ledgers_by_hash.peekMutex());
// TODO: make sure that canonicalize_replace_client lock the partition
XRPL_ASSERT( XRPL_ASSERT(
ret->isImmutable(), ret->isImmutable(),
"ripple::LedgerHistory::getLedgerBySeq : immutable result ledger"); "ripple::LedgerHistory::getLedgerBySeq : immutable result ledger");
m_ledgers_by_hash.canonicalize_replace_client(ret->info().hash, ret); m_ledgers_by_hash.canonicalize_replace_client(ret->info().hash, ret);
mLedgersByIndex[ret->info().seq] = ret->info().hash; mLedgersByIndex[ret->info().seq] = ret->info().hash;
JLOG(j_.info())
<< "LedgerHistory::getLedgerBySeq: mLedgersByIndex size: "
<< mLedgersByIndex.size() << " , total size: "
<< mLedgersByIndex.size() *
(sizeof(LedgerIndex) + sizeof(LedgerHash));
return (ret->info().seq == index) ? ret : nullptr; return (ret->info().seq == index) ? ret : nullptr;
} }
} }
@@ -468,8 +458,7 @@ LedgerHistory::builtLedger(
XRPL_ASSERT( XRPL_ASSERT(
!hash.isZero(), "ripple::LedgerHistory::builtLedger : nonzero hash"); !hash.isZero(), "ripple::LedgerHistory::builtLedger : nonzero hash");
// std::unique_lock sl(m_consensus_validated.peekMutex()); std::unique_lock sl(m_consensus_validated.peekMutex());
// TODO: make sure that canonicalize_replace_client lock the partition
auto entry = std::make_shared<cv_entry>(); auto entry = std::make_shared<cv_entry>();
m_consensus_validated.canonicalize_replace_client(index, entry); m_consensus_validated.canonicalize_replace_client(index, entry);
@@ -511,8 +500,7 @@ LedgerHistory::validatedLedger(
!hash.isZero(), !hash.isZero(),
"ripple::LedgerHistory::validatedLedger : nonzero hash"); "ripple::LedgerHistory::validatedLedger : nonzero hash");
// std::unique_lock sl(m_consensus_validated.peekMutex()); std::unique_lock sl(m_consensus_validated.peekMutex());
// TODO: make sure that canonicalize_replace_client lock the partition
auto entry = std::make_shared<cv_entry>(); auto entry = std::make_shared<cv_entry>();
m_consensus_validated.canonicalize_replace_client(index, entry); m_consensus_validated.canonicalize_replace_client(index, entry);
@@ -547,9 +535,7 @@ LedgerHistory::validatedLedger(
bool bool
LedgerHistory::fixIndex(LedgerIndex ledgerIndex, LedgerHash const& ledgerHash) LedgerHistory::fixIndex(LedgerIndex ledgerIndex, LedgerHash const& ledgerHash)
{ {
// std::unique_lock sl(m_ledgers_by_hash.peekMutex()); std::unique_lock sl(m_ledgers_by_hash.peekMutex());
// TODO: how to ensure that? "Ensure m_ledgers_by_hash doesn't have the
// wrong hash for a particular index"
auto it = mLedgersByIndex.find(ledgerIndex); auto it = mLedgersByIndex.find(ledgerIndex);
if ((it != mLedgersByIndex.end()) && (it->second != ledgerHash)) if ((it != mLedgersByIndex.end()) && (it->second != ledgerHash))

View File

@@ -22,7 +22,6 @@
#include <xrpld/app/ledger/Ledger.h> #include <xrpld/app/ledger/Ledger.h>
#include <xrpld/app/main/Application.h> #include <xrpld/app/main/Application.h>
#include <xrpld/core/detail/LRUMap.h>
#include <xrpl/beast/insight/Collector.h> #include <xrpl/beast/insight/Collector.h>
#include <xrpl/protocol/RippleLedgerHash.h> #include <xrpl/protocol/RippleLedgerHash.h>
@@ -150,7 +149,7 @@ private:
ConsensusValidated m_consensus_validated; ConsensusValidated m_consensus_validated;
// Maps ledger indexes to the corresponding hash. // Maps ledger indexes to the corresponding hash.
LRUMap<LedgerIndex, LedgerHash> mLedgersByIndex; // validated ledgers std::map<LedgerIndex, LedgerHash> mLedgersByIndex; // validated ledgers
beast::Journal j_; beast::Journal j_;
}; };

View File

@@ -384,16 +384,9 @@ public:
{ {
auto const start = m_clock.now(); auto const start = m_clock.now();
// Make a list of things to sweep, while holding the lock
std::vector<MapType::mapped_type> stuffToSweep;
std::size_t total;
{ {
ScopedLockType sl(mLock); ScopedLockType sl(mLock);
MapType::iterator it(mLedgers.begin()); MapType::iterator it(mLedgers.begin());
total = mLedgers.size();
stuffToSweep.reserve(total);
while (it != mLedgers.end()) while (it != mLedgers.end())
{ {
@@ -406,9 +399,6 @@ public:
} }
else if ((la + std::chrono::seconds(10)) < start) else if ((la + std::chrono::seconds(10)) < start)
{ {
stuffToSweep.push_back(it->second);
// shouldn't cause the actual final delete
// since we are holding a reference in the vector.
it = mLedgers.erase(it); it = mLedgers.erase(it);
} }
else else
@@ -419,14 +409,6 @@ public:
beast::expire(mRecentFailures, kReacquireInterval); beast::expire(mRecentFailures, kReacquireInterval);
} }
JLOG(j_.debug())
<< "Swept " << stuffToSweep.size() << " out of " << total
<< " inbound ledgers. Duration: "
<< std::chrono::duration_cast<std::chrono::milliseconds>(
m_clock.now() - start)
.count()
<< "ms";
} }
void void

View File

@@ -162,6 +162,20 @@ std::unique_ptr<NodeStore::Database>
SHAMapStoreImp::makeNodeStore(int readThreads) SHAMapStoreImp::makeNodeStore(int readThreads)
{ {
auto nscfg = app_.config().section(ConfigSection::nodeDatabase()); auto nscfg = app_.config().section(ConfigSection::nodeDatabase());
// Provide default values:
if (!nscfg.exists("cache_size"))
nscfg.set(
"cache_size",
std::to_string(app_.config().getValueFor(
SizedItem::treeCacheSize, std::nullopt)));
if (!nscfg.exists("cache_age"))
nscfg.set(
"cache_age",
std::to_string(app_.config().getValueFor(
SizedItem::treeCacheAge, std::nullopt)));
std::unique_ptr<NodeStore::Database> db; std::unique_ptr<NodeStore::Database> db;
if (deleteInterval_) if (deleteInterval_)
@@ -255,6 +269,8 @@ SHAMapStoreImp::run()
LedgerIndex lastRotated = state_db_.getState().lastRotated; LedgerIndex lastRotated = state_db_.getState().lastRotated;
netOPs_ = &app_.getOPs(); netOPs_ = &app_.getOPs();
ledgerMaster_ = &app_.getLedgerMaster(); ledgerMaster_ = &app_.getLedgerMaster();
fullBelowCache_ = &(*app_.getNodeFamily().getFullBelowCache());
treeNodeCache_ = &(*app_.getNodeFamily().getTreeNodeCache());
if (advisoryDelete_) if (advisoryDelete_)
canDelete_ = state_db_.getCanDelete(); canDelete_ = state_db_.getCanDelete();
@@ -547,12 +563,16 @@ void
SHAMapStoreImp::clearCaches(LedgerIndex validatedSeq) SHAMapStoreImp::clearCaches(LedgerIndex validatedSeq)
{ {
ledgerMaster_->clearLedgerCachePrior(validatedSeq); ledgerMaster_->clearLedgerCachePrior(validatedSeq);
fullBelowCache_->clear();
} }
void void
SHAMapStoreImp::freshenCaches() SHAMapStoreImp::freshenCaches()
{ {
freshenCache(app_.getMasterTransaction().getCache()); if (freshenCache(*treeNodeCache_))
return;
if (freshenCache(app_.getMasterTransaction().getCache()))
return;
} }
void void

View File

@@ -112,6 +112,8 @@ private:
// as of run() or before // as of run() or before
NetworkOPs* netOPs_ = nullptr; NetworkOPs* netOPs_ = nullptr;
LedgerMaster* ledgerMaster_ = nullptr; LedgerMaster* ledgerMaster_ = nullptr;
FullBelowCache* fullBelowCache_ = nullptr;
TreeNodeCache* treeNodeCache_ = nullptr;
static constexpr auto nodeStoreName_ = "NodeStore"; static constexpr auto nodeStoreName_ = "NodeStore";

View File

@@ -1,153 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_APP_LRU_MAP_H_INCLUDED
#define RIPPLE_APP_LRU_MAP_H_INCLUDED
#include <list>
#include <map>
#include <utility>
namespace ripple {
template <typename Key, typename Value>
class LRUMap
{
public:
    /** A map with bounded size and least-recently-used eviction.

        Access through operator[] marks an entry as most recently used;
        find() deliberately does not update the usage order (preserved
        from the original implementation).

        Note: bump_to_front and eviction bookkeeping are O(n) in the
        number of entries; this container is intended for small maps.
    */

    /** Construct a map holding at most `capacity` entries.

        A capacity of zero is clamped to one: with capacity zero the old
        code popped from an empty usage list inside operator[], which
        was undefined behavior (the former TODO asked for this check).
    */
    explicit LRUMap(std::size_t capacity)
        : capacity_(capacity == 0 ? 1 : capacity)
    {
    }

    /** Return a reference to the value for `key`.

        If the key is absent, least-recently-used entries are evicted
        until there is room, then a default-constructed value is
        inserted. Accessing an existing key refreshes its usage order.
    */
    Value&
    operator[](Key const& key)
    {
        auto it = data_.find(key);
        if (it != data_.end())
        {
            bump_to_front(key);
            return it->second;
        }

        // Make room before inserting. Bounding on the usage list being
        // non-empty keeps this well-defined even if the two containers
        // ever disagree in size.
        while (!usage_list_.empty() && data_.size() >= capacity_)
        {
            data_.erase(usage_list_.back());
            usage_list_.pop_back();
        }

        usage_list_.push_front(key);
        return data_[key];
    }

    /** Look up `key` without refreshing its usage order. */
    auto
    find(Key const& key)
    {
        return data_.find(key);
    }

    /** Look up `key` without refreshing its usage order (const). */
    auto
    find(Key const& key) const
    {
        return data_.find(key);
    }

    auto
    begin()
    {
        return data_.begin();
    }

    auto
    begin() const
    {
        return data_.begin();
    }

    auto
    end()
    {
        return data_.end();
    }

    auto
    end() const
    {
        return data_.end();
    }

    /** Remove `key`; returns false if it was not present. */
    bool
    erase(Key const& key)
    {
        auto it = data_.find(key);
        if (it == data_.end())
            return false;

        // Keys are unique in the usage list, so remove() drops exactly
        // the one matching node.
        usage_list_.remove(key);
        data_.erase(it);
        return true;
    }

    /** Number of entries currently stored. */
    std::size_t
    size() const noexcept
    {
        return data_.size();
    }

    /** Maximum number of entries (always at least 1). */
    std::size_t
    capacity() const noexcept
    {
        return capacity_;
    }

    /** Remove all entries; capacity is unchanged. */
    void
    clear()
    {
        data_.clear();
        usage_list_.clear();
    }

private:
    // Move `key`'s node to the front of the usage list (most recently
    // used). splice relinks the node in O(1) once found, without
    // invalidating other iterators or reallocating.
    void
    bump_to_front(Key const& key)
    {
        for (auto it = usage_list_.begin(); it != usage_list_.end(); ++it)
        {
            if (*it == key)
            {
                usage_list_.splice(usage_list_.begin(), usage_list_, it);
                return;
            }
        }
    }

    std::size_t capacity_;
    // Invariant: data_ and usage_list_ hold exactly the same key set;
    // usage_list_ is ordered most- to least-recently used.
    std::map<Key, Value> data_;
    std::list<Key> usage_list_;
};
} // namespace ripple
#endif

View File

@@ -33,6 +33,14 @@ DatabaseNodeImp::store(
auto obj = NodeObject::createObject(type, std::move(data), hash); auto obj = NodeObject::createObject(type, std::move(data), hash);
backend_->store(obj); backend_->store(obj);
if (cache_)
{
// After the store, replace a negative cache entry if there is one
cache_->canonicalize(
hash, obj, [](std::shared_ptr<NodeObject> const& n) {
return n->getType() == hotDUMMY;
});
}
} }
void void
@@ -41,12 +49,23 @@ DatabaseNodeImp::asyncFetch(
std::uint32_t ledgerSeq, std::uint32_t ledgerSeq,
std::function<void(std::shared_ptr<NodeObject> const&)>&& callback) std::function<void(std::shared_ptr<NodeObject> const&)>&& callback)
{ {
if (cache_)
{
std::shared_ptr<NodeObject> obj = cache_->fetch(hash);
if (obj)
{
callback(obj->getType() == hotDUMMY ? nullptr : obj);
return;
}
}
Database::asyncFetch(hash, ledgerSeq, std::move(callback)); Database::asyncFetch(hash, ledgerSeq, std::move(callback));
} }
void void
DatabaseNodeImp::sweep() DatabaseNodeImp::sweep()
{ {
if (cache_)
cache_->sweep();
} }
std::shared_ptr<NodeObject> std::shared_ptr<NodeObject>
@@ -56,7 +75,14 @@ DatabaseNodeImp::fetchNodeObject(
FetchReport& fetchReport, FetchReport& fetchReport,
bool duplicate) bool duplicate)
{ {
std::shared_ptr<NodeObject> nodeObject = nullptr; std::shared_ptr<NodeObject> nodeObject =
cache_ ? cache_->fetch(hash) : nullptr;
if (!nodeObject)
{
JLOG(j_.trace()) << "fetchNodeObject " << hash << ": record not "
<< (cache_ ? "cached" : "found");
Status status; Status status;
try try
@@ -65,7 +91,8 @@ DatabaseNodeImp::fetchNodeObject(
} }
catch (std::exception const& e) catch (std::exception const& e)
{ {
JLOG(j_.fatal()) << "fetchNodeObject " << hash JLOG(j_.fatal())
<< "fetchNodeObject " << hash
<< ": Exception fetching from backend: " << e.what(); << ": Exception fetching from backend: " << e.what();
Rethrow(); Rethrow();
} }
@@ -73,6 +100,20 @@ DatabaseNodeImp::fetchNodeObject(
switch (status) switch (status)
{ {
case ok: case ok:
if (cache_)
{
if (nodeObject)
cache_->canonicalize_replace_client(hash, nodeObject);
else
{
auto notFound =
NodeObject::createObject(hotDUMMY, {}, hash);
cache_->canonicalize_replace_client(hash, notFound);
if (notFound->getType() != hotDUMMY)
nodeObject = notFound;
}
}
break;
case notFound: case notFound:
break; break;
case dataCorrupt: case dataCorrupt:
@@ -80,10 +121,19 @@ DatabaseNodeImp::fetchNodeObject(
<< ": nodestore data is corrupted"; << ": nodestore data is corrupted";
break; break;
default: default:
JLOG(j_.warn()) << "fetchNodeObject " << hash JLOG(j_.warn())
<< "fetchNodeObject " << hash
<< ": backend returns unknown result " << status; << ": backend returns unknown result " << status;
break; break;
} }
}
else
{
JLOG(j_.trace()) << "fetchNodeObject " << hash
<< ": record found in cache";
if (nodeObject->getType() == hotDUMMY)
nodeObject.reset();
}
if (nodeObject) if (nodeObject)
fetchReport.wasFound = true; fetchReport.wasFound = true;
@@ -94,33 +144,71 @@ DatabaseNodeImp::fetchNodeObject(
std::vector<std::shared_ptr<NodeObject>> std::vector<std::shared_ptr<NodeObject>>
DatabaseNodeImp::fetchBatch(std::vector<uint256> const& hashes) DatabaseNodeImp::fetchBatch(std::vector<uint256> const& hashes)
{ {
std::vector<std::shared_ptr<NodeObject>> results{hashes.size()};
using namespace std::chrono; using namespace std::chrono;
auto const before = steady_clock::now(); auto const before = steady_clock::now();
std::unordered_map<uint256 const*, size_t> indexMap;
std::vector<uint256 const*> batch{hashes.size()}; std::vector<uint256 const*> cacheMisses;
uint64_t hits = 0;
uint64_t fetches = 0;
for (size_t i = 0; i < hashes.size(); ++i) for (size_t i = 0; i < hashes.size(); ++i)
{ {
auto const& hash = hashes[i]; auto const& hash = hashes[i];
batch.push_back(&hash); // See if the object already exists in the cache
auto nObj = cache_ ? cache_->fetch(hash) : nullptr;
++fetches;
if (!nObj)
{
// Try the database
indexMap[&hash] = i;
cacheMisses.push_back(&hash);
}
else
{
results[i] = nObj->getType() == hotDUMMY ? nullptr : nObj;
// It was in the cache.
++hits;
}
} }
std::vector<std::shared_ptr<NodeObject>> results{hashes.size()}; JLOG(j_.debug()) << "fetchBatch - cache hits = "
results = backend_->fetchBatch(batch).first; << (hashes.size() - cacheMisses.size())
for (size_t i = 0; i < results.size(); ++i) << " - cache misses = " << cacheMisses.size();
auto dbResults = backend_->fetchBatch(cacheMisses).first;
for (size_t i = 0; i < dbResults.size(); ++i)
{ {
if (!results[i]) auto nObj = std::move(dbResults[i]);
size_t index = indexMap[cacheMisses[i]];
auto const& hash = hashes[index];
if (nObj)
{
// Ensure all threads get the same object
if (cache_)
cache_->canonicalize_replace_client(hash, nObj);
}
else
{ {
JLOG(j_.error()) JLOG(j_.error())
<< "fetchBatch - " << "fetchBatch - "
<< "record not found in db. hash = " << strHex(hashes[i]); << "record not found in db or cache. hash = " << strHex(hash);
if (cache_)
{
auto notFound = NodeObject::createObject(hotDUMMY, {}, hash);
cache_->canonicalize_replace_client(hash, notFound);
if (notFound->getType() != hotDUMMY)
nObj = std::move(notFound);
} }
} }
results[index] = std::move(nObj);
}
auto fetchDurationUs = auto fetchDurationUs =
std::chrono::duration_cast<std::chrono::microseconds>( std::chrono::duration_cast<std::chrono::microseconds>(
steady_clock::now() - before) steady_clock::now() - before)
.count(); .count();
updateFetchMetrics(hashes.size(), 0, fetchDurationUs); updateFetchMetrics(fetches, hits, fetchDurationUs);
return results; return results;
} }

View File

@@ -45,6 +45,38 @@ public:
: Database(scheduler, readThreads, config, j) : Database(scheduler, readThreads, config, j)
, backend_(std::move(backend)) , backend_(std::move(backend))
{ {
std::optional<int> cacheSize, cacheAge;
if (config.exists("cache_size"))
{
cacheSize = get<int>(config, "cache_size");
if (cacheSize.value() < 0)
{
Throw<std::runtime_error>(
"Specified negative value for cache_size");
}
}
if (config.exists("cache_age"))
{
cacheAge = get<int>(config, "cache_age");
if (cacheAge.value() < 0)
{
Throw<std::runtime_error>(
"Specified negative value for cache_age");
}
}
if (cacheSize != 0 || cacheAge != 0)
{
cache_ = std::make_shared<TaggedCache<uint256, NodeObject>>(
"DatabaseNodeImp",
cacheSize.value_or(0),
std::chrono::minutes(cacheAge.value_or(0)),
stopwatch(),
j);
}
XRPL_ASSERT( XRPL_ASSERT(
backend_, backend_,
"ripple::NodeStore::DatabaseNodeImp::DatabaseNodeImp : non-null " "ripple::NodeStore::DatabaseNodeImp::DatabaseNodeImp : non-null "
@@ -105,6 +137,9 @@ public:
sweep() override; sweep() override;
private: private:
// Cache for database objects. This cache is not always initialized. Check
// for null before using.
std::shared_ptr<TaggedCache<uint256, NodeObject>> cache_;
// Persistent key/value storage // Persistent key/value storage
std::shared_ptr<Backend> backend_; std::shared_ptr<Backend> backend_;

View File

@@ -35,8 +35,8 @@ namespace {
// element must be the number of children in a dense array. // element must be the number of children in a dense array.
constexpr std::array<std::uint8_t, 4> boundaries{ constexpr std::array<std::uint8_t, 4> boundaries{
2, 2,
3, 4,
5, 6,
SHAMapInnerNode::branchFactor}; SHAMapInnerNode::branchFactor};
static_assert( static_assert(
boundaries.size() <= 4, boundaries.size() <= 4,