Introduce partitioned unordered maps:

This commit implements partitioned unordered maps and makes it possible
to traverse such a map in parallel, allowing for more efficient use of
CPU resources.

The `CachedSLEs`, `TaggedCache`, and `KeyCache` classes make use of the
new functionality, which should improve performance.
This commit is contained in:
Mark Travis
2021-09-17 15:48:33 -07:00
committed by seelabs
parent 7edfbbd8bd
commit 19018e8959
26 changed files with 1089 additions and 770 deletions

View File

@@ -447,6 +447,7 @@ target_sources (rippled PRIVATE
src/ripple/basics/impl/UptimeClock.cpp
src/ripple/basics/impl/make_SSLContext.cpp
src/ripple/basics/impl/mulDiv.cpp
src/ripple/basics/impl/partitioned_unordered_map.cpp
#[===============================[
main sources:
subdir: conditions
@@ -483,7 +484,6 @@ target_sources (rippled PRIVATE
src/ripple/ledger/impl/ApplyViewBase.cpp
src/ripple/ledger/impl/ApplyViewImpl.cpp
src/ripple/ledger/impl/BookDirs.cpp
src/ripple/ledger/impl/CachedSLEs.cpp
src/ripple/ledger/impl/CachedView.cpp
src/ripple/ledger/impl/Directory.cpp
src/ripple/ledger/impl/OpenView.cpp

View File

@@ -30,6 +30,7 @@
#include <ripple/protocol/jss.h>
#include <memory>
#include <mutex>
#include <vector>
namespace ripple {
@@ -347,27 +348,29 @@ public:
void
sweep() override
{
clock_type::time_point const now(m_clock.now());
auto const start = m_clock.now();
// Make a list of things to sweep, while holding the lock
std::vector<MapType::mapped_type> stuffToSweep;
std::size_t total;
{
ScopedLockType sl(mLock);
MapType::iterator it(mLedgers.begin());
total = mLedgers.size();
stuffToSweep.reserve(total);
while (it != mLedgers.end())
{
if (it->second->getLastAction() > now)
auto const la = it->second->getLastAction();
if (la > start)
{
it->second->touch();
++it;
}
else if (
(it->second->getLastAction() + std::chrono::minutes(1)) <
now)
else if ((la + std::chrono::minutes(1)) < start)
{
stuffToSweep.push_back(it->second);
// shouldn't cause the actual final delete
@@ -383,8 +386,13 @@ public:
beast::expire(mRecentFailures, kReacquireInterval);
}
JLOG(j_.debug()) << "Swept " << stuffToSweep.size() << " out of "
<< total << " inbound ledgers.";
JLOG(j_.debug())
<< "Swept " << stuffToSweep.size() << " out of " << total
<< " inbound ledgers. Duration: "
<< std::chrono::duration_cast<std::chrono::milliseconds>(
m_clock.now() - start)
.count()
<< "ms";
}
void

View File

@@ -218,10 +218,12 @@ LedgerReplayer::gotReplayDelta(
void
LedgerReplayer::sweep()
{
auto const start = std::chrono::steady_clock::now();
{
std::lock_guard<std::mutex> lock(mtx_);
JLOG(j_.debug()) << "Sweeping, LedgerReplayer has " << tasks_.size()
<< " tasks, " << skipLists_.size() << " skipLists, and "
<< deltas_.size() << " deltas.";
<< " tasks, " << skipLists_.size()
<< " skipLists, and " << deltas_.size() << " deltas.";
tasks_.erase(
std::remove_if(
@@ -230,8 +232,8 @@ LedgerReplayer::sweep()
[this](auto const& t) -> bool {
if (t->finished())
{
JLOG(j_.debug())
<< "Sweep task " << t->getTaskParameter().finishHash_;
JLOG(j_.debug()) << "Sweep task "
<< t->getTaskParameter().finishHash_;
return true;
}
return false;
@@ -251,6 +253,12 @@ LedgerReplayer::sweep()
};
removeCannotLocked(skipLists_);
removeCannotLocked(deltas_);
}
JLOG(j_.debug()) << " LedgerReplayer sweep lock duration "
<< std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::steady_clock::now() - start)
.count()
<< "ms";
}
void

View File

@@ -79,12 +79,14 @@
#include <date/date.h>
#include <chrono>
#include <condition_variable>
#include <cstring>
#include <iostream>
#include <limits>
#include <mutex>
#include <optional>
#include <sstream>
#include <utility>
#include <variant>
@@ -327,7 +329,12 @@ public:
stopwatch(),
logs_->journal("TaggedCache"))
, cachedSLEs_(std::chrono::minutes(1), stopwatch())
, cachedSLEs_(
"Cached SLEs",
0,
std::chrono::minutes(1),
stopwatch(),
logs_->journal("CachedSLEs"))
, validatorKeys_(*config_, m_journal)
@@ -1146,11 +1153,11 @@ public:
shardStore_->sweep();
getLedgerMaster().sweep();
getTempNodeCache().sweep();
getValidations().expire();
getValidations().expire(m_journal);
getInboundLedgers().sweep();
getLedgerReplayer().sweep();
m_acceptedLedgerCache.sweep();
cachedSLEs_.expire();
cachedSLEs_.sweep();
#ifdef RIPPLED_REPORTING
if (auto pg = dynamic_cast<RelationalDBInterfacePostgres*>(

View File

@@ -52,7 +52,19 @@ class ShardArchiveHandler;
// VFALCO TODO Fix forward declares required for header dependency loops
class AmendmentTable;
class CachedSLEs;
template <
class Key,
class T,
bool IsKeyCache,
class Hash,
class KeyEqual,
class Mutex>
class TaggedCache;
class STLedgerEntry;
using SLE = STLedgerEntry;
using CachedSLEs = TaggedCache<uint256, SLE const>;
class CollectorManager;
class Family;
class HashRouter;

View File

@@ -1,7 +1,7 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Copyright (c) 2021 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
@@ -17,312 +17,16 @@
*/
//==============================================================================
#ifndef RIPPLE_BASICS_KEYCACHE_H_INCLUDED
#define RIPPLE_BASICS_KEYCACHE_H_INCLUDED
#ifndef RIPPLE_BASICS_KEYCACHE_H
#define RIPPLE_BASICS_KEYCACHE_H
#include <ripple/basics/UnorderedContainers.h>
#include <ripple/basics/hardened_hash.h>
#include <ripple/beast/clock/abstract_clock.h>
#include <ripple/beast/insight/Insight.h>
#include <mutex>
#include <ripple/basics/TaggedCache.h>
#include <ripple/basics/base_uint.h>
namespace ripple {
/** Maintains a cache of keys with no associated data.

    The cache has a target size and an expiration time. When cached items become
    older than the maximum age they are eligible for removal during a
    call to @ref sweep.
*/
template <
    class Key,
    class Hash = hardened_hash<>,
    class KeyEqual = std::equal_to<Key>,
    // class Allocator = std::allocator <std::pair <Key const, Entry>>,
    class Mutex = std::mutex>
class KeyCache
{
public:
    using key_type = Key;
    using clock_type = beast::abstract_clock<std::chrono::steady_clock>;

private:
    // Hit/miss counters and the gauges they are reported through
    // (beast::insight).
    struct Stats
    {
        template <class Handler>
        Stats(
            std::string const& prefix,
            Handler const& handler,
            beast::insight::Collector::ptr const& collector)
            : hook(collector->make_hook(handler))
            , size(collector->make_gauge(prefix, "size"))
            , hit_rate(collector->make_gauge(prefix, "hit_rate"))
            , hits(0)
            , misses(0)
        {
        }

        beast::insight::Hook hook;
        beast::insight::Gauge size;
        beast::insight::Gauge hit_rate;

        std::size_t hits;
        std::size_t misses;
    };

    // A map entry carries no value; it only records when the key was
    // last touched, which drives expiration in sweep().
    struct Entry
    {
        explicit Entry(clock_type::time_point const& last_access_)
            : last_access(last_access_)
        {
        }

        clock_type::time_point last_access;
    };

    using map_type = hardened_hash_map<key_type, Entry, Hash, KeyEqual>;
    using iterator = typename map_type::iterator;

public:
    using size_type = typename map_type::size_type;

private:
    // Guards m_map and the statistics counters below.
    Mutex mutable m_mutex;
    map_type m_map;
    Stats mutable m_stats;
    clock_type& m_clock;
    std::string const m_name;
    // Desired maximum number of entries; 0 means no size limit.
    size_type m_target_size;
    // Entries untouched for longer than this become eligible for removal.
    clock_type::duration m_target_age;

public:
    /** Construct with the specified name.

        @param size The initial target size.
        @param age  The initial expiration time.
    */
    KeyCache(
        std::string const& name,
        clock_type& clock,
        beast::insight::Collector::ptr const& collector,
        size_type target_size = 0,
        std::chrono::seconds expiration = std::chrono::minutes{2})
        : m_stats(name, std::bind(&KeyCache::collect_metrics, this), collector)
        , m_clock(clock)
        , m_name(name)
        , m_target_size(target_size)
        , m_target_age(expiration)
    {
    }

    // VFALCO TODO Use a forwarding constructor call here
    KeyCache(
        std::string const& name,
        clock_type& clock,
        size_type target_size = 0,
        std::chrono::seconds expiration = std::chrono::minutes{2})
        : m_stats(
              name,
              std::bind(&KeyCache::collect_metrics, this),
              beast::insight::NullCollector::New())
        , m_clock(clock)
        , m_name(name)
        , m_target_size(target_size)
        , m_target_age(expiration)
    {
    }

    //--------------------------------------------------------------------------

    /** Retrieve the name of this object. */
    std::string const&
    name() const
    {
        return m_name;
    }

    /** Return the clock associated with the cache. */
    clock_type&
    clock()
    {
        return m_clock;
    }

    /** Returns the number of items in the container. */
    size_type
    size() const
    {
        std::lock_guard lock(m_mutex);
        return m_map.size();
    }

    /** Empty the cache */
    void
    clear()
    {
        std::lock_guard lock(m_mutex);
        m_map.clear();
    }

    // Empty the cache and reset the hit/miss statistics.
    void
    reset()
    {
        std::lock_guard lock(m_mutex);
        m_map.clear();
        m_stats.hits = 0;
        m_stats.misses = 0;
    }

    // Change the target size; 0 disables the size limit.
    void
    setTargetSize(size_type s)
    {
        std::lock_guard lock(m_mutex);
        m_target_size = s;
    }

    // Change the expiration age used by sweep().
    void
    setTargetAge(std::chrono::seconds s)
    {
        std::lock_guard lock(m_mutex);
        m_target_age = s;
    }

    /** Returns `true` if the key was found.
        Does not update the last access time.
    */
    template <class KeyComparable>
    bool
    exists(KeyComparable const& key) const
    {
        std::lock_guard lock(m_mutex);
        typename map_type::const_iterator const iter(m_map.find(key));
        if (iter != m_map.end())
        {
            ++m_stats.hits;
            return true;
        }
        ++m_stats.misses;
        return false;
    }

    /** Insert the specified key.
        The last access time is refreshed in all cases.
        @return `true` If the key was newly inserted.
    */
    bool
    insert(Key const& key)
    {
        std::lock_guard lock(m_mutex);
        clock_type::time_point const now(m_clock.now());
        auto [it, inserted] = m_map.emplace(
            std::piecewise_construct,
            std::forward_as_tuple(key),
            std::forward_as_tuple(now));
        if (!inserted)
        {
            // Already present: only refresh the access time.
            it->second.last_access = now;
            return false;
        }
        return true;
    }

    /** Refresh the last access time on a key if present.
        @return `true` If the key was found.
    */
    template <class KeyComparable>
    bool
    touch_if_exists(KeyComparable const& key)
    {
        std::lock_guard lock(m_mutex);
        iterator const iter(m_map.find(key));
        if (iter == m_map.end())
        {
            ++m_stats.misses;
            return false;
        }
        iter->second.last_access = m_clock.now();
        ++m_stats.hits;
        return true;
    }

    /** Remove the specified cache entry.
        @param key The key to remove.
        @return `false` If the key was not found.
    */
    bool
    erase(key_type const& key)
    {
        std::lock_guard lock(m_mutex);
        if (m_map.erase(key) > 0)
        {
            ++m_stats.hits;
            return true;
        }
        ++m_stats.misses;
        return false;
    }

    /** Remove stale entries from the cache. */
    void
    sweep()
    {
        clock_type::time_point const now(m_clock.now());
        clock_type::time_point when_expire;

        std::lock_guard lock(m_mutex);
        if (m_target_size == 0 || (m_map.size() <= m_target_size))
        {
            // At or under target size: expire at the normal age.
            when_expire = now - m_target_age;
        }
        else
        {
            // Over target size: shrink the allowed age proportionally to
            // how far over target we are, but never below one second.
            when_expire = now - m_target_age * m_target_size / m_map.size();

            clock_type::duration const minimumAge(std::chrono::seconds(1));
            if (when_expire > (now - minimumAge))
                when_expire = now - minimumAge;
        }

        iterator it = m_map.begin();

        while (it != m_map.end())
        {
            if (it->second.last_access > now)
            {
                // Entry timestamp is in the future (clock adjustment);
                // clamp it to now.
                it->second.last_access = now;
                ++it;
            }
            else if (it->second.last_access <= when_expire)
            {
                it = m_map.erase(it);
            }
            else
            {
                ++it;
            }
        }
    }

private:
    // Publish current size and hit rate to the insight gauges.
    void
    collect_metrics()
    {
        m_stats.size.set(size());

        {
            beast::insight::Gauge::value_type hit_rate(0);

            {
                std::lock_guard lock(m_mutex);
                auto const total(m_stats.hits + m_stats.misses);
                if (total != 0)
                    hit_rate = (m_stats.hits * 100) / total;
            }

            m_stats.hit_rate.set(hit_rate);
        }
    }
};
using KeyCache = TaggedCache<uint256, int, true>;
} // namespace ripple
#endif
#endif // RIPPLE_BASICS_KEYCACHE_H

View File

@@ -0,0 +1,113 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2021 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_BASICS_SHAMAP_HASH_H_INCLUDED
#define RIPPLE_BASICS_SHAMAP_HASH_H_INCLUDED
#include <ripple/basics/base_uint.h>
#include <ostream>
namespace ripple {
/** The hash of a node in a SHAMap, and also the hash type of the whole map.

    A thin, strongly-typed wrapper around uint256 so that node hashes are
    not accidentally mixed with other 256-bit values.
*/
class SHAMapHash
{
    uint256 hash_;

public:
    SHAMapHash() = default;

    explicit SHAMapHash(uint256 const& hash) : hash_(hash)
    {
    }

    /** Read-only access to the wrapped 256-bit value. */
    uint256 const&
    as_uint256() const
    {
        return hash_;
    }

    /** Mutable access to the wrapped 256-bit value. */
    uint256&
    as_uint256()
    {
        return hash_;
    }

    /** True when the hash is all zero bits. */
    bool
    isZero() const
    {
        return hash_.isZero();
    }

    /** True when at least one bit of the hash is set. */
    bool
    isNonZero() const
    {
        return hash_.isNonZero();
    }

    /** Forwards the signum of the underlying value. */
    int
    signum() const
    {
        return hash_.signum();
    }

    /** Reset the hash to all zero bits. */
    void
    zero()
    {
        hash_.zero();
    }

    friend bool
    operator==(SHAMapHash const& lhs, SHAMapHash const& rhs)
    {
        return lhs.hash_ == rhs.hash_;
    }

    friend bool
    operator<(SHAMapHash const& lhs, SHAMapHash const& rhs)
    {
        return lhs.hash_ < rhs.hash_;
    }

    friend std::ostream&
    operator<<(std::ostream& out, SHAMapHash const& value)
    {
        return out << value.hash_;
    }

    friend std::string
    to_string(SHAMapHash const& value)
    {
        return to_string(value.hash_);
    }

    template <class H>
    friend void
    hash_append(H& hasher, SHAMapHash const& value)
    {
        hash_append(hasher, value.hash_);
    }
};
/** Inequality, expressed through the class's equality operator. */
inline bool
operator!=(SHAMapHash const& lhs, SHAMapHash const& rhs)
{
    return !(lhs == rhs);
}
} // namespace ripple
#endif // RIPPLE_BASICS_SHAMAP_HASH_H_INCLUDED

View File

@@ -25,8 +25,11 @@
#include <ripple/basics/hardened_hash.h>
#include <ripple/beast/clock/abstract_clock.h>
#include <ripple/beast/insight/Insight.h>
#include <atomic>
#include <functional>
#include <mutex>
#include <thread>
#include <type_traits>
#include <vector>
namespace ripple {
@@ -46,6 +49,7 @@ namespace ripple {
template <
class Key,
class T,
bool IsKeyCache = false,
class Hash = hardened_hash<>,
class KeyEqual = std::equal_to<Key>,
class Mutex = std::recursive_mutex>
@@ -89,11 +93,12 @@ public:
return m_clock;
}
int
getTargetSize() const
/** Returns the number of items in the container. */
std::size_t
size() const
{
std::lock_guard lock(m_mutex);
return m_target_size;
return m_cache.size();
}
void
@@ -103,8 +108,15 @@ public:
m_target_size = s;
if (s > 0)
m_cache.rehash(static_cast<std::size_t>(
(s + (s >> 2)) / m_cache.max_load_factor() + 1));
{
for (auto& partition : m_cache.map())
{
partition.rehash(static_cast<std::size_t>(
(s + (s >> 2)) /
(partition.max_load_factor() * m_cache.partitions()) +
1));
}
}
JLOG(m_journal.debug()) << m_name << " target size set to " << s;
}
@@ -165,22 +177,39 @@ public:
m_misses = 0;
}
/** Refresh the last access time on a key if present.
    @return `true` If the key was found.
*/
template <class KeyComparable>
bool
touch_if_exists(KeyComparable const& key)
{
    std::lock_guard lock(m_mutex);
    auto const iter(m_cache.find(key));
    if (iter == m_cache.end())
    {
        // NOTE(review): hits/misses are tallied on m_stats here, while
        // fetch() uses m_hits/m_misses — confirm which counters feed the
        // published metrics.
        ++m_stats.misses;
        return false;
    }
    iter->second.touch(m_clock.now());
    ++m_stats.hits;
    return true;
}
void
sweep()
{
int cacheRemovals = 0;
int mapRemovals = 0;
int cc = 0;
// Keep references to all the stuff we sweep
// so that we can destroy them outside the lock.
//
std::vector<std::shared_ptr<mapped_type>> stuffToSweep;
// For performance, each worker thread should exit before the swept data
// is destroyed but still within the main cache lock.
std::vector<std::vector<std::shared_ptr<mapped_type>>> allStuffToSweep(
m_cache.partitions());
{
clock_type::time_point const now(m_clock.now());
clock_type::time_point when_expire;
auto const start = std::chrono::steady_clock::now();
{
std::lock_guard lock(m_mutex);
if (m_target_size == 0 ||
@@ -204,61 +233,33 @@ public:
<< m_target_age.count();
}
stuffToSweep.reserve(m_cache.size());
std::vector<std::thread> workers;
workers.reserve(m_cache.partitions());
std::atomic<int> allRemovals = 0;
auto cit = m_cache.begin();
for (std::size_t p = 0; p < m_cache.partitions(); ++p)
{
workers.push_back(sweepHelper(
when_expire,
now,
m_cache.map()[p],
allStuffToSweep[p],
allRemovals,
lock));
}
for (std::thread& worker : workers)
worker.join();
while (cit != m_cache.end())
{
if (cit->second.isWeak())
{
// weak
if (cit->second.isExpired())
{
++mapRemovals;
cit = m_cache.erase(cit);
m_cache_count -= allRemovals;
}
else
{
++cit;
}
}
else if (cit->second.last_access <= when_expire)
{
// strong, expired
--m_cache_count;
++cacheRemovals;
if (cit->second.ptr.unique())
{
stuffToSweep.push_back(cit->second.ptr);
++mapRemovals;
cit = m_cache.erase(cit);
}
else
{
// remains weakly cached
cit->second.ptr.reset();
++cit;
}
}
else
{
// strong, not expired
++cc;
++cit;
}
}
}
if (mapRemovals || cacheRemovals)
{
JLOG(m_journal.trace())
<< m_name << ": cache = " << m_cache.size() << "-"
<< cacheRemovals << ", map-=" << mapRemovals;
}
// At this point stuffToSweep will go out of scope outside the lock
// At this point allStuffToSweep will go out of scope outside the lock
// and decrement the reference count on each strong pointer.
JLOG(m_journal.debug())
<< m_name << " TaggedCache sweep lock duration "
<< std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::steady_clock::now() - start)
.count()
<< "ms";
}
bool
@@ -391,51 +392,41 @@ public:
std::shared_ptr<T>
fetch(const key_type& key)
{
// fetch us a shared pointer to the stored data object
std::lock_guard lock(m_mutex);
auto cit = m_cache.find(key);
if (cit == m_cache.end())
{
std::lock_guard<mutex_type> l(m_mutex);
auto ret = initialFetch(key, l);
if (!ret)
++m_misses;
return {};
}
Entry& entry = cit->second;
entry.touch(m_clock.now());
if (entry.isCached())
{
++m_hits;
return entry.ptr;
}
entry.ptr = entry.lock();
if (entry.isCached())
{
// independent of cache size, so not counted as a hit
++m_cache_count;
return entry.ptr;
}
m_cache.erase(cit);
++m_misses;
return {};
return ret;
}
/** Insert the element into the container.
If the key already exists, nothing happens.
@return `true` If the element was inserted
*/
bool
/** Insert the element into the container (key/value mode).
    If the key already exists, nothing happens.
    @return `true` If the element was inserted
*/
template <class ReturnType = bool>
auto
insert(key_type const& key, T const& value)
    -> std::enable_if_t<!IsKeyCache, ReturnType>
{
    // Copy the value into a shared_ptr and canonicalize it, preferring
    // any object already cached for this key.
    auto p = std::make_shared<T>(std::cref(value));
    return canonicalize_replace_client(key, p);
}

/** Insert the key into the container (key-cache mode).
    The last access time is refreshed whether or not the key was new.
    @return `true` If the key was newly inserted
*/
template <class ReturnType = bool>
auto
insert(key_type const& key) -> std::enable_if_t<IsKeyCache, ReturnType>
{
    std::lock_guard lock(m_mutex);
    clock_type::time_point const now(m_clock.now());
    auto [it, inserted] = m_cache.emplace(
        std::piecewise_construct,
        std::forward_as_tuple(key),
        std::forward_as_tuple(now));
    if (!inserted)
        it->second.last_access = now;
    return inserted;
}
// VFALCO NOTE It looks like this returns a copy of the data in
// the output parameter 'data'. This could be expensive.
// Perhaps it should work like standard containers, which
@@ -454,53 +445,6 @@ public:
return true;
}
/** Refresh the expiration time on a key.
@param key The key to refresh.
@return `true` if the key was found and the object is cached.
*/
bool
refreshIfPresent(const key_type& key)
{
bool found = false;
// If present, make current in cache
std::lock_guard lock(m_mutex);
if (auto cit = m_cache.find(key); cit != m_cache.end())
{
Entry& entry = cit->second;
if (!entry.isCached())
{
// Convert weak to strong.
entry.ptr = entry.lock();
if (entry.isCached())
{
// We just put the object back in cache
++m_cache_count;
entry.touch(m_clock.now());
found = true;
}
else
{
// Couldn't get strong pointer,
// object fell out of the cache so remove the entry.
m_cache.erase(cit);
}
}
else
{
// It's cached so update the timer
entry.touch(m_clock.now());
found = true;
}
}
return found;
}
mutex_type&
peekMutex()
{
@@ -522,7 +466,75 @@ public:
return v;
}
// CachedSLEs functions.
/** Returns the fraction of cache hits over all lookups, in [0, 1]. */
double
rate() const
{
    std::lock_guard lock(m_mutex);
    auto const total = m_hits + m_misses;
    // No lookups yet counts as a zero hit rate.
    return total == 0 ? 0.0 : double(m_hits) / total;
}
/** Fetch an item from the cache.
    If the digest was not found, Handler
    will be called with this signature:
        std::shared_ptr<SLE const>(void)
*/
template <class Handler>
std::shared_ptr<T>
fetch(key_type const& digest, Handler const& h)
{
    {
        // Fast path: the item is already cached.
        std::lock_guard l(m_mutex);
        if (auto ret = initialFetch(digest, l))
            return ret;
    }

    // Miss: construct the value outside the lock. Two threads can race
    // here and both invoke the handler; the emplace below ensures only
    // one insertion wins.
    auto sle = h();
    if (!sle)
        return {};

    std::lock_guard l(m_mutex);
    ++m_misses;
    auto const [it, inserted] =
        m_cache.emplace(digest, Entry(m_clock.now(), std::move(sle)));
    if (!inserted)
        // Another thread inserted first; refresh its access time and
        // return the already-cached object.
        it->second.touch(m_clock.now());
    return it->second.ptr;
}
// End CachedSLEs functions.
private:
// Look up a key while the caller already holds the cache mutex.
// Returns the cached object, reviving a still-alive weak entry if
// possible; erases the entry (and returns null) when the weak pointer
// has expired.
std::shared_ptr<T>
initialFetch(key_type const& key, std::lock_guard<mutex_type> const& l)
{
    auto cit = m_cache.find(key);
    if (cit == m_cache.end())
        return {};

    Entry& entry = cit->second;
    if (entry.isCached())
    {
        ++m_hits;
        entry.touch(m_clock.now());
        return entry.ptr;
    }
    // Try to promote the weak pointer back to a strong one.
    entry.ptr = entry.lock();
    if (entry.isCached())
    {
        // independent of cache size, so not counted as a hit
        ++m_cache_count;
        entry.touch(m_clock.now());
        return entry.ptr;
    }

    // The object is gone; drop the stale entry.
    m_cache.erase(cit);
    return {};
}
void
collect_metrics()
{
@@ -551,22 +563,44 @@ private:
: hook(collector->make_hook(handler))
, size(collector->make_gauge(prefix, "size"))
, hit_rate(collector->make_gauge(prefix, "hit_rate"))
, hits(0)
, misses(0)
{
}
beast::insight::Hook hook;
beast::insight::Gauge size;
beast::insight::Gauge hit_rate;
std::size_t hits;
std::size_t misses;
};
class Entry
class KeyOnlyEntry
{
public:
clock_type::time_point last_access;
explicit KeyOnlyEntry(clock_type::time_point const& last_access_)
: last_access(last_access_)
{
}
void
touch(clock_type::time_point const& now)
{
last_access = now;
}
};
class ValueEntry
{
public:
std::shared_ptr<mapped_type> ptr;
std::weak_ptr<mapped_type> weak_ptr;
clock_type::time_point last_access;
Entry(
ValueEntry(
clock_type::time_point const& last_access_,
std::shared_ptr<mapped_type> const& ptr_)
: ptr(ptr_), weak_ptr(ptr_), last_access(last_access_)
@@ -600,7 +634,136 @@ private:
}
};
using cache_type = hardened_hash_map<key_type, Entry, Hash, KeyEqual>;
typedef
typename std::conditional<IsKeyCache, KeyOnlyEntry, ValueEntry>::type
Entry;
using KeyOnlyCacheType =
hardened_partitioned_hash_map<key_type, KeyOnlyEntry, Hash, KeyEqual>;
using KeyValueCacheType =
hardened_partitioned_hash_map<key_type, ValueEntry, Hash, KeyEqual>;
using cache_type =
hardened_partitioned_hash_map<key_type, Entry, Hash, KeyEqual>;
/** Spawn a thread that sweeps one partition of a key/value cache.

    @param when_expire entries last accessed at or before this time expire
    @param now current clock time (unused in this overload)
    @param partition the single partition this thread owns exclusively
    @param stuffToSweep receives strong pointers so destruction can happen
           outside the lock
    @param allRemovals accumulates strong-cache removals across threads
    @param lock witness that the caller holds the cache mutex for the
           lifetime of the worker
*/
[[nodiscard]] std::thread
sweepHelper(
    clock_type::time_point const& when_expire,
    [[maybe_unused]] clock_type::time_point const& now,
    typename KeyValueCacheType::map_type& partition,
    std::vector<std::shared_ptr<mapped_type>>& stuffToSweep,
    std::atomic<int>& allRemovals,
    std::lock_guard<std::recursive_mutex> const& lock)
{
    return std::thread([&, this]() {
        int cacheRemovals = 0;
        int mapRemovals = 0;

        // Keep references to all the stuff we sweep
        // so that we can destroy them outside the lock.
        stuffToSweep.reserve(partition.size());
        {
            auto cit = partition.begin();
            while (cit != partition.end())
            {
                if (cit->second.isWeak())
                {
                    // weak
                    if (cit->second.isExpired())
                    {
                        ++mapRemovals;
                        cit = partition.erase(cit);
                    }
                    else
                    {
                        ++cit;
                    }
                }
                else if (cit->second.last_access <= when_expire)
                {
                    // strong, expired
                    ++cacheRemovals;
                    if (cit->second.ptr.unique())
                    {
                        // Last owner: queue for destruction outside lock.
                        stuffToSweep.push_back(cit->second.ptr);
                        ++mapRemovals;
                        cit = partition.erase(cit);
                    }
                    else
                    {
                        // remains weakly cached
                        cit->second.ptr.reset();
                        ++cit;
                    }
                }
                else
                {
                    // strong, not expired
                    ++cit;
                }
            }
        }
        if (mapRemovals || cacheRemovals)
        {
            JLOG(m_journal.debug())
                << "TaggedCache partition sweep " << m_name
                << ": cache = " << partition.size() << "-" << cacheRemovals
                << ", map-=" << mapRemovals;
        }

        allRemovals += cacheRemovals;
    });
}
/** Spawn a thread that sweeps one partition of a key-only cache.

    @param when_expire entries last accessed at or before this time expire
    @param now current clock time; future-dated entries are clamped to it
    @param partition the single partition this thread owns exclusively
    @param stuffToSweep unused for key-only entries (nothing to destroy),
           kept for signature symmetry with the key/value overload
    @param allRemovals accumulates strong-cache removals across threads
    @param lock witness that the caller holds the cache mutex for the
           lifetime of the worker
*/
[[nodiscard]] std::thread
sweepHelper(
    clock_type::time_point const& when_expire,
    clock_type::time_point const& now,
    typename KeyOnlyCacheType::map_type& partition,
    std::vector<std::shared_ptr<mapped_type>>& stuffToSweep,
    std::atomic<int>& allRemovals,
    std::lock_guard<std::recursive_mutex> const& lock)
{
    return std::thread([&, this]() {
        int cacheRemovals = 0;
        int mapRemovals = 0;

        // Keep references to all the stuff we sweep
        // so that we can destroy them outside the lock.
        stuffToSweep.reserve(partition.size());
        {
            auto cit = partition.begin();
            while (cit != partition.end())
            {
                if (cit->second.last_access > now)
                {
                    // Future-dated entry (clock adjustment); clamp.
                    cit->second.last_access = now;
                    ++cit;
                }
                else if (cit->second.last_access <= when_expire)
                {
                    // Fix: count the removal so the summary log below
                    // actually fires; previously both counters stayed at
                    // zero and every sweep appeared to remove nothing.
                    ++mapRemovals;
                    cit = partition.erase(cit);
                }
                else
                {
                    ++cit;
                }
            }
        }
        if (mapRemovals || cacheRemovals)
        {
            JLOG(m_journal.debug())
                << "TaggedCache partition sweep " << m_name
                << ": cache = " << partition.size() << "-" << cacheRemovals
                << ", map-=" << mapRemovals;
        }

        allRemovals += cacheRemovals;
    });
}
beast::Journal m_journal;
clock_type& m_clock;

View File

@@ -21,6 +21,7 @@
#define RIPPLE_BASICS_UNORDEREDCONTAINERS_H_INCLUDED
#include <ripple/basics/hardened_hash.h>
#include <ripple/basics/partitioned_unordered_map.h>
#include <ripple/beast/hash/hash_append.h>
#include <ripple/beast/hash/uhash.h>
#include <ripple/beast/hash/xxhasher.h>
@@ -86,6 +87,15 @@ template <
class Allocator = std::allocator<std::pair<Key const, Value>>>
using hardened_hash_map = std::unordered_map<Key, Value, Hash, Pred, Allocator>;
// Like hardened_hash_map, but sharded into multiple partitions
// (see partitioned_unordered_map) so each partition can be swept or
// traversed independently, e.g. by parallel worker threads.
template <
    class Key,
    class Value,
    class Hash = hardened_hash<strong_hash>,
    class Pred = std::equal_to<Key>,
    class Allocator = std::allocator<std::pair<Key const, Value>>>
using hardened_partitioned_hash_map =
    partitioned_unordered_map<Key, Value, Hash, Pred, Allocator>;
template <
class Key,
class Value,

View File

@@ -0,0 +1,78 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2021 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/basics/partitioned_unordered_map.h>

#include <ripple/basics/SHAMapHash.h>
#include <ripple/basics/base_uint.h>
#include <ripple/beast/hash/uhash.h>
#include <ripple/protocol/Protocol.h>

#include <cstring>
#include <limits>
#include <string>
namespace ripple {
// Derive a partition source value from the leading bytes of the key.
// The key is a hash, so any fixed-size slice of it is well distributed.
static std::size_t
extract(uint256 const& key)
{
    std::size_t result;
    // memcpy instead of dereferencing a reinterpret_cast'ed pointer:
    // avoids undefined behavior from strict-aliasing violations and
    // unaligned reads; compilers lower this to a single load anyway.
    std::memcpy(&result, key.data(), sizeof(result));
    return result;
}
// Derive a partition source value from the leading bytes of the wrapped
// uint256. See extract(uint256 const&) for the rationale.
static std::size_t
extract(SHAMapHash const& key)
{
    std::size_t result;
    // memcpy avoids the strict-aliasing/alignment UB of the previous
    // reinterpret_cast-and-dereference.
    std::memcpy(&result, key.as_uint256().data(), sizeof(result));
    return result;
}
// Use the ledger sequence number itself as the partition source value.
// NOTE(review): consecutive indexes land in consecutive partitions,
// spreading contiguous ledger ranges evenly across partitions.
static std::size_t
extract(LedgerIndex key)
{
    return static_cast<std::size_t>(key);
}
static std::size_t
extract(std::string const& key)
{
return ::beast::uhash<>{}(key);
}
// Map a key to a partition index in [0, numPartitions) by reducing the
// key's extracted value modulo the partition count.
template <typename Key>
std::size_t
partitioner(Key const& key, std::size_t const numPartitions)
{
    auto const source = extract(key);
    return source % numPartitions;
}
template std::size_t
partitioner<LedgerIndex>(
LedgerIndex const& key,
std::size_t const numPartitions);
template std::size_t
partitioner<uint256>(uint256 const& key, std::size_t const numPartitions);
template std::size_t
partitioner<SHAMapHash>(SHAMapHash const& key, std::size_t const numPartitions);
template std::size_t
partitioner<std::string>(
std::string const& key,
std::size_t const numPartitions);
} // namespace ripple

View File

@@ -0,0 +1,409 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2021 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_BASICS_PARTITIONED_UNORDERED_MAP_H
#define RIPPLE_BASICS_PARTITIONED_UNORDERED_MAP_H
#include <cassert>
#include <functional>
#include <optional>
#include <thread>
#include <unordered_map>
#include <utility>
#include <vector>
namespace ripple {
template <typename Key>
std::size_t
partitioner(Key const& key, std::size_t const numPartitions);
template <
typename Key,
typename Value,
typename Hash,
typename Pred = std::equal_to<Key>,
typename Alloc = std::allocator<std::pair<const Key, Value>>>
class partitioned_unordered_map
{
std::size_t partitions_;
public:
using key_type = Key;
using mapped_type = Value;
using value_type = std::pair<Key const, mapped_type>;
using size_type = std::size_t;
using difference_type = std::size_t;
using hasher = Hash;
using key_equal = Pred;
using allocator_type = Alloc;
using reference = value_type&;
using const_reference = value_type const&;
using pointer = value_type*;
using const_pointer = value_type const*;
using map_type = std::
unordered_map<key_type, mapped_type, hasher, key_equal, allocator_type>;
using partition_map_type = std::vector<map_type>;
// Forward iterator that walks every element of every partition in
// sequence, skipping empty partitions.
struct iterator
{
    using iterator_category = std::forward_iterator_tag;

    // Non-owning pointer to the vector of per-partition maps; nullptr
    // for a default-constructed (singular) iterator.
    partition_map_type* map_{nullptr};
    // Which partition we are currently in.
    typename partition_map_type::iterator ait_;
    // Position within that partition.
    typename map_type::iterator mit_;

    iterator() = default;

    iterator(partition_map_type* map) : map_(map)
    {
    }

    reference
    operator*() const
    {
        return *mit_;
    }

    pointer
    operator->() const
    {
        return &(*mit_);
    }

    // Advance one element, hopping over any empty partitions. When the
    // last partition is exhausted, mit_ is left equal to the final
    // partition's end(), matching what end() produces.
    // Precondition: not already at end().
    void
    inc()
    {
        ++mit_;
        while (mit_ == ait_->end())
        {
            ++ait_;
            if (ait_ == map_->end())
                return;
            mit_ = ait_->begin();
        }
    }

    // ++it
    iterator&
    operator++()
    {
        inc();
        return *this;
    }

    // it++
    iterator
    operator++(int)
    {
        iterator tmp(*this);
        inc();
        return tmp;
    }

    friend bool
    operator==(iterator const& lhs, iterator const& rhs)
    {
        return lhs.map_ == rhs.map_ && lhs.ait_ == rhs.ait_ &&
            lhs.mit_ == rhs.mit_;
    }

    friend bool
    operator!=(iterator const& lhs, iterator const& rhs)
    {
        return !(lhs == rhs);
    }
};
// Const counterpart of iterator: same traversal, but dereferencing
// yields const references. NOTE(review): it stores non-const underlying
// iterators, so constness is only enforced at the dereference operators.
struct const_iterator
{
    using iterator_category = std::forward_iterator_tag;

    // Non-owning pointer to the vector of per-partition maps.
    partition_map_type* map_{nullptr};
    // Which partition we are currently in.
    typename partition_map_type::iterator ait_;
    // Position within that partition.
    typename map_type::iterator mit_;

    const_iterator() = default;

    const_iterator(partition_map_type* map) : map_(map)
    {
    }

    // Implicit conversion from the mutable iterator.
    const_iterator(iterator const& orig)
    {
        map_ = orig.map_;
        ait_ = orig.ait_;
        mit_ = orig.mit_;
    }

    const_reference
    operator*() const
    {
        return *mit_;
    }

    const_pointer
    operator->() const
    {
        return &(*mit_);
    }

    // Advance one element, hopping over empty partitions; see
    // iterator::inc() for the termination behavior.
    void
    inc()
    {
        ++mit_;
        while (mit_ == ait_->end())
        {
            ++ait_;
            if (ait_ == map_->end())
                return;
            mit_ = ait_->begin();
        }
    }

    // ++it
    const_iterator&
    operator++()
    {
        inc();
        return *this;
    }

    // it++
    const_iterator
    operator++(int)
    {
        const_iterator tmp(*this);
        inc();
        return tmp;
    }

    friend bool
    operator==(const_iterator const& lhs, const_iterator const& rhs)
    {
        return lhs.map_ == rhs.map_ && lhs.ait_ == rhs.ait_ &&
            lhs.mit_ == rhs.mit_;
    }

    friend bool
    operator!=(const_iterator const& lhs, const_iterator const& rhs)
    {
        return !(lhs == rhs);
    }
};
private:
// Delegate to the namespace-scope partitioner declared above, using this
// container's configured partition count.
std::size_t
partitioner(Key const& key) const
{
    return ripple::partitioner(key, partitions_);
}
// Position `it` at the one-past-the-end sentinel: partition iterator at
// the end of the partition vector, element iterator at the last
// partition's end(). Relies on at least one partition existing (the
// constructor guarantees map_ is non-empty via back()).
template <class T>
static void
end(T& it)
{
    it.ait_ = it.map_->end();
    it.mit_ = it.map_->back().end();
}
// Position `it` at the first element of the first non-empty partition,
// or at end() if every partition is empty.
template <class T>
static void
begin(T& it)
{
    for (it.ait_ = it.map_->begin(); it.ait_ != it.map_->end(); ++it.ait_)
    {
        // Skip empty partitions.
        if (it.ait_->begin() == it.ait_->end())
            continue;
        it.mit_ = it.ait_->begin();
        return;
    }
    end(it);
}
public:
/** Construct with the given number of partitions.

    @param partitions Number of partitions; if empty or 0, the number
           of hardware threads is used instead.

    Note: std::thread::hardware_concurrency() is allowed to return 0
    when the value is not computable. In that case we clamp to a single
    partition — otherwise partitioner()'s modulo would be undefined
    behavior in release builds (where the assert is compiled out).
*/
partitioned_unordered_map(
    std::optional<std::size_t> partitions = std::nullopt)
{
    // Set partitions to the number of hardware threads if the parameter
    // is either empty or set to 0.
    partitions_ = partitions && *partitions
        ? *partitions
        : std::thread::hardware_concurrency();
    if (partitions_ == 0)
        partitions_ = 1;
    assert(partitions_);
    map_.resize(partitions_);
}
// Number of partitions (fixed at construction).
std::size_t
partitions() const
{
    return partitions_;
}
// Expose the underlying per-partition maps, e.g. so callers can
// process each partition in parallel. Mutating elements through this
// reference bypasses the iterator interface — use with care.
partition_map_type&
map()
{
    return map_;
}
// Iterator to the first element of the first non-empty partition, or
// end() if the container is empty.
iterator
begin()
{
    iterator it(&map_);
    begin(it);
    return it;
}
// Const iterator to the first element, or cend() if empty. (map_ is
// declared mutable, so the non-const inner iterators can be obtained
// from a const member function.)
const_iterator
cbegin() const
{
    const_iterator it(&map_);
    begin(it);
    return it;
}
// Const overload; equivalent to cbegin().
const_iterator
begin() const
{
    return cbegin();
}
// Past-the-end sentinel (partition iterator at map_.end(), element
// iterator at the last partition's end()).
iterator
end()
{
    iterator it(&map_);
    end(it);
    return it;
}
// Const past-the-end sentinel.
const_iterator
cend() const
{
    const_iterator it(&map_);
    end(it);
    return it;
}
// Const overload; equivalent to cend().
const_iterator
end() const
{
    return cend();
}
private:
// Position `it` on the element with `key`, or on the end() sentinel if
// the key is absent. Only the partition owning the key is searched.
template <class T>
void
find(key_type const& key, T& it) const
{
    auto const pit = it.map_->begin() + partitioner(key);
    it.ait_ = pit;
    auto const found = pit->find(key);
    if (found == pit->end())
    {
        end(it);
        return;
    }
    it.mit_ = found;
}
public:
// Locate `key`; returns end() if not present.
iterator
find(key_type const& key)
{
    iterator it(&map_);
    find(key, it);
    return it;
}
// Const overload: locate `key`; returns end() if not present.
const_iterator
find(key_type const& key) const
{
    const_iterator it(&map_);
    find(key, it);
    return it;
}
template <class T, class U>
std::pair<iterator, bool>
emplace(std::piecewise_construct_t const&, T&& keyTuple, U&& valueTuple)
{
auto const& key = std::get<0>(keyTuple);
iterator it(&map_);
it.ait_ = it.map_->begin() + partitioner(key);
auto [eit, inserted] = it.ait_->emplace(
std::piecewise_construct,
std::forward<T>(keyTuple),
std::forward<U>(valueTuple));
it.mit_ = eit;
return {it, inserted};
}
template <class T, class U>
std::pair<iterator, bool>
emplace(T&& key, U&& val)
{
iterator it(&map_);
it.ait_ = it.map_->begin() + partitioner(key);
auto [eit, inserted] =
it.ait_->emplace(std::forward<T>(key), std::forward<U>(val));
it.mit_ = eit;
return {it, inserted};
}
// Remove every element from every partition.
void
clear()
{
    for (std::size_t i = 0; i < map_.size(); ++i)
        map_[i].clear();
}
// Erase the element at `position`; returns an iterator to the next
// element in traversal order, or end() if none remains.
iterator
erase(const_iterator position)
{
    iterator it(&map_);
    it.ait_ = position.ait_;
    // The partition's own erase already yields the next element within
    // that partition (or its end()).
    it.mit_ = position.ait_->erase(position.mit_);
    // If we fell off the end of a partition, advance to the next
    // non-empty one. Falling off the last partition leaves
    // ait_ == map_->end() with mit_ at the last partition's end(),
    // which is exactly the end() sentinel.
    while (it.mit_ == it.ait_->end())
    {
        ++it.ait_;
        if (it.ait_ == it.map_->end())
            break;
        it.mit_ = it.ait_->begin();
    }
    return it;
}
// Total element count: the sum of all partition sizes. O(partitions).
std::size_t
size() const
{
    std::size_t total = 0;
    for (auto const& partition : map_)
        total += partition.size();
    return total;
}
// Access the value for `key`, routed to the owning partition. Element
// creation on a missing key follows the underlying map_type's
// operator[] semantics (default-insertion for std::unordered_map).
Value&
operator[](Key const& key)
{
    return map_[partitioner(key)][key];
}
private:
    // The partitions. Declared mutable so const members (cbegin/cend/
    // find const) can obtain the non-const inner iterators the
    // iterator types store.
    mutable partition_map_type map_{};
};
} // namespace ripple
#endif // RIPPLE_BASICS_PARTITIONED_UNORDERED_MAP_H

View File

@@ -666,6 +666,7 @@ Consensus<Adaptor>::startRoundInternal(
ConsensusMode mode)
{
phase_ = ConsensusPhase::open;
JLOG(j_.debug()) << "transitioned to ConsensusPhase::open";
mode_.set(mode, adaptor_);
now_ = now;
prevLedgerID_ = prevLedgerID;
@@ -1290,6 +1291,7 @@ Consensus<Adaptor>::phaseEstablish()
prevProposers_ = currPeerPositions_.size();
prevRoundTime_ = result_->roundTime.read();
phase_ = ConsensusPhase::accepted;
JLOG(j_.debug()) << "transitioned to ConsensusPhase::accepted";
adaptor_.onAccept(
*result_,
previousLedger_,
@@ -1307,6 +1309,7 @@ Consensus<Adaptor>::closeLedger()
assert(!result_);
phase_ = ConsensusPhase::establish;
JLOG(j_.debug()) << "transitioned to ConsensusPhase::establish";
rawCloseTimes_.self = now_;
result_.emplace(adaptor_.onClose(previousLedger_, now_, mode_.get()));

View File

@@ -722,15 +722,18 @@ public:
validationSET_EXPIRES ago and were not asked to keep.
*/
void
expire()
expire(beast::Journal& j)
{
auto const start = std::chrono::steady_clock::now();
{
std::lock_guard lock{mutex_};
if (toKeep_)
{
// We only need to refresh the keep range when it's just about to
// expire. Track the next time we need to refresh.
// We only need to refresh the keep range when it's just about
// to expire. Track the next time we need to refresh.
static std::chrono::steady_clock::time_point refreshTime;
if (auto const now = byLedger_.clock().now(); refreshTime <= now)
if (auto const now = byLedger_.clock().now();
refreshTime <= now)
{
// The next refresh time is shortly before the expiration
// time from now.
@@ -742,7 +745,8 @@ public:
auto const& validationMap = i->second;
if (!validationMap.empty())
{
auto const seq = validationMap.begin()->second.seq();
auto const seq =
validationMap.begin()->second.seq();
if (toKeep_->low_ <= seq && seq < toKeep_->high_)
{
byLedger_.touch(i);
@@ -750,9 +754,11 @@ public:
}
}
for (auto i = bySequence_.begin(); i != bySequence_.end(); ++i)
for (auto i = bySequence_.begin(); i != bySequence_.end();
++i)
{
if (toKeep_->low_ <= i->first && i->first < toKeep_->high_)
if (toKeep_->low_ <= i->first &&
i->first < toKeep_->high_)
{
bySequence_.touch(i);
}
@@ -763,6 +769,13 @@ public:
beast::expire(byLedger_, parms_.validationSET_EXPIRES);
beast::expire(bySequence_, parms_.validationSET_EXPIRES);
}
JLOG(j.debug())
<< "Validations sets sweep lock duration "
<< std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::steady_clock::now() - start)
.count()
<< "ms";
}
/** Update trust status of validations

View File

@@ -20,90 +20,12 @@
#ifndef RIPPLE_LEDGER_CACHEDSLES_H_INCLUDED
#define RIPPLE_LEDGER_CACHEDSLES_H_INCLUDED
#include <ripple/basics/chrono.h>
#include <ripple/beast/container/aged_unordered_map.h>
#include <ripple/basics/TaggedCache.h>
#include <ripple/basics/base_uint.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <memory>
#include <mutex>
namespace ripple {
using CachedSLEs = TaggedCache<uint256, SLE const>;
}
/** Caches SLEs by their digest. */
class CachedSLEs
{
public:
using digest_type = uint256;
using value_type = std::shared_ptr<SLE const>;
CachedSLEs(CachedSLEs const&) = delete;
CachedSLEs&
operator=(CachedSLEs const&) = delete;
template <class Rep, class Period>
CachedSLEs(
std::chrono::duration<Rep, Period> const& timeToLive,
Stopwatch& clock)
: timeToLive_(timeToLive), map_(clock)
{
}
/** Discard expired entries.
Needs to be called periodically.
*/
void
expire();
/** Fetch an item from the cache.
If the digest was not found, Handler
will be called with this signature:
std::shared_ptr<SLE const>(void)
*/
template <class Handler>
value_type
fetch(digest_type const& digest, Handler const& h)
{
{
std::lock_guard lock(mutex_);
auto iter = map_.find(digest);
if (iter != map_.end())
{
++hit_;
map_.touch(iter);
return iter->second;
}
}
auto sle = h();
if (!sle)
return nullptr;
std::lock_guard lock(mutex_);
++miss_;
auto const [it, inserted] = map_.emplace(digest, std::move(sle));
if (!inserted)
map_.touch(it);
return it->second;
}
/** Returns the fraction of cache hits. */
double
rate() const;
private:
std::size_t hit_ = 0;
std::size_t miss_ = 0;
std::mutex mutable mutex_;
Stopwatch::duration timeToLive_;
beast::aged_unordered_map<
digest_type,
value_type,
Stopwatch::clock_type,
hardened_hash<strong_hash>>
map_;
};
} // namespace ripple
#endif
#endif // RIPPLE_LEDGER_CACHEDSLES_H_INCLUDED

View File

@@ -1,57 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/ledger/CachedSLEs.h>
#include <vector>
namespace ripple {
void
CachedSLEs::expire()
{
std::vector<std::shared_ptr<void const>> trash;
{
auto const expireTime = map_.clock().now() - timeToLive_;
std::lock_guard lock(mutex_);
for (auto iter = map_.chronological.begin();
iter != map_.chronological.end();
++iter)
{
if (iter.when() > expireTime)
break;
if (iter->second.unique())
{
trash.emplace_back(std::move(iter->second));
iter = map_.erase(iter);
}
}
}
}
double
CachedSLEs::rate() const
{
std::lock_guard lock(mutex_);
auto const tot = hit_ + miss_;
if (tot == 0)
return 0;
return double(hit_) / tot;
}
} // namespace ripple

View File

@@ -20,7 +20,6 @@
#ifndef RIPPLE_NODESTORE_DATABASE_H_INCLUDED
#define RIPPLE_NODESTORE_DATABASE_H_INCLUDED
#include <ripple/basics/KeyCache.h>
#include <ripple/basics/TaggedCache.h>
#include <ripple/nodestore/Backend.h>
#include <ripple/nodestore/NodeObject.h>

View File

@@ -23,6 +23,7 @@
#include <ripple/app/ledger/Ledger.h>
#include <ripple/app/rdb/RelationalDBInterface.h>
#include <ripple/basics/BasicConfig.h>
#include <ripple/basics/KeyCache.h>
#include <ripple/basics/MathUtilities.h>
#include <ripple/basics/RangeSet.h>
#include <ripple/core/DatabaseCon.h>
@@ -39,7 +40,7 @@ namespace ripple {
namespace NodeStore {
using PCache = TaggedCache<uint256, NodeObject>;
using NCache = KeyCache<uint256>;
using NCache = KeyCache;
class DatabaseShard;
/* A range of historical ledgers backed by a node store.

View File

@@ -21,8 +21,10 @@
#define RIPPLE_SHAMAP_FULLBELOWCACHE_H_INCLUDED
#include <ripple/basics/KeyCache.h>
#include <ripple/basics/TaggedCache.h>
#include <ripple/basics/base_uint.h>
#include <ripple/beast/insight/Collector.h>
#include <ripple/beast/utility/Journal.h>
#include <atomic>
#include <string>
@@ -33,17 +35,15 @@ namespace detail {
/** Remembers which tree keys have all descendants resident.
This optimizes the process of acquiring a complete tree.
*/
template <class Key>
class BasicFullBelowCache
{
private:
using CacheType = KeyCache<Key>;
using CacheType = KeyCache;
public:
enum { defaultCacheTargetSize = 0 };
using key_type = Key;
using size_type = typename CacheType::size_type;
using key_type = uint256;
using clock_type = typename CacheType::clock_type;
/** Construct the cache.
@@ -56,11 +56,12 @@ public:
BasicFullBelowCache(
std::string const& name,
clock_type& clock,
beast::Journal j,
beast::insight::Collector::ptr const& collector =
beast::insight::NullCollector::New(),
std::size_t target_size = defaultCacheTargetSize,
std::chrono::seconds expiration = std::chrono::minutes{2})
: m_cache(name, clock, collector, target_size, expiration), m_gen(1)
: m_cache(name, target_size, expiration, clock, j, collector), m_gen(1)
{
}
@@ -75,7 +76,7 @@ public:
Thread safety:
Safe to call from any thread.
*/
size_type
std::size_t
size() const
{
return m_cache.size();
@@ -138,13 +139,13 @@ public:
}
private:
KeyCache<Key> m_cache;
CacheType m_cache;
std::atomic<std::uint32_t> m_gen;
};
} // namespace detail
using FullBelowCache = detail::BasicFullBelowCache<uint256>;
using FullBelowCache = detail::BasicFullBelowCache;
} // namespace ripple

View File

@@ -21,6 +21,7 @@
#define RIPPLE_SHAMAP_SHAMAPTREENODE_H_INCLUDED
#include <ripple/basics/CountedObject.h>
#include <ripple/basics/SHAMapHash.h>
#include <ripple/basics/TaggedCache.h>
#include <ripple/beast/utility/Journal.h>
#include <ripple/protocol/Serializer.h>
@@ -42,88 +43,6 @@ static constexpr unsigned char const wireTypeInner = 2;
static constexpr unsigned char const wireTypeCompressedInner = 3;
static constexpr unsigned char const wireTypeTransactionWithMeta = 4;
// A SHAMapHash is the hash of a node in a SHAMap, and also the
// type of the hash of the entire SHAMap.
class SHAMapHash
{
uint256 hash_;
public:
SHAMapHash() = default;
explicit SHAMapHash(uint256 const& hash) : hash_(hash)
{
}
uint256 const&
as_uint256() const
{
return hash_;
}
uint256&
as_uint256()
{
return hash_;
}
bool
isZero() const
{
return hash_.isZero();
}
bool
isNonZero() const
{
return hash_.isNonZero();
}
int
signum() const
{
return hash_.signum();
}
void
zero()
{
hash_.zero();
}
friend bool
operator==(SHAMapHash const& x, SHAMapHash const& y)
{
return x.hash_ == y.hash_;
}
friend bool
operator<(SHAMapHash const& x, SHAMapHash const& y)
{
return x.hash_ < y.hash_;
}
friend std::ostream&
operator<<(std::ostream& os, SHAMapHash const& x)
{
return os << x.hash_;
}
friend std::string
to_string(SHAMapHash const& x)
{
return to_string(x.hash_);
}
template <class H>
friend void
hash_append(H& h, SHAMapHash const& x)
{
hash_append(h, x.hash_);
}
};
inline bool
operator!=(SHAMapHash const& x, SHAMapHash const& y)
{
return !(x == y);
}
enum class SHAMapNodeType {
tnINNER = 1,
tnTRANSACTION_NM = 2, // transaction, no metadata

View File

@@ -31,6 +31,7 @@ NodeFamily::NodeFamily(Application& app, CollectorManager& cm)
, fbCache_(std::make_shared<FullBelowCache>(
"Node family full below cache",
stopwatch(),
app.journal("NodeFamilyFulLBelowCache"),
cm.collector(),
fullBelowTargetSize,
fullBelowExpiration))

View File

@@ -55,6 +55,7 @@ ShardFamily::getFullBelowCache(std::uint32_t ledgerSeq)
auto fbCache{std::make_shared<FullBelowCache>(
"Shard family full below cache shard " + std::to_string(shardIndex),
stopwatch(),
j_,
cm_.collector(),
fullBelowTargetSize,
fullBelowExpiration)};

View File

@@ -17,10 +17,13 @@
*/
//==============================================================================
#include <ripple/basics/KeyCache.h>
#include <ripple/basics/TaggedCache.h>
#include <ripple/basics/chrono.h>
#include <ripple/beast/clock/manual_clock.h>
#include <ripple/beast/unit_test.h>
#include <ripple/beast/utility/Journal.h>
#include <ripple/protocol/Protocol.h>
#include <test/unit_test/SuiteJournal.h>
namespace ripple {
@@ -35,32 +38,31 @@ public:
clock.set(0);
using Key = std::string;
using Cache = KeyCache<Key>;
using Cache = TaggedCache<Key, int, true>;
test::SuiteJournal j("KeyCacheTest", *this);
// Insert an item, retrieve it, and age it so it gets purged.
{
Cache c("test", clock, 1, 2s);
Cache c("test", LedgerIndex(1), 2s, clock, j);
BEAST_EXPECT(c.size() == 0);
BEAST_EXPECT(c.insert("one"));
BEAST_EXPECT(!c.insert("one"));
BEAST_EXPECT(c.size() == 1);
BEAST_EXPECT(c.exists("one"));
BEAST_EXPECT(c.touch_if_exists("one"));
++clock;
c.sweep();
BEAST_EXPECT(c.size() == 1);
BEAST_EXPECT(c.exists("one"));
++clock;
c.sweep();
BEAST_EXPECT(c.size() == 0);
BEAST_EXPECT(!c.exists("one"));
BEAST_EXPECT(!c.touch_if_exists("one"));
}
// Insert two items, have one expire
{
Cache c("test", clock, 2, 2s);
Cache c("test", LedgerIndex(2), 2s, clock, j);
BEAST_EXPECT(c.insert("one"));
BEAST_EXPECT(c.size() == 1);
@@ -73,12 +75,11 @@ public:
++clock;
c.sweep();
BEAST_EXPECT(c.size() == 1);
BEAST_EXPECT(c.exists("two"));
}
// Insert three items (1 over limit), sweep
{
Cache c("test", clock, 2, 3s);
Cache c("test", LedgerIndex(2), 3s, clock, j);
BEAST_EXPECT(c.insert("one"));
++clock;

View File

@@ -21,6 +21,7 @@
#include <ripple/basics/chrono.h>
#include <ripple/beast/clock/manual_clock.h>
#include <ripple/beast/unit_test.h>
#include <ripple/protocol/Protocol.h>
#include <test/unit_test/SuiteJournal.h>
namespace ripple {
@@ -48,7 +49,7 @@ public:
TestStopwatch clock;
clock.set(0);
using Key = int;
using Key = LedgerIndex;
using Value = std::string;
using Cache = TaggedCache<Key, Value>;

View File

@@ -21,9 +21,9 @@
#include <ripple/beast/clock/manual_clock.h>
#include <ripple/beast/unit_test.h>
#include <ripple/consensus/Validations.h>
#include <test/csf/Validation.h>
#include <memory>
#include <test/csf/Validation.h>
#include <test/unit_test/SuiteJournal.h>
#include <tuple>
#include <type_traits>
#include <vector>
@@ -703,6 +703,7 @@ class Validations_test : public beast::unit_test::suite
{
// Verify expiring clears out validations stored by ledger
testcase("Expire validations");
SuiteJournal j("Validations_test", *this);
LedgerHistoryHelper h;
TestHarness harness(h.oracle);
Node const a = harness.makeNode();
@@ -713,10 +714,10 @@ class Validations_test : public beast::unit_test::suite
Ledger const ledgerA = h["a"];
BEAST_EXPECT(ValStatus::current == harness.add(a.validate(ledgerA)));
BEAST_EXPECT(harness.vals().numTrustedForLedger(ledgerA.id()) == 1);
harness.vals().expire();
harness.vals().expire(j);
BEAST_EXPECT(harness.vals().numTrustedForLedger(ledgerA.id()) == 1);
harness.clock().advance(harness.parms().validationSET_EXPIRES);
harness.vals().expire();
harness.vals().expire(j);
BEAST_EXPECT(harness.vals().numTrustedForLedger(ledgerA.id()) == 0);
// use setSeqToKeep to keep the validation from expire
@@ -725,7 +726,7 @@ class Validations_test : public beast::unit_test::suite
BEAST_EXPECT(harness.vals().numTrustedForLedger(ledgerB.id()) == 1);
harness.vals().setSeqToKeep(ledgerB.seq(), ledgerB.seq() + one);
harness.clock().advance(harness.parms().validationSET_EXPIRES);
harness.vals().expire();
harness.vals().expire(j);
BEAST_EXPECT(harness.vals().numTrustedForLedger(ledgerB.id()) == 1);
// change toKeep
harness.vals().setSeqToKeep(ledgerB.seq() + one, ledgerB.seq() + two);
@@ -736,7 +737,7 @@ class Validations_test : public beast::unit_test::suite
for (int i = 0; i < loops; ++i)
{
harness.clock().advance(harness.parms().validationFRESHNESS);
harness.vals().expire();
harness.vals().expire(j);
}
BEAST_EXPECT(harness.vals().numTrustedForLedger(ledgerB.id()) == 0);
@@ -746,7 +747,7 @@ class Validations_test : public beast::unit_test::suite
BEAST_EXPECT(harness.vals().numTrustedForLedger(ledgerC.id()) == 1);
harness.vals().setSeqToKeep(ledgerC.seq() - one, ledgerC.seq());
harness.clock().advance(harness.parms().validationSET_EXPIRES);
harness.vals().expire();
harness.vals().expire(j);
BEAST_EXPECT(harness.vals().numTrustedForLedger(ledgerC.id()) == 0);
}

View File

@@ -919,7 +919,7 @@ struct Peer
start()
{
// TODO: Expire validations less frequently?
validations.expire();
validations.expire(j);
scheduler.in(parms().ledgerGRANULARITY, [&]() { timerEntry(); });
startRound();
}

View File

@@ -46,7 +46,8 @@ public:
TestNodeFamily(beast::Journal j)
: fbCache_(std::make_shared<FullBelowCache>(
"App family full below cache",
clock_))
clock_,
j))
, tnCache_(std::make_shared<TreeNodeCache>(
"App family tree node cache",
65536,