mirror of https://github.com/XRPLF/rippled.git
synced 2025-11-19 18:45:52 +00:00

Compare commits: a1q123456/... → vlntb/lock — 8 commits

- ab0a72ad93
- 83ae5e3050
- 56f5189a2b
- da694c8304
- d0f836581b
- 984c70955a
- 3effb54e49
- 316f9535e3
@@ -21,7 +21,6 @@
 #define RIPPLE_BASICS_SHAMAP_HASH_H_INCLUDED

 #include <xrpl/basics/base_uint.h>
-#include <xrpl/basics/partitioned_unordered_map.h>

 #include <ostream>

@@ -170,9 +170,6 @@ public:
     bool
     retrieve(key_type const& key, T& data);

-    mutex_type&
-    peekMutex();
-
     std::vector<key_type>
     getKeys() const;

@@ -193,11 +190,14 @@ public:

 private:
     SharedPointerType
-    initialFetch(key_type const& key, std::lock_guard<mutex_type> const& l);
+    initialFetch(key_type const& key);

     void
     collect_metrics();

+    Mutex&
+    lockPartition(key_type const& key) const;
+
 private:
     struct Stats
     {
@@ -300,8 +300,8 @@ private:
         [[maybe_unused]] clock_type::time_point const& now,
         typename KeyValueCacheType::map_type& partition,
         SweptPointersVector& stuffToSweep,
         std::atomic<int>& allRemovals,
-        std::lock_guard<std::recursive_mutex> const&);
+        Mutex& partitionLock);

     [[nodiscard]] std::thread
     sweepHelper(
@@ -310,14 +310,12 @@ private:
         typename KeyOnlyCacheType::map_type& partition,
         SweptPointersVector&,
         std::atomic<int>& allRemovals,
-        std::lock_guard<std::recursive_mutex> const&);
+        Mutex& partitionLock);

     beast::Journal m_journal;
     clock_type& m_clock;
     Stats m_stats;

-    mutex_type mutable m_mutex;
-
     // Used for logging
     std::string m_name;

@@ -328,10 +326,11 @@ private:
     clock_type::duration const m_target_age;

     // Number of items cached
-    int m_cache_count;
+    std::atomic<int> m_cache_count;
     cache_type m_cache;  // Hold strong reference to recent objects
-    std::uint64_t m_hits;
-    std::uint64_t m_misses;
+    std::atomic<std::uint64_t> m_hits;
+    std::atomic<std::uint64_t> m_misses;
+    mutable std::vector<mutex_type> partitionLocks_;
 };

 } // namespace ripple
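These member changes are the heart of the patch: the single global `m_mutex` disappears, the counters become atomics read and written with relaxed ordering (they tolerate momentary staleness), and a vector of per-partition mutexes takes over so that unrelated keys no longer contend on one lock. A minimal standalone sketch of the same shape — hypothetical names, not rippled code:

```cpp
#include <atomic>
#include <cstdint>
#include <mutex>
#include <vector>

// Illustrative only: counters kept as relaxed atomics plus one mutex per
// partition, mirroring the new TaggedCache members.
struct PartitionedStats
{
    explicit PartitionedStats(std::size_t partitions) : locks(partitions)
    {
    }

    std::atomic<int> cacheCount{0};
    std::atomic<std::uint64_t> hits{0};
    std::atomic<std::uint64_t> misses{0};
    std::vector<std::mutex> locks;  // one lock per partition
};

int main()
{
    PartitionedStats stats(16);
    stats.hits.fetch_add(1, std::memory_order_relaxed);
    {
        // Structural changes to a partition happen under that partition's
        // lock; the counters themselves need no lock at all.
        std::lock_guard<std::mutex> lock(stats.locks[3]);
        stats.cacheCount.fetch_add(1, std::memory_order_relaxed);
    }
}
```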
@@ -22,6 +22,7 @@

 #include <xrpl/basics/IntrusivePointer.ipp>
 #include <xrpl/basics/TaggedCache.h>
+#include <xrpl/beast/core/CurrentThreadName.h>

 namespace ripple {

@@ -60,6 +61,7 @@ inline TaggedCache<
     , m_hits(0)
     , m_misses(0)
 {
+    partitionLocks_ = std::vector<mutex_type>(m_cache.partitions());
 }

 template <
@@ -105,8 +107,13 @@ TaggedCache<
     KeyEqual,
     Mutex>::size() const
 {
-    std::lock_guard lock(m_mutex);
-    return m_cache.size();
+    std::size_t totalSize = 0;
+    for (size_t i = 0; i < partitionLocks_.size(); ++i)
+    {
+        std::lock_guard<Mutex> lock(partitionLocks_[i]);
+        totalSize += m_cache.map()[i].size();
+    }
+    return totalSize;
 }

 template <
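With no global lock, `size()` can only lock one partition at a time: each per-partition count is read consistently, but the sum is no longer an atomic snapshot of the whole cache, since other partitions may change while one is being counted. A standalone sketch of the pattern, with hypothetical names and assuming the two vectors are the same length:

```cpp
#include <cstddef>
#include <mutex>
#include <unordered_map>
#include <vector>

// Sum per-partition sizes, taking each partition's lock in turn. The total
// is a best-effort figure, not a point-in-time snapshot of the whole map.
std::size_t
totalSize(
    std::vector<std::mutex>& locks,
    std::vector<std::unordered_map<int, int>>& partitions)
{
    std::size_t total = 0;
    for (std::size_t i = 0; i < locks.size(); ++i)
    {
        std::lock_guard<std::mutex> lock(locks[i]);
        total += partitions[i].size();
    }
    return total;
}
```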
@@ -129,8 +136,7 @@ TaggedCache<
     KeyEqual,
     Mutex>::getCacheSize() const
 {
-    std::lock_guard lock(m_mutex);
-    return m_cache_count;
+    return m_cache_count.load(std::memory_order_relaxed);
 }

 template <
@@ -153,8 +159,7 @@ TaggedCache<
     KeyEqual,
     Mutex>::getTrackSize() const
 {
-    std::lock_guard lock(m_mutex);
-    return m_cache.size();
+    return size();
 }

 template <
@@ -177,9 +182,10 @@ TaggedCache<
     KeyEqual,
     Mutex>::getHitRate()
 {
-    std::lock_guard lock(m_mutex);
-    auto const total = static_cast<float>(m_hits + m_misses);
-    return m_hits * (100.0f / std::max(1.0f, total));
+    auto hits = m_hits.load(std::memory_order_relaxed);
+    auto misses = m_misses.load(std::memory_order_relaxed);
+    float total = float(hits + misses);
+    return hits * (100.0f / std::max(1.0f, total));
 }

 template <
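The `std::max(1.0f, total)` guard keeps the rate at 0 instead of dividing by zero before any fetches happen; for example, 75 hits and 25 misses give 75 * (100 / 100) = 75%. The formula, extracted for illustration:

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>

// Hit rate as a percentage; the max(1.0f, total) guard avoids 0/0.
float
hitRatePercent(std::uint64_t hits, std::uint64_t misses)
{
    float const total = float(hits + misses);
    return hits * (100.0f / std::max(1.0f, total));
}

int main()
{
    assert(hitRatePercent(75, 25) == 75.0f);  // 75 of 100 fetches hit
    assert(hitRatePercent(0, 0) == 0.0f);     // no fetches yet: 0, not NaN
}
```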
@@ -202,9 +208,12 @@ TaggedCache<
     KeyEqual,
     Mutex>::clear()
 {
-    std::lock_guard lock(m_mutex);
+    for (auto& mutex : partitionLocks_)
+        mutex.lock();
     m_cache.clear();
-    m_cache_count = 0;
+    for (auto& mutex : partitionLocks_)
+        mutex.unlock();
+    m_cache_count.store(0, std::memory_order_relaxed);
 }

 template <
@@ -227,11 +236,14 @@ TaggedCache<
     KeyEqual,
     Mutex>::reset()
 {
-    std::lock_guard lock(m_mutex);
+    for (auto& mutex : partitionLocks_)
+        mutex.lock();
     m_cache.clear();
-    m_cache_count = 0;
-    m_hits = 0;
-    m_misses = 0;
+    for (auto& mutex : partitionLocks_)
+        mutex.unlock();
+    m_cache_count.store(0, std::memory_order_relaxed);
+    m_hits.store(0, std::memory_order_relaxed);
+    m_misses.store(0, std::memory_order_relaxed);
 }

 template <
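`clear()` and `reset()` are the only paths that need every partition at once; they acquire the locks in ascending index order and release them after mutating. Using one consistent acquisition order everywhere is what keeps the lock-all pattern deadlock-free. A sketch with hypothetical names (like the diff's plain loops, it omits exception safety — a throwing callback would leave the locks held):

```cpp
#include <mutex>
#include <vector>

// Take every partition lock in index order, run fn, release in the same
// order. Safe against deadlock as long as all callers lock in this order.
template <class Fn>
void
withAllPartitionsLocked(std::vector<std::mutex>& locks, Fn&& fn)
{
    for (auto& m : locks)
        m.lock();
    fn();  // mutate all partitions while everything is held
    for (auto& m : locks)
        m.unlock();
}
```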
@@ -255,7 +267,7 @@ TaggedCache<
     KeyEqual,
     Mutex>::touch_if_exists(KeyComparable const& key)
 {
-    std::lock_guard lock(m_mutex);
+    std::lock_guard<Mutex> lock(lockPartition(key));
     auto const iter(m_cache.find(key));
     if (iter == m_cache.end())
     {
@@ -297,8 +309,6 @@ TaggedCache<

     auto const start = std::chrono::steady_clock::now();
     {
-        std::lock_guard lock(m_mutex);
-
         if (m_target_size == 0 ||
             (static_cast<int>(m_cache.size()) <= m_target_size))
         {
@@ -330,12 +340,13 @@ TaggedCache<
                 m_cache.map()[p],
                 allStuffToSweep[p],
                 allRemovals,
-                lock));
+                partitionLocks_[p]));
         }
         for (std::thread& worker : workers)
             worker.join();

-        m_cache_count -= allRemovals;
+        int removals = allRemovals.load(std::memory_order_relaxed);
+        m_cache_count.fetch_sub(removals, std::memory_order_relaxed);
     }
     // At this point allStuffToSweep will go out of scope outside the lock
     // and decrement the reference count on each strong pointer.
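Each sweep worker now receives its own partition's mutex rather than proof of the old global lock, accumulates its removals into the shared `std::atomic<int>`, and the caller drains the total into `m_cache_count` once after joining. A toy version of that aggregation (illustrative names only):

```cpp
#include <atomic>
#include <thread>
#include <vector>

int main()
{
    std::atomic<int> allRemovals{0};
    std::atomic<int> cacheCount{100};

    // One worker per partition; each reports how many entries it swept.
    std::vector<std::thread> workers;
    for (int p = 0; p < 4; ++p)
        workers.emplace_back([&allRemovals] {
            allRemovals.fetch_add(5, std::memory_order_relaxed);  // pretend 5 swept
        });
    for (auto& w : workers)
        w.join();

    // Drain the total into the cache-size counter exactly once.
    int const removals = allRemovals.load(std::memory_order_relaxed);
    cacheCount.fetch_sub(removals, std::memory_order_relaxed);  // 100 -> 80
}
```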
@@ -369,7 +380,8 @@ TaggedCache<
 {
     // Remove from cache, if !valid, remove from map too. Returns true if
     // removed from cache
-    std::lock_guard lock(m_mutex);
+    std::lock_guard<Mutex> lock(lockPartition(key));
+
     auto cit = m_cache.find(key);

@@ -382,7 +394,7 @@ TaggedCache<

         if (entry.isCached())
         {
-            --m_cache_count;
+            m_cache_count.fetch_sub(1, std::memory_order_relaxed);
             entry.ptr.convertToWeak();
             ret = true;
         }
@@ -420,17 +432,16 @@ TaggedCache<
 {
     // Return canonical value, store if needed, refresh in cache
     // Return values: true=we had the data already
-    std::lock_guard lock(m_mutex);
-
+    std::lock_guard<Mutex> lock(lockPartition(key));
     auto cit = m_cache.find(key);

     if (cit == m_cache.end())
     {
         m_cache.emplace(
             std::piecewise_construct,
             std::forward_as_tuple(key),
             std::forward_as_tuple(m_clock.now(), data));
-        ++m_cache_count;
+        m_cache_count.fetch_add(1, std::memory_order_relaxed);
         return false;
     }

@@ -479,12 +490,12 @@ TaggedCache<
             data = cachedData;
         }

-        ++m_cache_count;
+        m_cache_count.fetch_add(1, std::memory_order_relaxed);
         return true;
     }

     entry.ptr = data;
-    ++m_cache_count;
+    m_cache_count.fetch_add(1, std::memory_order_relaxed);

     return false;
 }
@@ -560,10 +571,11 @@ TaggedCache<
     KeyEqual,
     Mutex>::fetch(key_type const& key)
 {
-    std::lock_guard<mutex_type> l(m_mutex);
-    auto ret = initialFetch(key, l);
+    std::lock_guard<Mutex> lock(lockPartition(key));
+
+    auto ret = initialFetch(key);
     if (!ret)
-        ++m_misses;
+        m_misses.fetch_add(1, std::memory_order_relaxed);
     return ret;
 }

@@ -627,8 +639,8 @@ TaggedCache<
     Mutex>::insert(key_type const& key)
     -> std::enable_if_t<IsKeyCache, ReturnType>
 {
-    std::lock_guard lock(m_mutex);
     clock_type::time_point const now(m_clock.now());
+    std::lock_guard<Mutex> lock(lockPartition(key));
     auto [it, inserted] = m_cache.emplace(
         std::piecewise_construct,
         std::forward_as_tuple(key),
@@ -668,29 +680,6 @@ TaggedCache<
     return true;
 }

-template <
-    class Key,
-    class T,
-    bool IsKeyCache,
-    class SharedWeakUnionPointer,
-    class SharedPointerType,
-    class Hash,
-    class KeyEqual,
-    class Mutex>
-inline auto
-TaggedCache<
-    Key,
-    T,
-    IsKeyCache,
-    SharedWeakUnionPointer,
-    SharedPointerType,
-    Hash,
-    KeyEqual,
-    Mutex>::peekMutex() -> mutex_type&
-{
-    return m_mutex;
-}
-
 template <
     class Key,
     class T,
@@ -714,10 +703,13 @@ TaggedCache<
     std::vector<key_type> v;

     {
-        std::lock_guard lock(m_mutex);
         v.reserve(m_cache.size());
-        for (auto const& _ : m_cache)
-            v.push_back(_.first);
+        for (std::size_t i = 0; i < partitionLocks_.size(); ++i)
+        {
+            std::lock_guard<Mutex> lock(partitionLocks_[i]);
+            for (auto const& entry : m_cache.map()[i])
+                v.push_back(entry.first);
+        }
     }

     return v;
@@ -743,11 +735,12 @@ TaggedCache<
     KeyEqual,
     Mutex>::rate() const
 {
-    std::lock_guard lock(m_mutex);
-    auto const tot = m_hits + m_misses;
+    auto hits = m_hits.load(std::memory_order_relaxed);
+    auto misses = m_misses.load(std::memory_order_relaxed);
+    auto const tot = hits + misses;
     if (tot == 0)
-        return 0;
-    return double(m_hits) / tot;
+        return 0.0;
+    return double(hits) / tot;
 }

 template <
@@ -771,18 +764,16 @@ TaggedCache<
     KeyEqual,
     Mutex>::fetch(key_type const& digest, Handler const& h)
 {
-    {
-        std::lock_guard l(m_mutex);
-        if (auto ret = initialFetch(digest, l))
-            return ret;
-    }
+    std::lock_guard<Mutex> lock(lockPartition(digest));
+
+    if (auto ret = initialFetch(digest))
+        return ret;

     auto sle = h();
     if (!sle)
         return {};

-    std::lock_guard l(m_mutex);
-    ++m_misses;
+    m_misses.fetch_add(1, std::memory_order_relaxed);
     auto const [it, inserted] =
         m_cache.emplace(digest, Entry(m_clock.now(), std::move(sle)));
     if (!inserted)
@@ -809,9 +800,10 @@ TaggedCache<
     SharedPointerType,
     Hash,
     KeyEqual,
-    Mutex>::
-    initialFetch(key_type const& key, std::lock_guard<mutex_type> const& l)
+    Mutex>::initialFetch(key_type const& key)
 {
+    std::lock_guard<Mutex> lock(lockPartition(key));
+
     auto cit = m_cache.find(key);
     if (cit == m_cache.end())
         return {};
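Note that `fetch()` above takes the partition lock and then calls `initialFetch()`, which locks the same partition again on the same thread. That pattern is only safe with a re-entrant mutex; the old `sweepHelper` signatures (`std::lock_guard<std::recursive_mutex> const&`) indicate the cache's mutex type is `std::recursive_mutex`. A minimal demonstration of why that matters:

```cpp
#include <mutex>

int main()
{
    std::recursive_mutex m;
    std::lock_guard<std::recursive_mutex> outer(m);  // as in fetch()
    std::lock_guard<std::recursive_mutex> inner(m);  // as in initialFetch():
                                                     // fine on the same thread
    // With a plain std::mutex, the second lock here would deadlock.
}
```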
@@ -819,7 +811,7 @@ TaggedCache<
     Entry& entry = cit->second;
     if (entry.isCached())
     {
-        ++m_hits;
+        m_hits.fetch_add(1, std::memory_order_relaxed);
         entry.touch(m_clock.now());
         return entry.ptr.getStrong();
     }
@@ -827,12 +819,13 @@ TaggedCache<
     if (entry.isCached())
     {
         // independent of cache size, so not counted as a hit
-        ++m_cache_count;
+        m_cache_count.fetch_add(1, std::memory_order_relaxed);
         entry.touch(m_clock.now());
         return entry.ptr.getStrong();
     }

-    m_cache.erase(cit);
+    m_cache.erase(cit);  // TODO: if this erase happens on fetch, what is
+                         // left for a sweep?
     return {};
 }

@@ -861,10 +854,11 @@ TaggedCache<
 {
     beast::insight::Gauge::value_type hit_rate(0);
     {
-        std::lock_guard lock(m_mutex);
-        auto const total(m_hits + m_misses);
+        auto const hits = m_hits.load(std::memory_order_relaxed);
+        auto const misses = m_misses.load(std::memory_order_relaxed);
+        auto const total = hits + misses;
         if (total != 0)
-            hit_rate = (m_hits * 100) / total;
+            hit_rate = (hits * 100) / total;
     }
     m_stats.hit_rate.set(hit_rate);
 }
@@ -895,12 +889,16 @@ TaggedCache<
     typename KeyValueCacheType::map_type& partition,
     SweptPointersVector& stuffToSweep,
     std::atomic<int>& allRemovals,
-    std::lock_guard<std::recursive_mutex> const&)
+    Mutex& partitionLock)
 {
     return std::thread([&, this]() {
+        beast::setCurrentThreadName("sweep-1");
+
         int cacheRemovals = 0;
         int mapRemovals = 0;

+        std::lock_guard<Mutex> lock(partitionLock);
+
         // Keep references to all the stuff we sweep
         // so that we can destroy them outside the lock.
         stuffToSweep.reserve(partition.size());
@@ -984,12 +982,16 @@ TaggedCache<
     typename KeyOnlyCacheType::map_type& partition,
     SweptPointersVector&,
     std::atomic<int>& allRemovals,
-    std::lock_guard<std::recursive_mutex> const&)
+    Mutex& partitionLock)
 {
     return std::thread([&, this]() {
+        beast::setCurrentThreadName("sweep-2");
+
         int cacheRemovals = 0;
         int mapRemovals = 0;

+        std::lock_guard<Mutex> lock(partitionLock);
+
         // Keep references to all the stuff we sweep
         // so that we can destroy them outside the lock.
         {
@@ -1024,6 +1026,29 @@ TaggedCache<
     });
 }

+template <
+    class Key,
+    class T,
+    bool IsKeyCache,
+    class SharedWeakUnionPointer,
+    class SharedPointerType,
+    class Hash,
+    class KeyEqual,
+    class Mutex>
+inline Mutex&
+TaggedCache<
+    Key,
+    T,
+    IsKeyCache,
+    SharedWeakUnionPointer,
+    SharedPointerType,
+    Hash,
+    KeyEqual,
+    Mutex>::lockPartition(key_type const& key) const
+{
+    return partitionLocks_[m_cache.partition_index(key)];
+}
+
 } // namespace ripple

 #endif
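`lockPartition()` simply indexes the mutex vector with the map's own partition index, so the lock a caller takes is guaranteed to guard the bucket it is about to touch. A self-contained sketch of the idea, where the hash-modulo partitioner is a stand-in for `partitioned_unordered_map`'s real one:

```cpp
#include <functional>
#include <mutex>
#include <string>
#include <vector>

// Hypothetical illustration: map a key to its partition's mutex, so two
// different keys usually contend on different locks.
struct PartitionLocks
{
    explicit PartitionLocks(std::size_t n) : locks(n) {}

    std::mutex&
    lockFor(std::string const& key)
    {
        // Stand-in for partition_index(): hash the key, pick a partition.
        return locks[std::hash<std::string>{}(key) % locks.size()];
    }

    std::vector<std::mutex> locks;
};

int main()
{
    PartitionLocks pl(16);
    std::lock_guard<std::mutex> lock(pl.lockFor("ledger-hash-abc"));
    // ... operate on the partition that owns "ledger-hash-abc" ...
}
```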
@@ -277,6 +277,12 @@ public:
         return map_;
     }

+    partition_map_type const&
+    map() const
+    {
+        return map_;
+    }
+
     iterator
     begin()
     {
@@ -321,6 +327,12 @@ public:
         return cend();
     }

+    std::size_t
+    partition_index(key_type const& key) const
+    {
+        return partitioner(key);
+    }
+
 private:
     template <class T>
     void
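Exposing `partition_index()` is what keeps `TaggedCache`'s external lock table aligned with the map: the table is only correct if a key maps to the same partition in both places, so the map's partitioner has to be visible to callers. A toy model of that contract (hypothetical, not the rippled container):

```cpp
#include <cstddef>
#include <functional>
#include <unordered_map>
#include <vector>

// A partitioned map that publishes its partitioner, so callers can keep
// per-partition state (such as a mutex table) indexed the same way.
template <class Key, class T>
class ToyPartitionedMap
{
public:
    explicit ToyPartitionedMap(std::size_t n) : map_(n) {}

    std::size_t
    partition_index(Key const& key) const
    {
        return std::hash<Key>{}(key) % map_.size();  // stand-in partitioner
    }

    std::unordered_map<Key, T>&
    partition(Key const& key)
    {
        // Any external lock keyed by partition_index(key) guards exactly
        // the bucket returned here.
        return map_[partition_index(key)];
    }

private:
    std::vector<std::unordered_map<Key, T>> map_;
};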
@@ -380,7 +392,7 @@ public:
     clear()
     {
         for (auto& p : map_)
-            p.clear();
+            p.clear();  // TODO make sure that it is locked inside
     }

     iterator
@@ -406,7 +418,7 @@ public:
     {
         std::size_t ret = 0;
         for (auto& p : map_)
-            ret += p.size();
+            ret += p.size();  // TODO make sure that it is locked inside
         return ret;
     }

@@ -22,7 +22,6 @@

 #include <xrpl/basics/ByteUtilities.h>
 #include <xrpl/basics/base_uint.h>
-#include <xrpl/basics/partitioned_unordered_map.h>

 #include <cstdint>

@@ -63,8 +63,8 @@ LedgerHistory::insert(
         ledger->stateMap().getHash().isNonZero(),
         "ripple::LedgerHistory::insert : nonzero hash");

-    std::unique_lock sl(m_ledgers_by_hash.peekMutex());
+    // TODO: merge the below into a single call to avoid lock and race
+    // conditions, i.e. - return alreadyHad on assignment somehow.
     bool const alreadyHad = m_ledgers_by_hash.canonicalize_replace_cache(
         ledger->info().hash, ledger);
     if (validated)
@@ -76,7 +76,7 @@ LedgerHistory::insert(
 LedgerHash
 LedgerHistory::getLedgerHash(LedgerIndex index)
 {
-    std::unique_lock sl(m_ledgers_by_hash.peekMutex());
+    // TODO: is it safe to get iterator without lock here?
     if (auto it = mLedgersByIndex.find(index); it != mLedgersByIndex.end())
         return it->second;
     return {};
@@ -86,13 +86,12 @@ std::shared_ptr<Ledger const>
 LedgerHistory::getLedgerBySeq(LedgerIndex index)
 {
     {
-        std::unique_lock sl(m_ledgers_by_hash.peekMutex());
+        // TODO: this lock is not needed
         auto it = mLedgersByIndex.find(index);

         if (it != mLedgersByIndex.end())
         {
             uint256 hash = it->second;
-            sl.unlock();
             return getLedgerByHash(hash);
         }
     }
@@ -108,7 +107,8 @@ LedgerHistory::getLedgerBySeq(LedgerIndex index)

     {
         // Add this ledger to the local tracking by index
-        std::unique_lock sl(m_ledgers_by_hash.peekMutex());
+        // std::unique_lock sl(m_ledgers_by_hash.peekMutex());
+        // TODO: make sure that canonicalize_replace_client locks the partition

         XRPL_ASSERT(
             ret->isImmutable(),
@@ -458,7 +458,8 @@ LedgerHistory::builtLedger(
     XRPL_ASSERT(
         !hash.isZero(), "ripple::LedgerHistory::builtLedger : nonzero hash");

-    std::unique_lock sl(m_consensus_validated.peekMutex());
+    // std::unique_lock sl(m_consensus_validated.peekMutex());
+    // TODO: make sure that canonicalize_replace_client locks the partition

     auto entry = std::make_shared<cv_entry>();
     m_consensus_validated.canonicalize_replace_client(index, entry);
@@ -500,7 +501,8 @@ LedgerHistory::validatedLedger(
         !hash.isZero(),
         "ripple::LedgerHistory::validatedLedger : nonzero hash");

-    std::unique_lock sl(m_consensus_validated.peekMutex());
+    // std::unique_lock sl(m_consensus_validated.peekMutex());
+    // TODO: make sure that canonicalize_replace_client locks the partition

     auto entry = std::make_shared<cv_entry>();
     m_consensus_validated.canonicalize_replace_client(index, entry);
@@ -535,7 +537,9 @@
 bool
 LedgerHistory::fixIndex(LedgerIndex ledgerIndex, LedgerHash const& ledgerHash)
 {
-    std::unique_lock sl(m_ledgers_by_hash.peekMutex());
+    // std::unique_lock sl(m_ledgers_by_hash.peekMutex());
+    // TODO: how to ensure that? "Ensure m_ledgers_by_hash doesn't have the
+    // wrong hash for a particular index"
     auto it = mLedgersByIndex.find(ledgerIndex);

     if ((it != mLedgersByIndex.end()) && (it->second != ledgerHash))