diff --git a/include/xrpl/basics/SHAMapHash.h b/include/xrpl/basics/SHAMapHash.h
index 2d2dcdc3ef..1ec326409c 100644
--- a/include/xrpl/basics/SHAMapHash.h
+++ b/include/xrpl/basics/SHAMapHash.h
@@ -21,7 +21,6 @@
 #define RIPPLE_BASICS_SHAMAP_HASH_H_INCLUDED

 #include
-#include
 #include
diff --git a/include/xrpl/basics/TaggedCache.h b/include/xrpl/basics/TaggedCache.h
index 99c91fe393..d1e2c46253 100644
--- a/include/xrpl/basics/TaggedCache.h
+++ b/include/xrpl/basics/TaggedCache.h
@@ -170,9 +170,6 @@ public:
     bool
     retrieve(key_type const& key, T& data);

-    mutex_type&
-    peekMutex();
-
     std::vector<key_type>
     getKeys() const;

@@ -193,11 +190,14 @@ public:

 private:
     SharedPointerType
-    initialFetch(key_type const& key, std::lock_guard<Mutex> const& l);
+    initialFetch(key_type const& key);

     void
     collect_metrics();

+    Mutex&
+    lockPartition(key_type const& key) const;
+
 private:
     struct Stats
     {
@@ -300,8 +300,8 @@ private:
         [[maybe_unused]] clock_type::time_point const& now,
         typename KeyValueCacheType::map_type& partition,
         SweptPointersVector& stuffToSweep,
-        std::atomic<int>& allRemovals,
-        std::lock_guard<Mutex> const&);
+        std::atomic<int>& allRemovals,
+        Mutex& partitionLock);

     [[nodiscard]] std::thread
     sweepHelper(
@@ -310,14 +310,12 @@ private:
         typename KeyOnlyCacheType::map_type& partition,
         SweptPointersVector&,
         std::atomic<int>& allRemovals,
-        std::lock_guard<Mutex> const&);
+        Mutex& partitionLock);

     beast::Journal m_journal;
     clock_type& m_clock;
     Stats m_stats;

-    mutex_type mutable m_mutex;
-
     // Used for logging
     std::string m_name;

@@ -328,10 +326,11 @@ private:
     clock_type::duration const m_target_age;

     // Number of items cached
-    int m_cache_count;
+    std::atomic<int> m_cache_count;
     cache_type m_cache;  // Hold strong reference to recent objects
-    std::uint64_t m_hits;
-    std::uint64_t m_misses;
+    std::atomic<std::uint64_t> m_hits;
+    std::atomic<std::uint64_t> m_misses;
+    mutable std::vector<Mutex> partitionLocks_;
 };

 } // namespace ripple
diff --git a/include/xrpl/basics/TaggedCache.ipp b/include/xrpl/basics/TaggedCache.ipp
index b4056be522..44991381b1 100644
--- a/include/xrpl/basics/TaggedCache.ipp
+++ b/include/xrpl/basics/TaggedCache.ipp
@@ -61,6 +61,7 @@ inline TaggedCache<
     , m_hits(0)
     , m_misses(0)
 {
+    partitionLocks_ = std::vector<Mutex>(m_cache.partitions());
 }

 template <
@@ -106,8 +107,13 @@ TaggedCache<
     KeyEqual,
     Mutex>::size() const
 {
-    std::lock_guard lock(m_mutex);
-    return m_cache.size();
+    std::size_t totalSize = 0;
+    for (std::size_t i = 0; i < partitionLocks_.size(); ++i)
+    {
+        std::lock_guard lock(partitionLocks_[i]);
+        totalSize += m_cache.map()[i].size();
+    }
+    return totalSize;
 }

 template <
@@ -130,8 +136,7 @@ TaggedCache<
     KeyEqual,
     Mutex>::getCacheSize() const
 {
-    std::lock_guard lock(m_mutex);
-    return m_cache_count;
+    return m_cache_count.load(std::memory_order_relaxed);
 }

 template <
@@ -154,8 +159,7 @@ TaggedCache<
     KeyEqual,
     Mutex>::getTrackSize() const
 {
-    std::lock_guard lock(m_mutex);
-    return m_cache.size();
+    return size();
 }

 template <
@@ -178,9 +182,10 @@ TaggedCache<
     KeyEqual,
     Mutex>::getHitRate()
 {
-    std::lock_guard lock(m_mutex);
-    auto const total = static_cast<float>(m_hits + m_misses);
-    return m_hits * (100.0f / std::max(1.0f, total));
+    auto hits = m_hits.load(std::memory_order_relaxed);
+    auto misses = m_misses.load(std::memory_order_relaxed);
+    float total = float(hits + misses);
+    return hits * (100.0f / std::max(1.0f, total));
 }

 template <
@@ -203,9 +208,12 @@ TaggedCache<
     KeyEqual,
     Mutex>::clear()
 {
-    std::lock_guard lock(m_mutex);
+    for (auto& mutex : partitionLocks_)
+        mutex.lock();
     m_cache.clear();
-    m_cache_count = 0;
+    for (auto& mutex : partitionLocks_)
+        mutex.unlock();
+    m_cache_count.store(0, std::memory_order_relaxed);
 }

 template <
@@ -228,11 +236,14 @@ TaggedCache<
     KeyEqual,
     Mutex>::reset()
 {
-    std::lock_guard lock(m_mutex);
+    for (auto& mutex : partitionLocks_)
+        mutex.lock();
     m_cache.clear();
-    m_cache_count = 0;
-    m_hits = 0;
-    m_misses = 0;
+    for (auto& mutex : partitionLocks_)
+        mutex.unlock();
+    m_cache_count.store(0, std::memory_order_relaxed);
+    m_hits.store(0, std::memory_order_relaxed);
+    m_misses.store(0, std::memory_order_relaxed);
 }

 template <
@@ -256,7 +267,7 @@ TaggedCache<
     KeyEqual,
     Mutex>::touch_if_exists(KeyComparable const& key)
 {
-    std::lock_guard lock(m_mutex);
+    std::lock_guard lock(lockPartition(key));
     auto const iter(m_cache.find(key));
     if (iter == m_cache.end())
     {
@@ -298,8 +309,6 @@ TaggedCache<
     auto const start = std::chrono::steady_clock::now();
     {
-        std::lock_guard lock(m_mutex);
-
         if (m_target_size == 0 ||
             (static_cast<int>(m_cache.size()) <= m_target_size))
         {
@@ -331,12 +340,13 @@ TaggedCache<
                 m_cache.map()[p],
                 allStuffToSweep[p],
                 allRemovals,
-                lock));
+                partitionLocks_[p]));
         }
         for (std::thread& worker : workers)
             worker.join();

-        m_cache_count -= allRemovals;
+        int removals = allRemovals.load(std::memory_order_relaxed);
+        m_cache_count.fetch_sub(removals, std::memory_order_relaxed);
     }
     // At this point allStuffToSweep will go out of scope outside the lock
     // and decrement the reference count on each strong pointer.
@@ -370,7 +380,8 @@ TaggedCache<
 {
     // Remove from cache, if !valid, remove from map too. Returns true if
     // removed from cache
-    std::lock_guard lock(m_mutex);
+
+    std::lock_guard lock(lockPartition(key));

     auto cit = m_cache.find(key);

@@ -383,7 +394,7 @@ TaggedCache<

     if (entry.isCached())
     {
-        --m_cache_count;
+        m_cache_count.fetch_sub(1, std::memory_order_relaxed);
         entry.ptr.convertToWeak();
         ret = true;
     }
@@ -421,17 +432,16 @@ TaggedCache<
 {
     // Return canonical value, store if needed, refresh in cache
     // Return values: true=we had the data already
-    std::lock_guard lock(m_mutex);
+    std::lock_guard lock(lockPartition(key));

     auto cit = m_cache.find(key);
-
     if (cit == m_cache.end())
     {
         m_cache.emplace(
             std::piecewise_construct,
             std::forward_as_tuple(key),
             std::forward_as_tuple(m_clock.now(), data));
-        ++m_cache_count;
+        m_cache_count.fetch_add(1, std::memory_order_relaxed);
         return false;
     }

@@ -480,12 +490,12 @@ TaggedCache<
             data = cachedData;
         }

-        ++m_cache_count;
+        m_cache_count.fetch_add(1, std::memory_order_relaxed);
         return true;
     }

     entry.ptr = data;
-    ++m_cache_count;
+    m_cache_count.fetch_add(1, std::memory_order_relaxed);

     return false;
 }

@@ -561,10 +571,11 @@ TaggedCache<
     KeyEqual,
     Mutex>::fetch(key_type const& key)
 {
-    std::lock_guard l(m_mutex);
-    auto ret = initialFetch(key, l);
+    std::lock_guard lock(lockPartition(key));
+
+    auto ret = initialFetch(key);
     if (!ret)
-        ++m_misses;
+        m_misses.fetch_add(1, std::memory_order_relaxed);

     return ret;
 }

@@ -628,8 +639,8 @@ TaggedCache<
     Mutex>::insert(key_type const& key) -> std::enable_if_t<IsKeyCache, ReturnType>
 {
-    std::lock_guard lock(m_mutex);
     clock_type::time_point const now(m_clock.now());
+    std::lock_guard lock(lockPartition(key));
     auto [it, inserted] = m_cache.emplace(
         std::piecewise_construct,
         std::forward_as_tuple(key),
@@ -669,29 +680,6 @@ TaggedCache<
     return true;
 }

-template <
-    class Key,
-    class T,
-    bool IsKeyCache,
-    class SharedWeakUnionPointer,
-    class SharedPointerType,
-    class Hash,
-    class KeyEqual,
-    class Mutex>
-inline auto
-TaggedCache<
-    Key,
-    T,
-    IsKeyCache,
-    SharedWeakUnionPointer,
-    SharedPointerType,
-    Hash,
-    KeyEqual,
-    Mutex>::peekMutex() -> mutex_type&
-{
-    return m_mutex;
-}
-
 template <
     class Key,
     class T,
     bool IsKeyCache,
     class SharedWeakUnionPointer,
     class SharedPointerType,
     class Hash,
     class KeyEqual,
     class Mutex>
@@ -715,10 +703,13 @@ TaggedCache<
     std::vector<key_type> v;

     {
-        std::lock_guard lock(m_mutex);
         v.reserve(m_cache.size());
-        for (auto const& _ : m_cache)
-            v.push_back(_.first);
+        for (std::size_t i = 0; i < partitionLocks_.size(); ++i)
+        {
+            std::lock_guard lock(partitionLocks_[i]);
+            for (auto const& entry : m_cache.map()[i])
+                v.push_back(entry.first);
+        }
     }

     return v;
@@ -744,11 +735,12 @@ TaggedCache<
     KeyEqual,
     Mutex>::rate() const
 {
-    std::lock_guard lock(m_mutex);
-    auto const tot = m_hits + m_misses;
+    auto hits = m_hits.load(std::memory_order_relaxed);
+    auto misses = m_misses.load(std::memory_order_relaxed);
+    auto const tot = hits + misses;
     if (tot == 0)
-        return 0;
-    return double(m_hits) / tot;
+        return 0.0;
+    return double(hits) / tot;
 }

 template <
@@ -772,18 +764,16 @@ TaggedCache<
     KeyEqual,
     Mutex>::fetch(key_type const& digest, Handler const& h)
 {
-    {
-        std::lock_guard l(m_mutex);
-        if (auto ret = initialFetch(digest, l))
-            return ret;
-    }
+    std::lock_guard lock(lockPartition(digest));
+
+    if (auto ret = initialFetch(digest))
+        return ret;

     auto sle = h();
     if (!sle)
         return {};

-    std::lock_guard l(m_mutex);
-    ++m_misses;
+    m_misses.fetch_add(1, std::memory_order_relaxed);
     auto const [it, inserted] =
         m_cache.emplace(digest, Entry(m_clock.now(), std::move(sle)));
     if (!inserted)
@@ -810,9 +800,10 @@ TaggedCache<
     SharedPointerType,
     Hash,
     KeyEqual,
-    Mutex>::
-    initialFetch(key_type const& key, std::lock_guard<Mutex> const& l)
+    Mutex>::initialFetch(key_type const& key)
 {
+    std::lock_guard lock(lockPartition(key));
+
     auto cit = m_cache.find(key);
     if (cit == m_cache.end())
         return {};
@@ -820,7 +811,7 @@ TaggedCache<
     Entry& entry = cit->second;
     if (entry.isCached())
     {
-        ++m_hits;
+        m_hits.fetch_add(1, std::memory_order_relaxed);
         entry.touch(m_clock.now());
         return entry.ptr.getStrong();
     }
@@ -828,12 +819,13 @@ TaggedCache<
     if (entry.isCached())
     {
         // independent of cache size, so not counted as a hit
-        ++m_cache_count;
+        m_cache_count.fetch_add(1, std::memory_order_relaxed);
         entry.touch(m_clock.now());
         return entry.ptr.getStrong();
     }

-    m_cache.erase(cit);
+    m_cache.erase(cit);  // TODO: if this erase happens on fetch, what is left
+                         // for a sweep?

     return {};
 }

@@ -862,10 +854,11 @@ TaggedCache<
 {
     beast::insight::Gauge::value_type hit_rate(0);
     {
-        std::lock_guard lock(m_mutex);
-        auto const total(m_hits + m_misses);
+        auto const hits = m_hits.load(std::memory_order_relaxed);
+        auto const misses = m_misses.load(std::memory_order_relaxed);
+        auto const total = hits + misses;
         if (total != 0)
-            hit_rate = (m_hits * 100) / total;
+            hit_rate = (hits * 100) / total;
     }
     m_stats.hit_rate.set(hit_rate);
 }

@@ -896,7 +889,7 @@ TaggedCache<
     typename KeyValueCacheType::map_type& partition,
     SweptPointersVector& stuffToSweep,
     std::atomic<int>& allRemovals,
-    std::lock_guard<Mutex> const&)
+    Mutex& partitionLock)
 {
     return std::thread([&, this]() {
         beast::setCurrentThreadName("sweep-1");
@@ -904,6 +897,8 @@ TaggedCache<
         int cacheRemovals = 0;
         int mapRemovals = 0;

+        std::lock_guard lock(partitionLock);
+
         // Keep references to all the stuff we sweep
         // so that we can destroy them outside the lock.
         stuffToSweep.reserve(partition.size());
@@ -987,7 +982,7 @@ TaggedCache<
     typename KeyOnlyCacheType::map_type& partition,
     SweptPointersVector&,
     std::atomic<int>& allRemovals,
-    std::lock_guard<Mutex> const&)
+    Mutex& partitionLock)
 {
     return std::thread([&, this]() {
         beast::setCurrentThreadName("sweep-2");
@@ -995,6 +990,8 @@ TaggedCache<
         int cacheRemovals = 0;
         int mapRemovals = 0;

+        std::lock_guard lock(partitionLock);
+
         // Keep references to all the stuff we sweep
         // so that we can destroy them outside the lock.
         {
@@ -1029,6 +1026,29 @@
     });
 }

+template <
+    class Key,
+    class T,
+    bool IsKeyCache,
+    class SharedWeakUnionPointer,
+    class SharedPointerType,
+    class Hash,
+    class KeyEqual,
+    class Mutex>
+inline Mutex&
+TaggedCache<
+    Key,
+    T,
+    IsKeyCache,
+    SharedWeakUnionPointer,
+    SharedPointerType,
+    Hash,
+    KeyEqual,
+    Mutex>::lockPartition(key_type const& key) const
+{
+    return partitionLocks_[m_cache.partition_index(key)];
+}
+
 } // namespace ripple

 #endif
diff --git a/include/xrpl/basics/partitioned_unordered_map.h b/include/xrpl/basics/partitioned_unordered_map.h
index 4e503ad0fa..d45221e4fb 100644
--- a/include/xrpl/basics/partitioned_unordered_map.h
+++ b/include/xrpl/basics/partitioned_unordered_map.h
@@ -277,6 +277,12 @@ public:
         return map_;
     }

+    partition_map_type const&
+    map() const
+    {
+        return map_;
+    }
+
     iterator
     begin()
     {
@@ -321,6 +327,12 @@ public:
         return cend();
     }

+    std::size_t
+    partition_index(key_type const& key) const
+    {
+        return partitioner(key);
+    }
+
 private:
     template <class T>
     void
@@ -380,7 +392,7 @@ public:
     clear()
     {
         for (auto& p : map_)
-            p.clear();
+            p.clear();  // TODO: make sure the partition locks are held here
     }

     iterator
@@ -406,7 +418,7 @@ public:
     {
         std::size_t ret = 0;
         for (auto& p : map_)
-            ret += p.size();  // TODO: make sure the partition locks are held here
+            ret += p.size();
         return ret;
     }
diff --git a/include/xrpl/protocol/Protocol.h b/include/xrpl/protocol/Protocol.h
index 898fd06fbd..bd39233cca 100644
--- a/include/xrpl/protocol/Protocol.h
+++ b/include/xrpl/protocol/Protocol.h
@@ -22,7 +22,6 @@
 #include
 #include
-#include
 #include
diff --git a/src/xrpld/app/ledger/LedgerHistory.cpp b/src/xrpld/app/ledger/LedgerHistory.cpp
index ccec209bd4..a59cdcabf5 100644
--- a/src/xrpld/app/ledger/LedgerHistory.cpp
+++ b/src/xrpld/app/ledger/LedgerHistory.cpp
@@ -63,8 +63,8 @@ LedgerHistory::insert(
         ledger->stateMap().getHash().isNonZero(),
         "ripple::LedgerHistory::insert : nonzero hash");

-    std::unique_lock sl(m_ledgers_by_hash.peekMutex());
-
+    // TODO: merge the below into a single call to avoid lock and race
+    // conditions, i.e. return alreadyHad from the assignment somehow.
     bool const alreadyHad = m_ledgers_by_hash.canonicalize_replace_cache(
         ledger->info().hash, ledger);
     if (validated)
@@ -76,7 +76,7 @@ LedgerHistory::insert(
 LedgerHash
 LedgerHistory::getLedgerHash(LedgerIndex index)
 {
-    std::unique_lock sl(m_ledgers_by_hash.peekMutex());
+    // TODO: is it safe to get the iterator without a lock here?
     if (auto it = mLedgersByIndex.find(index); it != mLedgersByIndex.end())
         return it->second;
     return {};
@@ -86,13 +86,12 @@ std::shared_ptr<Ledger const>
 LedgerHistory::getLedgerBySeq(LedgerIndex index)
 {
     {
-        std::unique_lock sl(m_ledgers_by_hash.peekMutex());
+        // TODO: this lock is not needed
         auto it = mLedgersByIndex.find(index);

         if (it != mLedgersByIndex.end())
         {
             uint256 hash = it->second;
-            sl.unlock();
             return getLedgerByHash(hash);
         }
     }
@@ -108,7 +107,8 @@ LedgerHistory::getLedgerBySeq(LedgerIndex index)

     {
         // Add this ledger to the local tracking by index
-        std::unique_lock sl(m_ledgers_by_hash.peekMutex());
+        // std::unique_lock sl(m_ledgers_by_hash.peekMutex());
+        // TODO: make sure that canonicalize_replace_client locks the partition

         XRPL_ASSERT(
             ret->isImmutable(),
@@ -458,7 +458,8 @@ LedgerHistory::builtLedger(
     XRPL_ASSERT(
         !hash.isZero(), "ripple::LedgerHistory::builtLedger : nonzero hash");

-    std::unique_lock sl(m_consensus_validated.peekMutex());
+    // std::unique_lock sl(m_consensus_validated.peekMutex());
+    // TODO: make sure that canonicalize_replace_client locks the partition

     auto entry = std::make_shared();
     m_consensus_validated.canonicalize_replace_client(index, entry);
@@ -500,7 +501,8 @@ LedgerHistory::validatedLedger(
     XRPL_ASSERT(
         !hash.isZero(),
         "ripple::LedgerHistory::validatedLedger : nonzero hash");

-    std::unique_lock sl(m_consensus_validated.peekMutex());
+    // std::unique_lock sl(m_consensus_validated.peekMutex());
+    // TODO: make sure that canonicalize_replace_client locks the partition

     auto entry = std::make_shared();
     m_consensus_validated.canonicalize_replace_client(index, entry);
@@ -535,7 +537,9 @@ LedgerHistory::validatedLedger(
 bool
 LedgerHistory::fixIndex(LedgerIndex ledgerIndex, LedgerHash const& ledgerHash)
 {
-    std::unique_lock sl(m_ledgers_by_hash.peekMutex());
+    // std::unique_lock sl(m_ledgers_by_hash.peekMutex());
+    // TODO: how to ensure this? "Ensure m_ledgers_by_hash doesn't have the
+    // wrong hash for a particular index"
    auto it = mLedgersByIndex.find(ledgerIndex);

    if ((it != mLedgersByIndex.end()) && (it->second != ledgerHash))
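
Reviewer note, illustration only (not part of the patch): the sketch below is a minimal, self-contained model of the locking scheme this diff introduces for TaggedCache. It shows one mutex per map partition selected by hashing the key (the role of lockPartition()/partition_index()), relaxed atomics for counters that only feed statistics (analogous to m_cache_count, m_hits, m_misses), and a size() that locks one partition at a time. The names PartitionedCache, partitionIndex, and approxSize are hypothetical, and std::mutex stands in for the cache's Mutex template parameter.

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <mutex>
#include <unordered_map>
#include <vector>

// Hypothetical, simplified model of the patch's partition locking; not a
// rippled API. Assumes partitions > 0 and a fixed partition count.
template <class Key, class T, class Hash = std::hash<Key>>
class PartitionedCache
{
public:
    explicit PartitionedCache(std::size_t partitions)
        : maps_(partitions), locks_(partitions)
    {
    }

    void
    insert(Key const& key, T value)
    {
        auto const p = partitionIndex(key);
        std::lock_guard lock(locks_[p]);  // lock one partition, not the world
        if (maps_[p].emplace(key, std::move(value)).second)
            size_.fetch_add(1, std::memory_order_relaxed);
    }

    bool
    fetch(Key const& key, T& out)
    {
        auto const p = partitionIndex(key);
        std::lock_guard lock(locks_[p]);
        if (auto it = maps_[p].find(key); it != maps_[p].end())
        {
            hits_.fetch_add(1, std::memory_order_relaxed);
            out = it->second;
            return true;
        }
        misses_.fetch_add(1, std::memory_order_relaxed);
        return false;
    }

    // Cheap O(1) size from the relaxed counter, like getCacheSize() above.
    std::size_t
    approxSize() const
    {
        return size_.load(std::memory_order_relaxed);
    }

    // Like the patched size(): each partition is locked in turn, so the sum
    // is only a point-in-time figure while writers are active.
    std::size_t
    size() const
    {
        std::size_t total = 0;
        for (std::size_t i = 0; i < maps_.size(); ++i)
        {
            std::lock_guard lock(locks_[i]);
            total += maps_[i].size();
        }
        return total;
    }

private:
    std::size_t
    partitionIndex(Key const& key) const
    {
        return Hash{}(key) % maps_.size();  // analogous to partition_index()
    }

    std::vector<std::unordered_map<Key, T, Hash>> maps_;
    mutable std::vector<std::mutex> locks_;
    std::atomic<std::size_t> size_{0};
    std::atomic<std::uint64_t> hits_{0};
    std::atomic<std::uint64_t> misses_{0};
};

Two design points worth keeping in mind in review: relaxed memory order suffices for these counters because they never guard other data and are read only for statistics; and whole-cache operations such as clear()/reset() must take every partition lock, which stays deadlock-free only if every caller acquires the locks in one fixed order, as the patch does by iterating partitionLocks_ from front to back.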