Mirror of https://github.com/XRPLF/rippled.git
Synced 2025-12-03 17:35:51 +00:00

Compare commits: vlntb/mem- ... vlntb/tagg
6 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 0c5fe98d02 | |
| | 3fb6acd29e | |
| | 77b7cef5a7 | |
| | 2c187461cc | |
| | 13a12c6402 | |
| | 362ecbd1cb | |
@@ -145,7 +145,6 @@ test.toplevel > xrpl.json
test.unit_test > xrpl.basics
tests.libxrpl > xrpl.basics
tests.libxrpl > xrpl.json
tests.libxrpl > xrpl.ledger
tests.libxrpl > xrpl.net
xrpl.json > xrpl.basics
xrpl.ledger > xrpl.basics
.github/scripts/strategy-matrix/generate.py (vendored): 4 changes
@@ -130,8 +130,8 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
if os['distro_name'] == 'rhel' and architecture['platform'] == 'linux/arm64':
continue

# We skip all clang-20 on arm64 due to boost 1.86 build error
if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-20' and architecture['platform'] == 'linux/arm64':
# We skip all clang 20+ on arm64 due to Boost build error.
if f'{os['compiler_name']}-{os['compiler_version']}' in ['clang-20', 'clang-21'] and architecture['platform'] == 'linux/arm64':
continue

# Enable code coverage for Debian Bookworm using GCC 15 in Debug and no
.github/scripts/strategy-matrix/linux.json (vendored): 46 changes
@@ -15,63 +15,91 @@
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "12",
"image_sha": "e1782cd"
"image_sha": "0525eae"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "13",
"image_sha": "e1782cd"
"image_sha": "0525eae"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "e1782cd"
"image_sha": "0525eae"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "15",
"image_sha": "e1782cd"
"image_sha": "0525eae"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "16",
"image_sha": "e1782cd"
"image_sha": "0525eae"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "17",
"image_sha": "e1782cd"
"image_sha": "0525eae"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "18",
"image_sha": "e1782cd"
"image_sha": "0525eae"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "19",
"image_sha": "e1782cd"
"image_sha": "0525eae"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "20",
"image_sha": "e1782cd"
"image_sha": "0525eae"
},
{
"distro_name": "debian",
"distro_version": "trixie",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "0525eae"
},
{
"distro_name": "debian",
"distro_version": "trixie",
"compiler_name": "gcc",
"compiler_version": "15",
"image_sha": "0525eae"
},
{
"distro_name": "debian",
"distro_version": "trixie",
"compiler_name": "clang",
"compiler_version": "20",
"image_sha": "0525eae"
},
{
"distro_name": "debian",
"distro_version": "trixie",
"compiler_name": "clang",
"compiler_version": "21",
"image_sha": "0525eae"
},
{
"distro_name": "rhel",
@@ -6,18 +6,18 @@
"sqlite3/3.49.1#8631739a4c9b93bd3d6b753bac548a63%1756234266.869",
"soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1756234262.318",
"snappy/1.1.10#968fef506ff261592ec30c574d4a7809%1756234314.246",
"rocksdb/10.5.1#4a197eca381a3e5ae8adf8cffa5aacd0%1759820024.194",
"rocksdb/10.5.1#4a197eca381a3e5ae8adf8cffa5aacd0%1762797952.535",
"re2/20230301#dfd6e2bf050eb90ddd8729cfb4c844a4%1756234257.976",
"protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1756234251.614",
"openssl/3.5.4#a1d5835cc6ed5c5b8f3cd5b9b5d24205%1760106486.594",
"nudb/2.0.9#c62cfd501e57055a7e0d8ee3d5e5427d%1756234237.107",
"nudb/2.0.9#fb8dfd1a5557f5e0528114c2da17721e%1763150366.909",
"lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1756234228.999",
"libiconv/1.17#1e65319e945f2d31941a9d28cc13c058%1756223727.64",
"libbacktrace/cci.20210118#a7691bfccd8caaf66309df196790a5a1%1756230911.03",
"libarchive/3.8.1#5cf685686322e906cb42706ab7e099a8%1756234256.696",
"jemalloc/5.3.0#e951da9cf599e956cebc117880d2d9f8%1729241615.244",
"grpc/1.50.1#02291451d1e17200293a409410d1c4e1%1756234248.958",
"doctest/2.4.12#eb9fb352fb2fdfc8abb17ec270945165%1749889324.069",
"doctest/2.4.12#eb9fb352fb2fdfc8abb17ec270945165%1762797941.757",
"date/3.0.4#f74bbba5a08fa388256688743136cb6f%1756234217.493",
"c-ares/1.34.5#b78b91e7cfb1f11ce777a285bbf169c6%1756234217.915",
"bzip2/1.0.8#00b4a4658791c1f06914e087f0e792f5%1756234261.716",
@@ -53,6 +53,9 @@
],
"lz4/[>=1.9.4 <2]": [
"lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504"
],
"sqlite3/3.44.2": [
"sqlite3/3.49.1"
]
},
"config_requires": []
@@ -2,7 +2,6 @@
#define XRPL_BASICS_SHAMAP_HASH_H_INCLUDED

#include <xrpl/basics/base_uint.h>
#include <xrpl/basics/partitioned_unordered_map.h>

#include <ostream>
@@ -71,9 +71,6 @@ public:
int
getCacheSize() const;

int
getTrackSize() const;

float
getHitRate();

@@ -171,11 +168,14 @@ public:

private:
SharedPointerType
initialFetch(key_type const& key, std::lock_guard<mutex_type> const& l);
initialFetch(key_type const& key);

void
collect_metrics();

Mutex&
lockPartition(key_type const& key) const;

private:
struct Stats
{
@@ -278,8 +278,8 @@ private:
[[maybe_unused]] clock_type::time_point const& now,
typename KeyValueCacheType::map_type& partition,
SweptPointersVector& stuffToSweep,
std::atomic<int>& allRemovals,
std::lock_guard<std::recursive_mutex> const&);
std::atomic<int>& allRemoval,
Mutex& partitionLock);

[[nodiscard]] std::thread
sweepHelper(
@@ -288,14 +288,12 @@ private:
typename KeyOnlyCacheType::map_type& partition,
SweptPointersVector&,
std::atomic<int>& allRemovals,
std::lock_guard<std::recursive_mutex> const&);
Mutex& partitionLock);

beast::Journal m_journal;
clock_type& m_clock;
Stats m_stats;

mutex_type mutable m_mutex;

// Used for logging
std::string m_name;

@@ -306,10 +304,11 @@ private:
clock_type::duration const m_target_age;

// Number of items cached
int m_cache_count;
std::atomic<int> m_cache_count;
cache_type m_cache; // Hold strong reference to recent objects
std::uint64_t m_hits;
std::uint64_t m_misses;
std::atomic<std::uint64_t> m_hits;
std::atomic<std::uint64_t> m_misses;
mutable std::vector<mutex_type> partitionLocks_;
};

} // namespace ripple
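Taken together, the header changes above swap the single cache-wide mutex and plain integer counters for one mutex per partition plus atomic statistics. A minimal, self-contained sketch of that layout, with illustrative names rather than the actual rippled template:

```cpp
#include <atomic>
#include <cstdint>
#include <functional>
#include <mutex>
#include <vector>

// Illustrative only: per-partition mutexes plus atomic statistics, so reads
// of different partitions never contend on one global lock.
template <class Key>
class PartitionedStats
{
public:
    explicit PartitionedStats(std::size_t partitions) : locks_(partitions)
    {
    }

    // Every use of `key` locks the same mutex, because the hash always
    // selects the same partition.
    std::mutex&
    lockFor(Key const& key)
    {
        return locks_[std::hash<Key>{}(key) % locks_.size()];
    }

    void
    recordHit()
    {
        hits_.fetch_add(1, std::memory_order_relaxed);
    }

    void
    recordMiss()
    {
        misses_.fetch_add(1, std::memory_order_relaxed);
    }

    double
    hitRate() const
    {
        auto const h = hits_.load(std::memory_order_relaxed);
        auto const m = misses_.load(std::memory_order_relaxed);
        return (h + m) ? double(h) / double(h + m) : 0.0;
    }

private:
    std::vector<std::mutex> locks_;       // one mutex per partition
    std::atomic<std::uint64_t> hits_{0};  // bumped without holding any lock
    std::atomic<std::uint64_t> misses_{0};
};
```

With relaxed atomics the hit/miss accounting is only approximate under concurrency, which is fine for metrics; correctness of the cached data itself still rests on the per-partition mutexes.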
@@ -3,6 +3,7 @@

#include <xrpl/basics/IntrusivePointer.ipp>
#include <xrpl/basics/TaggedCache.h>
#include <xrpl/beast/core/CurrentThreadName.h>

namespace ripple {

@@ -41,6 +42,7 @@ inline TaggedCache<
, m_hits(0)
, m_misses(0)
{
partitionLocks_ = std::vector<mutex_type>(m_cache.partitions());
}

template <
@@ -86,8 +88,13 @@ TaggedCache<
KeyEqual,
Mutex>::size() const
{
std::lock_guard lock(m_mutex);
return m_cache.size();
std::size_t totalSize = 0;
for (size_t i = 0; i < partitionLocks_.size(); ++i)
{
std::lock_guard<Mutex> lock(partitionLocks_[i]);
totalSize += m_cache.map()[i].size();
}
return totalSize;
}

template <
@@ -110,32 +117,7 @@ TaggedCache<
KeyEqual,
Mutex>::getCacheSize() const
{
std::lock_guard lock(m_mutex);
return m_cache_count;
}

template <
class Key,
class T,
bool IsKeyCache,
class SharedWeakUnionPointer,
class SharedPointerType,
class Hash,
class KeyEqual,
class Mutex>
inline int
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::getTrackSize() const
{
std::lock_guard lock(m_mutex);
return m_cache.size();
return m_cache_count.load(std::memory_order_relaxed);
}

template <
@@ -158,9 +140,10 @@ TaggedCache<
KeyEqual,
Mutex>::getHitRate()
{
std::lock_guard lock(m_mutex);
auto const total = static_cast<float>(m_hits + m_misses);
return m_hits * (100.0f / std::max(1.0f, total));
auto const hits = m_hits.load(std::memory_order_relaxed);
auto const misses = m_misses.load(std::memory_order_relaxed);
float const total = float(hits + misses);
return hits * (100.0f / std::max(1.0f, total));
}

template <
@@ -183,9 +166,12 @@ TaggedCache<
KeyEqual,
Mutex>::clear()
{
std::lock_guard lock(m_mutex);
for (auto& mutex : partitionLocks_)
mutex.lock();
m_cache.clear();
m_cache_count = 0;
for (auto& mutex : partitionLocks_)
mutex.unlock();
m_cache_count.store(0, std::memory_order_relaxed);
}

template <
@@ -208,11 +194,9 @@ TaggedCache<
KeyEqual,
Mutex>::reset()
{
std::lock_guard lock(m_mutex);
m_cache.clear();
m_cache_count = 0;
m_hits = 0;
m_misses = 0;
clear();
m_hits.store(0, std::memory_order_relaxed);
m_misses.store(0, std::memory_order_relaxed);
}

template <
@@ -236,7 +220,7 @@ TaggedCache<
KeyEqual,
Mutex>::touch_if_exists(KeyComparable const& key)
{
std::lock_guard lock(m_mutex);
std::lock_guard<Mutex> lock(lockPartition(key));
auto const iter(m_cache.find(key));
if (iter == m_cache.end())
{
@@ -278,8 +262,6 @@ TaggedCache<

auto const start = std::chrono::steady_clock::now();
{
std::lock_guard lock(m_mutex);

if (m_target_size == 0 ||
(static_cast<int>(m_cache.size()) <= m_target_size))
{
@@ -311,12 +293,13 @@ TaggedCache<
m_cache.map()[p],
allStuffToSweep[p],
allRemovals,
lock));
partitionLocks_[p]));
}
for (std::thread& worker : workers)
worker.join();

m_cache_count -= allRemovals;
int removals = allRemovals.load(std::memory_order_relaxed);
m_cache_count.fetch_sub(removals, std::memory_order_relaxed);
}
// At this point allStuffToSweep will go out of scope outside the lock
// and decrement the reference count on each strong pointer.
@@ -350,7 +333,8 @@ TaggedCache<
{
// Remove from cache, if !valid, remove from map too. Returns true if
// removed from cache
std::lock_guard lock(m_mutex);

std::lock_guard<Mutex> lock(lockPartition(key));

auto cit = m_cache.find(key);

@@ -363,7 +347,7 @@ TaggedCache<

if (entry.isCached())
{
--m_cache_count;
m_cache_count.fetch_sub(1, std::memory_order_relaxed);
entry.ptr.convertToWeak();
ret = true;
}
@@ -401,17 +385,16 @@ TaggedCache<
{
// Return canonical value, store if needed, refresh in cache
// Return values: true=we had the data already
std::lock_guard lock(m_mutex);

std::lock_guard<Mutex> lock(lockPartition(key));
auto cit = m_cache.find(key);

if (cit == m_cache.end())
{
m_cache.emplace(
std::piecewise_construct,
std::forward_as_tuple(key),
std::forward_as_tuple(m_clock.now(), data));
++m_cache_count;
m_cache_count.fetch_add(1, std::memory_order_relaxed);
return false;
}

@@ -460,12 +443,12 @@ TaggedCache<
data = cachedData;
}

++m_cache_count;
m_cache_count.fetch_add(1, std::memory_order_relaxed);
return true;
}

entry.ptr = data;
++m_cache_count;
m_cache_count.fetch_add(1, std::memory_order_relaxed);

return false;
}
@@ -541,10 +524,11 @@ TaggedCache<
KeyEqual,
Mutex>::fetch(key_type const& key)
{
std::lock_guard<mutex_type> l(m_mutex);
auto ret = initialFetch(key, l);
std::lock_guard<Mutex> lock(lockPartition(key));

auto ret = initialFetch(key);
if (!ret)
++m_misses;
m_misses.fetch_add(1, std::memory_order_relaxed);
return ret;
}

@@ -608,8 +592,8 @@ TaggedCache<
Mutex>::insert(key_type const& key)
-> std::enable_if_t<IsKeyCache, ReturnType>
{
std::lock_guard lock(m_mutex);
clock_type::time_point const now(m_clock.now());
std::lock_guard<Mutex> lock(lockPartition(key));
auto [it, inserted] = m_cache.emplace(
std::piecewise_construct,
std::forward_as_tuple(key),
@@ -672,10 +656,13 @@ TaggedCache<
std::vector<key_type> v;

{
std::lock_guard lock(m_mutex);
v.reserve(m_cache.size());
for (auto const& _ : m_cache)
v.push_back(_.first);
for (std::size_t i = 0; i < partitionLocks_.size(); ++i)
{
std::lock_guard<Mutex> lock(partitionLocks_[i]);
for (auto const& entry : m_cache.map()[i])
v.push_back(entry.first);
}
}

return v;
@@ -701,11 +688,12 @@ TaggedCache<
KeyEqual,
Mutex>::rate() const
{
std::lock_guard lock(m_mutex);
auto const tot = m_hits + m_misses;
auto const hits = m_hits.load(std::memory_order_relaxed);
auto const misses = m_misses.load(std::memory_order_relaxed);
auto const tot = hits + misses;
if (tot == 0)
return 0;
return double(m_hits) / tot;
return 0.0;
return double(hits) / tot;
}
template <
@@ -729,18 +717,16 @@ TaggedCache<
KeyEqual,
Mutex>::fetch(key_type const& digest, Handler const& h)
{
{
std::lock_guard l(m_mutex);
if (auto ret = initialFetch(digest, l))
return ret;
}
std::lock_guard<Mutex> lock(lockPartition(digest));

if (auto ret = initialFetch(digest))
return ret;

auto sle = h();
if (!sle)
return {};

std::lock_guard l(m_mutex);
++m_misses;
m_misses.fetch_add(1, std::memory_order_relaxed);
auto const [it, inserted] =
m_cache.emplace(digest, Entry(m_clock.now(), std::move(sle)));
if (!inserted)
@@ -767,9 +753,10 @@ TaggedCache<
SharedPointerType,
Hash,
KeyEqual,
Mutex>::
initialFetch(key_type const& key, std::lock_guard<mutex_type> const& l)
Mutex>::initialFetch(key_type const& key)
{
std::lock_guard<Mutex> lock(lockPartition(key));

auto cit = m_cache.find(key);
if (cit == m_cache.end())
return {};
@@ -777,7 +764,7 @@ TaggedCache<
Entry& entry = cit->second;
if (entry.isCached())
{
++m_hits;
m_hits.fetch_add(1, std::memory_order_relaxed);
entry.touch(m_clock.now());
return entry.ptr.getStrong();
}
@@ -785,12 +772,13 @@ TaggedCache<
if (entry.isCached())
{
// independent of cache size, so not counted as a hit
++m_cache_count;
m_cache_count.fetch_add(1, std::memory_order_relaxed);
entry.touch(m_clock.now());
return entry.ptr.getStrong();
}

m_cache.erase(cit);

return {};
}

@@ -819,10 +807,11 @@ TaggedCache<
{
beast::insight::Gauge::value_type hit_rate(0);
{
std::lock_guard lock(m_mutex);
auto const total(m_hits + m_misses);
auto const hits = m_hits.load(std::memory_order_relaxed);
auto const misses = m_misses.load(std::memory_order_relaxed);
auto const total = hits + misses;
if (total != 0)
hit_rate = (m_hits * 100) / total;
hit_rate = (hits * 100) / total;
}
m_stats.hit_rate.set(hit_rate);
}
@@ -853,12 +842,16 @@ TaggedCache<
typename KeyValueCacheType::map_type& partition,
SweptPointersVector& stuffToSweep,
std::atomic<int>& allRemovals,
std::lock_guard<std::recursive_mutex> const&)
Mutex& partitionLock)
{
return std::thread([&, this]() {
beast::setCurrentThreadName("sweep-KVCache");

int cacheRemovals = 0;
int mapRemovals = 0;

std::lock_guard<Mutex> lock(partitionLock);

// Keep references to all the stuff we sweep
// so that we can destroy them outside the lock.
stuffToSweep.reserve(partition.size());
@@ -942,12 +935,16 @@ TaggedCache<
typename KeyOnlyCacheType::map_type& partition,
SweptPointersVector&,
std::atomic<int>& allRemovals,
std::lock_guard<std::recursive_mutex> const&)
Mutex& partitionLock)
{
return std::thread([&, this]() {
beast::setCurrentThreadName("sweep-KCache");

int cacheRemovals = 0;
int mapRemovals = 0;

std::lock_guard<Mutex> lock(partitionLock);

// Keep references to all the stuff we sweep
// so that we can destroy them outside the lock.
{
@@ -982,6 +979,29 @@ TaggedCache<
});
}

template <
class Key,
class T,
bool IsKeyCache,
class SharedWeakUnionPointer,
class SharedPointerType,
class Hash,
class KeyEqual,
class Mutex>
inline Mutex&
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::lockPartition(key_type const& key) const
{
return partitionLocks_[m_cache.partition_index(key)];
}

} // namespace ripple

#endif
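The sweep path above now hands each helper thread the mutex for just its partition and tallies removals through an atomic counter, instead of holding one recursive lock over the whole cache. A rough, self-contained sketch of that pattern under the same assumptions (plain std containers and a hypothetical expiry rule, not the actual sweepHelper):

```cpp
#include <atomic>
#include <cstddef>
#include <mutex>
#include <thread>
#include <unordered_map>
#include <vector>

// Sweep every partition concurrently: each worker locks only its own
// partition, counts what it erased, and the caller folds the atomic total
// back into the cached-item counter.
void
sweepAllPartitions(
    std::vector<std::unordered_map<int, int>>& partitions,
    std::vector<std::mutex>& partitionLocks,
    std::atomic<int>& cacheCount,
    int expiredBelow)
{
    std::atomic<int> allRemovals{0};
    std::vector<std::thread> workers;
    workers.reserve(partitions.size());

    for (std::size_t p = 0; p < partitions.size(); ++p)
    {
        workers.emplace_back([&, p] {
            std::lock_guard<std::mutex> lock(partitionLocks[p]);
            int removed = 0;
            for (auto it = partitions[p].begin(); it != partitions[p].end();)
            {
                if (it->second < expiredBelow)
                {
                    it = partitions[p].erase(it);
                    ++removed;
                }
                else
                    ++it;
            }
            allRemovals.fetch_add(removed, std::memory_order_relaxed);
        });
    }

    for (auto& w : workers)
        w.join();

    cacheCount.fetch_sub(
        allRemovals.load(std::memory_order_relaxed),
        std::memory_order_relaxed);
}
```

The real sweepHelper also parks the swept strong pointers so they are destroyed after the partition lock is released, as the comments in the diff call out; this sketch omits that detail.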
@@ -258,6 +258,12 @@ public:
return map_;
}

partition_map_type const&
map() const
{
return map_;
}

iterator
begin()
{
@@ -302,6 +308,12 @@ public:
return cend();
}

std::size_t
partition_index(key_type const& key) const
{
return partitioner(key);
}

private:
template <class T>
void
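partition_index() simply exposes the map's partitioner, which is what lockPartition() uses to index into partitionLocks_. A tiny sketch of the property the locking scheme relies on, assuming for illustration a hash-modulo partitioner rather than the map's real one:

```cpp
#include <cassert>
#include <cstddef>
#include <functional>
#include <string>

// Hypothetical partitioner: the only property needed is that equal keys
// always map to the same partition index, and therefore the same mutex.
inline std::size_t
partitionIndexFor(std::string const& key, std::size_t partitionCount)
{
    return std::hash<std::string>{}(key) % partitionCount;
}

int
main()
{
    auto const a = partitionIndexFor("ledger-hash-1", 16);
    auto const b = partitionIndexFor("ledger-hash-1", 16);
    assert(a == b);  // same key, same partition, hence same lock
}
```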
@@ -1,119 +0,0 @@
#ifndef XRPL_APP_LEDGER_INDEX_MAP_H_INCLUDED
#define XRPL_APP_LEDGER_INDEX_MAP_H_INCLUDED

#include <algorithm>
#include <mutex>
#include <unordered_map>

namespace ripple {

template <class Key, class Mapped>
class LedgerIndexMap
{
public:
LedgerIndexMap() = default;
explicit LedgerIndexMap(std::size_t reserve_capacity)
{
data_.reserve(reserve_capacity);
}

LedgerIndexMap(LedgerIndexMap const&) = delete;
LedgerIndexMap&
operator=(LedgerIndexMap const&) = delete;
LedgerIndexMap(LedgerIndexMap&&) = delete;
LedgerIndexMap&
operator=(LedgerIndexMap&&) = delete;

Mapped&
operator[](Key const& k)
{
std::lock_guard lock(mutex_);
return data_[k];
}

Mapped&
operator[](Key&& k)
{
std::lock_guard lock(mutex_);
return data_[std::move(k)];
}

[[nodiscard]] Mapped*
get(Key const& k)
{
std::lock_guard lock(mutex_);
auto it = data_.find(k);
return it == data_.end() ? nullptr : &it->second;
}

[[nodiscard]] Mapped const*
get(Key const& k) const
{
std::lock_guard lock(mutex_);
auto it = data_.find(k);
return it == data_.end() ? nullptr : &it->second;
}

template <class... Args>
Mapped&
put(Key const& k, Args&&... args)
{
std::lock_guard lock(mutex_);
auto [it, inserted] = data_.try_emplace(k, std::forward<Args>(args)...);
if (!inserted)
it->second = Mapped(std::forward<Args>(args)...);
return it->second;
}

bool
contains(Key const& k) const
{
std::lock_guard lock(mutex_);
return data_.find(k) != data_.end();
}

std::size_t
size() const noexcept
{
std::lock_guard lock(mutex_);
return data_.size();
}

bool
empty() const noexcept
{
std::lock_guard lock(mutex_);
return data_.empty();
}

void
reserve(std::size_t n)
{
std::lock_guard lock(mutex_);
data_.reserve(n);
}

void
rehash(std::size_t n)
{
std::lock_guard lock(mutex_);
data_.rehash(n);
}

std::size_t
eraseBefore(Key const& cutoff)
{
std::lock_guard lock(mutex_);
auto const before = data_.size();
std::erase_if(data_, [&](auto const& kv) { return kv.first < cutoff; });
return before - data_.size();
}

private:
std::unordered_map<Key, Mapped> data_;
mutable std::mutex mutex_;
};

} // namespace ripple

#endif // XRPL_APP_LEDGER_INDEX_MAP_H_INCLUDED
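For orientation, a short usage sketch of the LedgerIndexMap interface shown above. The header exists on only one side of this compare, and the key and mapped types here are arbitrary stand-ins:

```cpp
#include <xrpl/ledger/LedgerIndexMap.h>  // present only on the branch that adds it

#include <cstddef>
#include <cstdint>
#include <string>

void
ledgerIndexMapExample()
{
    ripple::LedgerIndexMap<std::uint32_t, std::string> byIndex;

    byIndex.put(100, "hash-of-ledger-100");  // insert, or overwrite if present
    byIndex[101] = "hash-of-ledger-101";     // default-construct then assign

    if (auto* hash = byIndex.get(100))  // pointer result, nullptr when absent
        (void)*hash;

    // Drop every entry whose key is strictly below 101; returns the count.
    std::size_t const removed = byIndex.eraseBefore(101);
    (void)removed;  // 1 in this example
}
```

Every member takes the single internal mutex, so the wrapper trades some contention for a drop-in, thread-safe std::unordered_map interface.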
@@ -66,7 +66,6 @@ XRPL_FEATURE(XRPFees, Supported::yes, VoteBehavior::DefaultNo
XRPL_FEATURE(DisallowIncoming, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (RemoveNFTokenAutoTrustLine, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(FlowSortStrands, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(NegativeUNL, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(RequireFullyCanonicalSig, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(DeletableAccounts, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(Checks, Supported::yes, VoteBehavior::DefaultYes)
@@ -130,6 +129,7 @@ XRPL_RETIRE_FEATURE(HardenedValidations)
XRPL_RETIRE_FEATURE(ImmediateOfferKilled)
XRPL_RETIRE_FEATURE(MultiSign)
XRPL_RETIRE_FEATURE(MultiSignReserve)
XRPL_RETIRE_FEATURE(NegativeUNL)
XRPL_RETIRE_FEATURE(NonFungibleTokensV1_1)
XRPL_RETIRE_FEATURE(PayChan)
XRPL_RETIRE_FEATURE(SortedDirectories)
@@ -1329,7 +1329,7 @@ class Vault_test : public beast::unit_test::suite
Vault& vault) {
auto [tx, keylet] = vault.create({.owner = owner, .asset = asset});
testcase("insufficient fee");
env(tx, fee(env.current()->fees().base), ter(telINSUF_FEE_P));
env(tx, fee(env.current()->fees().base - 1), ter(telINSUF_FEE_P));
});

testCase([this](
@@ -2074,6 +2074,10 @@ class Vault_test : public beast::unit_test::suite
auto const sleMPT = env.le(mptoken);
BEAST_EXPECT(sleMPT == nullptr);

// Use one reserve so the next transaction fails
env(ticket::create(owner, 1));
env.close();

// No reserve to create MPToken for asset in VaultWithdraw
tx = vault.withdraw(
{.depositor = owner,
@@ -2091,7 +2095,7 @@ class Vault_test : public beast::unit_test::suite
}
},
{.requireAuth = false,
.initialXRP = acctReserve + incReserve * 4 - 1});
.initialXRP = acctReserve + incReserve * 4 + 1});

testCase([this](
Env& env,
@@ -2980,6 +2984,9 @@ class Vault_test : public beast::unit_test::suite
env.le(keylet::line(owner, asset.raw().get<Issue>()));
BEAST_EXPECT(trustline == nullptr);

env(ticket::create(owner, 1));
env.close();

// Fail because not enough reserve to create trust line
tx = vault.withdraw(
{.depositor = owner,
@@ -2995,7 +3002,7 @@ class Vault_test : public beast::unit_test::suite
env(tx);
env.close();
},
CaseArgs{.initialXRP = acctReserve + incReserve * 4 - 1});
CaseArgs{.initialXRP = acctReserve + incReserve * 4 + 1});

testCase(
[&, this](
@@ -3016,8 +3023,7 @@ class Vault_test : public beast::unit_test::suite
env(pay(owner, charlie, asset(100)));
env.close();

// Use up some reserve on tickets
env(ticket::create(charlie, 2));
env(ticket::create(charlie, 3));
env.close();

// Fail because not enough reserve to create MPToken for shares
@@ -3035,7 +3041,7 @@ class Vault_test : public beast::unit_test::suite
env(tx);
env.close();
},
CaseArgs{.initialXRP = acctReserve + incReserve * 4 - 1});
CaseArgs{.initialXRP = acctReserve + incReserve * 4 + 1});

testCase([&, this](
Env& env,
@@ -39,10 +39,10 @@ public:
// Insert an item, retrieve it, and age it so it gets purged.
{
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.getTrackSize() == 0);
BEAST_EXPECT(c.size() == 0);
BEAST_EXPECT(!c.insert(1, "one"));
BEAST_EXPECT(c.getCacheSize() == 1);
BEAST_EXPECT(c.getTrackSize() == 1);
BEAST_EXPECT(c.size() == 1);

{
std::string s;
@@ -53,7 +53,7 @@ public:
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.getTrackSize() == 0);
BEAST_EXPECT(c.size() == 0);
}

// Insert an item, maintain a strong pointer, age it, and
@@ -61,7 +61,7 @@ public:
{
BEAST_EXPECT(!c.insert(2, "two"));
BEAST_EXPECT(c.getCacheSize() == 1);
BEAST_EXPECT(c.getTrackSize() == 1);
BEAST_EXPECT(c.size() == 1);

{
auto p = c.fetch(2);
@@ -69,14 +69,14 @@ public:
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.getTrackSize() == 1);
BEAST_EXPECT(c.size() == 1);
}

// Make sure its gone now that our reference is gone
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.getTrackSize() == 0);
BEAST_EXPECT(c.size() == 0);
}

// Insert the same key/value pair and make sure we get the same result
@@ -92,7 +92,7 @@ public:
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.getTrackSize() == 0);
BEAST_EXPECT(c.size() == 0);
}

// Put an object in but keep a strong pointer to it, advance the clock a
@@ -102,24 +102,24 @@ public:
// Put an object in
BEAST_EXPECT(!c.insert(4, "four"));
BEAST_EXPECT(c.getCacheSize() == 1);
BEAST_EXPECT(c.getTrackSize() == 1);
BEAST_EXPECT(c.size() == 1);

{
// Keep a strong pointer to it
auto const p1 = c.fetch(4);
BEAST_EXPECT(p1 != nullptr);
BEAST_EXPECT(c.getCacheSize() == 1);
BEAST_EXPECT(c.getTrackSize() == 1);
BEAST_EXPECT(c.size() == 1);
// Advance the clock a lot
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.getTrackSize() == 1);
BEAST_EXPECT(c.size() == 1);
// Canonicalize a new object with the same key
auto p2 = std::make_shared<std::string>("four");
BEAST_EXPECT(c.canonicalize_replace_client(4, p2));
BEAST_EXPECT(c.getCacheSize() == 1);
BEAST_EXPECT(c.getTrackSize() == 1);
BEAST_EXPECT(c.size() == 1);
// Make sure we get the original object
BEAST_EXPECT(p1.get() == p2.get());
}
@@ -127,7 +127,7 @@ public:
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.getTrackSize() == 0);
BEAST_EXPECT(c.size() == 0);
}
}
};
@@ -15,7 +15,6 @@ namespace test {
/*
* This file implements the following negative UNL related tests:
* -- test filling and applying ttUNL_MODIFY Tx and ledger update
* -- test ttUNL_MODIFY Tx failure without featureNegativeUNL amendment
* -- test the NegativeUNLVote class. The test cases are split to multiple
* test classes to allow parallel execution.
* -- test the negativeUNLFilter function
@@ -208,7 +207,7 @@ class NegativeUNL_test : public beast::unit_test::suite

testcase("Create UNLModify Tx and apply to ledgers");

jtx::Env env(*this, jtx::testable_amendments() | featureNegativeUNL);
jtx::Env env(*this, jtx::testable_amendments());
std::vector<PublicKey> publicKeys = createPublicKeys(3);
// genesis ledger
auto l = std::make_shared<Ledger>(
@@ -216,7 +215,6 @@ class NegativeUNL_test : public beast::unit_test::suite
env.app().config(),
std::vector<uint256>{},
env.app().getNodeFamily());
BEAST_EXPECT(l->rules().enabled(featureNegativeUNL));

// Record the public keys and ledger sequences of expected negative UNL
// validators when we build the ledger history
@@ -500,44 +498,6 @@ class NegativeUNL_test : public beast::unit_test::suite
}
};

class NegativeUNLNoAmendment_test : public beast::unit_test::suite
{
void
testNegativeUNLNoAmendment()
{
testcase("No negative UNL amendment");

jtx::Env env(*this, jtx::testable_amendments() - featureNegativeUNL);
std::vector<PublicKey> publicKeys = createPublicKeys(1);
// genesis ledger
auto l = std::make_shared<Ledger>(
create_genesis,
env.app().config(),
std::vector<uint256>{},
env.app().getNodeFamily());
BEAST_EXPECT(!l->rules().enabled(featureNegativeUNL));

// generate more ledgers
for (auto i = 0; i < 256 - 1; ++i)
{
l = std::make_shared<Ledger>(
*l, env.app().timeKeeper().closeTime());
}
BEAST_EXPECT(l->seq() == 256);
auto txDisable_0 = createTx(true, l->seq(), publicKeys[0]);
OpenView accum(&*l);
BEAST_EXPECT(applyAndTestResult(env, accum, txDisable_0, false));
accum.apply(*l);
BEAST_EXPECT(negUnlSizeTest(l, 0, false, false));
}

void
run() override
{
testNegativeUNLNoAmendment();
}
};

/**
* Utility class for creating validators and ledger history
*/
@@ -563,7 +523,7 @@ struct NetworkHistory
};

NetworkHistory(beast::unit_test::suite& suite, Parameter const& p)
: env(suite, jtx::testable_amendments() | featureNegativeUNL)
: env(suite, jtx::testable_amendments())
, param(p)
, validations(env.app().getValidations())
{
@@ -1867,7 +1827,6 @@ class NegativeUNLVoteFilterValidations_test : public beast::unit_test::suite
};

BEAST_DEFINE_TESTSUITE(NegativeUNL, consensus, ripple);
BEAST_DEFINE_TESTSUITE(NegativeUNLNoAmendment, consensus, ripple);

BEAST_DEFINE_TESTSUITE(NegativeUNLVoteInternal, consensus, ripple);
BEAST_DEFINE_TESTSUITE_MANUAL(NegativeUNLVoteScoreTable, consensus, ripple);
@@ -19,7 +19,6 @@ Vault::create(CreateArgs const& args)
jv[jss::TransactionType] = jss::VaultCreate;
jv[jss::Account] = args.owner.human();
jv[jss::Asset] = to_json(args.asset);
jv[jss::Fee] = STAmount(env.current()->fees().increment).getJson();
if (args.flags)
jv[jss::Flags] = *args.flags;
return {jv, keylet};
@@ -124,7 +124,8 @@ class Feature_test : public beast::unit_test::suite
featureToName(fixRemoveNFTokenAutoTrustLine) ==
"fixRemoveNFTokenAutoTrustLine");
BEAST_EXPECT(featureToName(featureFlow) == "Flow");
BEAST_EXPECT(featureToName(featureNegativeUNL) == "NegativeUNL");
BEAST_EXPECT(
featureToName(featureDeletableAccounts) == "DeletableAccounts");
BEAST_EXPECT(
featureToName(fixIncludeKeyletFields) == "fixIncludeKeyletFields");
BEAST_EXPECT(featureToName(featureTokenEscrow) == "TokenEscrow");
@@ -29,7 +29,3 @@ if(NOT WIN32)
target_link_libraries(xrpl.test.net PRIVATE xrpl.imports.test)
add_dependencies(xrpl.tests xrpl.test.net)
endif()

xrpl_add_test(ledger)
target_link_libraries(xrpl.test.ledger PRIVATE xrpl.imports.test)
add_dependencies(xrpl.tests xrpl.test.ledger)
@@ -1,191 +0,0 @@
#include <xrpl/ledger/LedgerIndexMap.h>

#include <doctest/doctest.h>

#include <cstdint>
#include <string>
#include <vector>

using namespace ripple;

TEST_SUITE_BEGIN("LedgerIndexMap");

using TestMap = LedgerIndexMap<int, std::string>;

TEST_CASE("Default empty")
{
TestMap m;
CHECK(m.size() == 0);
CHECK(m.empty());
CHECK(m.get(42) == nullptr);
CHECK_FALSE(m.contains(42));
}

TEST_CASE("Operator bracket inserts default")
{
TestMap m;
auto& v = m[10];
CHECK(m.size() == 1);
CHECK(m.contains(10));
CHECK(v.empty());
}

TEST_CASE("Operator bracket, rvalue key")
{
TestMap m;
int k = 7;
auto& v1 = m[std::move(k)];
v1 = "seven";
CHECK(m.size() == 1);
auto* got = m.get(7);
REQUIRE(got != nullptr);
CHECK(*got == "seven");
}

TEST_CASE("Insert through put")
{
TestMap m;
auto& v = m.put(20, "twenty");
CHECK(v == "twenty");
auto* got = m.get(20);
REQUIRE(got != nullptr);
CHECK(*got == "twenty");
CHECK(m.size() == 1);
}

TEST_CASE("Overwrite existing key with put")
{
TestMap m;
m.put(5, "five");
CHECK(m.size() == 1);
m.put(5, "FIVE");
CHECK(m.size() == 1);
auto* got = m.get(5);
REQUIRE(got != nullptr);
CHECK(*got == "FIVE");
}

TEST_CASE("Once found, one not found")
{
TestMap m;
m.put(1, "one");
CHECK(m.get(1) != nullptr);
CHECK(m.get(2) == nullptr);
}

TEST_CASE("Try eraseBefore - nothing to do")
{
TestMap m;
m.put(10, "a");
m.put(11, "b");
m.put(12, "c");
CHECK(m.eraseBefore(10) == 0);
CHECK(m.size() == 3);
CHECK(m.contains(10));
CHECK(m.contains(11));
CHECK(m.contains(12));
}

TEST_CASE("eraseBefore - removes several entries")
{
TestMap m;
m.put(10, "a");
m.put(11, "b");
m.put(12, "c");
m.put(13, "d");
CHECK(m.eraseBefore(12) == 2);
CHECK_FALSE(m.contains(10));
CHECK_FALSE(m.contains(11));
CHECK(m.contains(12));
CHECK(m.contains(13));
CHECK(m.size() == 2);
}

TEST_CASE("eraseBefore - removes all entries")
{
TestMap m;
m.put(1, "x");
m.put(2, "y");
CHECK(m.eraseBefore(1000) == 2);
CHECK(m.size() == 0);
CHECK(m.empty());
}

TEST_CASE("eraseBefore - same call, second time nothing to do")
{
TestMap m;
m.put(10, "a");
m.put(11, "b");
m.put(12, "c");
CHECK(m.eraseBefore(12) == 2);
CHECK(m.contains(12));
CHECK(m.eraseBefore(12) == 0);
CHECK(m.size() == 1);
}

TEST_CASE("eraseBefore - single entry removed")
{
TestMap m;
m.put(10, "v1");
m.put(10, "v2");
m.put(10, "v3");
CHECK(m.size() == 1);
CHECK(m.eraseBefore(11) == 1);
CHECK(m.size() == 0);
}

TEST_CASE("eraseBefore - outlier still removed in one call")
{
TestMap m;
m.put(10, "a");
m.put(12, "c");
m.put(11, "b"); // out-of-order insert

CHECK(m.eraseBefore(12) == 2); // removes 10 and 11
CHECK_FALSE(m.contains(10));
CHECK_FALSE(m.contains(11));
CHECK(m.contains(12));
CHECK(m.size() == 1);
}

TEST_CASE("eraseBefore - erase in two steps (one first, then some more)")
{
TestMap m;
for (int k : {10, 11, 12, 13})
m.put(k, std::to_string(k));

CHECK(m.eraseBefore(11) == 1);
CHECK_FALSE(m.contains(10));
CHECK(m.size() == 3);

CHECK(m.eraseBefore(13) == 2);
CHECK_FALSE(m.contains(11));
CHECK_FALSE(m.contains(12));
CHECK(m.contains(13));
CHECK(m.size() == 1);
}

TEST_CASE("rehash does not lose entries")
{
TestMap m;
for (int k = 0; k < 16; ++k)
m.put(k, "v" + std::to_string(k));

m.reserve(64);
m.rehash(32);

for (int k = 0; k < 16; ++k)
{
auto* v = m.get(k);
REQUIRE(v != nullptr);
CHECK(*v == "v" + std::to_string(k));
}

CHECK(m.eraseBefore(8) == 8);
for (int k = 0; k < 8; ++k)
CHECK_FALSE(m.contains(k));
for (int k = 8; k < 16; ++k)
CHECK(m.contains(k));
}

TEST_SUITE_END();
@@ -1,2 +0,0 @@
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>
@@ -346,9 +346,7 @@ RCLConsensus::Adaptor::onClose(
prevLedger, validations, initialSet, j_);
}
}
else if (
prevLedger->isVotingLedger() &&
prevLedger->rules().enabled(featureNegativeUNL))
else if (prevLedger->isVotingLedger())
{
// previous ledger was a voting ledger,
// so the current consensus session is for a flag ledger,
@@ -1015,8 +1013,7 @@ RCLConsensus::Adaptor::preStartRound(
inboundTransactions_.newRound(prevLgr.seq());

// Notify NegativeUNLVote that new validators are added
if (prevLgr.ledger_->rules().enabled(featureNegativeUNL) &&
!nowTrusted.empty())
if (!nowTrusted.empty())
nUnlVote_.newValidators(prevLgr.seq() + 1, nowTrusted);

// propose only if we're in sync with the network (and validating)
@@ -8,6 +8,8 @@

namespace ripple {

// FIXME: Need to clean up ledgers by index at some point

LedgerHistory::LedgerHistory(
beast::insight::Collector::ptr const& collector,
Application& app)
@@ -26,7 +28,6 @@ LedgerHistory::LedgerHistory(
std::chrono::minutes{5},
stopwatch(),
app_.journal("TaggedCache"))
, mLedgersByIndex(512)
, j_(app.journal("LedgerHistory"))
{
}
@@ -54,18 +55,22 @@ LedgerHistory::insert(
LedgerHash
LedgerHistory::getLedgerHash(LedgerIndex index)
{
if (auto p = mLedgersByIndex.get(index))
return *p;
if (auto it = mLedgersByIndex.find(index); it != mLedgersByIndex.end())
return it->second;
return {};
}

std::shared_ptr<Ledger const>
LedgerHistory::getLedgerBySeq(LedgerIndex index)
{
if (auto p = mLedgersByIndex.get(index))
{
uint256 const hash = *p;
return getLedgerByHash(hash);
auto it = mLedgersByIndex.find(index);

if (it != mLedgersByIndex.end())
{
uint256 hash = it->second;
return getLedgerByHash(hash);
}
}

std::shared_ptr<Ledger const> ret = loadByIndex(index, app_);
@@ -79,6 +84,7 @@ LedgerHistory::getLedgerBySeq(LedgerIndex index)

{
// Add this ledger to the local tracking by index

XRPL_ASSERT(
ret->isImmutable(),
"ripple::LedgerHistory::getLedgerBySeq : immutable result ledger");
@@ -500,13 +506,12 @@ LedgerHistory::validatedLedger(
bool
LedgerHistory::fixIndex(LedgerIndex ledgerIndex, LedgerHash const& ledgerHash)
{
if (auto cur = mLedgersByIndex.get(ledgerIndex))
auto ledger = m_ledgers_by_hash.fetch(ledgerHash);
auto it = mLedgersByIndex.find(ledgerIndex);
if (ledger && (it != mLedgersByIndex.end()) && (it->second != ledgerHash))
{
if (*cur != ledgerHash)
{
mLedgersByIndex.put(ledgerIndex, ledgerHash);
return false;
}
it->second = ledgerHash;
return false;
}
return true;
}
@@ -514,24 +519,12 @@ LedgerHistory::fixIndex(LedgerIndex ledgerIndex, LedgerHash const& ledgerHash)
void
LedgerHistory::clearLedgerCachePrior(LedgerIndex seq)
{
std::size_t hashesCleared = 0;
for (LedgerHash it : m_ledgers_by_hash.getKeys())
{
auto const ledger = getLedgerByHash(it);
if (!ledger || ledger->info().seq < seq)
{
m_ledgers_by_hash.del(it, false);
++hashesCleared;
}
}
JLOG(j_.debug()) << "LedgersByHash: cleared " << hashesCleared
<< " entries before seq " << seq << " (total now "
<< m_ledgers_by_hash.size() << ")";

std::size_t const indexesCleared = mLedgersByIndex.eraseBefore(seq);
JLOG(j_.debug()) << "LedgerIndexMap: cleared " << indexesCleared
<< " index entries before seq " << seq << " (total now "
<< mLedgersByIndex.size() << ")";
}

} // namespace ripple
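The two sides of getLedgerHash above differ only in lookup idiom: the LedgerIndexMap::get() style yields a pointer that is null when the key is absent, while the std::map style goes through find() and end(). A neutral sketch of both forms against a plain std::map, with stand-in key and hash types rather than the rippled classes:

```cpp
#include <cstdint>
#include <map>

using LedgerIndex = std::uint32_t;
using LedgerHash = std::uint64_t;  // stand-in for the real 256-bit hash type

// Iterator idiom, as on the std::map side of the diff.
LedgerHash
hashByFind(std::map<LedgerIndex, LedgerHash> const& m, LedgerIndex seq)
{
    if (auto it = m.find(seq); it != m.end())
        return it->second;
    return {};  // default-constructed hash plays the "not found" role
}

// Pointer idiom, mirroring LedgerIndexMap::get() on the other side.
LedgerHash const*
hashByGet(std::map<LedgerIndex, LedgerHash> const& m, LedgerIndex seq)
{
    auto it = m.find(seq);
    return it == m.end() ? nullptr : &it->second;
}
```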
@@ -5,7 +5,6 @@
#include <xrpld/app/main/Application.h>

#include <xrpl/beast/insight/Collector.h>
#include <xrpl/ledger/LedgerIndexMap.h>
#include <xrpl/protocol/RippleLedgerHash.h>

#include <optional>
@@ -131,8 +130,7 @@ private:
ConsensusValidated m_consensus_validated;

// Maps ledger indexes to the corresponding hash.
ripple::LedgerIndexMap<LedgerIndex, LedgerHash>
mLedgersByIndex; // validated ledgers
std::map<LedgerIndex, LedgerHash> mLedgersByIndex; // validated ledgers

beast::Journal j_;
};
@@ -28,7 +28,7 @@ buildLedgerImpl(
{
auto built = std::make_shared<Ledger>(*parent, closeTime);

if (built->isFlagLedger() && built->rules().enabled(featureNegativeUNL))
if (built->isFlagLedger())
{
built->updateNegativeUNL();
}
@@ -2063,8 +2063,7 @@ NetworkOPsImp::beginConsensus(
"ripple::NetworkOPsImp::beginConsensus : closedLedger parent matches "
"hash");

if (prevLedger->rules().enabled(featureNegativeUNL))
app_.validators().setNegativeUNL(prevLedger->negativeUNL());
app_.validators().setNegativeUNL(prevLedger->negativeUNL());
TrustChanges const changes = app_.validators().updateTrusted(
app_.getValidations().getCurrentNodeIDs(),
closingInfo.parentCloseTime,
@@ -51,13 +51,6 @@ Transactor::invokePreflight<Change>(PreflightContext const& ctx)
return temBAD_SEQUENCE;
}

if (ctx.tx.getTxnType() == ttUNL_MODIFY &&
!ctx.rules.enabled(featureNegativeUNL))
{
JLOG(ctx.j.warn()) << "Change: NegativeUNL not enabled";
return temDISABLED;
}

return tesSUCCESS;
}
@@ -79,13 +79,6 @@ VaultCreate::preflight(PreflightContext const& ctx)
return tesSUCCESS;
}

XRPAmount
VaultCreate::calculateBaseFee(ReadView const& view, STTx const& tx)
{
// One reserve increment is typically much greater than one base fee.
return calculateOwnerReserveFee(view, tx);
}

TER
VaultCreate::preclaim(PreclaimContext const& ctx)
{
@@ -142,8 +135,9 @@ VaultCreate::doApply()

if (auto ter = dirLink(view(), account_, vault))
return ter;
adjustOwnerCount(view(), owner, 1, j_);
auto ownerCount = owner->at(sfOwnerCount);
// We will create Vault and PseudoAccount, hence increase OwnerCount by 2
adjustOwnerCount(view(), owner, 2, j_);
auto const ownerCount = owner->at(sfOwnerCount);
if (mPriorBalance < view().fees().accountReserve(ownerCount))
return tecINSUFFICIENT_RESERVE;
@@ -23,9 +23,6 @@ public:
static NotTEC
preflight(PreflightContext const& ctx);

static XRPAmount
calculateBaseFee(ReadView const& view, STTx const& tx);

static TER
preclaim(PreclaimContext const& ctx);
@@ -146,7 +146,35 @@ VaultDelete::doApply()
return tecHAS_OBLIGATIONS; // LCOV_EXCL_LINE

// Destroy the pseudo-account.
view().erase(view().peek(keylet::account(pseudoID)));
auto vaultPseudoSLE = view().peek(keylet::account(pseudoID));
if (!vaultPseudoSLE || vaultPseudoSLE->at(~sfVaultID) != vault->key())
return tefBAD_LEDGER; // LCOV_EXCL_LINE

// Making the payment and removing the empty holding should have deleted any
// obligations associated with the vault or vault pseudo-account.
if (*vaultPseudoSLE->at(sfBalance))
{
// LCOV_EXCL_START
JLOG(j_.error()) << "VaultDelete: pseudo-account has a balance";
return tecHAS_OBLIGATIONS;
// LCOV_EXCL_STOP
}
if (vaultPseudoSLE->at(sfOwnerCount) != 0)
{
// LCOV_EXCL_START
JLOG(j_.error()) << "VaultDelete: pseudo-account still owns objects";
return tecHAS_OBLIGATIONS;
// LCOV_EXCL_STOP
}
if (view().exists(keylet::ownerDir(pseudoID)))
{
// LCOV_EXCL_START
JLOG(j_.error()) << "VaultDelete: pseudo-account has a directory";
return tecHAS_OBLIGATIONS;
// LCOV_EXCL_STOP
}

view().erase(vaultPseudoSLE);

// Remove the vault from its owner's directory.
auto const ownerID = vault->at(sfOwner);
@@ -170,7 +198,9 @@ VaultDelete::doApply()
return tefBAD_LEDGER;
// LCOV_EXCL_STOP
}
adjustOwnerCount(view(), owner, -1, j_);

// We are destroying Vault and PseudoAccount, hence decrease by 2
adjustOwnerCount(view(), owner, -2, j_);

// Destroy the vault.
view().erase(vault);
@@ -95,7 +95,7 @@ getCountsJson(Application& app, int minObjectCount)
ret[jss::treenode_cache_size] =
app.getNodeFamily().getTreeNodeCache()->getCacheSize();
ret[jss::treenode_track_size] =
app.getNodeFamily().getTreeNodeCache()->getTrackSize();
static_cast<int>(app.getNodeFamily().getTreeNodeCache()->size());

std::string uptime;
auto s = UptimeClock::now();