mirror of https://github.com/XRPLF/rippled.git (synced 2025-11-19 02:25:52 +00:00)

Compare commits: vlntb/tagg...ximinez/te (4 Commits)
| Author | SHA1 | Date |
|---|---|---|
| | e4b18fd92b | |
| | 2bf218344b | |
| | 88e7ceef6a | |
| | 0e6b981a56 | |
4  .github/scripts/strategy-matrix/generate.py  (vendored)
@@ -130,8 +130,8 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
if os['distro_name'] == 'rhel' and architecture['platform'] == 'linux/arm64':
continue

# We skip all clang 20+ on arm64 due to Boost build error.
if f'{os['compiler_name']}-{os['compiler_version']}' in ['clang-20', 'clang-21'] and architecture['platform'] == 'linux/arm64':
# We skip all clang-20 on arm64 due to boost 1.86 build error
if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-20' and architecture['platform'] == 'linux/arm64':
continue

# Enable code coverage for Debian Bookworm using GCC 15 in Debug and no
46  .github/scripts/strategy-matrix/linux.json  (vendored)
@@ -15,91 +15,63 @@
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "12",
"image_sha": "0525eae"
"image_sha": "e1782cd"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "13",
"image_sha": "0525eae"
"image_sha": "e1782cd"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "0525eae"
"image_sha": "e1782cd"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "15",
"image_sha": "0525eae"
"image_sha": "e1782cd"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "16",
"image_sha": "0525eae"
"image_sha": "e1782cd"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "17",
"image_sha": "0525eae"
"image_sha": "e1782cd"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "18",
"image_sha": "0525eae"
"image_sha": "e1782cd"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "19",
"image_sha": "0525eae"
"image_sha": "e1782cd"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "20",
"image_sha": "0525eae"
},
{
"distro_name": "debian",
"distro_version": "trixie",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "0525eae"
},
{
"distro_name": "debian",
"distro_version": "trixie",
"compiler_name": "gcc",
"compiler_version": "15",
"image_sha": "0525eae"
},
{
"distro_name": "debian",
"distro_version": "trixie",
"compiler_name": "clang",
"compiler_version": "20",
"image_sha": "0525eae"
},
{
"distro_name": "debian",
"distro_version": "trixie",
"compiler_name": "clang",
"compiler_version": "21",
"image_sha": "0525eae"
"image_sha": "e1782cd"
},
{
"distro_name": "rhel",
@@ -2,6 +2,7 @@
#define XRPL_BASICS_SHAMAP_HASH_H_INCLUDED

#include <xrpl/basics/base_uint.h>
#include <xrpl/basics/partitioned_unordered_map.h>

#include <ostream>

@@ -71,6 +71,9 @@ public:
int
getCacheSize() const;

int
getTrackSize() const;

float
getHitRate();

@@ -148,6 +151,9 @@ public:
bool
retrieve(key_type const& key, T& data);

mutex_type&
peekMutex();

std::vector<key_type>
getKeys() const;

@@ -168,14 +174,11 @@ public:

private:
SharedPointerType
initialFetch(key_type const& key);
initialFetch(key_type const& key, std::lock_guard<mutex_type> const& l);

void
collect_metrics();

Mutex&
lockPartition(key_type const& key) const;

private:
struct Stats
{

@@ -278,8 +281,8 @@ private:
[[maybe_unused]] clock_type::time_point const& now,
typename KeyValueCacheType::map_type& partition,
SweptPointersVector& stuffToSweep,
std::atomic<int>& allRemoval,
Mutex& partitionLock);
std::atomic<int>& allRemovals,
std::lock_guard<std::recursive_mutex> const&);

[[nodiscard]] std::thread
sweepHelper(

@@ -288,12 +291,14 @@ private:
typename KeyOnlyCacheType::map_type& partition,
SweptPointersVector&,
std::atomic<int>& allRemovals,
Mutex& partitionLock);
std::lock_guard<std::recursive_mutex> const&);

beast::Journal m_journal;
clock_type& m_clock;
Stats m_stats;

mutex_type mutable m_mutex;

// Used for logging
std::string m_name;

@@ -304,11 +309,10 @@ private:
clock_type::duration const m_target_age;

// Number of items cached
std::atomic<int> m_cache_count;
int m_cache_count;
cache_type m_cache; // Hold strong reference to recent objects
std::atomic<std::uint64_t> m_hits;
std::atomic<std::uint64_t> m_misses;
mutable std::vector<mutex_type> partitionLocks_;
std::uint64_t m_hits;
std::uint64_t m_misses;
};

} // namespace ripple
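Review note: the header hunks above trade one variant that keeps per-partition mutexes plus atomic counters for another that guards plain integer counters with a single cache-wide mutex. The following standalone sketch contrasts the two bookkeeping styles in isolation; AtomicStats and LockedStats are illustrative names, not rippled code.

#include <atomic>
#include <cstdint>
#include <mutex>

// Style A: counters shared across several partition locks must be atomic,
// because no single mutex protects them.
struct AtomicStats
{
    std::atomic<int> cacheCount{0};
    std::atomic<std::uint64_t> hits{0};

    void recordHit() { hits.fetch_add(1, std::memory_order_relaxed); }
};

// Style B: one mutex guards every counter, so plain integers suffice and all
// reads and writes happen under a std::lock_guard.
struct LockedStats
{
    std::mutex m;
    int cacheCount = 0;
    std::uint64_t hits = 0;

    void recordHit()
    {
        std::lock_guard<std::mutex> lock(m);
        ++hits;
    }
};

int main()
{
    AtomicStats a;
    LockedStats b;
    a.recordHit();
    b.recordHit();
}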
@@ -3,7 +3,6 @@

#include <xrpl/basics/IntrusivePointer.ipp>
#include <xrpl/basics/TaggedCache.h>
#include <xrpl/beast/core/CurrentThreadName.h>

namespace ripple {

@@ -42,7 +41,6 @@ inline TaggedCache<
, m_hits(0)
, m_misses(0)
{
partitionLocks_ = std::vector<mutex_type>(m_cache.partitions());
}

template <

@@ -88,13 +86,8 @@ TaggedCache<
KeyEqual,
Mutex>::size() const
{
std::size_t totalSize = 0;
for (size_t i = 0; i < partitionLocks_.size(); ++i)
{
std::lock_guard<Mutex> lock(partitionLocks_[i]);
totalSize += m_cache.map()[i].size();
}
return totalSize;
std::lock_guard lock(m_mutex);
return m_cache.size();
}

template <
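Review note: one size() variant above walks the partitions and sums each bucket under its own lock, while the other just reads the container size under a single mutex. A simplified, self-contained sketch of the per-partition summation follows; PartitionedCounter and its vector-of-maps layout are stand-ins, not the real partitioned_unordered_map.

#include <cstddef>
#include <map>
#include <mutex>
#include <vector>

// Illustrative stand-in: a vector of ordinary maps plays the role of the
// partitioned container, with one mutex per partition.
struct PartitionedCounter
{
    std::vector<std::map<int, int>> partitions;
    mutable std::vector<std::mutex> locks;

    explicit PartitionedCounter(std::size_t n) : partitions(n), locks(n) {}

    std::size_t size() const
    {
        std::size_t total = 0;
        for (std::size_t i = 0; i < partitions.size(); ++i)
        {
            // Each partition is locked only while its size is read.
            std::lock_guard<std::mutex> lock(locks[i]);
            total += partitions[i].size();
        }
        return total;
    }
};

int main()
{
    PartitionedCounter c(4);
    return static_cast<int>(c.size());
}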
@@ -117,7 +110,32 @@ TaggedCache<
KeyEqual,
Mutex>::getCacheSize() const
{
return m_cache_count.load(std::memory_order_relaxed);
std::lock_guard lock(m_mutex);
return m_cache_count;
}

template <
class Key,
class T,
bool IsKeyCache,
class SharedWeakUnionPointer,
class SharedPointerType,
class Hash,
class KeyEqual,
class Mutex>
inline int
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::getTrackSize() const
{
std::lock_guard lock(m_mutex);
return m_cache.size();
}

template <

@@ -140,10 +158,9 @@ TaggedCache<
KeyEqual,
Mutex>::getHitRate()
{
auto const hits = m_hits.load(std::memory_order_relaxed);
auto const misses = m_misses.load(std::memory_order_relaxed);
float const total = float(hits + misses);
return hits * (100.0f / std::max(1.0f, total));
std::lock_guard lock(m_mutex);
auto const total = static_cast<float>(m_hits + m_misses);
return m_hits * (100.0f / std::max(1.0f, total));
}

template <

@@ -166,12 +183,9 @@ TaggedCache<
KeyEqual,
Mutex>::clear()
{
for (auto& mutex : partitionLocks_)
mutex.lock();
std::lock_guard lock(m_mutex);
m_cache.clear();
for (auto& mutex : partitionLocks_)
mutex.unlock();
m_cache_count.store(0, std::memory_order_relaxed);
m_cache_count = 0;
}

template <

@@ -194,9 +208,11 @@ TaggedCache<
KeyEqual,
Mutex>::reset()
{
clear();
m_hits.store(0, std::memory_order_relaxed);
m_misses.store(0, std::memory_order_relaxed);
std::lock_guard lock(m_mutex);
m_cache.clear();
m_cache_count = 0;
m_hits = 0;
m_misses = 0;
}

template <

@@ -220,7 +236,7 @@ TaggedCache<
KeyEqual,
Mutex>::touch_if_exists(KeyComparable const& key)
{
std::lock_guard<Mutex> lock(lockPartition(key));
std::lock_guard lock(m_mutex);
auto const iter(m_cache.find(key));
if (iter == m_cache.end())
{

@@ -262,6 +278,8 @@ TaggedCache<

auto const start = std::chrono::steady_clock::now();
{
std::lock_guard lock(m_mutex);

if (m_target_size == 0 ||
(static_cast<int>(m_cache.size()) <= m_target_size))
{

@@ -293,13 +311,12 @@ TaggedCache<
m_cache.map()[p],
allStuffToSweep[p],
allRemovals,
partitionLocks_[p]));
lock));
}
for (std::thread& worker : workers)
worker.join();

int removals = allRemovals.load(std::memory_order_relaxed);
m_cache_count.fetch_sub(removals, std::memory_order_relaxed);
m_cache_count -= allRemovals;
}
// At this point allStuffToSweep will go out of scope outside the lock
// and decrement the reference count on each strong pointer.
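Review note: both sweep variants keep the same trick, visible in the comments above, of moving the evicted strong pointers into a container that is destroyed only after the lock is released, so potentially expensive destructors never run while the cache is locked. A generic sketch of that pattern, with illustrative names and a plain unordered_map standing in for the cache:

#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>
#include <vector>

std::mutex cacheMutex;
std::unordered_map<int, std::shared_ptr<std::string>> cache;

void sweep()
{
    // Collected entries keep their last strong reference alive until this
    // vector goes out of scope, after the lock has been released.
    std::vector<std::shared_ptr<std::string>> stuffToSweep;
    {
        std::lock_guard<std::mutex> lock(cacheMutex);
        stuffToSweep.reserve(cache.size());
        for (auto it = cache.begin(); it != cache.end();)
        {
            stuffToSweep.push_back(std::move(it->second));
            it = cache.erase(it);
        }
    }
    // stuffToSweep is destroyed here, outside the lock.
}

int main()
{
    cache.emplace(1, std::make_shared<std::string>("one"));
    sweep();
}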
@@ -333,8 +350,7 @@ TaggedCache<
{
// Remove from cache, if !valid, remove from map too. Returns true if
// removed from cache

std::lock_guard<Mutex> lock(lockPartition(key));
std::lock_guard lock(m_mutex);

auto cit = m_cache.find(key);

@@ -347,7 +363,7 @@ TaggedCache<

if (entry.isCached())
{
m_cache_count.fetch_sub(1, std::memory_order_relaxed);
--m_cache_count;
entry.ptr.convertToWeak();
ret = true;
}

@@ -385,16 +401,17 @@ TaggedCache<
{
// Return canonical value, store if needed, refresh in cache
// Return values: true=we had the data already
std::lock_guard lock(m_mutex);

std::lock_guard<Mutex> lock(lockPartition(key));
auto cit = m_cache.find(key);

if (cit == m_cache.end())
{
m_cache.emplace(
std::piecewise_construct,
std::forward_as_tuple(key),
std::forward_as_tuple(m_clock.now(), data));
m_cache_count.fetch_add(1, std::memory_order_relaxed);
++m_cache_count;
return false;
}

@@ -443,12 +460,12 @@ TaggedCache<
data = cachedData;
}

m_cache_count.fetch_add(1, std::memory_order_relaxed);
++m_cache_count;
return true;
}

entry.ptr = data;
m_cache_count.fetch_add(1, std::memory_order_relaxed);
++m_cache_count;

return false;
}

@@ -524,11 +541,10 @@ TaggedCache<
KeyEqual,
Mutex>::fetch(key_type const& key)
{
std::lock_guard<Mutex> lock(lockPartition(key));

auto ret = initialFetch(key);
std::lock_guard<mutex_type> l(m_mutex);
auto ret = initialFetch(key, l);
if (!ret)
m_misses.fetch_add(1, std::memory_order_relaxed);
++m_misses;
return ret;
}

@@ -592,8 +608,8 @@ TaggedCache<
Mutex>::insert(key_type const& key)
-> std::enable_if_t<IsKeyCache, ReturnType>
{
std::lock_guard lock(m_mutex);
clock_type::time_point const now(m_clock.now());
std::lock_guard<Mutex> lock(lockPartition(key));
auto [it, inserted] = m_cache.emplace(
std::piecewise_construct,
std::forward_as_tuple(key),

@@ -633,6 +649,29 @@ TaggedCache<
return true;
}

template <
class Key,
class T,
bool IsKeyCache,
class SharedWeakUnionPointer,
class SharedPointerType,
class Hash,
class KeyEqual,
class Mutex>
inline auto
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::peekMutex() -> mutex_type&
{
return m_mutex;
}

template <
class Key,
class T,

@@ -656,13 +695,10 @@ TaggedCache<
std::vector<key_type> v;

{
std::lock_guard lock(m_mutex);
v.reserve(m_cache.size());
for (std::size_t i = 0; i < partitionLocks_.size(); ++i)
{
std::lock_guard<Mutex> lock(partitionLocks_[i]);
for (auto const& entry : m_cache.map()[i])
v.push_back(entry.first);
}
for (auto const& _ : m_cache)
v.push_back(_.first);
}

return v;

@@ -688,12 +724,11 @@ TaggedCache<
KeyEqual,
Mutex>::rate() const
{
auto const hits = m_hits.load(std::memory_order_relaxed);
auto const misses = m_misses.load(std::memory_order_relaxed);
auto const tot = hits + misses;
std::lock_guard lock(m_mutex);
auto const tot = m_hits + m_misses;
if (tot == 0)
return 0.0;
return double(hits) / tot;
return 0;
return double(m_hits) / tot;
}

template <

@@ -717,16 +752,18 @@ TaggedCache<
KeyEqual,
Mutex>::fetch(key_type const& digest, Handler const& h)
{
std::lock_guard<Mutex> lock(lockPartition(digest));

if (auto ret = initialFetch(digest))
return ret;
{
std::lock_guard l(m_mutex);
if (auto ret = initialFetch(digest, l))
return ret;
}

auto sle = h();
if (!sle)
return {};

m_misses.fetch_add(1, std::memory_order_relaxed);
std::lock_guard l(m_mutex);
++m_misses;
auto const [it, inserted] =
m_cache.emplace(digest, Entry(m_clock.now(), std::move(sle)));
if (!inserted)

@@ -753,10 +790,9 @@ TaggedCache<
SharedPointerType,
Hash,
KeyEqual,
Mutex>::initialFetch(key_type const& key)
Mutex>::
initialFetch(key_type const& key, std::lock_guard<mutex_type> const& l)
{
std::lock_guard<Mutex> lock(lockPartition(key));

auto cit = m_cache.find(key);
if (cit == m_cache.end())
return {};
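Review note: the reworked initialFetch above takes a std::lock_guard<mutex_type> const& purely as evidence that the caller already holds the mutex; the guard itself is never used inside the function. A minimal sketch of that "lock witness" idiom follows; MiniCache and fetchLocked are illustrative names, not the rippled template.

#include <mutex>
#include <optional>
#include <string>
#include <unordered_map>

class MiniCache
{
public:
    std::optional<std::string>
    fetch(int key)
    {
        std::lock_guard<std::mutex> lock(mutex_);
        return fetchLocked(key, lock);  // pass the guard as proof of locking
    }

private:
    // The unused lock_guard parameter documents, and enforces at the call
    // site, that mutex_ is already held when this helper runs.
    std::optional<std::string>
    fetchLocked(int key, std::lock_guard<std::mutex> const&)
    {
        if (auto it = map_.find(key); it != map_.end())
            return it->second;
        return std::nullopt;
    }

    std::mutex mutex_;
    std::unordered_map<int, std::string> map_;
};

int main()
{
    MiniCache c;
    c.fetch(42);
}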
@@ -764,7 +800,7 @@ TaggedCache<
Entry& entry = cit->second;
if (entry.isCached())
{
m_hits.fetch_add(1, std::memory_order_relaxed);
++m_hits;
entry.touch(m_clock.now());
return entry.ptr.getStrong();
}

@@ -772,13 +808,12 @@ TaggedCache<
if (entry.isCached())
{
// independent of cache size, so not counted as a hit
m_cache_count.fetch_add(1, std::memory_order_relaxed);
++m_cache_count;
entry.touch(m_clock.now());
return entry.ptr.getStrong();
}

m_cache.erase(cit);

return {};
}

@@ -807,11 +842,10 @@ TaggedCache<
{
beast::insight::Gauge::value_type hit_rate(0);
{
auto const hits = m_hits.load(std::memory_order_relaxed);
auto const misses = m_misses.load(std::memory_order_relaxed);
auto const total = hits + misses;
std::lock_guard lock(m_mutex);
auto const total(m_hits + m_misses);
if (total != 0)
hit_rate = (hits * 100) / total;
hit_rate = (m_hits * 100) / total;
}
m_stats.hit_rate.set(hit_rate);
}

@@ -842,16 +876,12 @@ TaggedCache<
typename KeyValueCacheType::map_type& partition,
SweptPointersVector& stuffToSweep,
std::atomic<int>& allRemovals,
Mutex& partitionLock)
std::lock_guard<std::recursive_mutex> const&)
{
return std::thread([&, this]() {
beast::setCurrentThreadName("sweep-KVCache");

int cacheRemovals = 0;
int mapRemovals = 0;

std::lock_guard<Mutex> lock(partitionLock);

// Keep references to all the stuff we sweep
// so that we can destroy them outside the lock.
stuffToSweep.reserve(partition.size());

@@ -935,16 +965,12 @@ TaggedCache<
typename KeyOnlyCacheType::map_type& partition,
SweptPointersVector&,
std::atomic<int>& allRemovals,
Mutex& partitionLock)
std::lock_guard<std::recursive_mutex> const&)
{
return std::thread([&, this]() {
beast::setCurrentThreadName("sweep-KCache");

int cacheRemovals = 0;
int mapRemovals = 0;

std::lock_guard<Mutex> lock(partitionLock);

// Keep references to all the stuff we sweep
// so that we can destroy them outside the lock.
{

@@ -979,29 +1005,6 @@ TaggedCache<
});
}

template <
class Key,
class T,
bool IsKeyCache,
class SharedWeakUnionPointer,
class SharedPointerType,
class Hash,
class KeyEqual,
class Mutex>
inline Mutex&
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::lockPartition(key_type const& key) const
{
return partitionLocks_[m_cache.partition_index(key)];
}

} // namespace ripple

#endif
@@ -258,12 +258,6 @@ public:
return map_;
}

partition_map_type const&
map() const
{
return map_;
}

iterator
begin()
{

@@ -308,12 +302,6 @@ public:
return cend();
}

std::size_t
partition_index(key_type const& key) const
{
return partitioner(key);
}

private:
template <class T>
void
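Review note: the accessors touched here (a const map() overload and partition_index) are what let a caller walk the underlying partition buckets directly, for example to take one partition lock at a time. The toy container below mirrors that idea with its own hash-modulo routing; ToyPartitionedMap is illustrative and is not the real partitioned_unordered_map API.

#include <cstddef>
#include <functional>
#include <string>
#include <unordered_map>
#include <vector>

// Toy partitioned map: keys are routed to one of N ordinary unordered_maps
// by hashing, mirroring the partition_index() idea from the diff above.
template <class Key, class Value>
class ToyPartitionedMap
{
public:
    explicit ToyPartitionedMap(std::size_t n) : parts_(n) {}

    std::size_t
    partition_index(Key const& key) const
    {
        return std::hash<Key>{}(key) % parts_.size();
    }

    void
    insert(Key const& key, Value value)
    {
        parts_[partition_index(key)].emplace(key, std::move(value));
    }

    std::vector<std::unordered_map<Key, Value>> const&
    map() const
    {
        return parts_;
    }

private:
    std::vector<std::unordered_map<Key, Value>> parts_;
};

int main()
{
    ToyPartitionedMap<int, std::string> m(4);
    m.insert(7, "seven");
    return static_cast<int>(m.map()[m.partition_index(7)].size());
}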
@@ -66,6 +66,7 @@ XRPL_FEATURE(XRPFees, Supported::yes, VoteBehavior::DefaultNo
XRPL_FEATURE(DisallowIncoming, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (RemoveNFTokenAutoTrustLine, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(FlowSortStrands, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(NegativeUNL, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(RequireFullyCanonicalSig, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(DeletableAccounts, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(Checks, Supported::yes, VoteBehavior::DefaultYes)

@@ -129,7 +130,6 @@ XRPL_RETIRE_FEATURE(HardenedValidations)
XRPL_RETIRE_FEATURE(ImmediateOfferKilled)
XRPL_RETIRE_FEATURE(MultiSign)
XRPL_RETIRE_FEATURE(MultiSignReserve)
XRPL_RETIRE_FEATURE(NegativeUNL)
XRPL_RETIRE_FEATURE(NonFungibleTokensV1_1)
XRPL_RETIRE_FEATURE(PayChan)
XRPL_RETIRE_FEATURE(SortedDirectories)
@@ -39,10 +39,10 @@ public:
// Insert an item, retrieve it, and age it so it gets purged.
{
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.size() == 0);
BEAST_EXPECT(c.getTrackSize() == 0);
BEAST_EXPECT(!c.insert(1, "one"));
BEAST_EXPECT(c.getCacheSize() == 1);
BEAST_EXPECT(c.size() == 1);
BEAST_EXPECT(c.getTrackSize() == 1);

{
std::string s;

@@ -53,7 +53,7 @@ public:
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.size() == 0);
BEAST_EXPECT(c.getTrackSize() == 0);
}

// Insert an item, maintain a strong pointer, age it, and

@@ -61,7 +61,7 @@ public:
{
BEAST_EXPECT(!c.insert(2, "two"));
BEAST_EXPECT(c.getCacheSize() == 1);
BEAST_EXPECT(c.size() == 1);
BEAST_EXPECT(c.getTrackSize() == 1);

{
auto p = c.fetch(2);

@@ -69,14 +69,14 @@ public:
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.size() == 1);
BEAST_EXPECT(c.getTrackSize() == 1);
}

// Make sure its gone now that our reference is gone
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.size() == 0);
BEAST_EXPECT(c.getTrackSize() == 0);
}

// Insert the same key/value pair and make sure we get the same result

@@ -92,7 +92,7 @@ public:
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.size() == 0);
BEAST_EXPECT(c.getTrackSize() == 0);
}

// Put an object in but keep a strong pointer to it, advance the clock a

@@ -102,24 +102,24 @@ public:
// Put an object in
BEAST_EXPECT(!c.insert(4, "four"));
BEAST_EXPECT(c.getCacheSize() == 1);
BEAST_EXPECT(c.size() == 1);
BEAST_EXPECT(c.getTrackSize() == 1);

{
// Keep a strong pointer to it
auto const p1 = c.fetch(4);
BEAST_EXPECT(p1 != nullptr);
BEAST_EXPECT(c.getCacheSize() == 1);
BEAST_EXPECT(c.size() == 1);
BEAST_EXPECT(c.getTrackSize() == 1);
// Advance the clock a lot
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.size() == 1);
BEAST_EXPECT(c.getTrackSize() == 1);
// Canonicalize a new object with the same key
auto p2 = std::make_shared<std::string>("four");
BEAST_EXPECT(c.canonicalize_replace_client(4, p2));
BEAST_EXPECT(c.getCacheSize() == 1);
BEAST_EXPECT(c.size() == 1);
BEAST_EXPECT(c.getTrackSize() == 1);
// Make sure we get the original object
BEAST_EXPECT(p1.get() == p2.get());
}

@@ -127,7 +127,7 @@ public:
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.size() == 0);
BEAST_EXPECT(c.getTrackSize() == 0);
}
}
};
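Review note: the swapped assertions above appear to probe two different counts: the "cached" count for entries the cache still holds strongly, versus the total tracked count, which also covers entries kept only as weak references until they expire (compare convertToWeak and getStrong in the implementation hunks). The standalone snippet below illustrates that strong-versus-tracked distinction with plain standard-library types; it does not use the real TaggedCache API.

#include <cassert>
#include <map>
#include <memory>
#include <string>

int main()
{
    // "Tracked": the map entry exists. "Cached": a strong reference is still
    // held. After the strong reference is dropped, the entry is tracked but
    // no longer retrievable, which is the distinction the test assertions probe.
    std::map<int, std::weak_ptr<std::string>> tracked;
    std::shared_ptr<std::string> strong = std::make_shared<std::string>("one");

    tracked.emplace(1, strong);
    assert(tracked.size() == 1);       // tracked
    assert(!tracked.at(1).expired());  // still reachable through the strong ref

    strong.reset();                    // drop the last strong reference
    assert(tracked.size() == 1);       // still tracked ...
    assert(tracked.at(1).expired());   // ... but no longer retrievable
}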
@@ -15,6 +15,7 @@ namespace test {
/*
* This file implements the following negative UNL related tests:
* -- test filling and applying ttUNL_MODIFY Tx and ledger update
* -- test ttUNL_MODIFY Tx failure without featureNegativeUNL amendment
* -- test the NegativeUNLVote class. The test cases are split to multiple
* test classes to allow parallel execution.
* -- test the negativeUNLFilter function

@@ -207,7 +208,7 @@ class NegativeUNL_test : public beast::unit_test::suite

testcase("Create UNLModify Tx and apply to ledgers");

jtx::Env env(*this, jtx::testable_amendments());
jtx::Env env(*this, jtx::testable_amendments() | featureNegativeUNL);
std::vector<PublicKey> publicKeys = createPublicKeys(3);
// genesis ledger
auto l = std::make_shared<Ledger>(

@@ -215,6 +216,7 @@ class NegativeUNL_test : public beast::unit_test::suite
env.app().config(),
std::vector<uint256>{},
env.app().getNodeFamily());
BEAST_EXPECT(l->rules().enabled(featureNegativeUNL));

// Record the public keys and ledger sequences of expected negative UNL
// validators when we build the ledger history

@@ -498,6 +500,44 @@ class NegativeUNL_test : public beast::unit_test::suite
}
};

class NegativeUNLNoAmendment_test : public beast::unit_test::suite
{
void
testNegativeUNLNoAmendment()
{
testcase("No negative UNL amendment");

jtx::Env env(*this, jtx::testable_amendments() - featureNegativeUNL);
std::vector<PublicKey> publicKeys = createPublicKeys(1);
// genesis ledger
auto l = std::make_shared<Ledger>(
create_genesis,
env.app().config(),
std::vector<uint256>{},
env.app().getNodeFamily());
BEAST_EXPECT(!l->rules().enabled(featureNegativeUNL));

// generate more ledgers
for (auto i = 0; i < 256 - 1; ++i)
{
l = std::make_shared<Ledger>(
*l, env.app().timeKeeper().closeTime());
}
BEAST_EXPECT(l->seq() == 256);
auto txDisable_0 = createTx(true, l->seq(), publicKeys[0]);
OpenView accum(&*l);
BEAST_EXPECT(applyAndTestResult(env, accum, txDisable_0, false));
accum.apply(*l);
BEAST_EXPECT(negUnlSizeTest(l, 0, false, false));
}

void
run() override
{
testNegativeUNLNoAmendment();
}
};

/**
* Utility class for creating validators and ledger history
*/

@@ -523,7 +563,7 @@ struct NetworkHistory
};

NetworkHistory(beast::unit_test::suite& suite, Parameter const& p)
: env(suite, jtx::testable_amendments())
: env(suite, jtx::testable_amendments() | featureNegativeUNL)
, param(p)
, validations(env.app().getValidations())
{

@@ -1827,6 +1867,7 @@ class NegativeUNLVoteFilterValidations_test : public beast::unit_test::suite
};

BEAST_DEFINE_TESTSUITE(NegativeUNL, consensus, ripple);
BEAST_DEFINE_TESTSUITE(NegativeUNLNoAmendment, consensus, ripple);

BEAST_DEFINE_TESTSUITE(NegativeUNLVoteInternal, consensus, ripple);
BEAST_DEFINE_TESTSUITE_MANUAL(NegativeUNLVoteScoreTable, consensus, ripple);
@@ -124,8 +124,7 @@ class Feature_test : public beast::unit_test::suite
featureToName(fixRemoveNFTokenAutoTrustLine) ==
"fixRemoveNFTokenAutoTrustLine");
BEAST_EXPECT(featureToName(featureFlow) == "Flow");
BEAST_EXPECT(
featureToName(featureDeletableAccounts) == "DeletableAccounts");
BEAST_EXPECT(featureToName(featureNegativeUNL) == "NegativeUNL");
BEAST_EXPECT(
featureToName(fixIncludeKeyletFields) == "fixIncludeKeyletFields");
BEAST_EXPECT(featureToName(featureTokenEscrow) == "TokenEscrow");
@@ -10,6 +10,7 @@
#include <doctest/doctest.h>

#include <atomic>
#include <iostream>
#include <map>
#include <thread>

@@ -17,6 +18,40 @@ using namespace ripple;

namespace {

struct logger
{
std::string name;
logger const* const parent = nullptr;
static std::size_t depth;
logger(std::string n) : name(n)
{
std::clog << indent() << name << " begin\n";
++depth;
}

logger(logger const& p, std::string n) : parent(&p), name(n)
{
std::clog << indent() << parent->name << " : " << name << " begin\n";
++depth;
}

~logger()
{
--depth;
if (parent)
std::clog << indent() << parent->name << " : " << name << " end\n";
else
std::clog << indent() << name << " end\n";
}

std::string
indent()
{
return std::string(depth, ' ');
}
};
std::size_t logger::depth = 0;

// Simple HTTP server using Beast for testing
class TestHTTPServer
{
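Review note: the logger struct added above is a small RAII scope tracer; construction prints "begin" and bumps a shared indent depth, destruction prints "end" and restores it, so nested scopes produce an indented trace of where the test got stuck. A compact standalone version of the same pattern, with an illustrative ScopeTrace name, shows the intended nesting:

#include <cstddef>
#include <iostream>
#include <string>

// Minimal RAII scope tracer in the spirit of the logger above: construction
// prints "begin" and indents, destruction prints "end" and outdents.
struct ScopeTrace
{
    static std::size_t depth;
    std::string name;

    explicit ScopeTrace(std::string n) : name(std::move(n))
    {
        std::clog << std::string(depth, ' ') << name << " begin\n";
        ++depth;
    }

    ~ScopeTrace()
    {
        --depth;
        std::clog << std::string(depth, ' ') << name << " end\n";
    }
};

std::size_t ScopeTrace::depth = 0;

int main()
{
    ScopeTrace outer("handleConnection");
    {
        ScopeTrace inner("read request");
    }  // prints " read request end" before "handleConnection end"
}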
@@ -35,6 +70,7 @@ private:
public:
TestHTTPServer() : acceptor_(ioc_), port_(0)
{
logger l("TestHTTPServer()");
// Bind to any available port
endpoint_ = {boost::asio::ip::tcp::v4(), 0};
acceptor_.open(endpoint_.protocol());

@@ -50,6 +86,7 @@ public:

~TestHTTPServer()
{
logger l("~TestHTTPServer()");
stop();
}

@@ -87,6 +124,7 @@ private:
void
stop()
{
logger l("TestHTTPServer::stop");
running_ = false;
acceptor_.close();
}

@@ -94,6 +132,7 @@ private:
void
accept()
{
logger l("TestHTTPServer::accept");
if (!running_)
return;

@@ -115,31 +154,37 @@ private:
void
handleConnection(boost::asio::ip::tcp::socket socket)
{
logger l("TestHTTPServer::handleConnection");
try
{
std::optional<logger> r(std::in_place, l, "read the http request");
// Read the HTTP request
boost::beast::flat_buffer buffer;
boost::beast::http::request<boost::beast::http::string_body> req;
boost::beast::http::read(socket, buffer, req);

// Create response
r.emplace(l, "create response");
boost::beast::http::response<boost::beast::http::string_body> res;
res.version(req.version());
res.result(status_code_);
res.set(boost::beast::http::field::server, "TestServer");

// Add custom headers
r.emplace(l, "add custom headers");
for (auto const& [name, value] : custom_headers_)
{
res.set(name, value);
}

// Set body and prepare payload first
r.emplace(l, "set body and prepare payload");
res.body() = response_body_;
res.prepare_payload();

// Override Content-Length with custom headers after prepare_payload
// This allows us to test case-insensitive header parsing
r.emplace(l, "override content-length");
for (auto const& [name, value] : custom_headers_)
{
if (boost::iequals(name, "Content-Length"))

@@ -150,19 +195,25 @@ private:
}

// Send response
r.emplace(l, "send response");
boost::beast::http::write(socket, res);

// Shutdown socket gracefully
r.emplace(l, "shutdown socket");
boost::system::error_code ec;
socket.shutdown(boost::asio::ip::tcp::socket::shutdown_send, ec);
}
catch (std::exception const&)
{
// Connection handling errors are expected
logger c(l, "catch");
}

if (running_)
{
logger r(l, "accept");
accept();
}
}
};

@@ -176,12 +227,16 @@ runHTTPTest(
std::string& result_data,
boost::system::error_code& result_error)
{
logger l("runHTTPTest");
// Create a null journal for testing
beast::Journal j{beast::Journal::getNullSink()};

std::optional<logger> r(std::in_place, l, "initializeSSLContext");

// Initialize HTTPClient SSL context
HTTPClient::initializeSSLContext("", "", false, j);

r.emplace(l, "HTTPClient::get");
HTTPClient::get(
false, // no SSL
server.ioc(),

@@ -206,6 +261,7 @@ runHTTPTest(
while (!completed &&
std::chrono::steady_clock::now() - start < std::chrono::seconds(10))
{
r.emplace(l, "ioc.run_one");
if (server.ioc().run_one() == 0)
{
break;

@@ -219,6 +275,8 @@ runHTTPTest(

TEST_CASE("HTTPClient case insensitive Content-Length")
{
logger l("HTTPClient case insensitive Content-Length");

// Test different cases of Content-Length header
std::vector<std::string> header_cases = {
"Content-Length", // Standard case

@@ -230,6 +288,7 @@ TEST_CASE("HTTPClient case insensitive Content-Length")

for (auto const& header_name : header_cases)
{
logger h(l, header_name);
TestHTTPServer server;
std::string test_body = "Hello World!";
server.setResponseBody(test_body);

@@ -258,6 +317,7 @@ TEST_CASE("HTTPClient case insensitive Content-Length")

TEST_CASE("HTTPClient basic HTTP request")
{
logger l("HTTPClient basic HTTP request");
TestHTTPServer server;
std::string test_body = "Test response body";
server.setResponseBody(test_body);

@@ -279,6 +339,7 @@ TEST_CASE("HTTPClient basic HTTP request")

TEST_CASE("HTTPClient empty response")
{
logger l("HTTPClient empty response");
TestHTTPServer server;
server.setResponseBody(""); // Empty body
server.setHeader("Content-Length", "0");

@@ -299,6 +360,7 @@ TEST_CASE("HTTPClient empty response")

TEST_CASE("HTTPClient different status codes")
{
logger l("HTTPClient different status codes");
std::vector<unsigned int> status_codes = {200, 404, 500};

for (auto status : status_codes)
@@ -346,7 +346,9 @@ RCLConsensus::Adaptor::onClose(
prevLedger, validations, initialSet, j_);
}
}
else if (prevLedger->isVotingLedger())
else if (
prevLedger->isVotingLedger() &&
prevLedger->rules().enabled(featureNegativeUNL))
{
// previous ledger was a voting ledger,
// so the current consensus session is for a flag ledger,

@@ -1013,7 +1015,8 @@ RCLConsensus::Adaptor::preStartRound(
inboundTransactions_.newRound(prevLgr.seq());

// Notify NegativeUNLVote that new validators are added
if (!nowTrusted.empty())
if (prevLgr.ledger_->rules().enabled(featureNegativeUNL) &&
!nowTrusted.empty())
nUnlVote_.newValidators(prevLgr.seq() + 1, nowTrusted);

// propose only if we're in sync with the network (and validating)
@@ -44,6 +44,8 @@ LedgerHistory::insert(
ledger->stateMap().getHash().isNonZero(),
"ripple::LedgerHistory::insert : nonzero hash");

std::unique_lock sl(m_ledgers_by_hash.peekMutex());

bool const alreadyHad = m_ledgers_by_hash.canonicalize_replace_cache(
ledger->info().hash, ledger);
if (validated)

@@ -55,6 +57,7 @@ LedgerHistory::insert(
LedgerHash
LedgerHistory::getLedgerHash(LedgerIndex index)
{
std::unique_lock sl(m_ledgers_by_hash.peekMutex());
if (auto it = mLedgersByIndex.find(index); it != mLedgersByIndex.end())
return it->second;
return {};

@@ -64,11 +67,13 @@ std::shared_ptr<Ledger const>
LedgerHistory::getLedgerBySeq(LedgerIndex index)
{
{
std::unique_lock sl(m_ledgers_by_hash.peekMutex());
auto it = mLedgersByIndex.find(index);

if (it != mLedgersByIndex.end())
{
uint256 hash = it->second;
sl.unlock();
return getLedgerByHash(hash);
}
}

@@ -84,6 +89,7 @@ LedgerHistory::getLedgerBySeq(LedgerIndex index)

{
// Add this ledger to the local tracking by index
std::unique_lock sl(m_ledgers_by_hash.peekMutex());

XRPL_ASSERT(
ret->isImmutable(),

@@ -433,6 +439,8 @@ LedgerHistory::builtLedger(
XRPL_ASSERT(
!hash.isZero(), "ripple::LedgerHistory::builtLedger : nonzero hash");

std::unique_lock sl(m_consensus_validated.peekMutex());

auto entry = std::make_shared<cv_entry>();
m_consensus_validated.canonicalize_replace_client(index, entry);

@@ -473,6 +481,8 @@ LedgerHistory::validatedLedger(
!hash.isZero(),
"ripple::LedgerHistory::validatedLedger : nonzero hash");

std::unique_lock sl(m_consensus_validated.peekMutex());

auto entry = std::make_shared<cv_entry>();
m_consensus_validated.canonicalize_replace_client(index, entry);

@@ -506,9 +516,10 @@ LedgerHistory::validatedLedger(
bool
LedgerHistory::fixIndex(LedgerIndex ledgerIndex, LedgerHash const& ledgerHash)
{
auto ledger = m_ledgers_by_hash.fetch(ledgerHash);
std::unique_lock sl(m_ledgers_by_hash.peekMutex());
auto it = mLedgersByIndex.find(ledgerIndex);
if (ledger && (it != mLedgersByIndex.end()) && (it->second != ledgerHash))

if ((it != mLedgersByIndex.end()) && (it->second != ledgerHash))
{
it->second = ledgerHash;
return false;
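Review note: the LedgerHistory changes above acquire the cache's own mutex through peekMutex() and hold it with std::unique_lock around both the cache call and the side index (mLedgersByIndex), so the two structures stay consistent under one lock. A reduced sketch of that shape follows; TinyCache, byIndex, and insertBoth are illustrative stand-ins, not the LedgerHistory API.

#include <map>
#include <memory>
#include <mutex>
#include <string>

// Illustrative stand-in for a cache that exposes its mutex to callers.
class TinyCache
{
public:
    std::recursive_mutex&
    peekMutex()
    {
        return mutex_;
    }

    void
    insert(int key, std::shared_ptr<std::string> value)
    {
        byKey_[key] = std::move(value);
    }

private:
    std::recursive_mutex mutex_;
    std::map<int, std::shared_ptr<std::string>> byKey_;
};

TinyCache cache;
std::map<int, int> byIndex;  // side index kept consistent with the cache

void
insertBoth(int index, int key)
{
    // One lock covers both containers, as in the hunks above.
    std::unique_lock lock(cache.peekMutex());
    cache.insert(key, std::make_shared<std::string>("ledger"));
    byIndex[index] = key;
}

int main()
{
    insertBoth(1, 42);
}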
@@ -28,7 +28,7 @@ buildLedgerImpl(
{
auto built = std::make_shared<Ledger>(*parent, closeTime);

if (built->isFlagLedger())
if (built->isFlagLedger() && built->rules().enabled(featureNegativeUNL))
{
built->updateNegativeUNL();
}
@@ -2063,7 +2063,8 @@ NetworkOPsImp::beginConsensus(
"ripple::NetworkOPsImp::beginConsensus : closedLedger parent matches "
"hash");

app_.validators().setNegativeUNL(prevLedger->negativeUNL());
if (prevLedger->rules().enabled(featureNegativeUNL))
app_.validators().setNegativeUNL(prevLedger->negativeUNL());
TrustChanges const changes = app_.validators().updateTrusted(
app_.getValidations().getCurrentNodeIDs(),
closingInfo.parentCloseTime,
@@ -51,6 +51,13 @@ Transactor::invokePreflight<Change>(PreflightContext const& ctx)
return temBAD_SEQUENCE;
}

if (ctx.tx.getTxnType() == ttUNL_MODIFY &&
!ctx.rules.enabled(featureNegativeUNL))
{
JLOG(ctx.j.warn()) << "Change: NegativeUNL not enabled";
return temDISABLED;
}

return tesSUCCESS;
}
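Review note: the preflight check added above follows the usual amendment-gating shape: if the current ledger's rules do not enable the relevant feature, the transaction type is rejected with temDISABLED. The generic sketch below shows that gating logic with made-up names (Rules, preflightUnlModify, a string-keyed feature set); it is not rippled's Rules or feature API.

#include <set>
#include <string>

enum class TxResult { tesSUCCESS, temDISABLED };

// Hypothetical stand-in for rippled's Rules/feature machinery.
struct Rules
{
    std::set<std::string> enabledFeatures;

    bool
    enabled(std::string const& feature) const
    {
        return enabledFeatures.count(feature) != 0;
    }
};

TxResult
preflightUnlModify(Rules const& rules)
{
    // Reject ttUNL_MODIFY-style transactions when the amendment is off.
    if (!rules.enabled("NegativeUNL"))
        return TxResult::temDISABLED;
    return TxResult::tesSUCCESS;
}

int main()
{
    Rules rules;  // NegativeUNL not enabled in this illustration
    return preflightUnlModify(rules) == TxResult::temDISABLED ? 0 : 1;
}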
@@ -95,7 +95,7 @@ getCountsJson(Application& app, int minObjectCount)
ret[jss::treenode_cache_size] =
app.getNodeFamily().getTreeNodeCache()->getCacheSize();
ret[jss::treenode_track_size] =
static_cast<int>(app.getNodeFamily().getTreeNodeCache()->size());
app.getNodeFamily().getTreeNodeCache()->getTrackSize();

std::string uptime;
auto s = UptimeClock::now();