Compare commits


4 Commits

Author SHA1 Message Date
Valentin Balaschenko
0c5fe98d02 perf: Move mutex to the partition level (#5486)
This change introduces two key optimizations:
* Mutex scope reduction: Limits the lock to individual partitions within `TaggedCache`, reducing contention.
* Decoupling: Removes the tight coupling between `LedgerHistory` and `TaggedCache`, improving modularity and testability.

Lock contention analysis based on eBPF showed significant improvements as a result of this change. (A minimal sketch of the partition-locking pattern is included after the changed-files summary below.)
2025-11-18 16:15:50 +00:00
Bart
3fb6acd29e ci: Fix filtering out of Clang 20+ on ARM (#6046)
This change fixes the strategy matrix check to filter out Clang 20 and newer on ARM, which still fail to build due to problems with Boost.
2025-11-17 21:54:13 +00:00
Bart
77b7cef5a7 ci: Use new Debian Trixie images (#6034)
This change uses the new Debian Trixie CI images added by XRPLF/ci#83.
2025-11-17 19:31:19 +00:00
Jingchen
2c187461cc refactor: Retire NegativeUNL amendment (#6033)
Amendments that have been active for more than two years can be retired. This change retires the NegativeUNL amendment.
2025-11-17 14:02:10 +00:00
29 changed files with 273 additions and 642 deletions
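
The first commit above moves TaggedCache from a single cache-wide mutex to one mutex per partition. Below is a minimal sketch of that pattern, not the rippled implementation: PartitionedCache, Partition, and partition() are illustrative names, and the hashing is simplified. Operations on a single key lock only that key's partition; whole-cache operations such as size() take each partition lock in turn, mirroring the size() change in the diff below.

#include <array>
#include <cstddef>
#include <functional>
#include <mutex>
#include <optional>
#include <unordered_map>

template <class Key, class Value, std::size_t NumPartitions = 16>
class PartitionedCache
{
public:
    // Insert or overwrite a value; only the owning partition is locked.
    void insert(Key const& key, Value value)
    {
        auto& part = partition(key);
        std::lock_guard lock(part.mutex);
        part.map.insert_or_assign(key, std::move(value));
    }

    // Fetch a copy of a value, again locking a single partition.
    std::optional<Value> fetch(Key const& key) const
    {
        auto const& part = partition(key);
        std::lock_guard lock(part.mutex);
        if (auto it = part.map.find(key); it != part.map.end())
            return it->second;
        return std::nullopt;
    }

    // Whole-cache operations visit every partition, taking each lock in turn.
    std::size_t size() const
    {
        std::size_t total = 0;
        for (auto const& part : partitions_)
        {
            std::lock_guard lock(part.mutex);
            total += part.map.size();
        }
        return total;
    }

private:
    struct Partition
    {
        mutable std::mutex mutex;
        std::unordered_map<Key, Value> map;
    };

    // Map a key to its partition; a real implementation may reuse the
    // container's own partitioner (cf. partition_index() in the diff below).
    Partition& partition(Key const& key)
    {
        return partitions_[std::hash<Key>{}(key) % NumPartitions];
    }
    Partition const& partition(Key const& key) const
    {
        return partitions_[std::hash<Key>{}(key) % NumPartitions];
    }

    std::array<Partition, NumPartitions> partitions_;
};

With this layout, two threads touching keys that hash to different partitions no longer serialize on a shared lock, which is the contention the eBPF analysis measured.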

View File

@@ -130,8 +130,8 @@ def generate_strategy_matrix(all: bool, config: Config) -> list:
if os['distro_name'] == 'rhel' and architecture['platform'] == 'linux/arm64':
continue
# We skip all clang-20 on arm64 due to boost 1.86 build error
if f'{os['compiler_name']}-{os['compiler_version']}' == 'clang-20' and architecture['platform'] == 'linux/arm64':
# We skip all clang 20+ on arm64 due to Boost build error.
if f'{os['compiler_name']}-{os['compiler_version']}' in ['clang-20', 'clang-21'] and architecture['platform'] == 'linux/arm64':
continue
# Enable code coverage for Debian Bookworm using GCC 15 in Debug and no

View File

@@ -15,63 +15,91 @@
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "12",
"image_sha": "e1782cd"
"image_sha": "0525eae"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "13",
"image_sha": "e1782cd"
"image_sha": "0525eae"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "e1782cd"
"image_sha": "0525eae"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "gcc",
"compiler_version": "15",
"image_sha": "e1782cd"
"image_sha": "0525eae"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "16",
"image_sha": "e1782cd"
"image_sha": "0525eae"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "17",
"image_sha": "e1782cd"
"image_sha": "0525eae"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "18",
"image_sha": "e1782cd"
"image_sha": "0525eae"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "19",
"image_sha": "e1782cd"
"image_sha": "0525eae"
},
{
"distro_name": "debian",
"distro_version": "bookworm",
"compiler_name": "clang",
"compiler_version": "20",
"image_sha": "e1782cd"
"image_sha": "0525eae"
},
{
"distro_name": "debian",
"distro_version": "trixie",
"compiler_name": "gcc",
"compiler_version": "14",
"image_sha": "0525eae"
},
{
"distro_name": "debian",
"distro_version": "trixie",
"compiler_name": "gcc",
"compiler_version": "15",
"image_sha": "0525eae"
},
{
"distro_name": "debian",
"distro_version": "trixie",
"compiler_name": "clang",
"compiler_version": "20",
"image_sha": "0525eae"
},
{
"distro_name": "debian",
"distro_version": "trixie",
"compiler_name": "clang",
"compiler_version": "21",
"image_sha": "0525eae"
},
{
"distro_name": "rhel",

View File

@@ -1,139 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2024 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_BASICS_CANPROCESS_H_INCLUDED
#define RIPPLE_BASICS_CANPROCESS_H_INCLUDED
#include <functional>
#include <mutex>
#include <set>
/** RAII class to check if an Item is already being processed on another thread,
* as indicated by its presence in a Collection.
*
* If the Item is not in the Collection, it will be added under lock in the
* ctor, and removed under lock in the dtor. The object will be considered
* "usable" and evaluate to `true`.
*
* If the Item is in the Collection, no changes will be made to the collection,
* and the CanProcess object will be considered "unusable".
*
* It's up to the caller to decide what "usable" and "unusable" mean. (e.g.
* Process or skip a block of code, or set a flag.)
*
* The current use is to avoid lock contention that would be involved in
* processing something associated with the Item.
*
* Examples:
*
* void IncomingLedgers::acquireAsync(LedgerHash const& hash, ...)
* {
* if (CanProcess check{acquiresMutex_, pendingAcquires_, hash})
* {
* acquire(hash, ...);
* }
* }
*
* bool
* NetworkOPsImp::recvValidation(
* std::shared_ptr<STValidation> const& val,
* std::string const& source)
* {
* CanProcess check(
* validationsMutex_, pendingValidations_, val->getLedgerHash());
* BypassAccept bypassAccept =
* check ? BypassAccept::no : BypassAccept::yes;
* handleNewValidation(app_, val, source, bypassAccept, m_journal);
* }
*
*/
class CanProcess
{
public:
template <class Mutex, class Collection, class Item>
CanProcess(Mutex& mtx, Collection& collection, Item const& item)
: cleanup_(insert(mtx, collection, item))
{
}
~CanProcess()
{
if (cleanup_)
cleanup_();
}
CanProcess(CanProcess const&) = delete;
CanProcess&
operator=(CanProcess const&) = delete;
explicit
operator bool() const
{
return static_cast<bool>(cleanup_);
}
private:
template <bool useIterator, class Mutex, class Collection, class Item>
std::function<void()>
doInsert(Mutex& mtx, Collection& collection, Item const& item)
{
std::unique_lock<Mutex> lock(mtx);
// TODO: Use structured binding once LLVM 16 is the minimum supported
// version. See also: https://github.com/llvm/llvm-project/issues/48582
// https://github.com/llvm/llvm-project/commit/127bf44385424891eb04cff8e52d3f157fc2cb7c
auto const insertResult = collection.insert(item);
auto const it = insertResult.first;
if (!insertResult.second)
return {};
if constexpr (useIterator)
return [&, it]() {
std::unique_lock<Mutex> lock(mtx);
collection.erase(it);
};
else
return [&]() {
std::unique_lock<Mutex> lock(mtx);
collection.erase(item);
};
}
// Generic insert() function doesn't use iterators because they may get
// invalidated
template <class Mutex, class Collection, class Item>
std::function<void()>
insert(Mutex& mtx, Collection& collection, Item const& item)
{
return doInsert<false>(mtx, collection, item);
}
// Specialize insert() for std::set, which does not invalidate iterators for
// insert and erase
template <class Mutex, class Item>
std::function<void()>
insert(Mutex& mtx, std::set<Item>& collection, Item const& item)
{
return doInsert<true>(mtx, collection, item);
}
// If set, then the item is "usable"
std::function<void()> cleanup_;
};
#endif

View File

@@ -2,7 +2,6 @@
#define XRPL_BASICS_SHAMAP_HASH_H_INCLUDED
#include <xrpl/basics/base_uint.h>
#include <xrpl/basics/partitioned_unordered_map.h>
#include <ostream>

View File

@@ -71,9 +71,6 @@ public:
int
getCacheSize() const;
int
getTrackSize() const;
float
getHitRate();
@@ -151,9 +148,6 @@ public:
bool
retrieve(key_type const& key, T& data);
mutex_type&
peekMutex();
std::vector<key_type>
getKeys() const;
@@ -174,11 +168,14 @@ public:
private:
SharedPointerType
initialFetch(key_type const& key, std::lock_guard<mutex_type> const& l);
initialFetch(key_type const& key);
void
collect_metrics();
Mutex&
lockPartition(key_type const& key) const;
private:
struct Stats
{
@@ -281,8 +278,8 @@ private:
[[maybe_unused]] clock_type::time_point const& now,
typename KeyValueCacheType::map_type& partition,
SweptPointersVector& stuffToSweep,
std::atomic<int>& allRemovals,
std::lock_guard<std::recursive_mutex> const&);
std::atomic<int>& allRemoval,
Mutex& partitionLock);
[[nodiscard]] std::thread
sweepHelper(
@@ -291,14 +288,12 @@ private:
typename KeyOnlyCacheType::map_type& partition,
SweptPointersVector&,
std::atomic<int>& allRemovals,
std::lock_guard<std::recursive_mutex> const&);
Mutex& partitionLock);
beast::Journal m_journal;
clock_type& m_clock;
Stats m_stats;
mutex_type mutable m_mutex;
// Used for logging
std::string m_name;
@@ -309,10 +304,11 @@ private:
clock_type::duration const m_target_age;
// Number of items cached
int m_cache_count;
std::atomic<int> m_cache_count;
cache_type m_cache; // Hold strong reference to recent objects
std::uint64_t m_hits;
std::uint64_t m_misses;
std::atomic<std::uint64_t> m_hits;
std::atomic<std::uint64_t> m_misses;
mutable std::vector<mutex_type> partitionLocks_;
};
} // namespace ripple

View File

@@ -3,6 +3,7 @@
#include <xrpl/basics/IntrusivePointer.ipp>
#include <xrpl/basics/TaggedCache.h>
#include <xrpl/beast/core/CurrentThreadName.h>
namespace ripple {
@@ -41,6 +42,7 @@ inline TaggedCache<
, m_hits(0)
, m_misses(0)
{
partitionLocks_ = std::vector<mutex_type>(m_cache.partitions());
}
template <
@@ -86,8 +88,13 @@ TaggedCache<
KeyEqual,
Mutex>::size() const
{
std::lock_guard lock(m_mutex);
return m_cache.size();
std::size_t totalSize = 0;
for (size_t i = 0; i < partitionLocks_.size(); ++i)
{
std::lock_guard<Mutex> lock(partitionLocks_[i]);
totalSize += m_cache.map()[i].size();
}
return totalSize;
}
template <
@@ -110,32 +117,7 @@ TaggedCache<
KeyEqual,
Mutex>::getCacheSize() const
{
std::lock_guard lock(m_mutex);
return m_cache_count;
}
template <
class Key,
class T,
bool IsKeyCache,
class SharedWeakUnionPointer,
class SharedPointerType,
class Hash,
class KeyEqual,
class Mutex>
inline int
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::getTrackSize() const
{
std::lock_guard lock(m_mutex);
return m_cache.size();
return m_cache_count.load(std::memory_order_relaxed);
}
template <
@@ -158,9 +140,10 @@ TaggedCache<
KeyEqual,
Mutex>::getHitRate()
{
std::lock_guard lock(m_mutex);
auto const total = static_cast<float>(m_hits + m_misses);
return m_hits * (100.0f / std::max(1.0f, total));
auto const hits = m_hits.load(std::memory_order_relaxed);
auto const misses = m_misses.load(std::memory_order_relaxed);
float const total = float(hits + misses);
return hits * (100.0f / std::max(1.0f, total));
}
template <
@@ -183,9 +166,12 @@ TaggedCache<
KeyEqual,
Mutex>::clear()
{
std::lock_guard lock(m_mutex);
for (auto& mutex : partitionLocks_)
mutex.lock();
m_cache.clear();
m_cache_count = 0;
for (auto& mutex : partitionLocks_)
mutex.unlock();
m_cache_count.store(0, std::memory_order_relaxed);
}
template <
@@ -208,11 +194,9 @@ TaggedCache<
KeyEqual,
Mutex>::reset()
{
std::lock_guard lock(m_mutex);
m_cache.clear();
m_cache_count = 0;
m_hits = 0;
m_misses = 0;
clear();
m_hits.store(0, std::memory_order_relaxed);
m_misses.store(0, std::memory_order_relaxed);
}
template <
@@ -236,7 +220,7 @@ TaggedCache<
KeyEqual,
Mutex>::touch_if_exists(KeyComparable const& key)
{
std::lock_guard lock(m_mutex);
std::lock_guard<Mutex> lock(lockPartition(key));
auto const iter(m_cache.find(key));
if (iter == m_cache.end())
{
@@ -278,8 +262,6 @@ TaggedCache<
auto const start = std::chrono::steady_clock::now();
{
std::lock_guard lock(m_mutex);
if (m_target_size == 0 ||
(static_cast<int>(m_cache.size()) <= m_target_size))
{
@@ -311,12 +293,13 @@ TaggedCache<
m_cache.map()[p],
allStuffToSweep[p],
allRemovals,
lock));
partitionLocks_[p]));
}
for (std::thread& worker : workers)
worker.join();
m_cache_count -= allRemovals;
int removals = allRemovals.load(std::memory_order_relaxed);
m_cache_count.fetch_sub(removals, std::memory_order_relaxed);
}
// At this point allStuffToSweep will go out of scope outside the lock
// and decrement the reference count on each strong pointer.
@@ -350,7 +333,8 @@ TaggedCache<
{
// Remove from cache, if !valid, remove from map too. Returns true if
// removed from cache
std::lock_guard lock(m_mutex);
std::lock_guard<Mutex> lock(lockPartition(key));
auto cit = m_cache.find(key);
@@ -363,7 +347,7 @@ TaggedCache<
if (entry.isCached())
{
--m_cache_count;
m_cache_count.fetch_sub(1, std::memory_order_relaxed);
entry.ptr.convertToWeak();
ret = true;
}
@@ -401,17 +385,16 @@ TaggedCache<
{
// Return canonical value, store if needed, refresh in cache
// Return values: true=we had the data already
std::lock_guard lock(m_mutex);
std::lock_guard<Mutex> lock(lockPartition(key));
auto cit = m_cache.find(key);
if (cit == m_cache.end())
{
m_cache.emplace(
std::piecewise_construct,
std::forward_as_tuple(key),
std::forward_as_tuple(m_clock.now(), data));
++m_cache_count;
m_cache_count.fetch_add(1, std::memory_order_relaxed);
return false;
}
@@ -460,12 +443,12 @@ TaggedCache<
data = cachedData;
}
++m_cache_count;
m_cache_count.fetch_add(1, std::memory_order_relaxed);
return true;
}
entry.ptr = data;
++m_cache_count;
m_cache_count.fetch_add(1, std::memory_order_relaxed);
return false;
}
@@ -541,10 +524,11 @@ TaggedCache<
KeyEqual,
Mutex>::fetch(key_type const& key)
{
std::lock_guard<mutex_type> l(m_mutex);
auto ret = initialFetch(key, l);
std::lock_guard<Mutex> lock(lockPartition(key));
auto ret = initialFetch(key);
if (!ret)
++m_misses;
m_misses.fetch_add(1, std::memory_order_relaxed);
return ret;
}
@@ -608,8 +592,8 @@ TaggedCache<
Mutex>::insert(key_type const& key)
-> std::enable_if_t<IsKeyCache, ReturnType>
{
std::lock_guard lock(m_mutex);
clock_type::time_point const now(m_clock.now());
std::lock_guard<Mutex> lock(lockPartition(key));
auto [it, inserted] = m_cache.emplace(
std::piecewise_construct,
std::forward_as_tuple(key),
@@ -649,29 +633,6 @@ TaggedCache<
return true;
}
template <
class Key,
class T,
bool IsKeyCache,
class SharedWeakUnionPointer,
class SharedPointerType,
class Hash,
class KeyEqual,
class Mutex>
inline auto
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::peekMutex() -> mutex_type&
{
return m_mutex;
}
template <
class Key,
class T,
@@ -695,10 +656,13 @@ TaggedCache<
std::vector<key_type> v;
{
std::lock_guard lock(m_mutex);
v.reserve(m_cache.size());
for (auto const& _ : m_cache)
v.push_back(_.first);
for (std::size_t i = 0; i < partitionLocks_.size(); ++i)
{
std::lock_guard<Mutex> lock(partitionLocks_[i]);
for (auto const& entry : m_cache.map()[i])
v.push_back(entry.first);
}
}
return v;
@@ -724,11 +688,12 @@ TaggedCache<
KeyEqual,
Mutex>::rate() const
{
std::lock_guard lock(m_mutex);
auto const tot = m_hits + m_misses;
auto const hits = m_hits.load(std::memory_order_relaxed);
auto const misses = m_misses.load(std::memory_order_relaxed);
auto const tot = hits + misses;
if (tot == 0)
return 0;
return double(m_hits) / tot;
return 0.0;
return double(hits) / tot;
}
template <
@@ -752,18 +717,16 @@ TaggedCache<
KeyEqual,
Mutex>::fetch(key_type const& digest, Handler const& h)
{
{
std::lock_guard l(m_mutex);
if (auto ret = initialFetch(digest, l))
return ret;
}
std::lock_guard<Mutex> lock(lockPartition(digest));
if (auto ret = initialFetch(digest))
return ret;
auto sle = h();
if (!sle)
return {};
std::lock_guard l(m_mutex);
++m_misses;
m_misses.fetch_add(1, std::memory_order_relaxed);
auto const [it, inserted] =
m_cache.emplace(digest, Entry(m_clock.now(), std::move(sle)));
if (!inserted)
@@ -790,9 +753,10 @@ TaggedCache<
SharedPointerType,
Hash,
KeyEqual,
Mutex>::
initialFetch(key_type const& key, std::lock_guard<mutex_type> const& l)
Mutex>::initialFetch(key_type const& key)
{
std::lock_guard<Mutex> lock(lockPartition(key));
auto cit = m_cache.find(key);
if (cit == m_cache.end())
return {};
@@ -800,7 +764,7 @@ TaggedCache<
Entry& entry = cit->second;
if (entry.isCached())
{
++m_hits;
m_hits.fetch_add(1, std::memory_order_relaxed);
entry.touch(m_clock.now());
return entry.ptr.getStrong();
}
@@ -808,12 +772,13 @@ TaggedCache<
if (entry.isCached())
{
// independent of cache size, so not counted as a hit
++m_cache_count;
m_cache_count.fetch_add(1, std::memory_order_relaxed);
entry.touch(m_clock.now());
return entry.ptr.getStrong();
}
m_cache.erase(cit);
return {};
}
@@ -842,10 +807,11 @@ TaggedCache<
{
beast::insight::Gauge::value_type hit_rate(0);
{
std::lock_guard lock(m_mutex);
auto const total(m_hits + m_misses);
auto const hits = m_hits.load(std::memory_order_relaxed);
auto const misses = m_misses.load(std::memory_order_relaxed);
auto const total = hits + misses;
if (total != 0)
hit_rate = (m_hits * 100) / total;
hit_rate = (hits * 100) / total;
}
m_stats.hit_rate.set(hit_rate);
}
@@ -876,12 +842,16 @@ TaggedCache<
typename KeyValueCacheType::map_type& partition,
SweptPointersVector& stuffToSweep,
std::atomic<int>& allRemovals,
std::lock_guard<std::recursive_mutex> const&)
Mutex& partitionLock)
{
return std::thread([&, this]() {
beast::setCurrentThreadName("sweep-KVCache");
int cacheRemovals = 0;
int mapRemovals = 0;
std::lock_guard<Mutex> lock(partitionLock);
// Keep references to all the stuff we sweep
// so that we can destroy them outside the lock.
stuffToSweep.reserve(partition.size());
@@ -965,12 +935,16 @@ TaggedCache<
typename KeyOnlyCacheType::map_type& partition,
SweptPointersVector&,
std::atomic<int>& allRemovals,
std::lock_guard<std::recursive_mutex> const&)
Mutex& partitionLock)
{
return std::thread([&, this]() {
beast::setCurrentThreadName("sweep-KCache");
int cacheRemovals = 0;
int mapRemovals = 0;
std::lock_guard<Mutex> lock(partitionLock);
// Keep references to all the stuff we sweep
// so that we can destroy them outside the lock.
{
@@ -1005,6 +979,29 @@ TaggedCache<
});
}
template <
class Key,
class T,
bool IsKeyCache,
class SharedWeakUnionPointer,
class SharedPointerType,
class Hash,
class KeyEqual,
class Mutex>
inline Mutex&
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::lockPartition(key_type const& key) const
{
return partitionLocks_[m_cache.partition_index(key)];
}
} // namespace ripple
#endif

View File

@@ -258,6 +258,12 @@ public:
return map_;
}
partition_map_type const&
map() const
{
return map_;
}
iterator
begin()
{
@@ -302,6 +308,12 @@ public:
return cend();
}
std::size_t
partition_index(key_type const& key) const
{
return partitioner(key);
}
private:
template <class T>
void

View File

@@ -36,8 +36,6 @@ struct LedgerHeader
// If validated is false, it means "not yet validated."
// Once validated is true, it will never be set false at a later time.
// NOTE: If you are accessing this directly, you are probably doing it
// wrong. Use LedgerMaster::isValidated().
// VFALCO TODO Make this not mutable
bool mutable validated = false;
bool accepted = false;

View File

@@ -66,7 +66,6 @@ XRPL_FEATURE(XRPFees, Supported::yes, VoteBehavior::DefaultNo
XRPL_FEATURE(DisallowIncoming, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (RemoveNFTokenAutoTrustLine, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(FlowSortStrands, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(NegativeUNL, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(RequireFullyCanonicalSig, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(DeletableAccounts, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(Checks, Supported::yes, VoteBehavior::DefaultYes)
@@ -130,6 +129,7 @@ XRPL_RETIRE_FEATURE(HardenedValidations)
XRPL_RETIRE_FEATURE(ImmediateOfferKilled)
XRPL_RETIRE_FEATURE(MultiSign)
XRPL_RETIRE_FEATURE(MultiSignReserve)
XRPL_RETIRE_FEATURE(NegativeUNL)
XRPL_RETIRE_FEATURE(NonFungibleTokensV1_1)
XRPL_RETIRE_FEATURE(PayChan)
XRPL_RETIRE_FEATURE(SortedDirectories)

View File

@@ -91,8 +91,6 @@ public:
virtual void
acquireAsync(
JobType type,
std::string const& name,
uint256 const& hash,
std::uint32_t seq,
InboundLedger::Reason reason) override

View File

@@ -1,166 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012-2016 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <xrpl/basics/CanProcess.h>
#include <xrpl/beast/unit_test.h>
#include <memory>
namespace ripple {
namespace test {
struct CanProcess_test : beast::unit_test::suite
{
template <class Mutex, class Collection, class Item>
void
test(
std::string const& name,
Mutex& mtx,
Collection& collection,
std::vector<Item> const& items)
{
testcase(name);
if (!BEAST_EXPECT(!items.empty()))
return;
if (!BEAST_EXPECT(collection.empty()))
return;
// CanProcess objects can't be copied or moved. To make that easier,
// store shared_ptrs
std::vector<std::shared_ptr<CanProcess>> trackers;
// Fill up the vector with two CanProcess for each Item. The first
// inserts the item into the collection and is "good". The second does
// not and is "bad".
for (int i = 0; i < items.size(); ++i)
{
{
auto const& good = trackers.emplace_back(
std::make_shared<CanProcess>(mtx, collection, items[i]));
BEAST_EXPECT(*good);
}
BEAST_EXPECT(trackers.size() == (2 * i) + 1);
BEAST_EXPECT(collection.size() == i + 1);
{
auto const& bad = trackers.emplace_back(
std::make_shared<CanProcess>(mtx, collection, items[i]));
BEAST_EXPECT(!*bad);
}
BEAST_EXPECT(trackers.size() == 2 * (i + 1));
BEAST_EXPECT(collection.size() == i + 1);
}
BEAST_EXPECT(collection.size() == items.size());
// Now remove the items from the vector<CanProcess> two at a time, and
// try to get another CanProcess for that item.
for (int i = 0; i < items.size(); ++i)
{
// Remove the "bad" one in the second position
// This will have no effect on the collection
{
auto const iter = trackers.begin() + 1;
BEAST_EXPECT(!**iter);
trackers.erase(iter);
}
BEAST_EXPECT(trackers.size() == (2 * items.size()) - 1);
BEAST_EXPECT(collection.size() == items.size());
{
// Append a new "bad" one
auto const& bad = trackers.emplace_back(
std::make_shared<CanProcess>(mtx, collection, items[i]));
BEAST_EXPECT(!*bad);
}
BEAST_EXPECT(trackers.size() == 2 * items.size());
BEAST_EXPECT(collection.size() == items.size());
// Remove the "good" one from the front
{
auto const iter = trackers.begin();
BEAST_EXPECT(**iter);
trackers.erase(iter);
}
BEAST_EXPECT(trackers.size() == (2 * items.size()) - 1);
BEAST_EXPECT(collection.size() == items.size() - 1);
{
// Append a new "good" one
auto const& good = trackers.emplace_back(
std::make_shared<CanProcess>(mtx, collection, items[i]));
BEAST_EXPECT(*good);
}
BEAST_EXPECT(trackers.size() == 2 * items.size());
BEAST_EXPECT(collection.size() == items.size());
}
// Now remove them all two at a time
for (int i = items.size() - 1; i >= 0; --i)
{
// Remove the "bad" one from the front
{
auto const iter = trackers.begin();
BEAST_EXPECT(!**iter);
trackers.erase(iter);
}
BEAST_EXPECT(trackers.size() == (2 * i) + 1);
BEAST_EXPECT(collection.size() == i + 1);
// Remove the "good" one now in front
{
auto const iter = trackers.begin();
BEAST_EXPECT(**iter);
trackers.erase(iter);
}
BEAST_EXPECT(trackers.size() == 2 * i);
BEAST_EXPECT(collection.size() == i);
}
BEAST_EXPECT(trackers.empty());
BEAST_EXPECT(collection.empty());
}
void
run() override
{
{
std::mutex m;
std::set<int> collection;
std::vector<int> const items{1, 2, 3, 4, 5};
test("set of int", m, collection, items);
}
{
std::mutex m;
std::set<std::string> collection;
std::vector<std::string> const items{
"one", "two", "three", "four", "five"};
test("set of string", m, collection, items);
}
{
std::mutex m;
std::unordered_set<char> collection;
std::vector<char> const items{'1', '2', '3', '4', '5'};
test("unordered_set of char", m, collection, items);
}
{
std::mutex m;
std::unordered_set<std::uint64_t> collection;
std::vector<std::uint64_t> const items{100u, 1000u, 150u, 4u, 0u};
test("unordered_set of uint64_t", m, collection, items);
}
}
};
BEAST_DEFINE_TESTSUITE(CanProcess, ripple_basics, ripple);
} // namespace test
} // namespace ripple

View File

@@ -39,10 +39,10 @@ public:
// Insert an item, retrieve it, and age it so it gets purged.
{
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.getTrackSize() == 0);
BEAST_EXPECT(c.size() == 0);
BEAST_EXPECT(!c.insert(1, "one"));
BEAST_EXPECT(c.getCacheSize() == 1);
BEAST_EXPECT(c.getTrackSize() == 1);
BEAST_EXPECT(c.size() == 1);
{
std::string s;
@@ -53,7 +53,7 @@ public:
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.getTrackSize() == 0);
BEAST_EXPECT(c.size() == 0);
}
// Insert an item, maintain a strong pointer, age it, and
@@ -61,7 +61,7 @@ public:
{
BEAST_EXPECT(!c.insert(2, "two"));
BEAST_EXPECT(c.getCacheSize() == 1);
BEAST_EXPECT(c.getTrackSize() == 1);
BEAST_EXPECT(c.size() == 1);
{
auto p = c.fetch(2);
@@ -69,14 +69,14 @@ public:
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.getTrackSize() == 1);
BEAST_EXPECT(c.size() == 1);
}
// Make sure its gone now that our reference is gone
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.getTrackSize() == 0);
BEAST_EXPECT(c.size() == 0);
}
// Insert the same key/value pair and make sure we get the same result
@@ -92,7 +92,7 @@ public:
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.getTrackSize() == 0);
BEAST_EXPECT(c.size() == 0);
}
// Put an object in but keep a strong pointer to it, advance the clock a
@@ -102,24 +102,24 @@ public:
// Put an object in
BEAST_EXPECT(!c.insert(4, "four"));
BEAST_EXPECT(c.getCacheSize() == 1);
BEAST_EXPECT(c.getTrackSize() == 1);
BEAST_EXPECT(c.size() == 1);
{
// Keep a strong pointer to it
auto const p1 = c.fetch(4);
BEAST_EXPECT(p1 != nullptr);
BEAST_EXPECT(c.getCacheSize() == 1);
BEAST_EXPECT(c.getTrackSize() == 1);
BEAST_EXPECT(c.size() == 1);
// Advance the clock a lot
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.getTrackSize() == 1);
BEAST_EXPECT(c.size() == 1);
// Canonicalize a new object with the same key
auto p2 = std::make_shared<std::string>("four");
BEAST_EXPECT(c.canonicalize_replace_client(4, p2));
BEAST_EXPECT(c.getCacheSize() == 1);
BEAST_EXPECT(c.getTrackSize() == 1);
BEAST_EXPECT(c.size() == 1);
// Make sure we get the original object
BEAST_EXPECT(p1.get() == p2.get());
}
@@ -127,7 +127,7 @@ public:
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.getTrackSize() == 0);
BEAST_EXPECT(c.size() == 0);
}
}
};

View File

@@ -15,7 +15,6 @@ namespace test {
/*
* This file implements the following negative UNL related tests:
* -- test filling and applying ttUNL_MODIFY Tx and ledger update
* -- test ttUNL_MODIFY Tx failure without featureNegativeUNL amendment
* -- test the NegativeUNLVote class. The test cases are split to multiple
* test classes to allow parallel execution.
* -- test the negativeUNLFilter function
@@ -208,7 +207,7 @@ class NegativeUNL_test : public beast::unit_test::suite
testcase("Create UNLModify Tx and apply to ledgers");
jtx::Env env(*this, jtx::testable_amendments() | featureNegativeUNL);
jtx::Env env(*this, jtx::testable_amendments());
std::vector<PublicKey> publicKeys = createPublicKeys(3);
// genesis ledger
auto l = std::make_shared<Ledger>(
@@ -216,7 +215,6 @@ class NegativeUNL_test : public beast::unit_test::suite
env.app().config(),
std::vector<uint256>{},
env.app().getNodeFamily());
BEAST_EXPECT(l->rules().enabled(featureNegativeUNL));
// Record the public keys and ledger sequences of expected negative UNL
// validators when we build the ledger history
@@ -500,44 +498,6 @@ class NegativeUNL_test : public beast::unit_test::suite
}
};
class NegativeUNLNoAmendment_test : public beast::unit_test::suite
{
void
testNegativeUNLNoAmendment()
{
testcase("No negative UNL amendment");
jtx::Env env(*this, jtx::testable_amendments() - featureNegativeUNL);
std::vector<PublicKey> publicKeys = createPublicKeys(1);
// genesis ledger
auto l = std::make_shared<Ledger>(
create_genesis,
env.app().config(),
std::vector<uint256>{},
env.app().getNodeFamily());
BEAST_EXPECT(!l->rules().enabled(featureNegativeUNL));
// generate more ledgers
for (auto i = 0; i < 256 - 1; ++i)
{
l = std::make_shared<Ledger>(
*l, env.app().timeKeeper().closeTime());
}
BEAST_EXPECT(l->seq() == 256);
auto txDisable_0 = createTx(true, l->seq(), publicKeys[0]);
OpenView accum(&*l);
BEAST_EXPECT(applyAndTestResult(env, accum, txDisable_0, false));
accum.apply(*l);
BEAST_EXPECT(negUnlSizeTest(l, 0, false, false));
}
void
run() override
{
testNegativeUNLNoAmendment();
}
};
/**
* Utility class for creating validators and ledger history
*/
@@ -563,7 +523,7 @@ struct NetworkHistory
};
NetworkHistory(beast::unit_test::suite& suite, Parameter const& p)
: env(suite, jtx::testable_amendments() | featureNegativeUNL)
: env(suite, jtx::testable_amendments())
, param(p)
, validations(env.app().getValidations())
{
@@ -1867,7 +1827,6 @@ class NegativeUNLVoteFilterValidations_test : public beast::unit_test::suite
};
BEAST_DEFINE_TESTSUITE(NegativeUNL, consensus, ripple);
BEAST_DEFINE_TESTSUITE(NegativeUNLNoAmendment, consensus, ripple);
BEAST_DEFINE_TESTSUITE(NegativeUNLVoteInternal, consensus, ripple);
BEAST_DEFINE_TESTSUITE_MANUAL(NegativeUNLVoteScoreTable, consensus, ripple);

View File

@@ -124,7 +124,8 @@ class Feature_test : public beast::unit_test::suite
featureToName(fixRemoveNFTokenAutoTrustLine) ==
"fixRemoveNFTokenAutoTrustLine");
BEAST_EXPECT(featureToName(featureFlow) == "Flow");
BEAST_EXPECT(featureToName(featureNegativeUNL) == "NegativeUNL");
BEAST_EXPECT(
featureToName(featureDeletableAccounts) == "DeletableAccounts");
BEAST_EXPECT(
featureToName(fixIncludeKeyletFields) == "fixIncludeKeyletFields");
BEAST_EXPECT(featureToName(featureTokenEscrow) == "TokenEscrow");

View File

@@ -118,12 +118,15 @@ RCLConsensus::Adaptor::acquireLedger(LedgerHash const& hash)
// Tell the ledger acquire system that we need the consensus ledger
acquiringLedger_ = hash;
app_.getInboundLedgers().acquireAsync(
app_.getJobQueue().addJob(
jtADVANCE,
"getConsensusLedger1",
hash,
0,
InboundLedger::Reason::CONSENSUS);
[id = hash, &app = app_, this]() {
JLOG(j_.debug())
<< "JOB advanceLedger getConsensusLedger1 started";
app.getInboundLedgers().acquireAsync(
id, 0, InboundLedger::Reason::CONSENSUS);
});
}
return std::nullopt;
}
@@ -343,9 +346,7 @@ RCLConsensus::Adaptor::onClose(
prevLedger, validations, initialSet, j_);
}
}
else if (
prevLedger->isVotingLedger() &&
prevLedger->rules().enabled(featureNegativeUNL))
else if (prevLedger->isVotingLedger())
{
// previous ledger was a voting ledger,
// so the current consensus session is for a flag ledger,
@@ -1012,8 +1013,7 @@ RCLConsensus::Adaptor::preStartRound(
inboundTransactions_.newRound(prevLgr.seq());
// Notify NegativeUNLVote that new validators are added
if (prevLgr.ledger_->rules().enabled(featureNegativeUNL) &&
!nowTrusted.empty())
if (!nowTrusted.empty())
nUnlVote_.newValidators(prevLgr.seq() + 1, nowTrusted);
// propose only if we're in sync with the network (and validating)
@@ -1056,8 +1056,7 @@ void
RCLConsensus::Adaptor::updateOperatingMode(std::size_t const positions) const
{
if (!positions && app_.getOPs().isFull())
app_.getOPs().setMode(
OperatingMode::CONNECTED, "updateOperatingMode: no positions");
app_.getOPs().setMode(OperatingMode::CONNECTED);
}
void

View File

@@ -120,12 +120,15 @@ RCLValidationsAdaptor::acquire(LedgerHash const& hash)
JLOG(j_.warn())
<< "Need validated ledger for preferred ledger analysis " << hash;
app_.getInboundLedgers().acquireAsync(
jtADVANCE,
"getConsensusLedger2",
hash,
0,
InboundLedger::Reason::CONSENSUS);
Application* pApp = &app_;
app_.getJobQueue().addJob(
jtADVANCE, "getConsensusLedger2", [pApp, hash, this]() {
JLOG(j_.debug())
<< "JOB advanceLedger getConsensusLedger2 started";
pApp->getInboundLedgers().acquireAsync(
hash, 0, InboundLedger::Reason::CONSENSUS);
});
return std::nullopt;
}

View File

@@ -28,8 +28,6 @@ public:
// instead. Inbound ledger acquisition is asynchronous anyway.
virtual void
acquireAsync(
JobType type,
std::string const& name,
uint256 const& hash,
std::uint32_t seq,
InboundLedger::Reason reason) = 0;

View File

@@ -44,8 +44,6 @@ LedgerHistory::insert(
ledger->stateMap().getHash().isNonZero(),
"ripple::LedgerHistory::insert : nonzero hash");
std::unique_lock sl(m_ledgers_by_hash.peekMutex());
bool const alreadyHad = m_ledgers_by_hash.canonicalize_replace_cache(
ledger->info().hash, ledger);
if (validated)
@@ -57,7 +55,6 @@ LedgerHistory::insert(
LedgerHash
LedgerHistory::getLedgerHash(LedgerIndex index)
{
std::unique_lock sl(m_ledgers_by_hash.peekMutex());
if (auto it = mLedgersByIndex.find(index); it != mLedgersByIndex.end())
return it->second;
return {};
@@ -67,13 +64,11 @@ std::shared_ptr<Ledger const>
LedgerHistory::getLedgerBySeq(LedgerIndex index)
{
{
std::unique_lock sl(m_ledgers_by_hash.peekMutex());
auto it = mLedgersByIndex.find(index);
if (it != mLedgersByIndex.end())
{
uint256 hash = it->second;
sl.unlock();
return getLedgerByHash(hash);
}
}
@@ -89,7 +84,6 @@ LedgerHistory::getLedgerBySeq(LedgerIndex index)
{
// Add this ledger to the local tracking by index
std::unique_lock sl(m_ledgers_by_hash.peekMutex());
XRPL_ASSERT(
ret->isImmutable(),
@@ -439,8 +433,6 @@ LedgerHistory::builtLedger(
XRPL_ASSERT(
!hash.isZero(), "ripple::LedgerHistory::builtLedger : nonzero hash");
std::unique_lock sl(m_consensus_validated.peekMutex());
auto entry = std::make_shared<cv_entry>();
m_consensus_validated.canonicalize_replace_client(index, entry);
@@ -481,8 +473,6 @@ LedgerHistory::validatedLedger(
!hash.isZero(),
"ripple::LedgerHistory::validatedLedger : nonzero hash");
std::unique_lock sl(m_consensus_validated.peekMutex());
auto entry = std::make_shared<cv_entry>();
m_consensus_validated.canonicalize_replace_client(index, entry);
@@ -516,10 +506,9 @@ LedgerHistory::validatedLedger(
bool
LedgerHistory::fixIndex(LedgerIndex ledgerIndex, LedgerHash const& ledgerHash)
{
std::unique_lock sl(m_ledgers_by_hash.peekMutex());
auto ledger = m_ledgers_by_hash.fetch(ledgerHash);
auto it = mLedgersByIndex.find(ledgerIndex);
if ((it != mLedgersByIndex.end()) && (it->second != ledgerHash))
if (ledger && (it != mLedgersByIndex.end()) && (it->second != ledgerHash))
{
it->second = ledgerHash;
return false;

View File

@@ -28,7 +28,7 @@ buildLedgerImpl(
{
auto built = std::make_shared<Ledger>(*parent, closeTime);
if (built->isFlagLedger() && built->rules().enabled(featureNegativeUNL))
if (built->isFlagLedger())
{
built->updateNegativeUNL();
}

View File

@@ -373,14 +373,7 @@ InboundLedger::onTimer(bool wasProgress, ScopedLockType&)
if (!wasProgress)
{
if (checkLocal())
{
// Done. Something else (probably consensus) built the ledger
// locally while waiting for data (or possibly before requesting)
XRPL_ASSERT(isDone(), "ripple::InboundLedger::onTimer : done");
JLOG(journal_.info()) << "Finished while waiting " << hash_;
return;
}
checkLocal();
mByHash = true;

View File

@@ -5,9 +5,9 @@
#include <xrpld/core/JobQueue.h>
#include <xrpld/perflog/PerfLog.h>
#include <xrpl/basics/CanProcess.h>
#include <xrpl/basics/DecayingSample.h>
#include <xrpl/basics/Log.h>
#include <xrpl/basics/scope.h>
#include <xrpl/beast/container/aged_map.h>
#include <xrpl/protocol/jss.h>
@@ -64,15 +64,12 @@ public:
(reason != InboundLedger::Reason::CONSENSUS))
return {};
std::stringstream ss;
bool isNew = true;
std::shared_ptr<InboundLedger> inbound;
{
ScopedLockType sl(mLock);
if (stopping_)
{
JLOG(j_.debug()) << "Abort(stopping): " << ss.str();
return {};
}
@@ -96,66 +93,53 @@ public:
++mCounter;
}
}
ss << " IsNew: " << (isNew ? "true" : "false");
if (inbound->isFailed())
{
JLOG(j_.debug()) << "Abort(failed): " << ss.str();
return {};
}
if (!isNew)
inbound->update(seq);
if (!inbound->isComplete())
{
JLOG(j_.debug()) << "InProgress: " << ss.str();
return {};
}
JLOG(j_.debug()) << "Complete: " << ss.str();
return inbound->getLedger();
};
using namespace std::chrono_literals;
return perf::measureDurationAndLog(
std::shared_ptr<Ledger const> ledger = perf::measureDurationAndLog(
doAcquire, "InboundLedgersImp::acquire", 500ms, j_);
return ledger;
}
void
acquireAsync(
JobType type,
std::string const& name,
uint256 const& hash,
std::uint32_t seq,
InboundLedger::Reason reason) override
{
if (auto check = std::make_shared<CanProcess const>(
acquiresMutex_, pendingAcquires_, hash);
*check)
std::unique_lock lock(acquiresMutex_);
try
{
app_.getJobQueue().addJob(
type, name, [check, name, hash, seq, reason, this]() {
JLOG(j_.debug())
<< "JOB acquireAsync " << name << " started ";
try
{
acquire(hash, seq, reason);
}
catch (std::exception const& e)
{
JLOG(j_.warn()) << "Exception thrown for acquiring new "
"inbound ledger "
<< hash << ": " << e.what();
}
catch (...)
{
JLOG(j_.warn())
<< "Unknown exception thrown for acquiring new "
"inbound ledger "
<< hash;
}
});
if (pendingAcquires_.contains(hash))
return;
pendingAcquires_.insert(hash);
scope_unlock unlock(lock);
acquire(hash, seq, reason);
}
catch (std::exception const& e)
{
JLOG(j_.warn())
<< "Exception thrown for acquiring new inbound ledger " << hash
<< ": " << e.what();
}
catch (...)
{
JLOG(j_.warn())
<< "Unknown exception thrown for acquiring new inbound ledger "
<< hash;
}
pendingAcquires_.erase(hash);
}
std::shared_ptr<InboundLedger>

View File

@@ -942,9 +942,8 @@ LedgerMaster::checkAccept(std::shared_ptr<Ledger const> const& ledger)
}
JLOG(m_journal.info()) << "Advancing accepted ledger to "
<< ledger->info().seq << " ("
<< to_short_string(ledger->info().hash)
<< ") with >= " << minVal << " validations";
<< ledger->info().seq << " with >= " << minVal
<< " validations";
ledger->setValidated();
ledger->setFull();

View File

@@ -12,8 +12,7 @@ TimeoutCounter::TimeoutCounter(
QueueJobParameter&& jobParameter,
beast::Journal journal)
: app_(app)
, sink_(journal, to_short_string(hash) + " ")
, journal_(sink_)
, journal_(journal)
, hash_(hash)
, timeouts_(0)
, complete_(false)
@@ -33,8 +32,6 @@ TimeoutCounter::setTimer(ScopedLockType& sl)
{
if (isDone())
return;
JLOG(journal_.debug()) << "Setting timer for " << timerInterval_.count()
<< "ms";
timer_.expires_after(timerInterval_);
timer_.async_wait(
[wptr = pmDowncast()](boost::system::error_code const& ec) {
@@ -43,12 +40,6 @@ TimeoutCounter::setTimer(ScopedLockType& sl)
if (auto ptr = wptr.lock())
{
JLOG(ptr->journal_.debug())
<< "timer: ec: " << ec << " (operation_aborted: "
<< boost::asio::error::operation_aborted << " - "
<< (ec == boost::asio::error::operation_aborted ? "aborted"
: "other")
<< ")";
ScopedLockType sl(ptr->mtx_);
ptr->queueJob(sl);
}

View File

@@ -5,7 +5,6 @@
#include <xrpld/core/Job.h>
#include <xrpl/beast/utility/Journal.h>
#include <xrpl/beast/utility/WrappedSink.h>
#include <boost/asio/basic_waitable_timer.hpp>
@@ -105,7 +104,6 @@ protected:
// Used in this class for access to boost::asio::io_context and
// ripple::Overlay. Used in subtypes for the kitchen sink.
Application& app_;
beast::WrappedSink sink_;
beast::Journal journal_;
mutable std::recursive_mutex mtx_;

View File

@@ -203,7 +203,7 @@ public:
/** Add a suppression peer and get message's relay status.
* Return pair:
* element 1: true if the key is added.
* element 1: true if the peer is added.
* element 2: optional is seated to the relay time point or
* is unseated if has not relayed yet. */
std::pair<bool, std::optional<Stopwatch::time_point>>

View File

@@ -34,10 +34,10 @@
#include <xrpld/rpc/MPTokenIssuanceID.h>
#include <xrpld/rpc/ServerHandler.h>
#include <xrpl/basics/CanProcess.h>
#include <xrpl/basics/UptimeClock.h>
#include <xrpl/basics/mulDiv.h>
#include <xrpl/basics/safe_cast.h>
#include <xrpl/basics/scope.h>
#include <xrpl/beast/utility/rngfill.h>
#include <xrpl/crypto/RFC1751.h>
#include <xrpl/crypto/csprng.h>
@@ -408,7 +408,7 @@ public:
isFull() override;
void
setMode(OperatingMode om, char const* reason) override;
setMode(OperatingMode om) override;
bool
isBlocked() override;
@@ -886,7 +886,7 @@ NetworkOPsImp::strOperatingMode(bool const admin /* = false */) const
inline void
NetworkOPsImp::setStandAlone()
{
setMode(OperatingMode::FULL, "setStandAlone");
setMode(OperatingMode::FULL);
}
inline void
@@ -1036,9 +1036,7 @@ NetworkOPsImp::processHeartbeatTimer()
{
if (mMode != OperatingMode::DISCONNECTED)
{
setMode(
OperatingMode::DISCONNECTED,
"Heartbeat: insufficient peers");
setMode(OperatingMode::DISCONNECTED);
std::stringstream ss;
ss << "Node count (" << numPeers << ") has fallen "
<< "below required minimum (" << minPeerCount_ << ").";
@@ -1063,7 +1061,7 @@ NetworkOPsImp::processHeartbeatTimer()
if (mMode == OperatingMode::DISCONNECTED)
{
setMode(OperatingMode::CONNECTED, "Heartbeat: sufficient peers");
setMode(OperatingMode::CONNECTED);
JLOG(m_journal.info())
<< "Node count (" << numPeers << ") is sufficient.";
CLOG(clog.ss()) << "setting mode to CONNECTED based on " << numPeers
@@ -1075,9 +1073,9 @@ NetworkOPsImp::processHeartbeatTimer()
auto origMode = mMode.load();
CLOG(clog.ss()) << "mode: " << strOperatingMode(origMode, true);
if (mMode == OperatingMode::SYNCING)
setMode(OperatingMode::SYNCING, "Heartbeat: check syncing");
setMode(OperatingMode::SYNCING);
else if (mMode == OperatingMode::CONNECTED)
setMode(OperatingMode::CONNECTED, "Heartbeat: check connected");
setMode(OperatingMode::CONNECTED);
auto newMode = mMode.load();
if (origMode != newMode)
{
@@ -1826,7 +1824,7 @@ void
NetworkOPsImp::setAmendmentBlocked()
{
amendmentBlocked_ = true;
setMode(OperatingMode::CONNECTED, "setAmendmentBlocked");
setMode(OperatingMode::CONNECTED);
}
inline bool
@@ -1857,7 +1855,7 @@ void
NetworkOPsImp::setUNLBlocked()
{
unlBlocked_ = true;
setMode(OperatingMode::CONNECTED, "setUNLBlocked");
setMode(OperatingMode::CONNECTED);
}
inline void
@@ -1958,7 +1956,7 @@ NetworkOPsImp::checkLastClosedLedger(
if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
{
setMode(OperatingMode::CONNECTED, "check LCL: not on consensus ledger");
setMode(OperatingMode::CONNECTED);
}
if (consensus)
@@ -2047,9 +2045,8 @@ NetworkOPsImp::beginConsensus(
// this shouldn't happen unless we jump ledgers
if (mMode == OperatingMode::FULL)
{
JLOG(m_journal.warn())
<< "beginConsensus Don't have LCL, going to tracking";
setMode(OperatingMode::TRACKING, "beginConsensus: No LCL");
JLOG(m_journal.warn()) << "Don't have LCL, going to tracking";
setMode(OperatingMode::TRACKING);
CLOG(clog) << "beginConsensus Don't have LCL, going to tracking. ";
}
@@ -2066,8 +2063,7 @@ NetworkOPsImp::beginConsensus(
"ripple::NetworkOPsImp::beginConsensus : closedLedger parent matches "
"hash");
if (prevLedger->rules().enabled(featureNegativeUNL))
app_.validators().setNegativeUNL(prevLedger->negativeUNL());
app_.validators().setNegativeUNL(prevLedger->negativeUNL());
TrustChanges const changes = app_.validators().updateTrusted(
app_.getValidations().getCurrentNodeIDs(),
closingInfo.parentCloseTime,
@@ -2185,7 +2181,7 @@ NetworkOPsImp::endConsensus(std::unique_ptr<std::stringstream> const& clog)
// validations we have for LCL. If the ledger is good enough, go to
// TRACKING - TODO
if (!needNetworkLedger_)
setMode(OperatingMode::TRACKING, "endConsensus: check tracking");
setMode(OperatingMode::TRACKING);
}
if (((mMode == OperatingMode::CONNECTED) ||
@@ -2199,7 +2195,7 @@ NetworkOPsImp::endConsensus(std::unique_ptr<std::stringstream> const& clog)
if (app_.timeKeeper().now() < (current->info().parentCloseTime +
2 * current->info().closeTimeResolution))
{
setMode(OperatingMode::FULL, "endConsensus: check full");
setMode(OperatingMode::FULL);
}
}
@@ -2211,7 +2207,7 @@ NetworkOPsImp::consensusViewChange()
{
if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
{
setMode(OperatingMode::CONNECTED, "consensusViewChange");
setMode(OperatingMode::CONNECTED);
}
}
@@ -2530,7 +2526,7 @@ NetworkOPsImp::pubPeerStatus(std::function<Json::Value(void)> const& func)
}
void
NetworkOPsImp::setMode(OperatingMode om, char const* reason)
NetworkOPsImp::setMode(OperatingMode om)
{
using namespace std::chrono_literals;
if (om == OperatingMode::CONNECTED)
@@ -2550,12 +2546,11 @@ NetworkOPsImp::setMode(OperatingMode om, char const* reason)
if (mMode == om)
return;
auto const sink = om < mMode ? m_journal.warn() : m_journal.info();
mMode = om;
accounting_.mode(om);
JLOG(sink) << "STATE->" << strOperatingMode() << " - " << reason;
JLOG(m_journal.info()) << "STATE->" << strOperatingMode();
pubServer();
}
@@ -2567,28 +2562,34 @@ NetworkOPsImp::recvValidation(
JLOG(m_journal.trace())
<< "recvValidation " << val->getLedgerHash() << " from " << source;
std::unique_lock lock(validationsMutex_);
BypassAccept bypassAccept = BypassAccept::no;
try
{
CanProcess const check(
validationsMutex_, pendingValidations_, val->getLedgerHash());
try
{
BypassAccept bypassAccept =
check ? BypassAccept::no : BypassAccept::yes;
handleNewValidation(app_, val, source, bypassAccept, m_journal);
}
catch (std::exception const& e)
{
JLOG(m_journal.warn())
<< "Exception thrown for handling new validation "
<< val->getLedgerHash() << ": " << e.what();
}
catch (...)
{
JLOG(m_journal.warn())
<< "Unknown exception thrown for handling new validation "
<< val->getLedgerHash();
}
if (pendingValidations_.contains(val->getLedgerHash()))
bypassAccept = BypassAccept::yes;
else
pendingValidations_.insert(val->getLedgerHash());
scope_unlock unlock(lock);
handleNewValidation(app_, val, source, bypassAccept, m_journal);
}
catch (std::exception const& e)
{
JLOG(m_journal.warn())
<< "Exception thrown for handling new validation "
<< val->getLedgerHash() << ": " << e.what();
}
catch (...)
{
JLOG(m_journal.warn())
<< "Unknown exception thrown for handling new validation "
<< val->getLedgerHash();
}
if (bypassAccept == BypassAccept::no)
{
pendingValidations_.erase(val->getLedgerHash());
}
lock.unlock();
pubValidation(val);

View File

@@ -191,7 +191,7 @@ public:
virtual bool
isFull() = 0;
virtual void
setMode(OperatingMode om, char const* reason) = 0;
setMode(OperatingMode om) = 0;
virtual bool
isBlocked() = 0;
virtual bool

View File

@@ -51,13 +51,6 @@ Transactor::invokePreflight<Change>(PreflightContext const& ctx)
return temBAD_SEQUENCE;
}
if (ctx.tx.getTxnType() == ttUNL_MODIFY &&
!ctx.rules.enabled(featureNegativeUNL))
{
JLOG(ctx.j.warn()) << "Change: NegativeUNL not enabled";
return temDISABLED;
}
return tesSUCCESS;
}

View File

@@ -95,7 +95,7 @@ getCountsJson(Application& app, int minObjectCount)
ret[jss::treenode_cache_size] =
app.getNodeFamily().getTreeNodeCache()->getCacheSize();
ret[jss::treenode_track_size] =
app.getNodeFamily().getTreeNodeCache()->getTrackSize();
static_cast<int>(app.getNodeFamily().getTreeNodeCache()->size());
std::string uptime;
auto s = UptimeClock::now();