Compare commits

...

12 Commits

df24ee0774  Version 2.6.2 (Vladislav Vysokikh, 2025-11-19 20:47:46 +00:00)
c6aa80b0ef  fixed macos build and removed ci flow publishing libxrpl (Vladislav Vysokikh, 2025-11-19 20:47:32 +00:00)
283bc3ea39  Remove directory size limit (#5935) (Bronek Kozicki, 2025-11-19 18:07:13 +00:00)
            This change introduces the `fixDirectoryLimit` amendment to remove the directory pages limit. We found that the directory size limit is easier to hit than originally assumed, and there is no good reason to keep this limit, since the object reserve provides the necessary incentive to avoid creating unnecessary objects on the ledger.
ebc2a9a625  fix: Skip processing transaction batch if the batch is empty (#5670) (Jingchen, 2025-11-18 09:17:48 +00:00)
            Avoids an assertion failure in NetworkOPsImp::apply in the unlikely event that all incoming transactions are invalid.
70d5c624e8  Set version to 2.6.1 (Ed Hennis, 2025-09-30 16:09:11 -04:00)
c46888f8f7  Set version to 2.6.1-rc2 (Bronek Kozicki, 2025-09-18 18:09:04 +01:00)
2ae65d2fdb  Mark PermissionDelegation as unsupported (Bronek Kozicki, 2025-09-18 18:04:12 +01:00)
8d01f35eb9  Set version to 2.6.1-rc1 (Bronek Kozicki, 2025-09-16 15:35:54 -04:00)
1020a32d76  Downgrade to boost 1.83 (Bronek Kozicki, 2025-09-16 15:35:47 -04:00)
2df7dcfdeb  Set version to 2.6.0 (Michael Legleux, 2025-08-27 10:25:53 -07:00)
c5fe970646  Set version to 2.6.0-rc3 (Ed Hennis, 2025-08-22 17:32:31 -04:00)
c57cd8b23e  Revert "perf: Move mutex to the partition level (#5486)" (Ed Hennis, 2025-08-22 17:30:08 -04:00)
            This reverts commit 94decc753b.
24 changed files with 638 additions and 260 deletions

View File

@@ -1,91 +0,0 @@
name: Check libXRPL compatibility with Clio
env:
  CONAN_REMOTE_URL: https://conan.ripplex.io
  CONAN_LOGIN_USERNAME_XRPLF: ${{ secrets.CONAN_REMOTE_USERNAME }}
  CONAN_PASSWORD_XRPLF: ${{ secrets.CONAN_REMOTE_PASSWORD }}
on:
  pull_request:
    paths:
      - "src/libxrpl/protocol/BuildInfo.cpp"
      - ".github/workflows/libxrpl.yml"
    types: [opened, reopened, synchronize, ready_for_review]
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
jobs:
  publish:
    if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
    name: Publish libXRPL
    outputs:
      outcome: ${{ steps.upload.outputs.outcome }}
      version: ${{ steps.version.outputs.version }}
      channel: ${{ steps.channel.outputs.channel }}
    runs-on: [self-hosted, heavy]
    container: ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e
    steps:
      - name: Wait for essential checks to succeed
        uses: lewagon/wait-on-check-action@v1.3.4
        with:
          ref: ${{ github.event.pull_request.head.sha || github.sha }}
          running-workflow-name: wait-for-check-regexp
          check-regexp: "(dependencies|test).*linux.*" # Ignore windows and mac tests but make sure linux passes
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          wait-interval: 10
      - name: Checkout
        uses: actions/checkout@v4
      - name: Generate channel
        id: channel
        shell: bash
        run: |
          echo channel="clio/pr_${{ github.event.pull_request.number }}" | tee ${GITHUB_OUTPUT}
      - name: Export new package
        shell: bash
        run: |
          conan export . ${{ steps.channel.outputs.channel }}
      - name: Add Conan remote
        shell: bash
        run: |
          echo "Adding Conan remote 'xrplf' at ${{ env.CONAN_REMOTE_URL }}."
          conan remote add xrplf ${{ env.CONAN_REMOTE_URL }} --insert 0 --force
          echo "Listing Conan remotes."
          conan remote list
      - name: Parse new version
        id: version
        shell: bash
        run: |
          echo version="$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" \
            | awk -F '"' '{print $2}')" | tee ${GITHUB_OUTPUT}
      - name: Try to authenticate to Conan remote
        id: remote
        shell: bash
        run: |
          # `conan user` implicitly uses the environment variables CONAN_LOGIN_USERNAME_<REMOTE> and CONAN_PASSWORD_<REMOTE>.
          # https://docs.conan.io/1/reference/commands/misc/user.html#using-environment-variables
          # https://docs.conan.io/1/reference/env_vars.html#conan-login-username-conan-login-username-remote-name
          # https://docs.conan.io/1/reference/env_vars.html#conan-password-conan-password-remote-name
          echo outcome=$(conan user --remote xrplf --password >&2 \
            && echo success || echo failure) | tee ${GITHUB_OUTPUT}
      - name: Upload new package
        id: upload
        if: (steps.remote.outputs.outcome == 'success')
        shell: bash
        run: |
          echo "conan upload version ${{ steps.version.outputs.version }} on channel ${{ steps.channel.outputs.channel }}"
          echo outcome=$(conan upload xrpl/${{ steps.version.outputs.version }}@${{ steps.channel.outputs.channel }} --remote ripple --confirm >&2 \
            && echo success || echo failure) | tee ${GITHUB_OUTPUT}
  notify_clio:
    name: Notify Clio
    runs-on: ubuntu-latest
    needs: publish
    env:
      GH_TOKEN: ${{ secrets.CLIO_NOTIFY_TOKEN }}
    steps:
      - name: Notify Clio about new version
        if: (needs.publish.outputs.outcome == 'success')
        shell: bash
        run: |
          gh api --method POST -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" \
            /repos/xrplf/clio/dispatches -f "event_type=check_libxrpl" \
            -F "client_payload[version]=${{ needs.publish.outputs.version }}@${{ needs.publish.outputs.channel }}" \
            -F "client_payload[pr]=${{ github.event.pull_request.number }}"

View File

@@ -50,30 +50,35 @@ jobs:
     steps:
       - name: checkout
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
-      - name: install Conan
+      - name: Delete old build tools installed using Homebrew
         run: |
-          brew install conan
-      - name: install Ninja
-        if: matrix.generator == 'Ninja'
-        run: brew install ninja
-      - name: install python
+          brew uninstall --force \
+            cmake \
+            conan
+      - name: Install build tools using Homebrew
         run: |
-          if which python > /dev/null 2>&1; then
-            echo "Python executable exists"
-          else
-            brew install python@3.13
-            ln -s /opt/homebrew/bin/python3 /opt/homebrew/bin/python
-          fi
-      - name: install cmake
+          brew install --quiet \
+            ca-certificates \
+            ninja \
+            python@3.14
+      - name: Remove old fmt using Homebrew
         run: |
-          if which cmake > /dev/null 2>&1; then
-            echo "cmake executable exists"
-          else
-            brew install cmake
-          fi
-      - name: install nproc
+          brew unlink fmt
+          brew cleanup
+          brew link fmt
+      - name: List software installed using Homebrew
+        run: brew list --version
+      - name: Install build tools using pip
+        shell: bash
         run: |
-          brew install coreutils
+          pip3 install --break-system-packages --upgrade pip
+          pip3 install --break-system-packages \
+            cmake==4.1.2 \
+            conan==2.22.1
       - name: check environment
         run: |
           env | sort
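The switch from unpinned Homebrew formulas to pip-pinned tools (cmake==4.1.2, conan==2.22.1) is evidently the "fixed macos build" part of commit c6aa80b0ef in the list above: pinning keeps the macOS runners from picking up whatever cmake or Conan version Homebrew ships next.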

View File

@@ -26,6 +26,9 @@ tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw']
{% if compiler == "apple-clang" and compiler_version >= 17 %} {% if compiler == "apple-clang" and compiler_version >= 17 %}
tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw'] tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw']
{% endif %} {% endif %}
{% if compiler == "clang" and compiler_version == 16 %}
tools.build:cxxflags=['-DBOOST_ASIO_DISABLE_CONCEPTS']
{% endif %}
{% if compiler == "gcc" and compiler_version < 13 %} {% if compiler == "gcc" and compiler_version < 13 %}
tools.build:cxxflags=['-Wno-restrict'] tools.build:cxxflags=['-Wno-restrict']
{% endif %} {% endif %}

View File

@@ -104,7 +104,7 @@ class Xrpl(ConanFile):
     def requirements(self):
         # Conan 2 requires transitive headers to be specified
         transitive_headers_opt = {'transitive_headers': True} if conan_version.split('.')[0] == '2' else {}
-        self.requires('boost/1.86.0', force=True, **transitive_headers_opt)
+        self.requires('boost/1.83.0', force=True, **transitive_headers_opt)
         self.requires('date/3.0.4', **transitive_headers_opt)
         self.requires('lz4/1.10.0', force=True)
         self.requires('protobuf/3.21.12', force=True)
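The only change here is pinning boost back to 1.83.0 (the "Downgrade to boost 1.83" commit). In Conan 2, `force=True` resolves version conflicts in favor of this requirement, so the pin wins even when a transitive dependency asks for a newer boost.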

View File

@@ -21,6 +21,7 @@
 #define RIPPLE_BASICS_SHAMAP_HASH_H_INCLUDED
 
 #include <xrpl/basics/base_uint.h>
+#include <xrpl/basics/partitioned_unordered_map.h>
 
 #include <ostream>

View File

@@ -90,6 +90,9 @@ public:
     int
     getCacheSize() const;
 
+    int
+    getTrackSize() const;
+
     float
     getHitRate();

@@ -167,6 +170,9 @@ public:
     bool
     retrieve(key_type const& key, T& data);
 
+    mutex_type&
+    peekMutex();
+
     std::vector<key_type>
     getKeys() const;

@@ -187,14 +193,11 @@ public:
 private:
     SharedPointerType
-    initialFetch(key_type const& key);
+    initialFetch(key_type const& key, std::lock_guard<mutex_type> const& l);
 
     void
     collect_metrics();
 
-    Mutex&
-    lockPartition(key_type const& key) const;
-
 private:
     struct Stats
     {

@@ -297,8 +300,8 @@ private:
         [[maybe_unused]] clock_type::time_point const& now,
         typename KeyValueCacheType::map_type& partition,
         SweptPointersVector& stuffToSweep,
-        std::atomic<int>& allRemoval,
-        Mutex& partitionLock);
+        std::atomic<int>& allRemovals,
+        std::lock_guard<std::recursive_mutex> const&);
 
     [[nodiscard]] std::thread
     sweepHelper(

@@ -307,12 +310,14 @@ private:
         typename KeyOnlyCacheType::map_type& partition,
         SweptPointersVector&,
         std::atomic<int>& allRemovals,
-        Mutex& partitionLock);
+        std::lock_guard<std::recursive_mutex> const&);
 
     beast::Journal m_journal;
     clock_type& m_clock;
     Stats m_stats;
 
+    mutex_type mutable m_mutex;
+
     // Used for logging
     std::string m_name;

@@ -323,11 +328,10 @@ private:
     clock_type::duration const m_target_age;
 
     // Number of items cached
-    std::atomic<int> m_cache_count;
+    int m_cache_count;
     cache_type m_cache;  // Hold strong reference to recent objects
-    std::atomic<std::uint64_t> m_hits;
-    std::atomic<std::uint64_t> m_misses;
-
-    mutable std::vector<mutex_type> partitionLocks_;
+    std::uint64_t m_hits;
+    std::uint64_t m_misses;
 };
 
 } // namespace ripple
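The net effect of the revert is visible in this header: one cache-wide recursive mutex (m_mutex) replaces the vector of per-partition locks, and peekMutex() exposes it so callers can stretch the critical section over their own data. A minimal sketch of that pattern, with hypothetical names (Cache stands for a fully instantiated TaggedCache, SideIndex and insertTracked are illustrative; LedgerHistory further down is the real user):

    #include <mutex>

    // Hypothetical caller keeping its own index next to the cache. Taking
    // the cache's recursive mutex via peekMutex() makes the update of both
    // structures atomic, and cache methods may still re-lock internally.
    template <class Cache, class SideIndex, class Index>
    void
    insertTracked(
        Cache& cache,
        SideIndex& byIndex,
        Index index,
        typename Cache::key_type const& key)
    {
        std::unique_lock sl(cache.peekMutex());
        byIndex[index] = key;        // caller-owned bookkeeping
        cache.touch_if_exists(key);  // safe: mutex_type is recursive
    }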

View File

@@ -22,7 +22,6 @@
 #include <xrpl/basics/IntrusivePointer.ipp>
 #include <xrpl/basics/TaggedCache.h>
-#include <xrpl/beast/core/CurrentThreadName.h>
 
 namespace ripple {

@@ -61,7 +60,6 @@ inline TaggedCache<
     , m_hits(0)
     , m_misses(0)
 {
-    partitionLocks_ = std::vector<mutex_type>(m_cache.partitions());
 }

@@ -107,13 +105,8 @@ TaggedCache<
     KeyEqual,
     Mutex>::size() const
 {
-    std::size_t totalSize = 0;
-    for (size_t i = 0; i < partitionLocks_.size(); ++i)
-    {
-        std::lock_guard<Mutex> lock(partitionLocks_[i]);
-        totalSize += m_cache.map()[i].size();
-    }
-    return totalSize;
+    std::lock_guard lock(m_mutex);
+    return m_cache.size();
 }

@@ -136,7 +129,32 @@ TaggedCache<
     KeyEqual,
     Mutex>::getCacheSize() const
 {
-    return m_cache_count.load(std::memory_order_relaxed);
+    std::lock_guard lock(m_mutex);
+    return m_cache_count;
+}
+
+template <
+    class Key,
+    class T,
+    bool IsKeyCache,
+    class SharedWeakUnionPointer,
+    class SharedPointerType,
+    class Hash,
+    class KeyEqual,
+    class Mutex>
+inline int
+TaggedCache<
+    Key,
+    T,
+    IsKeyCache,
+    SharedWeakUnionPointer,
+    SharedPointerType,
+    Hash,
+    KeyEqual,
+    Mutex>::getTrackSize() const
+{
+    std::lock_guard lock(m_mutex);
+    return m_cache.size();
 }

@@ -159,10 +177,9 @@ TaggedCache<
     KeyEqual,
     Mutex>::getHitRate()
 {
-    auto const hits = m_hits.load(std::memory_order_relaxed);
-    auto const misses = m_misses.load(std::memory_order_relaxed);
-    float const total = float(hits + misses);
-    return hits * (100.0f / std::max(1.0f, total));
+    std::lock_guard lock(m_mutex);
+    auto const total = static_cast<float>(m_hits + m_misses);
+    return m_hits * (100.0f / std::max(1.0f, total));
 }

@@ -185,12 +202,9 @@ TaggedCache<
     KeyEqual,
     Mutex>::clear()
 {
-    for (auto& mutex : partitionLocks_)
-        mutex.lock();
+    std::lock_guard lock(m_mutex);
     m_cache.clear();
-    for (auto& mutex : partitionLocks_)
-        mutex.unlock();
-    m_cache_count.store(0, std::memory_order_relaxed);
+    m_cache_count = 0;
 }

@@ -213,9 +227,11 @@ TaggedCache<
     KeyEqual,
     Mutex>::reset()
 {
-    clear();
-    m_hits.store(0, std::memory_order_relaxed);
-    m_misses.store(0, std::memory_order_relaxed);
+    std::lock_guard lock(m_mutex);
+    m_cache.clear();
+    m_cache_count = 0;
+    m_hits = 0;
+    m_misses = 0;
 }

@@ -239,7 +255,7 @@ TaggedCache<
     KeyEqual,
     Mutex>::touch_if_exists(KeyComparable const& key)
 {
-    std::lock_guard<Mutex> lock(lockPartition(key));
+    std::lock_guard lock(m_mutex);
     auto const iter(m_cache.find(key));
     if (iter == m_cache.end())
     {

@@ -281,6 +297,8 @@ TaggedCache<
     auto const start = std::chrono::steady_clock::now();
 
     {
+        std::lock_guard lock(m_mutex);
+
         if (m_target_size == 0 ||
             (static_cast<int>(m_cache.size()) <= m_target_size))
         {

@@ -312,13 +330,12 @@ TaggedCache<
                 m_cache.map()[p],
                 allStuffToSweep[p],
                 allRemovals,
-                partitionLocks_[p]));
+                lock));
         }
         for (std::thread& worker : workers)
             worker.join();
 
-        int removals = allRemovals.load(std::memory_order_relaxed);
-        m_cache_count.fetch_sub(removals, std::memory_order_relaxed);
+        m_cache_count -= allRemovals;
     }
     // At this point allStuffToSweep will go out of scope outside the lock
     // and decrement the reference count on each strong pointer.

@@ -352,8 +369,7 @@ TaggedCache<
 {
     // Remove from cache, if !valid, remove from map too. Returns true if
     // removed from cache
-    std::lock_guard<Mutex> lock(lockPartition(key));
+    std::lock_guard lock(m_mutex);
 
     auto cit = m_cache.find(key);

@@ -366,7 +382,7 @@ TaggedCache<
     if (entry.isCached())
     {
-        m_cache_count.fetch_sub(1, std::memory_order_relaxed);
+        --m_cache_count;
         entry.ptr.convertToWeak();
         ret = true;
     }

@@ -404,16 +420,17 @@ TaggedCache<
 {
     // Return canonical value, store if needed, refresh in cache
     // Return values: true=we had the data already
-    std::lock_guard<Mutex> lock(lockPartition(key));
+    std::lock_guard lock(m_mutex);
 
     auto cit = m_cache.find(key);
 
     if (cit == m_cache.end())
     {
         m_cache.emplace(
             std::piecewise_construct,
             std::forward_as_tuple(key),
             std::forward_as_tuple(m_clock.now(), data));
-        m_cache_count.fetch_add(1, std::memory_order_relaxed);
+        ++m_cache_count;
         return false;
     }

@@ -462,12 +479,12 @@ TaggedCache<
             data = cachedData;
         }
 
-        m_cache_count.fetch_add(1, std::memory_order_relaxed);
+        ++m_cache_count;
         return true;
     }
 
     entry.ptr = data;
-    m_cache_count.fetch_add(1, std::memory_order_relaxed);
+    ++m_cache_count;
 
     return false;
 }

@@ -543,11 +560,10 @@ TaggedCache<
     KeyEqual,
     Mutex>::fetch(key_type const& key)
 {
-    std::lock_guard<Mutex> lock(lockPartition(key));
-
-    auto ret = initialFetch(key);
+    std::lock_guard<mutex_type> l(m_mutex);
+    auto ret = initialFetch(key, l);
     if (!ret)
-        m_misses.fetch_add(1, std::memory_order_relaxed);
+        ++m_misses;
 
     return ret;
 }

@@ -611,8 +627,8 @@ TaggedCache<
     Mutex>::insert(key_type const& key)
     -> std::enable_if_t<IsKeyCache, ReturnType>
 {
+    std::lock_guard lock(m_mutex);
     clock_type::time_point const now(m_clock.now());
-    std::lock_guard<Mutex> lock(lockPartition(key));
     auto [it, inserted] = m_cache.emplace(
         std::piecewise_construct,
         std::forward_as_tuple(key),

@@ -652,6 +668,29 @@ TaggedCache<
     return true;
 }
 
+template <
+    class Key,
+    class T,
+    bool IsKeyCache,
+    class SharedWeakUnionPointer,
+    class SharedPointerType,
+    class Hash,
+    class KeyEqual,
+    class Mutex>
+inline auto
+TaggedCache<
+    Key,
+    T,
+    IsKeyCache,
+    SharedWeakUnionPointer,
+    SharedPointerType,
+    Hash,
+    KeyEqual,
+    Mutex>::peekMutex() -> mutex_type&
+{
+    return m_mutex;
+}
+
 template <
     class Key,
     class T,

@@ -675,13 +714,10 @@ TaggedCache<
     std::vector<key_type> v;
 
     {
+        std::lock_guard lock(m_mutex);
         v.reserve(m_cache.size());
-        for (std::size_t i = 0; i < partitionLocks_.size(); ++i)
-        {
-            std::lock_guard<Mutex> lock(partitionLocks_[i]);
-            for (auto const& entry : m_cache.map()[i])
-                v.push_back(entry.first);
-        }
+        for (auto const& _ : m_cache)
+            v.push_back(_.first);
     }
 
     return v;

@@ -707,12 +743,11 @@ TaggedCache<
     KeyEqual,
     Mutex>::rate() const
 {
-    auto const hits = m_hits.load(std::memory_order_relaxed);
-    auto const misses = m_misses.load(std::memory_order_relaxed);
-    auto const tot = hits + misses;
+    std::lock_guard lock(m_mutex);
+    auto const tot = m_hits + m_misses;
     if (tot == 0)
-        return 0.0;
-    return double(hits) / tot;
+        return 0;
+    return double(m_hits) / tot;
 }

@@ -736,16 +771,18 @@ TaggedCache<
     KeyEqual,
     Mutex>::fetch(key_type const& digest, Handler const& h)
 {
-    std::lock_guard<Mutex> lock(lockPartition(digest));
-
-    if (auto ret = initialFetch(digest))
-        return ret;
+    {
+        std::lock_guard l(m_mutex);
+        if (auto ret = initialFetch(digest, l))
+            return ret;
+    }
 
     auto sle = h();
     if (!sle)
         return {};
 
-    m_misses.fetch_add(1, std::memory_order_relaxed);
+    std::lock_guard l(m_mutex);
+    ++m_misses;
     auto const [it, inserted] =
         m_cache.emplace(digest, Entry(m_clock.now(), std::move(sle)));
     if (!inserted)

@@ -772,10 +809,9 @@ TaggedCache<
     SharedPointerType,
     Hash,
     KeyEqual,
-    Mutex>::initialFetch(key_type const& key)
+    Mutex>::
+    initialFetch(key_type const& key, std::lock_guard<mutex_type> const& l)
 {
-    std::lock_guard<Mutex> lock(lockPartition(key));
-
     auto cit = m_cache.find(key);
     if (cit == m_cache.end())
         return {};

@@ -783,7 +819,7 @@ TaggedCache<
     Entry& entry = cit->second;
     if (entry.isCached())
     {
-        m_hits.fetch_add(1, std::memory_order_relaxed);
+        ++m_hits;
         entry.touch(m_clock.now());
         return entry.ptr.getStrong();
     }

@@ -791,13 +827,12 @@ TaggedCache<
     if (entry.isCached())
     {
         // independent of cache size, so not counted as a hit
-        m_cache_count.fetch_add(1, std::memory_order_relaxed);
+        ++m_cache_count;
         entry.touch(m_clock.now());
         return entry.ptr.getStrong();
     }
 
     m_cache.erase(cit);
-
     return {};
 }

@@ -826,11 +861,10 @@ TaggedCache<
 {
     beast::insight::Gauge::value_type hit_rate(0);
     {
-        auto const hits = m_hits.load(std::memory_order_relaxed);
-        auto const misses = m_misses.load(std::memory_order_relaxed);
-        auto const total = hits + misses;
+        std::lock_guard lock(m_mutex);
+        auto const total(m_hits + m_misses);
         if (total != 0)
-            hit_rate = (hits * 100) / total;
+            hit_rate = (m_hits * 100) / total;
     }
     m_stats.hit_rate.set(hit_rate);
 }

@@ -861,16 +895,12 @@ TaggedCache<
     typename KeyValueCacheType::map_type& partition,
     SweptPointersVector& stuffToSweep,
     std::atomic<int>& allRemovals,
-    Mutex& partitionLock)
+    std::lock_guard<std::recursive_mutex> const&)
 {
     return std::thread([&, this]() {
-        beast::setCurrentThreadName("sweep-KVCache");
-
         int cacheRemovals = 0;
         int mapRemovals = 0;
 
-        std::lock_guard<Mutex> lock(partitionLock);
-
         // Keep references to all the stuff we sweep
         // so that we can destroy them outside the lock.
         stuffToSweep.reserve(partition.size());

@@ -954,16 +984,12 @@ TaggedCache<
     typename KeyOnlyCacheType::map_type& partition,
     SweptPointersVector&,
     std::atomic<int>& allRemovals,
-    Mutex& partitionLock)
+    std::lock_guard<std::recursive_mutex> const&)
 {
     return std::thread([&, this]() {
-        beast::setCurrentThreadName("sweep-KCache");
-
         int cacheRemovals = 0;
         int mapRemovals = 0;
 
-        std::lock_guard<Mutex> lock(partitionLock);
-
         // Keep references to all the stuff we sweep
         // so that we can destroy them outside the lock.
         {

@@ -998,29 +1024,6 @@ TaggedCache<
     });
 }
 
-template <
-    class Key,
-    class T,
-    bool IsKeyCache,
-    class SharedWeakUnionPointer,
-    class SharedPointerType,
-    class Hash,
-    class KeyEqual,
-    class Mutex>
-inline Mutex&
-TaggedCache<
-    Key,
-    T,
-    IsKeyCache,
-    SharedWeakUnionPointer,
-    SharedPointerType,
-    Hash,
-    KeyEqual,
-    Mutex>::lockPartition(key_type const& key) const
-{
-    return partitionLocks_[m_cache.partition_index(key)];
-}
-
 } // namespace ripple
 
 #endif
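A side note on the restored initialFetch and sweepHelper signatures: the std::lock_guard const& parameter is never read. It is a compile-time witness that the caller already holds m_mutex, so these helpers never lock it themselves. A self-contained sketch of the idiom (Counter and unsafeGet are illustrative names, not from the diff):

    #include <mutex>

    class Counter
    {
        std::recursive_mutex mutable mtx_;
        int value_ = 0;

        // Callable only with proof that mtx_ is held: the unused reference
        // cannot be produced without constructing a lock_guard somewhere.
        int
        unsafeGet(std::lock_guard<std::recursive_mutex> const&) const
        {
            return value_;
        }

    public:
        int
        get() const
        {
            std::lock_guard<std::recursive_mutex> lock(mtx_);
            return unsafeGet(lock);  // passes the lock as a witness
        }
    };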

View File

@@ -277,12 +277,6 @@ public:
         return map_;
     }
 
-    partition_map_type const&
-    map() const
-    {
-        return map_;
-    }
-
     iterator
     begin()
     {

@@ -327,12 +321,6 @@ public:
         return cend();
     }
 
-    std::size_t
-    partition_index(key_type const& key) const
-    {
-        return partitioner(key);
-    }
-
 private:
     template <class T>
     void

View File

@@ -22,6 +22,7 @@
 #include <xrpl/basics/ByteUtilities.h>
 #include <xrpl/basics/base_uint.h>
+#include <xrpl/basics/partitioned_unordered_map.h>
 
 #include <cstdint>

@@ -55,7 +56,10 @@ std::size_t constexpr oversizeMetaDataCap = 5200;
 /** The maximum number of entries per directory page */
 std::size_t constexpr dirNodeMaxEntries = 32;
 
-/** The maximum number of pages allowed in a directory */
+/** The maximum number of pages allowed in a directory
+
+    Made obsolete by fixDirectoryLimit amendment.
+*/
 std::uint64_t constexpr dirNodeMaxPages = 262144;
 
 /** The maximum number of items in an NFT page */
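For scale: the old cap of 262,144 (2^18) pages at dirNodeMaxEntries = 32 entries each bounded a directory at 262,144 × 32 = 8,388,608 objects; the fixDirectoryLimit amendment referenced in the new comment removes that bound.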

View File

@@ -29,9 +29,8 @@
 // Add new amendments to the top of this list.
 // Keep it sorted in reverse chronological order.
-// If you add an amendment here, then do not forget to increment `numFeatures`
-// in include/xrpl/protocol/Feature.h.
 
+XRPL_FIX (DirectoryLimit, Supported::yes, VoteBehavior::DefaultNo)
 XRPL_FIX (PriceOracleOrder, Supported::no, VoteBehavior::DefaultNo)
 XRPL_FIX (MPTDeliveredAmount, Supported::no, VoteBehavior::DefaultNo)
 XRPL_FIX (AMMClawbackRounding, Supported::no, VoteBehavior::DefaultNo)

@@ -41,7 +40,7 @@ XRPL_FIX (AMMv1_3, Supported::yes, VoteBehavior::DefaultNo)
 XRPL_FEATURE(PermissionedDEX, Supported::yes, VoteBehavior::DefaultNo)
 XRPL_FEATURE(Batch, Supported::yes, VoteBehavior::DefaultNo)
 XRPL_FEATURE(SingleAssetVault, Supported::no, VoteBehavior::DefaultNo)
-XRPL_FEATURE(PermissionDelegation, Supported::yes, VoteBehavior::DefaultNo)
+XRPL_FEATURE(PermissionDelegation, Supported::no, VoteBehavior::DefaultNo)
 XRPL_FIX (PayChanCancelAfter, Supported::yes, VoteBehavior::DefaultNo)
 // Check flags in Credential transactions
 XRPL_FIX (InvalidTxFlags, Supported::yes, VoteBehavior::DefaultNo)

View File

@@ -36,7 +36,7 @@ namespace BuildInfo {
 //  and follow the format described at http://semver.org/
 //------------------------------------------------------------------------------
 // clang-format off
-char const* const versionString = "2.6.0-rc2"
+char const* const versionString = "2.6.2"
 // clang-format on
 
 #if defined(DEBUG) || defined(SANITIZER)

View File

@@ -568,6 +568,39 @@ struct Credentials_test : public beast::unit_test::suite
                 jle[jss::result][jss::node]["CredentialType"] ==
                 strHex(std::string_view(credType)));
         }
+
+        {
+            testcase("Credentials fail, directory full");
+
+            std::uint32_t const issuerSeq{env.seq(issuer) + 1};
+            env(ticket::create(issuer, 63));
+            env.close();
+
+            // Everything below can only be tested on open ledger.
+            auto const res1 = directory::bumpLastPage(
+                env,
+                directory::maximumPageIndex(env),
+                keylet::ownerDir(issuer.id()),
+                directory::adjustOwnerNode);
+            BEAST_EXPECT(res1);
+
+            auto const jv = credentials::create(issuer, subject, credType);
+            env(jv, ter(tecDIR_FULL));
+
+            // Free one directory entry by using a ticket
+            env(noop(issuer), ticket::use(issuerSeq + 40));
+
+            // Fill subject directory
+            env(ticket::create(subject, 63));
+            auto const res2 = directory::bumpLastPage(
+                env,
+                directory::maximumPageIndex(env),
+                keylet::ownerDir(subject.id()),
+                directory::adjustOwnerNode);
+            BEAST_EXPECT(res2);
+            env(jv, ter(tecDIR_FULL));
+
+            // End test
+            env.close();
+        }
     }
 
     {

@@ -1094,6 +1127,7 @@ struct Credentials_test : public beast::unit_test::suite
         testSuccessful(all);
         testCredentialsDelete(all);
         testCreateFailed(all);
+        testCreateFailed(all - fixDirectoryLimit);
         testAcceptFailed(all);
         testDeleteFailed(all);
         testFeatureFailed(all - featureCredentials);
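The extra `testCreateFailed(all - fixDirectoryLimit)` run repeats the creation-failure suite with the amendment disabled, so the pre-amendment tecDIR_FULL path exercised by the new "directory full" block above stays covered once the amendment defaults on.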

View File

@@ -0,0 +1,80 @@
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2020 Dev Null Productions

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <test/jtx.h>
#include <test/jtx/CaptureLogs.h>
#include <test/jtx/Env.h>

#include <xrpld/app/misc/HashRouter.h>

namespace ripple {
namespace test {

class NetworkOPs_test : public beast::unit_test::suite
{
public:
    void
    run() override
    {
        testAllBadHeldTransactions();
    }

    void
    testAllBadHeldTransactions()
    {
        // All transactions are already marked as SF_BAD, and we should be able
        // to handle the case properly without an assertion failure
        testcase("No valid transactions in batch");

        std::string logs;
        {
            using namespace jtx;
            auto const alice = Account{"alice"};
            Env env{
                *this,
                envconfig(),
                std::make_unique<CaptureLogs>(&logs),
                beast::severities::kAll};
            env.memoize(env.master);
            env.memoize(alice);
            auto const jtx = env.jt(ticket::create(alice, 1), seq(1), fee(10));
            auto transacionId = jtx.stx->getTransactionID();
            env.app().getHashRouter().setFlags(
                transacionId, HashRouterFlags::HELD);
            env(jtx, json(jss::Sequence, 1), ter(terNO_ACCOUNT));
            env.app().getHashRouter().setFlags(
                transacionId, HashRouterFlags::BAD);
            env.close();
        }
        BEAST_EXPECT(
            logs.find("No transaction to process!") != std::string::npos);
    }
};

BEAST_DEFINE_TESTSUITE(NetworkOPs, app, ripple);

} // namespace test
} // namespace ripple

View File

@@ -58,10 +58,10 @@ public:
         // Insert an item, retrieve it, and age it so it gets purged.
         {
             BEAST_EXPECT(c.getCacheSize() == 0);
-            BEAST_EXPECT(c.size() == 0);
+            BEAST_EXPECT(c.getTrackSize() == 0);
             BEAST_EXPECT(!c.insert(1, "one"));
             BEAST_EXPECT(c.getCacheSize() == 1);
-            BEAST_EXPECT(c.size() == 1);
+            BEAST_EXPECT(c.getTrackSize() == 1);
 
             {
                 std::string s;

@@ -72,7 +72,7 @@ public:
             ++clock;
             c.sweep();
             BEAST_EXPECT(c.getCacheSize() == 0);
-            BEAST_EXPECT(c.size() == 0);
+            BEAST_EXPECT(c.getTrackSize() == 0);
         }
 
         // Insert an item, maintain a strong pointer, age it, and

@@ -80,7 +80,7 @@ public:
         {
             BEAST_EXPECT(!c.insert(2, "two"));
             BEAST_EXPECT(c.getCacheSize() == 1);
-            BEAST_EXPECT(c.size() == 1);
+            BEAST_EXPECT(c.getTrackSize() == 1);
 
             {
                 auto p = c.fetch(2);

@@ -88,14 +88,14 @@ public:
             ++clock;
             c.sweep();
             BEAST_EXPECT(c.getCacheSize() == 0);
-            BEAST_EXPECT(c.size() == 1);
+            BEAST_EXPECT(c.getTrackSize() == 1);
         }
 
         // Make sure its gone now that our reference is gone
         ++clock;
         c.sweep();
         BEAST_EXPECT(c.getCacheSize() == 0);
-        BEAST_EXPECT(c.size() == 0);
+        BEAST_EXPECT(c.getTrackSize() == 0);
     }
 
     // Insert the same key/value pair and make sure we get the same result

@@ -111,7 +111,7 @@ public:
         ++clock;
         c.sweep();
         BEAST_EXPECT(c.getCacheSize() == 0);
-        BEAST_EXPECT(c.size() == 0);
+        BEAST_EXPECT(c.getTrackSize() == 0);
     }
 
     // Put an object in but keep a strong pointer to it, advance the clock a

@@ -121,24 +121,24 @@ public:
         // Put an object in
         BEAST_EXPECT(!c.insert(4, "four"));
         BEAST_EXPECT(c.getCacheSize() == 1);
-        BEAST_EXPECT(c.size() == 1);
+        BEAST_EXPECT(c.getTrackSize() == 1);
 
         {
             // Keep a strong pointer to it
             auto const p1 = c.fetch(4);
             BEAST_EXPECT(p1 != nullptr);
             BEAST_EXPECT(c.getCacheSize() == 1);
-            BEAST_EXPECT(c.size() == 1);
+            BEAST_EXPECT(c.getTrackSize() == 1);
 
             // Advance the clock a lot
             ++clock;
             c.sweep();
             BEAST_EXPECT(c.getCacheSize() == 0);
-            BEAST_EXPECT(c.size() == 1);
+            BEAST_EXPECT(c.getTrackSize() == 1);
 
             // Canonicalize a new object with the same key
             auto p2 = std::make_shared<std::string>("four");
             BEAST_EXPECT(c.canonicalize_replace_client(4, p2));
             BEAST_EXPECT(c.getCacheSize() == 1);
-            BEAST_EXPECT(c.size() == 1);
+            BEAST_EXPECT(c.getTrackSize() == 1);
 
             // Make sure we get the original object
             BEAST_EXPECT(p1.get() == p2.get());
         }

@@ -146,7 +146,7 @@ public:
         ++clock;
         c.sweep();
         BEAST_EXPECT(c.getCacheSize() == 0);
-        BEAST_EXPECT(c.size() == 0);
+        BEAST_EXPECT(c.getTrackSize() == 0);
     }
 };

View File

@@ -39,6 +39,7 @@
 #include <test/jtx/delivermin.h>
 #include <test/jtx/deposit.h>
 #include <test/jtx/did.h>
+#include <test/jtx/directory.h>
 #include <test/jtx/domain.h>
 #include <test/jtx/escrow.h>
 #include <test/jtx/fee.h>

src/test/jtx/directory.h (new file, 81 lines)
View File

@@ -0,0 +1,81 @@
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2025 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#ifndef RIPPLE_TEST_JTX_DIRECTORY_H_INCLUDED
#define RIPPLE_TEST_JTX_DIRECTORY_H_INCLUDED

#include <test/jtx/Env.h>

#include <xrpl/basics/Expected.h>
#include <xrpl/protocol/Feature.h>
#include <xrpl/protocol/Indexes.h>

#include <cstdint>
#include <limits>

namespace ripple::test::jtx {

/** Directory operations. */
namespace directory {

enum Error {
    DirectoryRootNotFound,
    DirectoryTooSmall,
    DirectoryPageDuplicate,
    DirectoryPageNotFound,
    InvalidLastPage,
    AdjustmentError
};

/// Move the position of the last page in the user's directory on open ledger
/// to newLastPage. Requirements:
/// - directory must have at least two pages (root and one more)
/// - adjust should be used to update owner nodes of the objects affected
/// - newLastPage must be greater than the index of the last page in the
///   directory
///
/// Use this to test tecDIR_FULL errors in open ledger.
/// NOTE: effects will be DISCARDED on env.close()
auto
bumpLastPage(
    Env& env,
    std::uint64_t newLastPage,
    Keylet directory,
    std::function<bool(ApplyView&, uint256, std::uint64_t)> adjust)
    -> Expected<void, Error>;

/// Implementation of adjust for the most common ledger entry, i.e. one where
/// page index is stored in sfOwnerNode (and only there). Pass this function
/// to bumpLastPage if the last page of directory has only objects
/// of this kind (e.g. ticket, DID, offer, deposit preauth, MPToken etc.)
bool
adjustOwnerNode(ApplyView& view, uint256 key, std::uint64_t page);

inline auto
maximumPageIndex(Env const& env) -> std::uint64_t
{
    if (env.enabled(fixDirectoryLimit))
        return std::numeric_limits<std::uint64_t>::max();
    return dirNodeMaxPages - 1;
}

} // namespace directory
} // namespace ripple::test::jtx

#endif
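A hypothetical usage sketch of this helper, condensed from the Credentials_test hunk earlier (fillAndOverflow is an illustrative name; the tecDIR_FULL expectation on one more ticket is an assumption, the actual test provokes it with a credential):

    using namespace ripple::test::jtx;

    void
    fillAndOverflow(Env& env, Account const& acct)
    {
        // Populate the owner directory past its root page.
        env(ticket::create(acct, 63));
        env.close();

        // Move the last page to the highest index the active rules allow;
        // effects live on the open ledger only and vanish on env.close().
        auto const res = directory::bumpLastPage(
            env,
            directory::maximumPageIndex(env),
            keylet::ownerDir(acct.id()),
            directory::adjustOwnerNode);
        if (!res)
            return;  // the Error enum in res.error() says what went wrong

        // Any transaction that must extend the directory now fails.
        env(ticket::create(acct, 1), ter(tecDIR_FULL));
    }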

View File

@@ -0,0 +1,145 @@
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2025 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <test/jtx/directory.h>

#include <xrpld/ledger/Sandbox.h>

namespace ripple::test::jtx {

/** Directory operations. */
namespace directory {

auto
bumpLastPage(
    Env& env,
    std::uint64_t newLastPage,
    Keylet directory,
    std::function<bool(ApplyView&, uint256, std::uint64_t)> adjust)
    -> Expected<void, Error>
{
    Expected<void, Error> res{};
    env.app().openLedger().modify(
        [&](OpenView& view, beast::Journal j) -> bool {
            Sandbox sb(&view, tapNONE);

            // Find the root page
            auto sleRoot = sb.peek(directory);
            if (!sleRoot)
            {
                res = Unexpected<Error>(DirectoryRootNotFound);
                return false;
            }

            // Find last page
            auto const lastIndex = sleRoot->getFieldU64(sfIndexPrevious);
            if (lastIndex == 0)
            {
                res = Unexpected<Error>(DirectoryTooSmall);
                return false;
            }

            if (sb.exists(keylet::page(directory, newLastPage)))
            {
                res = Unexpected<Error>(DirectoryPageDuplicate);
                return false;
            }

            if (lastIndex >= newLastPage)
            {
                res = Unexpected<Error>(InvalidLastPage);
                return false;
            }

            auto slePage = sb.peek(keylet::page(directory, lastIndex));
            if (!slePage)
            {
                res = Unexpected<Error>(DirectoryPageNotFound);
                return false;
            }

            // Copy its data and delete the page
            auto indexes = slePage->getFieldV256(sfIndexes);
            auto prevIndex = slePage->at(~sfIndexPrevious);
            auto owner = slePage->at(~sfOwner);
            sb.erase(slePage);

            // Create new page to replace slePage
            auto sleNew =
                std::make_shared<SLE>(keylet::page(directory, newLastPage));
            sleNew->setFieldH256(sfRootIndex, directory.key);
            sleNew->setFieldV256(sfIndexes, indexes);
            if (owner)
                sleNew->setAccountID(sfOwner, *owner);
            if (prevIndex)
                sleNew->setFieldU64(sfIndexPrevious, *prevIndex);
            sb.insert(sleNew);

            // Adjust root previous and previous node's next
            sleRoot->setFieldU64(sfIndexPrevious, newLastPage);
            if (prevIndex.value_or(0) == 0)
                sleRoot->setFieldU64(sfIndexNext, newLastPage);
            else
            {
                auto slePrev = sb.peek(keylet::page(directory, *prevIndex));
                if (!slePrev)
                {
                    res = Unexpected<Error>(DirectoryPageNotFound);
                    return false;
                }
                slePrev->setFieldU64(sfIndexNext, newLastPage);
                sb.update(slePrev);
            }
            sb.update(sleRoot);

            // Fixup page numbers in the objects referred by indexes
            if (adjust)
                for (auto const key : indexes)
                {
                    if (!adjust(sb, key, newLastPage))
                    {
                        res = Unexpected<Error>(AdjustmentError);
                        return false;
                    }
                }

            sb.apply(view);
            return true;
        });

    return res;
}

bool
adjustOwnerNode(ApplyView& view, uint256 key, std::uint64_t page)
{
    auto sle = view.peek({ltANY, key});
    if (sle && sle->isFieldPresent(sfOwnerNode))
    {
        sle->setFieldU64(sfOwnerNode, page);
        view.update(sle);
        return true;
    }
    return false;
}

} // namespace directory
} // namespace ripple::test::jtx

View File

@@ -23,9 +23,11 @@
 #include <xrpl/basics/random.h>
 #include <xrpl/protocol/Feature.h>
 #include <xrpl/protocol/Protocol.h>
+#include <xrpl/protocol/TER.h>
 #include <xrpl/protocol/jss.h>
 
 #include <algorithm>
+#include <limits>
 
 namespace ripple {
 namespace test {

@@ -490,6 +492,91 @@ struct Directory_test : public beast::unit_test::suite
         }
     }
 
+    void
+    testDirectoryFull()
+    {
+        using namespace test::jtx;
+        Account alice("alice");
+
+        auto const testCase = [&, this](FeatureBitset features, auto setup) {
+            using namespace test::jtx;
+            Env env(*this, features);
+            env.fund(XRP(20000), alice);
+            env.close();
+
+            auto const [lastPage, full] = setup(env);
+
+            // Populate root page and last page
+            for (int i = 0; i < 63; ++i)
+                env(credentials::create(alice, alice, std::to_string(i)));
+            env.close();
+
+            // NOTE, everything below can only be tested on open ledger because
+            // there is no transaction type to express what bumpLastPage does.
+
+            // Bump position of last page from 1 to highest possible
+            auto const res = directory::bumpLastPage(
+                env,
+                lastPage,
+                keylet::ownerDir(alice.id()),
+                [lastPage, this](
+                    ApplyView& view, uint256 key, std::uint64_t page) {
+                    auto sle = view.peek({ltCREDENTIAL, key});
+                    if (!BEAST_EXPECT(sle))
+                        return false;
+                    BEAST_EXPECT(page == lastPage);
+                    sle->setFieldU64(sfIssuerNode, page);
+                    // sfSubjectNode is not set in self-issued credentials
+                    view.update(sle);
+                    return true;
+                });
+            BEAST_EXPECT(res);
+
+            // Create one more credential
+            env(credentials::create(alice, alice, std::to_string(63)));
+
+            // Not enough space for another object if full
+            auto const expected = full ? ter{tecDIR_FULL} : ter{tesSUCCESS};
+            env(credentials::create(alice, alice, "foo"), expected);
+
+            // Destroy all objects in directory
+            for (int i = 0; i < 64; ++i)
+                env(credentials::deleteCred(
+                    alice, alice, alice, std::to_string(i)));
+            if (!full)
+                env(credentials::deleteCred(alice, alice, alice, "foo"));
+
+            // Verify directory is empty.
+            auto const sle = env.le(keylet::ownerDir(alice.id()));
+            BEAST_EXPECT(sle == nullptr);
+
+            // Test completed
+            env.close();
+        };
+
+        testCase(
+            testable_amendments() - fixDirectoryLimit,
+            [this](Env&) -> std::tuple<std::uint64_t, bool> {
+                testcase("directory full without fixDirectoryLimit");
+                return {dirNodeMaxPages - 1, true};
+            });
+
+        testCase(
+            testable_amendments(),  //
+            [this](Env&) -> std::tuple<std::uint64_t, bool> {
+                testcase("directory not full with fixDirectoryLimit");
+                return {dirNodeMaxPages - 1, false};
+            });
+
+        testCase(
+            testable_amendments(),  //
+            [this](Env&) -> std::tuple<std::uint64_t, bool> {
+                testcase("directory full with fixDirectoryLimit");
+                return {std::numeric_limits<std::uint64_t>::max(), true};
+            });
+    }
+
     void
     run() override
     {

@@ -498,6 +585,7 @@ struct Directory_test : public beast::unit_test::suite
         testRipd1353();
         testEmptyChain();
         testPreviousTxnID();
+        testDirectoryFull();
     }
 };

View File

@@ -681,7 +681,7 @@ class ServerStatus_test : public beast::unit_test::suite,
resp["Upgrade"] == "websocket"); resp["Upgrade"] == "websocket");
BEAST_EXPECT( BEAST_EXPECT(
resp.find("Connection") != resp.end() && resp.find("Connection") != resp.end() &&
resp["Connection"] == "Upgrade"); resp["Connection"] == "upgrade");
} }
void void
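Connection header tokens are case-insensitive per RFC 9110, so the lowercase "upgrade" now expected here is equally valid; the test simply stops insisting on the capitalized form that the WebSocket handshake evidently no longer emits.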

View File

@@ -63,6 +63,8 @@ LedgerHistory::insert(
         ledger->stateMap().getHash().isNonZero(),
         "ripple::LedgerHistory::insert : nonzero hash");
 
+    std::unique_lock sl(m_ledgers_by_hash.peekMutex());
+
     bool const alreadyHad = m_ledgers_by_hash.canonicalize_replace_cache(
         ledger->info().hash, ledger);
     if (validated)

@@ -74,6 +76,7 @@
 LedgerHash
 LedgerHistory::getLedgerHash(LedgerIndex index)
 {
+    std::unique_lock sl(m_ledgers_by_hash.peekMutex());
     if (auto it = mLedgersByIndex.find(index); it != mLedgersByIndex.end())
         return it->second;
     return {};

@@ -83,11 +86,13 @@ std::shared_ptr<Ledger const>
 LedgerHistory::getLedgerBySeq(LedgerIndex index)
 {
     {
+        std::unique_lock sl(m_ledgers_by_hash.peekMutex());
         auto it = mLedgersByIndex.find(index);
         if (it != mLedgersByIndex.end())
         {
             uint256 hash = it->second;
+            sl.unlock();
             return getLedgerByHash(hash);
         }
     }

@@ -103,6 +108,7 @@ LedgerHistory::getLedgerBySeq(LedgerIndex index)
     {
         // Add this ledger to the local tracking by index
+        std::unique_lock sl(m_ledgers_by_hash.peekMutex());
 
         XRPL_ASSERT(
             ret->isImmutable(),

@@ -452,6 +458,8 @@ LedgerHistory::builtLedger(
     XRPL_ASSERT(
         !hash.isZero(), "ripple::LedgerHistory::builtLedger : nonzero hash");
 
+    std::unique_lock sl(m_consensus_validated.peekMutex());
+
     auto entry = std::make_shared<cv_entry>();
     m_consensus_validated.canonicalize_replace_client(index, entry);

@@ -492,6 +500,8 @@ LedgerHistory::validatedLedger(
         !hash.isZero(),
         "ripple::LedgerHistory::validatedLedger : nonzero hash");
 
+    std::unique_lock sl(m_consensus_validated.peekMutex());
+
     auto entry = std::make_shared<cv_entry>();
     m_consensus_validated.canonicalize_replace_client(index, entry);

@@ -525,9 +535,10 @@
 bool
 LedgerHistory::fixIndex(LedgerIndex ledgerIndex, LedgerHash const& ledgerHash)
 {
-    auto ledger = m_ledgers_by_hash.fetch(ledgerHash);
+    std::unique_lock sl(m_ledgers_by_hash.peekMutex());
     auto it = mLedgersByIndex.find(ledgerIndex);
-    if (ledger && (it != mLedgersByIndex.end()) && (it->second != ledgerHash))
+    if ((it != mLedgersByIndex.end()) && (it->second != ledgerHash))
     {
         it->second = ledgerHash;
         return false;

View File

@@ -1448,6 +1448,11 @@ NetworkOPsImp::processTransactionSet(CanonicalTXSet const& set)
         for (auto& t : transactions)
             mTransactions.push_back(std::move(t));
     }
 
+    if (mTransactions.empty())
+    {
+        JLOG(m_journal.debug()) << "No transaction to process!";
+        return;
+    }
+
     doTransactionSyncBatch(lock, [&](std::unique_lock<std::mutex> const&) {
         XRPL_ASSERT(
View File

@@ -23,6 +23,9 @@
 #include <xrpl/beast/utility/instrumentation.h>
 #include <xrpl/protocol/Protocol.h>
 
+#include <limits>
+#include <type_traits>
+
 namespace ripple {
 
 std::optional<std::uint64_t>

@@ -92,8 +95,21 @@ ApplyView::dirAdd(
         return page;
     }
 
+    // We rely on modulo arithmetic of unsigned integers (guaranteed in
+    // [basic.fundamental] paragraph 2) to detect page representation overflow.
+    // For signed integers this would be UB, hence static_assert here.
+    static_assert(std::is_unsigned_v<decltype(page)>);
+
+    // Defensive check against breaking changes in compiler.
+    static_assert([]<typename T>(std::type_identity<T>) constexpr -> T {
+        T tmp = std::numeric_limits<T>::max();
+        return ++tmp;
+    }(std::type_identity<decltype(page)>{}) == 0);
+
+    ++page;
     // Check whether we're out of pages.
-    if (++page >= dirNodeMaxPages)
+    if (page == 0)
+        return std::nullopt;
+    if (!rules().enabled(fixDirectoryLimit) &&
+        page >= dirNodeMaxPages)  // Old pages limit
         return std::nullopt;
 
     // We are about to create a new node; we'll link it to
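The overflow guard deserves a gloss: unsigned arithmetic wraps modulo 2^N, so once the fixed directory limit is gone, running out of page numbers shows up as the increment producing exactly 0. A standalone sketch of the same check (mirroring the defensive static_assert above, evaluable at compile time):

    #include <cstdint>
    #include <limits>

    // Wraparound is defined behavior for unsigned types: incrementing the
    // maximum value yields 0, which dirAdd then treats as "out of pages".
    static_assert([] {
        std::uint64_t page = std::numeric_limits<std::uint64_t>::max();
        return ++page == 0;
    }());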

View File

@@ -1286,8 +1286,7 @@ PeerImp::handleTransaction(
// Charge strongly for attempting to relay a txn with tfInnerBatchTxn // Charge strongly for attempting to relay a txn with tfInnerBatchTxn
// LCOV_EXCL_START // LCOV_EXCL_START
if (stx->isFlag(tfInnerBatchTxn) && if (stx->isFlag(tfInnerBatchTxn))
getCurrentTransactionRules()->enabled(featureBatch))
{ {
JLOG(p_journal_.warn()) << "Ignoring Network relayed Tx containing " JLOG(p_journal_.warn()) << "Ignoring Network relayed Tx containing "
"tfInnerBatchTxn (handleTransaction)."; "tfInnerBatchTxn (handleTransaction).";
@@ -2851,8 +2850,7 @@ PeerImp::checkTransaction(
{ {
// charge strongly for relaying batch txns // charge strongly for relaying batch txns
// LCOV_EXCL_START // LCOV_EXCL_START
if (stx->isFlag(tfInnerBatchTxn) && if (stx->isFlag(tfInnerBatchTxn))
getCurrentTransactionRules()->enabled(featureBatch))
{ {
JLOG(p_journal_.warn()) << "Ignoring Network relayed Tx containing " JLOG(p_journal_.warn()) << "Ignoring Network relayed Tx containing "
"tfInnerBatchTxn (checkSignature)."; "tfInnerBatchTxn (checkSignature).";
@@ -2866,6 +2864,9 @@ PeerImp::checkTransaction(
(stx->getFieldU32(sfLastLedgerSequence) < (stx->getFieldU32(sfLastLedgerSequence) <
app_.getLedgerMaster().getValidLedgerIndex())) app_.getLedgerMaster().getValidLedgerIndex()))
{ {
JLOG(p_journal_.info())
<< "Marking transaction " << stx->getTransactionID()
<< "as BAD because it's expired";
app_.getHashRouter().setFlags( app_.getHashRouter().setFlags(
stx->getTransactionID(), HashRouterFlags::BAD); stx->getTransactionID(), HashRouterFlags::BAD);
charge(Resource::feeUselessData, "expired tx"); charge(Resource::feeUselessData, "expired tx");
@@ -2922,7 +2923,7 @@ PeerImp::checkTransaction(
{ {
if (!validReason.empty()) if (!validReason.empty())
{ {
JLOG(p_journal_.trace()) JLOG(p_journal_.debug())
<< "Exception checking transaction: " << validReason; << "Exception checking transaction: " << validReason;
} }
@@ -2949,7 +2950,7 @@ PeerImp::checkTransaction(
{ {
if (!reason.empty()) if (!reason.empty())
{ {
JLOG(p_journal_.trace()) JLOG(p_journal_.debug())
<< "Exception checking transaction: " << reason; << "Exception checking transaction: " << reason;
} }
app_.getHashRouter().setFlags( app_.getHashRouter().setFlags(

View File

@@ -114,7 +114,7 @@ getCountsJson(Application& app, int minObjectCount)
     ret[jss::treenode_cache_size] =
         app.getNodeFamily().getTreeNodeCache()->getCacheSize();
     ret[jss::treenode_track_size] =
-        static_cast<int>(app.getNodeFamily().getTreeNodeCache()->size());
+        app.getNodeFamily().getTreeNodeCache()->getTrackSize();
 
     std::string uptime;
     auto s = UptimeClock::now();