Compare commits

...

11 Commits

Author SHA1 Message Date
RichardAH
3da7c00d73 Merge branch 'dev' into nd-migrate-to-conan-2-2025-09-22 2025-10-09 21:11:59 +11:00
tequ
92e3a927fc refactor KEYLET_LINE in utils_keylet (#502)
Fixes the misleading use of 'high' and 'low' in variable names, since the high/low ordering is determined inside ripple::keylet::line, not by the caller.

Co-authored-by: RichardAH <richard.holland@starstone.co.nz>
2025-10-09 21:02:14 +11:00
Nicholas Dudfield
c470ab2169 refactor: replace HBB env var with conan options
Replace HBB_RELEASE_BUILD environment variable with explicit Conan options
to decouple conanfile.py from specific build environments.

- Add 'with_wasmedge' and 'tool_requires_b2' options to conanfile.py
- Remove HBB_RELEASE_BUILD env var check from conanfile.py
- Pass options explicitly in build-core.sh: -o with_wasmedge=False -o tool_requires_b2=True
- Remove HBB_RELEASE_BUILD=1 export from build-full.sh
- Clean up HBB-specific commentary (already documented in release-builder.sh)
2025-10-06 15:16:35 +07:00
Nicholas Dudfield
a6377be629 fix: constrain conan version to 2.x in release-builder
Prevent potential breakage from future Conan 3.x by explicitly
constraining pip install to "conan>=2.0,<3.0"
2025-10-06 14:17:50 +07:00
Nicholas Dudfield
03cec6cb4f docs: update BUILD.md for conan 2 migration
- Update minimum requirements from Conan 1.55 to Conan 2.x
- Replace conan profile new/update commands with Conan 2 syntax
- Update export commands to use --version --user --channel flags
- Add WasmEdge export step
- Update profile configuration to use direct file editing at ~/.conan2/profiles/default
- Update troubleshooting section with Conan 2 cache locations and commands
- Replace outdated BOOST_ASIO_HAS_STD_INVOKE_RESULT workaround with Apple Clang 17+ gRPC fix
- Fix Conan downloads URL
2025-10-01 17:48:21 +07:00
Nicholas Dudfield
f01f6896f3 fix: add set -ex to release-builder.sh for fail-fast behavior
Enable bash strict mode to:
- Exit immediately on command failure (set -e)
- Print commands before execution for debugging (set -x)

This matches the documented behavior in the script comments.
2025-09-29 13:10:59 +07:00
Nicholas Dudfield
87d1fc0b17 refactor: remove redundant dual boost comment
The comment between configure() and requirements() was redundant
since the detailed explanation in requirements() is more comprehensive.
2025-09-29 08:51:04 +07:00
Nicholas Dudfield
e4e0c95d3c build: migrate to Conan 2
Migrates the build system from Conan 1 to Conan 2:

- Update all conan commands to v2 syntax (export, install, profile)
- Convert profiles to Conan 2 INI format with [settings] and [conf] sections
- Add tool_requires() for build tools (protobuf, grpc, b2)
- Fix wasmedge/0.11.2 recipe for Conan 2 compatibility (package_id syntax)
- Update CI workflows for all platforms (Linux, macOS, Docker/HBB)
- Document dual Boost setup (manual for WasmEdge, Conan for app)
- Force glibc-compatible builds via source compilation in HBB
- Ensure dependency version consistency with override=True

Maintains full backwards compatibility while preparing for Conan 1 EOL.
2025-09-26 14:49:31 +07:00
tequ
8f7ebf0377 Optimize github action cache (#544)
* optimize github action cache

* fix

* refactor: improve github actions cache optimization (#3)

- move ccache configuration logic to dedicated action
- rename conanfile-changed to should-save-conan-cache for clarity

---------

Co-authored-by: Niq Dudfield <ndudfield@gmail.com>
2025-09-08 15:53:40 +10:00
Niq Dudfield
46cf6785ab fix(tests): prevent buffer corruption from concurrent log writes (#565)
std::endl triggers flush() which calls sync() on the shared log buffer.
Multiple threads racing in sync() cause str()/str("") operations to
corrupt buffer state, leading to crashes and double frees.

Added mutex to serialize access to suite.log, preventing concurrent
sync() calls on the same buffer.
2025-09-08 13:57:49 +10:00
Niq Dudfield
3c4c9c87c5 Fix rwdb memory leak with online_delete and remove flatmap (#570)
Co-authored-by: Denis Angell <dangell@transia.co>
2025-08-26 14:00:58 +10:00
24 changed files with 1374 additions and 1328 deletions

View File

@@ -14,6 +14,18 @@ inputs:
description: 'How to check compiler for changes'
required: false
default: 'content'
is_main_branch:
description: 'Whether the current branch is the main branch'
required: false
default: 'false'
main_cache_dir:
description: 'Path to the main branch cache directory'
required: false
default: '~/.ccache-main'
current_cache_dir:
description: 'Path to the current branch cache directory'
required: false
default: '~/.ccache-current'
runs:
using: 'composite'
@@ -21,11 +33,31 @@ runs:
- name: Configure ccache
shell: bash
run: |
# Create cache directories
mkdir -p ${{ inputs.main_cache_dir }} ${{ inputs.current_cache_dir }}
# Set compiler check globally
ccache -o compiler_check=${{ inputs.compiler_check }}
# Use a single config file location
mkdir -p ~/.ccache
export CONF_PATH="${CCACHE_CONFIGPATH:-${CCACHE_DIR:-$HOME/.ccache}/ccache.conf}"
mkdir -p $(dirname "$CONF_PATH")
export CONF_PATH="$HOME/.ccache/ccache.conf"
# Apply common settings
echo "max_size = ${{ inputs.max_size }}" > "$CONF_PATH"
echo "hash_dir = ${{ inputs.hash_dir }}" >> "$CONF_PATH"
echo "compiler_check = ${{ inputs.compiler_check }}" >> "$CONF_PATH"
if [ "${{ inputs.is_main_branch }}" == "true" ]; then
# Main branch: use main branch cache
ccache --set-config=cache_dir="${{ inputs.main_cache_dir }}"
echo "CCACHE_DIR=${{ inputs.main_cache_dir }}" >> $GITHUB_ENV
else
# Feature branch: use current branch cache with main as secondary
ccache --set-config=cache_dir="${{ inputs.current_cache_dir }}"
ccache --set-config=secondary_storage="file:${{ inputs.main_cache_dir }}"
echo "CCACHE_DIR=${{ inputs.current_cache_dir }}" >> $GITHUB_ENV
fi
ccache -p # Print config for verification
ccache -z # Zero statistics before the build
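
Taken together, on a feature branch the steps above amount to roughly the following ccache setup (a sketch only; the sizes and paths come from the action inputs, and `secondary_storage` needs ccache 4.4 or newer):

```bash
# Rough equivalent of the action's behavior on a non-main branch (illustrative):
mkdir -p "$HOME/.ccache-main" "$HOME/.ccache-current" "$HOME/.ccache"
cat > "$HOME/.ccache/ccache.conf" <<'EOF'
max_size = 2G
hash_dir = true
compiler_check = content
EOF
# Writes go to the branch-local cache; misses can still be served from the main-branch cache.
ccache --set-config=cache_dir="$HOME/.ccache-current"
ccache --set-config=secondary_storage="file:$HOME/.ccache-main"
echo "CCACHE_DIR=$HOME/.ccache-current" >> "$GITHUB_ENV"
ccache -p && ccache -z
```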

View File

@@ -48,12 +48,23 @@ runs:
SAFE_BRANCH=$(echo "${{ github.ref_name }}" | tr -c 'a-zA-Z0-9_.-' '-')
echo "name=${SAFE_BRANCH}" >> $GITHUB_OUTPUT
- name: Restore ccache directory
- name: Restore ccache directory for default branch
if: inputs.ccache_enabled == 'true'
id: ccache-restore
uses: actions/cache/restore@v4
with:
path: ~/.ccache
path: ~/.ccache-main
key: ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-${{ inputs.main_branch }}
restore-keys: |
${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-
${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-
- name: Restore ccache directory for current branch
if: inputs.ccache_enabled == 'true' && steps.safe-branch.outputs.name != inputs.main_branch
id: ccache-restore-current-branch
uses: actions/cache/restore@v4
with:
path: ~/.ccache-current
key: ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-${{ steps.safe-branch.outputs.name }}
restore-keys: |
${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-${{ inputs.main_branch }}
@@ -75,6 +86,7 @@ runs:
export CXX="${{ inputs.cxx }}"
fi
# Configure ccache launcher args
CCACHE_ARGS=""
if [ "${{ inputs.ccache_enabled }}" = "true" ]; then
@@ -82,6 +94,10 @@ runs:
fi
# Run CMake configure
# Note: conanfile.py hardcodes 'build/generators' as the output path.
# If we're in a 'build' folder, Conan detects this and uses just 'generators/'
# If we're in '.build' (non-standard), Conan adds the full 'build/generators/'
# So we get: .build/build/generators/ with our non-standard folder name
cmake .. \
-G "${{ inputs.generator }}" \
$CCACHE_ARGS \
@@ -99,9 +115,16 @@ runs:
shell: bash
run: ccache -s
- name: Save ccache directory
if: inputs.ccache_enabled == 'true'
- name: Save ccache directory for default branch
if: always() && inputs.ccache_enabled == 'true' && steps.safe-branch.outputs.name == inputs.main_branch
uses: actions/cache/save@v4
with:
path: ~/.ccache
key: ${{ steps.ccache-restore.outputs.cache-primary-key }}
path: ~/.ccache-main
key: ${{ steps.ccache-restore.outputs.cache-primary-key }}
- name: Save ccache directory for current branch
if: always() && inputs.ccache_enabled == 'true' && steps.safe-branch.outputs.name != inputs.main_branch
uses: actions/cache/save@v4
with:
path: ~/.ccache-current
key: ${{ steps.ccache-restore-current-branch.outputs.cache-primary-key }}

View File

@@ -42,6 +42,26 @@ runs:
SAFE_BRANCH=$(echo "${{ github.ref_name }}" | tr -c 'a-zA-Z0-9_.-' '-')
echo "name=${SAFE_BRANCH}" >> $GITHUB_OUTPUT
- name: Check conanfile changes
if: inputs.cache_enabled == 'true'
id: check-conanfile-changes
shell: bash
run: |
# Check if we're on the main branch
if [ "${{ github.ref_name }}" == "${{ inputs.main_branch }}" ]; then
echo "should-save-conan-cache=true" >> $GITHUB_OUTPUT
else
# Fetch main branch for comparison
git fetch origin ${{ inputs.main_branch }}
# Check if conanfile.txt or conanfile.py has changed compared to main branch
if git diff --quiet origin/${{ inputs.main_branch }}..HEAD -- '**/conanfile.txt' '**/conanfile.py'; then
echo "should-save-conan-cache=false" >> $GITHUB_OUTPUT
else
echo "should-save-conan-cache=true" >> $GITHUB_OUTPUT
fi
fi
- name: Restore Conan cache
if: inputs.cache_enabled == 'true'
id: cache-restore-conan
@@ -58,8 +78,9 @@ runs:
- name: Export custom recipes
shell: bash
run: |
conan export external/snappy snappy/1.1.10@xahaud/stable
conan export external/soci soci/4.0.3@xahaud/stable
conan export external/snappy --version 1.1.10 --user xahaud --channel stable
conan export external/soci --version 4.0.3 --user xahaud --channel stable
conan export external/wasmedge --version 0.11.2 --user xahaud --channel stable
- name: Install dependencies
shell: bash
@@ -76,7 +97,7 @@ runs:
..
- name: Save Conan cache
if: inputs.cache_enabled == 'true' && steps.cache-restore-conan.outputs.cache-hit != 'true'
if: always() && inputs.cache_enabled == 'true' && steps.cache-restore-conan.outputs.cache-hit != 'true' && steps.check-conanfile-changes.outputs.should-save-conan-cache == 'true'
uses: actions/cache/save@v4
with:
path: |

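To confirm that the Conan 2 exports above actually landed in the local cache, listing the references should be enough (illustrative commands; the names and versions are the ones exported in the step above):

```bash
# Each command should print the matching recipe revision from the Conan 2 cache.
conan list "snappy/1.1.10@xahaud/stable"
conan list "soci/4.0.3@xahaud/stable"
conan list "wasmedge/0.11.2@xahaud/stable"
```
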
View File

@@ -5,6 +5,8 @@ on:
branches: ["dev", "candidate", "release"]
pull_request:
branches: ["dev", "candidate", "release"]
schedule:
- cron: '0 0 * * *'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
@@ -30,9 +32,9 @@ jobs:
- name: Install Conan
run: |
brew install conan@1
# Add Conan 1 to the PATH for this job
echo "$(brew --prefix conan@1)/bin" >> $GITHUB_PATH
brew install conan
# Verify Conan 2 is installed
conan --version
- name: Install Coreutils
run: |
@@ -58,12 +60,20 @@ jobs:
- name: Install CMake
run: |
if which cmake > /dev/null 2>&1; then
echo "cmake executable exists"
cmake --version
else
brew install cmake
fi
# Install CMake 3.x to match local dev environments
# With Conan 2 and the policy args passed to CMake, newer versions
# can have issues with dependencies that require cmake_minimum_required < 3.5
brew uninstall cmake --ignore-dependencies 2>/dev/null || true
# Download and install CMake 3.31.7 directly
curl -L https://github.com/Kitware/CMake/releases/download/v3.31.7/cmake-3.31.7-macos-universal.tar.gz -o cmake.tar.gz
tar -xzf cmake.tar.gz
# Move the entire CMake.app to /Applications
sudo mv cmake-3.31.7-macos-universal/CMake.app /Applications/
echo "/Applications/CMake.app/Contents/bin" >> $GITHUB_PATH
/Applications/CMake.app/Contents/bin/cmake --version
- name: Install ccache
run: brew install ccache
@@ -74,6 +84,7 @@ jobs:
max_size: 2G
hash_dir: true
compiler_check: content
is_main_branch: ${{ github.ref_name == env.MAIN_BRANCH_NAME }}
- name: Check environment
run: |
@@ -89,8 +100,30 @@ jobs:
- name: Configure Conan
run: |
conan profile new default --detect || true # Ignore error if profile exists
conan profile update settings.compiler.cppstd=20 default
# Create the default profile directory if it doesn't exist
mkdir -p ~/.conan2/profiles
# Detect compiler version
COMPILER_VERSION=$(clang --version | grep -oE 'version [0-9]+' | grep -oE '[0-9]+')
# Create profile with our specific settings
cat > ~/.conan2/profiles/default <<EOF
[settings]
arch=armv8
build_type=Release
compiler=apple-clang
compiler.cppstd=20
compiler.libcxx=libc++
compiler.version=${COMPILER_VERSION}
os=Macos
[conf]
# Workaround for gRPC with newer Apple Clang
tools.build:cxxflags=["-Wno-missing-template-arg-list-after-template-kw"]
EOF
# Display profile for verification
conan profile show
- name: Install dependencies
uses: ./.github/actions/xahau-ga-dependencies
@@ -113,4 +146,4 @@ jobs:
- name: Test
run: |
${{ env.build_dir }}/rippled --unittest --unittest-jobs $(nproc)
${{ env.build_dir }}/rippled --unittest --unittest-jobs $(nproc)

View File

@@ -5,6 +5,8 @@ on:
branches: ["dev", "candidate", "release"]
pull_request:
branches: ["dev", "candidate", "release"]
schedule:
- cron: '0 0 * * *'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
@@ -39,8 +41,8 @@ jobs:
run: |
sudo apt-get update
sudo apt-get install -y ninja-build ${{ matrix.cc }} ${{ matrix.cxx }} ccache
# Install specific Conan version needed
pip install --upgrade "conan<2"
# Install Conan 2
pip install --upgrade "conan>=2.0"
- name: Configure ccache
uses: ./.github/actions/xahau-configure-ccache
@@ -48,21 +50,34 @@ jobs:
max_size: 2G
hash_dir: true
compiler_check: content
is_main_branch: ${{ github.ref_name == env.MAIN_BRANCH_NAME }}
- name: Configure Conan
run: |
conan profile new default --detect || true # Ignore error if profile exists
conan profile update settings.compiler.cppstd=20 default
conan profile update settings.compiler=${{ matrix.compiler }} default
conan profile update settings.compiler.libcxx=libstdc++11 default
conan profile update env.CC=/usr/bin/${{ matrix.cc }} default
conan profile update env.CXX=/usr/bin/${{ matrix.cxx }} default
conan profile update conf.tools.build:compiler_executables='{"c": "/usr/bin/${{ matrix.cc }}", "cpp": "/usr/bin/${{ matrix.cxx }}"}' default
# Create the default profile directory if it doesn't exist
mkdir -p ~/.conan2/profiles
# Create profile with our specific settings
cat > ~/.conan2/profiles/default <<EOF
[settings]
arch=x86_64
build_type=Release
compiler=${{ matrix.compiler }}
compiler.cppstd=20
compiler.libcxx=libstdc++11
compiler.version=${{ matrix.compiler_version }}
os=Linux
[buildenv]
CC=/usr/bin/${{ matrix.cc }}
CXX=/usr/bin/${{ matrix.cxx }}
[conf]
tools.build:compiler_executables={"c": "/usr/bin/${{ matrix.cc }}", "cpp": "/usr/bin/${{ matrix.cxx }}"}
EOF
# Set compiler version from matrix
conan profile update settings.compiler.version=${{ matrix.compiler_version }} default
# Display profile for verification
conan profile show default
conan profile show
- name: Check environment
run: |

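The old `conan profile new`/`conan profile update` commands are gone in Conan 2, which is why the workflow now writes the profile file directly. The usual CLI replacements are roughly the following (a sketch):

```bash
conan profile detect --force   # create or overwrite ~/.conan2/profiles/default
conan profile path default     # print the file to edit by hand (or via a heredoc, as above)
conan profile show             # verify settings such as compiler.cppstd=20
```
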
View File

@@ -33,7 +33,7 @@ git checkout develop
## Minimum Requirements
- [Python 3.7](https://www.python.org/downloads/)
- [Conan 1.55](https://conan.io/downloads.html)
- [Conan 2.x](https://conan.io/downloads)
- [CMake 3.16](https://cmake.org/download/)
`rippled` is written in the C++20 dialect and includes the `<concepts>` header.
@@ -65,13 +65,24 @@ can't build earlier Boost versions.
1. (Optional) If you've never used Conan, use autodetect to set up a default profile.
```
conan profile new default --detect
conan profile detect --force
```
2. Update the compiler settings.
For Conan 2, you can edit the profile directly at `~/.conan2/profiles/default`,
or use the Conan CLI. Ensure C++20 is set:
```
conan profile update settings.compiler.cppstd=20 default
conan profile show
```
Look for `compiler.cppstd=20` in the output. If it's not set, edit the profile:
```
# Edit ~/.conan2/profiles/default and ensure these settings exist:
[settings]
compiler.cppstd=20
```
Linux developers will commonly have a default Conan [profile][] that compiles
@@ -80,7 +91,9 @@ can't build earlier Boost versions.
then you will need to choose the `libstdc++11` ABI.
```
conan profile update settings.compiler.libcxx=libstdc++11 default
# In ~/.conan2/profiles/default, ensure:
[settings]
compiler.libcxx=libstdc++11
```
On Windows, you should use the x64 native build tools.
@@ -91,7 +104,9 @@ can't build earlier Boost versions.
architecture.
```
conan profile update settings.arch=x86_64 default
# In ~/.conan2/profiles/default, ensure:
[settings]
arch=x86_64
```
3. (Optional) If you have multiple compilers installed on your platform,
@@ -100,16 +115,18 @@ can't build earlier Boost versions.
in the generated CMake toolchain file.
```
conan profile update 'conf.tools.build:compiler_executables={"c": "<path>", "cpp": "<path>"}' default
# In ~/.conan2/profiles/default, add under [conf] section:
[conf]
tools.build:compiler_executables={"c": "<path>", "cpp": "<path>"}
```
It should choose the compiler for dependencies as well,
but not all of them have a Conan recipe that respects this setting (yet).
For the rest, you can set these environment variables:
For setting environment variables for dependencies:
```
conan profile update env.CC=<path> default
conan profile update env.CXX=<path> default
# In ~/.conan2/profiles/default, add under [buildenv] section:
[buildenv]
CC=<path>
CXX=<path>
```
4. Export our [Conan recipe for Snappy](./external/snappy).
@@ -117,14 +134,20 @@ can't build earlier Boost versions.
which allows you to statically link it with GCC, if you want.
```
conan export external/snappy snappy/1.1.10@xahaud/stable
conan export external/snappy --version 1.1.10 --user xahaud --channel stable
```
5. Export our [Conan recipe for SOCI](./external/soci).
It patches their CMake to correctly import its dependencies.
```
conan export external/soci soci/4.0.3@xahaud/stable
conan export external/soci --version 4.0.3 --user xahaud --channel stable
```
6. Export our [Conan recipe for WasmEdge](./external/wasmedge).
```
conan export external/wasmedge --version 0.11.2 --user xahaud --channel stable
```
### Build and Test
@@ -259,23 +282,26 @@ and can be helpful for detecting `#include` omissions.
If you have trouble building dependencies after changing Conan settings,
try removing the Conan cache.
For Conan 2:
```
rm -rf ~/.conan/data
rm -rf ~/.conan2/p
```
Or clear the entire Conan 2 cache:
```
conan cache clean "*"
```
### no std::result_of
### macOS compilation with Apple Clang 17+
If your compiler version is recent enough to have removed `std::result_of` as
part of C++20, e.g. Apple Clang 15.0, then you might need to add a preprocessor
definition to your build.
If you're on macOS with Apple Clang 17 or newer, you need to add a compiler flag to work around a compilation error in gRPC dependencies.
Edit `~/.conan2/profiles/default` and add under the `[conf]` section:
```
conan profile update 'options.boost:extra_b2_flags="define=BOOST_ASIO_HAS_STD_INVOKE_RESULT"' default
conan profile update 'env.CFLAGS="-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"' default
conan profile update 'env.CXXFLAGS="-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"' default
conan profile update 'conf.tools.build:cflags+=["-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"]' default
conan profile update 'conf.tools.build:cxxflags+=["-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"]' default
[conf]
tools.build:cxxflags=["-Wno-missing-template-arg-list-after-template-kw"]
```

View File

@@ -548,7 +548,6 @@ target_sources (rippled PRIVATE
src/ripple/nodestore/backend/CassandraFactory.cpp
src/ripple/nodestore/backend/RWDBFactory.cpp
src/ripple/nodestore/backend/MemoryFactory.cpp
src/ripple/nodestore/backend/FlatmapFactory.cpp
src/ripple/nodestore/backend/NuDBFactory.cpp
src/ripple/nodestore/backend/NullFactory.cpp
src/ripple/nodestore/backend/RocksDBFactory.cpp
@@ -995,6 +994,11 @@ if (tests)
subdir: resource
#]===============================]
src/test/resource/Logic_test.cpp
#[===============================[
test sources:
subdir: rdb
#]===============================]
src/test/rdb/RelationalDatabase_test.cpp
#[===============================[
test sources:
subdir: rpc

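Once the binary is built with tests enabled, the newly added relational-database suite can presumably be run in isolation with the unit-test filter; the suite name below is assumed from the file name, so adjust it if the in-source test-suite registration differs:

```bash
# Run only the new suite (name assumed from RelationalDatabase_test.cpp):
./rippled --unittest=RelationalDatabase
```
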
View File

@@ -186,6 +186,10 @@ test.protocol > ripple.crypto
test.protocol > ripple.json
test.protocol > ripple.protocol
test.protocol > test.toplevel
test.rdb > ripple.app
test.rdb > ripple.core
test.rdb > test.jtx
test.rdb > test.toplevel
test.resource > ripple.basics
test.resource > ripple.beast
test.resource > ripple.resource

View File

@@ -12,6 +12,13 @@ echo "-- GITHUB_REPOSITORY: $1"
echo "-- GITHUB_SHA: $2"
echo "-- GITHUB_RUN_NUMBER: $4"
# Use mounted filesystem for temp files to avoid container space limits
export TMPDIR=/io/tmp
export TEMP=/io/tmp
export TMP=/io/tmp
mkdir -p /io/tmp
echo "=== Using temp directory: /io/tmp ==="
umask 0000;
cd /io/ &&
@@ -43,10 +50,17 @@ export LDFLAGS="-static-libstdc++"
git config --global --add safe.directory /io &&
git checkout src/ripple/protocol/impl/BuildInfo.cpp &&
sed -i s/\"0.0.0\"/\"$(date +%Y).$(date +%-m).$(date +%-d)-$(git rev-parse --abbrev-ref HEAD)$(if [ -n "$4" ]; then echo "+$4"; fi)\"/g src/ripple/protocol/impl/BuildInfo.cpp &&
conan export external/snappy snappy/1.1.10@xahaud/stable &&
conan export external/soci soci/4.0.3@xahaud/stable &&
conan export external/snappy --version 1.1.10 --user xahaud --channel stable &&
conan export external/soci --version 4.0.3 --user xahaud --channel stable &&
conan export external/wasmedge --version 0.11.2 --user xahaud --channel stable &&
cd release-build &&
conan install .. --output-folder . --build missing --settings build_type=$BUILD_TYPE &&
# Install dependencies - tool_requires in conanfile.py handles glibc 2.28 compatibility
# for build tools (protoc, grpc plugins, b2) in HBB environment
# The tool_requires('b2/5.3.2') in conanfile.py should force b2 to build from source
# with the correct toolchain, avoiding the GLIBCXX_3.4.29 issue
echo "=== Installing dependencies ===" &&
conan install .. --output-folder . --build missing --settings build_type=$BUILD_TYPE \
-o with_wasmedge=False -o tool_requires_b2=True &&
cmake .. -G Ninja \
-DCMAKE_BUILD_TYPE=$BUILD_TYPE \
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
@@ -56,10 +70,13 @@ cmake .. -G Ninja \
-Dxrpld=TRUE \
-Dtests=TRUE &&
ccache -z &&
ninja -j $3 &&
ninja -j $3 && echo "=== Re-running final link with verbose output ===" && rm -f rippled && ninja -v rippled &&
ccache -s &&
strip -s rippled &&
strip -s rippled &&
mv rippled xahaud &&
echo "=== Full ldd output ===" &&
ldd xahaud &&
echo "=== Running libcheck ===" &&
libcheck xahaud &&
echo "Build host: `hostname`" > release.info &&
echo "Build date: `date`" >> release.info &&

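The explicit `-o with_wasmedge=False -o tool_requires_b2=True` flags only matter inside the HBB container; elsewhere the defaults declared in conanfile.py (with_wasmedge=True, tool_requires_b2=False) apply, so a plain developer install is just (a sketch, assuming an out-of-source build directory as described in BUILD.md):

```bash
mkdir -p .build && cd .build
conan install .. --output-folder . --build missing --settings build_type=Release
```
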
View File

@@ -1063,14 +1063,16 @@
# RWDB is recommended for Validator and Peer nodes that are not required to
# store history.
#
# RWDB maintains its high speed regardless of the amount of history
# stored. Online delete should NOT be used instead RWDB will use the
# ledger_history config value to determine how many ledgers to keep in memory.
#
# Required keys for NuDB, RWDB and RocksDB:
# Required keys for NuDB and RocksDB:
#
# path Location to store the database
#
# Required keys for RWDB:
#
# online_delete Required. RWDB stores data in memory and will
# grow unbounded without online_delete. See the
# online_delete section below.
#
# Required keys for Cassandra:
#
# contact_points IP of a node in the Cassandra cluster
@@ -1110,7 +1112,17 @@
# if sufficient IOPS capacity is available.
# Default 0.
#
# Optional keys for NuDB or RocksDB:
# online_delete for RWDB, NuDB and RocksDB:
#
# online_delete Minimum value of 256. Enable automatic purging
# of older ledger information. Maintain at least this
# number of ledger records online. Must be greater
# than or equal to ledger_history.
#
# REQUIRED for RWDB to prevent out-of-memory errors.
# Optional for NuDB and RocksDB.
#
# Optional keys for NuDB and RocksDB:
#
# earliest_seq The default is 32570 to match the XRP ledger
# network's earliest allowed sequence. Alternate
@@ -1120,12 +1132,7 @@
# it must be defined with the same value in both
# sections.
#
# online_delete Minimum value of 256. Enable automatic purging
# of older ledger information. Maintain at least this
# number of ledger records online. Must be greater
# than or equal to ledger_history. If using RWDB
# this value is ignored.
#
# These keys modify the behavior of online_delete, and thus are only
# relevant if online_delete is defined and non-zero:
#

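Put together, a minimal RWDB stanza that satisfies the new requirement might look like the following (illustrative values only; online_delete must be at least 256 and greater than or equal to ledger_history):

```bash
# Append an example [node_db] stanza to a rippled/xahaud config (values are examples only):
cat >> rippled.cfg <<'EOF'
[node_db]
type=RWDB
online_delete=256
EOF
```
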
View File

@@ -21,22 +21,20 @@ class Xrpl(ConanFile):
'static': [True, False],
'tests': [True, False],
'unity': [True, False],
'with_wasmedge': [True, False],
'tool_requires_b2': [True, False],
}
requires = [
'boost/1.86.0',
'date/3.0.1',
'libarchive/3.6.0',
'lz4/1.9.3',
'lz4/1.9.4',
'grpc/1.50.1',
'nudb/2.0.8',
'openssl/1.1.1u',
'protobuf/3.21.9',
'snappy/1.1.10@xahaud/stable',
'protobuf/3.21.12',
'soci/4.0.3@xahaud/stable',
'sqlite3/3.42.0',
'zlib/1.2.13',
'wasmedge/0.11.2',
'zlib/1.3.1',
]
default_options = {
@@ -50,42 +48,44 @@ class Xrpl(ConanFile):
'static': True,
'tests': True,
'unity': False,
'with_wasmedge': True,
'tool_requires_b2': False,
'cassandra-cpp-driver:shared': False,
'date:header_only': True,
'grpc:shared': False,
'grpc:secure': True,
'libarchive:shared': False,
'libarchive:with_acl': False,
'libarchive:with_bzip2': False,
'libarchive:with_cng': False,
'libarchive:with_expat': False,
'libarchive:with_iconv': False,
'libarchive:with_libxml2': False,
'libarchive:with_lz4': True,
'libarchive:with_lzma': False,
'libarchive:with_lzo': False,
'libarchive:with_nettle': False,
'libarchive:with_openssl': False,
'libarchive:with_pcreposix': False,
'libarchive:with_xattr': False,
'libarchive:with_zlib': False,
'libpq:shared': False,
'lz4:shared': False,
'openssl:shared': False,
'protobuf:shared': False,
'protobuf:with_zlib': True,
'rocksdb:enable_sse': False,
'rocksdb:lite': False,
'rocksdb:shared': False,
'rocksdb:use_rtti': True,
'rocksdb:with_jemalloc': False,
'rocksdb:with_lz4': True,
'rocksdb:with_snappy': True,
'snappy:shared': False,
'soci:shared': False,
'soci:with_sqlite3': True,
'soci:with_boost': True,
'cassandra-cpp-driver/*:shared': False,
'date/*:header_only': True,
'grpc/*:shared': False,
'grpc/*:secure': True,
'libarchive/*:shared': False,
'libarchive/*:with_acl': False,
'libarchive/*:with_bzip2': False,
'libarchive/*:with_cng': False,
'libarchive/*:with_expat': False,
'libarchive/*:with_iconv': False,
'libarchive/*:with_libxml2': False,
'libarchive/*:with_lz4': True,
'libarchive/*:with_lzma': False,
'libarchive/*:with_lzo': False,
'libarchive/*:with_nettle': False,
'libarchive/*:with_openssl': False,
'libarchive/*:with_pcreposix': False,
'libarchive/*:with_xattr': False,
'libarchive/*:with_zlib': False,
'libpq/*:shared': False,
'lz4/*:shared': False,
'openssl/*:shared': False,
'protobuf/*:shared': False,
'protobuf/*:with_zlib': True,
'rocksdb/*:enable_sse': False,
'rocksdb/*:lite': False,
'rocksdb/*:shared': False,
'rocksdb/*:use_rtti': True,
'rocksdb/*:with_jemalloc': False,
'rocksdb/*:with_lz4': True,
'rocksdb/*:with_snappy': True,
'snappy/*:shared': False,
'soci/*:shared': False,
'soci/*:with_sqlite3': True,
'soci/*:with_boost': True,
}
def set_version(self):
@@ -96,11 +96,28 @@ class Xrpl(ConanFile):
match = next(m for m in matches if m)
self.version = match.group(1)
def build_requirements(self):
# These provide build tools (protoc, grpc plugins) that run during build
self.tool_requires('protobuf/3.21.12')
self.tool_requires('grpc/1.50.1')
# Explicitly require b2 (e.g. for building from source for glibc compatibility)
if self.options.tool_requires_b2:
self.tool_requires('b2/5.3.2')
def configure(self):
if self.settings.compiler == 'apple-clang':
self.options['boost'].visibility = 'global'
self.options['boost/*'].visibility = 'global'
def requirements(self):
# Force sqlite3 version to avoid conflicts with soci
self.requires('sqlite3/3.42.0', override=True)
# Force our custom snappy build for all dependencies
self.requires('snappy/1.1.10@xahaud/stable', override=True)
# Force boost version for all dependencies to avoid conflicts
self.requires('boost/1.86.0', override=True)
if self.options.with_wasmedge:
self.requires('wasmedge/0.11.2@xahaud/stable')
if self.options.jemalloc:
self.requires('jemalloc/5.2.1')
if self.options.reporting:

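To see how the overrides and the new options resolve without doing a full build, inspecting the dependency graph from the repository root should suffice (illustrative; the option values mirror what build-core.sh passes in the HBB environment):

```bash
# The custom recipes must be exported first, as in the CI action above:
conan export external/snappy --version 1.1.10 --user xahaud --channel stable
conan export external/soci --version 4.0.3 --user xahaud --channel stable
conan export external/wasmedge --version 0.11.2 --user xahaud --channel stable
# Then resolve the graph with the HBB-style options:
conan graph info . -o with_wasmedge=False -o tool_requires_b2=True
```
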
View File

@@ -38,8 +38,15 @@ class WasmedgeConan(ConanFile):
raise ConanInvalidConfiguration("Binaries for this combination of version/os/arch/compiler are not available")
def package_id(self):
del self.info.settings.compiler.version
self.info.settings.compiler = self._compiler_alias
# Make binary compatible across compiler versions (since we're downloading prebuilt)
self.info.settings.rm_safe("compiler.version")
# Group compilers by their binary compatibility
# Note: We must use self.info.settings here, not self.settings (forbidden in Conan 2)
compiler_name = str(self.info.settings.compiler)
if compiler_name in ["Visual Studio", "msvc"]:
self.info.settings.compiler = "Visual Studio"
else:
self.info.settings.compiler = "gcc"
def build(self):
# This is packaging binaries so the download needs to be in build

View File

@@ -1,9 +1,11 @@
#!/bin/bash
#!/bin/bash
# We use set -e and bash with -u to bail on first non zero exit code of any
# processes launched or upon any unbound variable.
# We use set -x to print commands before running them to help with
# debugging.
set -ex
echo "START BUILDING (HOST)"
echo "Cleaning previously built binary"
@@ -90,29 +92,37 @@ RUN /hbb_exe/activate-exec bash -c "dnf install -y epel-release && \
llvm14-static llvm14-devel && \
dnf clean all"
# Install Conan and CMake
RUN /hbb_exe/activate-exec pip3 install "conan==1.66.0" && \
# Install Conan 2 and CMake
RUN /hbb_exe/activate-exec pip3 install "conan>=2.0,<3.0" && \
/hbb_exe/activate-exec wget -q https://github.com/Kitware/CMake/releases/download/v3.23.1/cmake-3.23.1-linux-x86_64.tar.gz -O cmake.tar.gz && \
mkdir cmake && \
tar -xzf cmake.tar.gz --strip-components=1 -C cmake && \
rm cmake.tar.gz
# Install Boost 1.86.0
RUN /hbb_exe/activate-exec bash -c "cd /tmp && \
# Dual Boost configuration in HBB environment:
# - Manual Boost in /usr/local (minimal: for WasmEdge which is pre-built in Docker)
# - Conan Boost (full: for the application and all dependencies via toolchain)
#
# Install minimal Boost 1.86.0 for WasmEdge only (filesystem and its dependencies)
# The main application will use Conan-provided Boost for all other components
# IMPORTANT: Understanding Boost linking options:
# - link=static: Creates static Boost libraries (.a files) instead of shared (.so files)
# - runtime-link=shared: Links Boost libraries against shared libc (glibc)
# WasmEdge only needs boost::filesystem and boost::system
RUN /hbb_exe/activate-exec bash -c "echo 'Boost cache bust: v5-minimal' && \
rm -rf /usr/local/lib/libboost* /usr/local/include/boost && \
cd /tmp && \
wget -q https://archives.boost.io/release/1.86.0/source/boost_1_86_0.tar.gz -O boost.tar.gz && \
mkdir boost && \
tar -xzf boost.tar.gz --strip-components=1 -C boost && \
cd boost && \
./bootstrap.sh && \
./b2 link=static -j${BUILD_CORES} && \
./b2 install && \
./b2 install \
link=static runtime-link=shared -j${BUILD_CORES} \
--with-filesystem --with-system && \
cd /tmp && \
rm -rf boost boost.tar.gz"
ENV BOOST_ROOT=/usr/local/src/boost_1_86_0
ENV Boost_LIBRARY_DIRS=/usr/local/lib
ENV BOOST_INCLUDEDIR=/usr/local/src/boost_1_86_0
ENV CMAKE_EXE_LINKER_FLAGS="-static-libstdc++"
ENV LLVM_DIR=/usr/lib64/llvm14/lib/cmake/llvm
@@ -155,6 +165,10 @@ RUN cd /tmp && \
cd build && \
/hbb_exe/activate-exec bash -c "source /opt/rh/gcc-toolset-11/enable && \
ln -sf /opt/rh/gcc-toolset-11/root/usr/bin/ar /usr/bin/ar && \
ln -sf /opt/rh/gcc-toolset-11/root/usr/bin/ranlib /usr/bin/ranlib && \
echo '=== Binutils version check ===' && \
ar --version | head -1 && \
ranlib --version | head -1 && \
cmake .. \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_INSTALL_PREFIX=/usr/local \
@@ -176,14 +190,28 @@ RUN cd /tmp && \
# Set environment variables
ENV PATH=/usr/local/bin:$PATH
# Configure ccache and Conan
# Configure ccache and Conan 2
# NOTE: Using echo commands instead of heredocs because heredocs in Docker RUN commands are finicky
RUN /hbb_exe/activate-exec bash -c "ccache -M 10G && \
ccache -o cache_dir=/cache/ccache && \
ccache -o compiler_check=content && \
conan config set storage.path=/cache/conan && \
(conan profile new default --detect || true) && \
conan profile update settings.compiler.libcxx=libstdc++11 default && \
conan profile update settings.compiler.cppstd=20 default"
mkdir -p ~/.conan2 /cache/conan2 /cache/conan2_download /cache/conan2_sources && \
echo 'core.cache:storage_path=/cache/conan2' > ~/.conan2/global.conf && \
echo 'core.download:download_cache=/cache/conan2_download' >> ~/.conan2/global.conf && \
echo 'core.sources:download_cache=/cache/conan2_sources' >> ~/.conan2/global.conf && \
conan profile detect --force && \
echo '[settings]' > ~/.conan2/profiles/default && \
echo 'arch=x86_64' >> ~/.conan2/profiles/default && \
echo 'build_type=Release' >> ~/.conan2/profiles/default && \
echo 'compiler=gcc' >> ~/.conan2/profiles/default && \
echo 'compiler.cppstd=20' >> ~/.conan2/profiles/default && \
echo 'compiler.libcxx=libstdc++11' >> ~/.conan2/profiles/default && \
echo 'compiler.version=11' >> ~/.conan2/profiles/default && \
echo 'os=Linux' >> ~/.conan2/profiles/default && \
echo '' >> ~/.conan2/profiles/default && \
echo '[conf]' >> ~/.conan2/profiles/default && \
echo '# Force building from source for packages with binary compatibility issues' >> ~/.conan2/profiles/default && \
echo '*:tools.system.package_manager:mode=build' >> ~/.conan2/profiles/default"
DOCKERFILE_EOF
)

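A quick way to confirm inside the image that the echo-built configuration took effect (illustrative commands; `conan config home` and `conan profile show` are standard Conan 2 CLI):

```bash
conan config home            # prints the Conan 2 home (~/.conan2) whose global.conf was written above
cat ~/.conan2/global.conf    # should list the /cache/conan2* storage and download paths
conan profile show           # should match the gcc 11 / libstdc++11 / cppstd=20 profile
```
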
View File

@@ -3150,15 +3150,15 @@ DEFINE_HOOK_FUNCTION(
if (a == 0 || b == 0 || c == 0 || d == 0 || e == 0 || f == 0)
return INVALID_ARGUMENT;
uint32_t hi_ptr = a, hi_len = b, lo_ptr = c, lo_len = d,
uint32_t acc1_ptr = a, acc1_len = b, acc2_ptr = c, acc2_len = d,
cu_ptr = e, cu_len = f;
if (NOT_IN_BOUNDS(hi_ptr, hi_len, memory_length) ||
NOT_IN_BOUNDS(lo_ptr, lo_len, memory_length) ||
if (NOT_IN_BOUNDS(acc1_ptr, acc1_len, memory_length) ||
NOT_IN_BOUNDS(acc2_ptr, acc2_len, memory_length) ||
NOT_IN_BOUNDS(cu_ptr, cu_len, memory_length))
return OUT_OF_BOUNDS;
if (hi_len != 20 || lo_len != 20)
if (acc1_len != 20 || acc2_len != 20)
return INVALID_ARGUMENT;
std::optional<Currency> cur =
@@ -3167,8 +3167,8 @@ DEFINE_HOOK_FUNCTION(
return INVALID_ARGUMENT;
auto kl = ripple::keylet::line(
AccountID::fromVoid(memory + hi_ptr),
AccountID::fromVoid(memory + lo_ptr),
AccountID::fromVoid(memory + acc1_ptr),
AccountID::fromVoid(memory + acc2_ptr),
*cur);
return serialize_keylet(kl, memory, write_ptr, write_len);
}

View File

@@ -1,851 +0,0 @@
#ifndef RIPPLE_APP_RDB_BACKEND_FLATMAPDATABASE_H_INCLUDED
#define RIPPLE_APP_RDB_BACKEND_FLATMAPDATABASE_H_INCLUDED
#include <ripple/app/ledger/AcceptedLedger.h>
#include <ripple/app/ledger/LedgerMaster.h>
#include <ripple/app/ledger/TransactionMaster.h>
#include <ripple/app/rdb/backend/SQLiteDatabase.h>
#include <algorithm>
#include <map>
#include <mutex>
#include <optional>
#include <shared_mutex>
#include <vector>
#include <boost/unordered/concurrent_flat_map.hpp>
namespace ripple {
struct base_uint_hasher
{
using result_type = std::size_t;
result_type
operator()(base_uint<256> const& value) const
{
return hardened_hash<>{}(value);
}
result_type
operator()(AccountID const& value) const
{
return hardened_hash<>{}(value);
}
};
class FlatmapDatabase : public SQLiteDatabase
{
private:
struct LedgerData
{
LedgerInfo info;
boost::unordered::
concurrent_flat_map<uint256, AccountTx, base_uint_hasher>
transactions;
};
struct AccountTxData
{
boost::unordered::
concurrent_flat_map<std::pair<uint32_t, uint32_t>, AccountTx>
transactions;
};
Application& app_;
boost::unordered::concurrent_flat_map<LedgerIndex, LedgerData> ledgers_;
boost::unordered::
concurrent_flat_map<uint256, LedgerIndex, base_uint_hasher>
ledgerHashToSeq_;
boost::unordered::concurrent_flat_map<uint256, AccountTx, base_uint_hasher>
transactionMap_;
boost::unordered::
concurrent_flat_map<AccountID, AccountTxData, base_uint_hasher>
accountTxMap_;
public:
FlatmapDatabase(Application& app, Config const& config, JobQueue& jobQueue)
: app_(app)
{
}
std::optional<LedgerIndex>
getMinLedgerSeq() override
{
std::optional<LedgerIndex> minSeq;
ledgers_.visit_all([&minSeq](auto const& pair) {
if (!minSeq || pair.first < *minSeq)
{
minSeq = pair.first;
}
});
return minSeq;
}
std::optional<LedgerIndex>
getTransactionsMinLedgerSeq() override
{
std::optional<LedgerIndex> minSeq;
transactionMap_.visit_all([&minSeq](auto const& pair) {
LedgerIndex seq = pair.second.second->getLgrSeq();
if (!minSeq || seq < *minSeq)
{
minSeq = seq;
}
});
return minSeq;
}
std::optional<LedgerIndex>
getAccountTransactionsMinLedgerSeq() override
{
std::optional<LedgerIndex> minSeq;
accountTxMap_.visit_all([&minSeq](auto const& pair) {
pair.second.transactions.visit_all([&minSeq](auto const& tx) {
if (!minSeq || tx.first.first < *minSeq)
{
minSeq = tx.first.first;
}
});
});
return minSeq;
}
std::optional<LedgerIndex>
getMaxLedgerSeq() override
{
std::optional<LedgerIndex> maxSeq;
ledgers_.visit_all([&maxSeq](auto const& pair) {
if (!maxSeq || pair.first > *maxSeq)
{
maxSeq = pair.first;
}
});
return maxSeq;
}
void
deleteTransactionByLedgerSeq(LedgerIndex ledgerSeq) override
{
ledgers_.visit(ledgerSeq, [this](auto& item) {
item.second.transactions.visit_all([this](auto const& txPair) {
transactionMap_.erase(txPair.first);
});
item.second.transactions.clear();
});
accountTxMap_.visit_all([ledgerSeq](auto& item) {
item.second.transactions.erase_if([ledgerSeq](auto const& tx) {
return tx.first.first == ledgerSeq;
});
});
}
void
deleteBeforeLedgerSeq(LedgerIndex ledgerSeq) override
{
ledgers_.erase_if([this, ledgerSeq](auto const& item) {
if (item.first < ledgerSeq)
{
item.second.transactions.visit_all([this](auto const& txPair) {
transactionMap_.erase(txPair.first);
});
ledgerHashToSeq_.erase(item.second.info.hash);
return true;
}
return false;
});
accountTxMap_.visit_all([ledgerSeq](auto& item) {
item.second.transactions.erase_if([ledgerSeq](auto const& tx) {
return tx.first.first < ledgerSeq;
});
});
}
void
deleteTransactionsBeforeLedgerSeq(LedgerIndex ledgerSeq) override
{
ledgers_.visit_all([this, ledgerSeq](auto& item) {
if (item.first < ledgerSeq)
{
item.second.transactions.visit_all([this](auto const& txPair) {
transactionMap_.erase(txPair.first);
});
item.second.transactions.clear();
}
});
accountTxMap_.visit_all([ledgerSeq](auto& item) {
item.second.transactions.erase_if([ledgerSeq](auto const& tx) {
return tx.first.first < ledgerSeq;
});
});
}
void
deleteAccountTransactionsBeforeLedgerSeq(LedgerIndex ledgerSeq) override
{
accountTxMap_.visit_all([ledgerSeq](auto& item) {
item.second.transactions.erase_if([ledgerSeq](auto const& tx) {
return tx.first.first < ledgerSeq;
});
});
}
std::size_t
getTransactionCount() override
{
return transactionMap_.size();
}
std::size_t
getAccountTransactionCount() override
{
std::size_t count = 0;
accountTxMap_.visit_all([&count](auto const& item) {
count += item.second.transactions.size();
});
return count;
}
CountMinMax
getLedgerCountMinMax() override
{
CountMinMax result{0, 0, 0};
ledgers_.visit_all([&result](auto const& item) {
result.numberOfRows++;
if (result.minLedgerSequence == 0 ||
item.first < result.minLedgerSequence)
{
result.minLedgerSequence = item.first;
}
if (item.first > result.maxLedgerSequence)
{
result.maxLedgerSequence = item.first;
}
});
return result;
}
bool
saveValidatedLedger(
std::shared_ptr<Ledger const> const& ledger,
bool current) override
{
try
{
LedgerData ledgerData;
ledgerData.info = ledger->info();
auto aLedger = std::make_shared<AcceptedLedger>(ledger, app_);
for (auto const& acceptedLedgerTx : *aLedger)
{
auto const& txn = acceptedLedgerTx->getTxn();
auto const& meta = acceptedLedgerTx->getMeta();
auto const& id = txn->getTransactionID();
std::string reason;
auto accTx = std::make_pair(
std::make_shared<ripple::Transaction>(txn, reason, app_),
std::make_shared<ripple::TxMeta>(meta));
ledgerData.transactions.emplace(id, accTx);
transactionMap_.emplace(id, accTx);
for (auto const& account : meta.getAffectedAccounts())
{
accountTxMap_.visit(account, [&](auto& data) {
data.second.transactions.emplace(
std::make_pair(
ledger->info().seq,
acceptedLedgerTx->getTxnSeq()),
accTx);
});
}
}
ledgers_.emplace(ledger->info().seq, std::move(ledgerData));
ledgerHashToSeq_.emplace(ledger->info().hash, ledger->info().seq);
if (current)
{
auto const cutoffSeq =
ledger->info().seq > app_.config().LEDGER_HISTORY
? ledger->info().seq - app_.config().LEDGER_HISTORY
: 0;
if (cutoffSeq > 0)
{
const std::size_t BATCH_SIZE = 128;
std::size_t deleted = 0;
ledgers_.erase_if([&](auto const& item) {
if (deleted >= BATCH_SIZE)
return false;
if (item.first < cutoffSeq)
{
item.second.transactions.visit_all(
[this](auto const& txPair) {
transactionMap_.erase(txPair.first);
});
ledgerHashToSeq_.erase(item.second.info.hash);
deleted++;
return true;
}
return false;
});
if (deleted > 0)
{
accountTxMap_.visit_all([cutoffSeq](auto& item) {
item.second.transactions.erase_if(
[cutoffSeq](auto const& tx) {
return tx.first.first < cutoffSeq;
});
});
}
app_.getLedgerMaster().clearPriorLedgers(cutoffSeq);
}
}
return true;
}
catch (std::exception const&)
{
deleteTransactionByLedgerSeq(ledger->info().seq);
return false;
}
}
std::optional<LedgerInfo>
getLedgerInfoByIndex(LedgerIndex ledgerSeq) override
{
std::optional<LedgerInfo> result;
ledgers_.visit(ledgerSeq, [&result](auto const& item) {
result = item.second.info;
});
return result;
}
std::optional<LedgerInfo>
getNewestLedgerInfo() override
{
std::optional<LedgerInfo> result;
ledgers_.visit_all([&result](auto const& item) {
if (!result || item.second.info.seq > result->seq)
{
result = item.second.info;
}
});
return result;
}
std::optional<LedgerInfo>
getLimitedOldestLedgerInfo(LedgerIndex ledgerFirstIndex) override
{
std::optional<LedgerInfo> result;
ledgers_.visit_all([&](auto const& item) {
if (item.first >= ledgerFirstIndex &&
(!result || item.first < result->seq))
{
result = item.second.info;
}
});
return result;
}
std::optional<LedgerInfo>
getLimitedNewestLedgerInfo(LedgerIndex ledgerFirstIndex) override
{
std::optional<LedgerInfo> result;
ledgers_.visit_all([&](auto const& item) {
if (item.first >= ledgerFirstIndex &&
(!result || item.first > result->seq))
{
result = item.second.info;
}
});
return result;
}
std::optional<LedgerInfo>
getLedgerInfoByHash(uint256 const& ledgerHash) override
{
std::optional<LedgerInfo> result;
ledgerHashToSeq_.visit(ledgerHash, [this, &result](auto const& item) {
ledgers_.visit(item.second, [&result](auto const& item) {
result = item.second.info;
});
});
return result;
}
uint256
getHashByIndex(LedgerIndex ledgerIndex) override
{
uint256 result;
ledgers_.visit(ledgerIndex, [&result](auto const& item) {
result = item.second.info.hash;
});
return result;
}
std::optional<LedgerHashPair>
getHashesByIndex(LedgerIndex ledgerIndex) override
{
std::optional<LedgerHashPair> result;
ledgers_.visit(ledgerIndex, [&result](auto const& item) {
result = LedgerHashPair{
item.second.info.hash, item.second.info.parentHash};
});
return result;
}
std::map<LedgerIndex, LedgerHashPair>
getHashesByIndex(LedgerIndex minSeq, LedgerIndex maxSeq) override
{
std::map<LedgerIndex, LedgerHashPair> result;
ledgers_.visit_all([&](auto const& item) {
if (item.first >= minSeq && item.first <= maxSeq)
{
result[item.first] = LedgerHashPair{
item.second.info.hash, item.second.info.parentHash};
}
});
return result;
}
std::variant<AccountTx, TxSearched>
getTransaction(
uint256 const& id,
std::optional<ClosedInterval<std::uint32_t>> const& range,
error_code_i& ec) override
{
std::variant<AccountTx, TxSearched> result = TxSearched::unknown;
transactionMap_.visit(id, [&](auto const& item) {
auto const& tx = item.second;
if (!range ||
(range->lower() <= tx.second->getLgrSeq() &&
tx.second->getLgrSeq() <= range->upper()))
{
result = tx;
}
else
{
result = TxSearched::all;
}
});
return result;
}
bool
ledgerDbHasSpace(Config const& config) override
{
return true; // In-memory database always has space
}
bool
transactionDbHasSpace(Config const& config) override
{
return true; // In-memory database always has space
}
std::uint32_t
getKBUsedAll() override
{
std::uint32_t size = sizeof(*this);
size += ledgers_.size() * (sizeof(LedgerIndex) + sizeof(LedgerData));
size +=
ledgerHashToSeq_.size() * (sizeof(uint256) + sizeof(LedgerIndex));
size += transactionMap_.size() * (sizeof(uint256) + sizeof(AccountTx));
accountTxMap_.visit_all([&size](auto const& item) {
size += sizeof(AccountID) + sizeof(AccountTxData);
size += item.second.transactions.size() * sizeof(AccountTx);
});
return size / 1024; // Convert to KB
}
std::uint32_t
getKBUsedLedger() override
{
std::uint32_t size =
ledgers_.size() * (sizeof(LedgerIndex) + sizeof(LedgerData));
size +=
ledgerHashToSeq_.size() * (sizeof(uint256) + sizeof(LedgerIndex));
return size / 1024;
}
std::uint32_t
getKBUsedTransaction() override
{
std::uint32_t size =
transactionMap_.size() * (sizeof(uint256) + sizeof(AccountTx));
accountTxMap_.visit_all([&size](auto const& item) {
size += sizeof(AccountID) + sizeof(AccountTxData);
size += item.second.transactions.size() * sizeof(AccountTx);
});
return size / 1024;
}
void
closeLedgerDB() override
{
// No-op for in-memory database
}
void
closeTransactionDB() override
{
// No-op for in-memory database
}
~FlatmapDatabase()
{
// Concurrent maps need visit_all
accountTxMap_.visit_all(
[](auto& pair) { pair.second.transactions.clear(); });
accountTxMap_.clear();
transactionMap_.clear();
ledgers_.visit_all(
[](auto& pair) { pair.second.transactions.clear(); });
ledgers_.clear();
ledgerHashToSeq_.clear();
}
std::vector<std::shared_ptr<Transaction>>
getTxHistory(LedgerIndex startIndex) override
{
std::vector<std::shared_ptr<Transaction>> result;
transactionMap_.visit_all([&](auto const& item) {
if (item.second.second->getLgrSeq() >= startIndex)
{
result.push_back(item.second.first);
}
});
std::sort(
result.begin(), result.end(), [](auto const& a, auto const& b) {
return a->getLedger() > b->getLedger();
});
if (result.size() > 20)
{
result.resize(20);
}
return result;
}
// Helper function to handle limits
template <typename Container>
void
applyLimit(Container& container, std::size_t limit, bool bUnlimited)
{
if (!bUnlimited && limit > 0 && container.size() > limit)
{
container.resize(limit);
}
}
AccountTxs
getOldestAccountTxs(AccountTxOptions const& options) override
{
AccountTxs result;
accountTxMap_.visit(options.account, [&](auto const& item) {
item.second.transactions.visit_all([&](auto const& tx) {
if (tx.first.first >= options.minLedger &&
tx.first.first <= options.maxLedger)
{
result.push_back(tx.second);
}
});
});
std::sort(
result.begin(), result.end(), [](auto const& a, auto const& b) {
return a.second->getLgrSeq() < b.second->getLgrSeq();
});
applyLimit(result, options.limit, options.bUnlimited);
return result;
}
AccountTxs
getNewestAccountTxs(AccountTxOptions const& options) override
{
AccountTxs result;
accountTxMap_.visit(options.account, [&](auto const& item) {
item.second.transactions.visit_all([&](auto const& tx) {
if (tx.first.first >= options.minLedger &&
tx.first.first <= options.maxLedger)
{
result.push_back(tx.second);
}
});
});
std::sort(
result.begin(), result.end(), [](auto const& a, auto const& b) {
return a.second->getLgrSeq() > b.second->getLgrSeq();
});
applyLimit(result, options.limit, options.bUnlimited);
return result;
}
MetaTxsList
getOldestAccountTxsB(AccountTxOptions const& options) override
{
MetaTxsList result;
accountTxMap_.visit(options.account, [&](auto const& item) {
item.second.transactions.visit_all([&](auto const& tx) {
if (tx.first.first >= options.minLedger &&
tx.first.first <= options.maxLedger)
{
result.emplace_back(
tx.second.first->getSTransaction()
->getSerializer()
.peekData(),
tx.second.second->getAsObject()
.getSerializer()
.peekData(),
tx.first.first);
}
});
});
std::sort(
result.begin(), result.end(), [](auto const& a, auto const& b) {
return std::get<2>(a) < std::get<2>(b);
});
applyLimit(result, options.limit, options.bUnlimited);
return result;
}
MetaTxsList
getNewestAccountTxsB(AccountTxOptions const& options) override
{
MetaTxsList result;
accountTxMap_.visit(options.account, [&](auto const& item) {
item.second.transactions.visit_all([&](auto const& tx) {
if (tx.first.first >= options.minLedger &&
tx.first.first <= options.maxLedger)
{
result.emplace_back(
tx.second.first->getSTransaction()
->getSerializer()
.peekData(),
tx.second.second->getAsObject()
.getSerializer()
.peekData(),
tx.first.first);
}
});
});
std::sort(
result.begin(), result.end(), [](auto const& a, auto const& b) {
return std::get<2>(a) > std::get<2>(b);
});
applyLimit(result, options.limit, options.bUnlimited);
return result;
}
std::pair<AccountTxs, std::optional<AccountTxMarker>>
oldestAccountTxPage(AccountTxPageOptions const& options) override
{
AccountTxs result;
std::optional<AccountTxMarker> marker;
accountTxMap_.visit(options.account, [&](auto const& item) {
std::vector<std::pair<std::pair<uint32_t, uint32_t>, AccountTx>>
txs;
item.second.transactions.visit_all([&](auto const& tx) {
if (tx.first.first >= options.minLedger &&
tx.first.first <= options.maxLedger)
{
txs.emplace_back(tx);
}
});
std::sort(txs.begin(), txs.end(), [](auto const& a, auto const& b) {
return a.first < b.first;
});
auto it = txs.begin();
if (options.marker)
{
it = std::find_if(txs.begin(), txs.end(), [&](auto const& tx) {
return tx.first.first == options.marker->ledgerSeq &&
tx.first.second == options.marker->txnSeq;
});
if (it != txs.end())
++it;
}
for (; it != txs.end() &&
(options.limit == 0 || result.size() < options.limit);
++it)
{
result.push_back(it->second);
}
if (it != txs.end())
{
marker = AccountTxMarker{it->first.first, it->first.second};
}
});
return {result, marker};
}
std::pair<AccountTxs, std::optional<AccountTxMarker>>
newestAccountTxPage(AccountTxPageOptions const& options) override
{
AccountTxs result;
std::optional<AccountTxMarker> marker;
accountTxMap_.visit(options.account, [&](auto const& item) {
std::vector<std::pair<std::pair<uint32_t, uint32_t>, AccountTx>>
txs;
item.second.transactions.visit_all([&](auto const& tx) {
if (tx.first.first >= options.minLedger &&
tx.first.first <= options.maxLedger)
{
txs.emplace_back(tx);
}
});
std::sort(txs.begin(), txs.end(), [](auto const& a, auto const& b) {
return a.first > b.first;
});
auto it = txs.begin();
if (options.marker)
{
it = std::find_if(txs.begin(), txs.end(), [&](auto const& tx) {
return tx.first.first == options.marker->ledgerSeq &&
tx.first.second == options.marker->txnSeq;
});
if (it != txs.end())
++it;
}
for (; it != txs.end() &&
(options.limit == 0 || result.size() < options.limit);
++it)
{
result.push_back(it->second);
}
if (it != txs.end())
{
marker = AccountTxMarker{it->first.first, it->first.second};
}
});
return {result, marker};
}
std::pair<MetaTxsList, std::optional<AccountTxMarker>>
oldestAccountTxPageB(AccountTxPageOptions const& options) override
{
MetaTxsList result;
std::optional<AccountTxMarker> marker;
accountTxMap_.visit(options.account, [&](auto const& item) {
std::vector<std::tuple<uint32_t, uint32_t, AccountTx>> txs;
item.second.transactions.visit_all([&](auto const& tx) {
if (tx.first.first >= options.minLedger &&
tx.first.first <= options.maxLedger)
{
txs.emplace_back(
tx.first.first, tx.first.second, tx.second);
}
});
std::sort(txs.begin(), txs.end());
auto it = txs.begin();
if (options.marker)
{
it = std::find_if(txs.begin(), txs.end(), [&](auto const& tx) {
return std::get<0>(tx) == options.marker->ledgerSeq &&
std::get<1>(tx) == options.marker->txnSeq;
});
if (it != txs.end())
++it;
}
for (; it != txs.end() &&
(options.limit == 0 || result.size() < options.limit);
++it)
{
const auto& [_, __, tx] = *it;
result.emplace_back(
tx.first->getSTransaction()->getSerializer().peekData(),
tx.second->getAsObject().getSerializer().peekData(),
std::get<0>(*it));
}
if (it != txs.end())
{
marker = AccountTxMarker{std::get<0>(*it), std::get<1>(*it)};
}
});
return {result, marker};
}
std::pair<MetaTxsList, std::optional<AccountTxMarker>>
newestAccountTxPageB(AccountTxPageOptions const& options) override
{
MetaTxsList result;
std::optional<AccountTxMarker> marker;
accountTxMap_.visit(options.account, [&](auto const& item) {
std::vector<std::tuple<uint32_t, uint32_t, AccountTx>> txs;
item.second.transactions.visit_all([&](auto const& tx) {
if (tx.first.first >= options.minLedger &&
tx.first.first <= options.maxLedger)
{
txs.emplace_back(
tx.first.first, tx.first.second, tx.second);
}
});
std::sort(txs.begin(), txs.end(), std::greater<>());
auto it = txs.begin();
if (options.marker)
{
it = std::find_if(txs.begin(), txs.end(), [&](auto const& tx) {
return std::get<0>(tx) == options.marker->ledgerSeq &&
std::get<1>(tx) == options.marker->txnSeq;
});
if (it != txs.end())
++it;
}
for (; it != txs.end() &&
(options.limit == 0 || result.size() < options.limit);
++it)
{
const auto& [_, __, tx] = *it;
result.emplace_back(
tx.first->getSTransaction()->getSerializer().peekData(),
tx.second->getAsObject().getSerializer().peekData(),
std::get<0>(*it));
}
if (it != txs.end())
{
marker = AccountTxMarker{std::get<0>(*it), std::get<1>(*it)};
}
});
return {result, marker};
}
};
// Factory function
std::unique_ptr<SQLiteDatabase>
getFlatmapDatabase(Application& app, Config const& config, JobQueue& jobQueue)
{
return std::make_unique<FlatmapDatabase>(app, config, jobQueue);
}
} // namespace ripple
#endif // RIPPLE_APP_RDB_BACKEND_FLATMAPDATABASE_H_INCLUDED

View File

@@ -28,9 +28,8 @@ private:
struct AccountTxData
{
AccountTxs transactions;
std::map<uint32_t, std::map<uint32_t, size_t>>
ledgerTxMap; // ledgerSeq -> txSeq -> index in transactions
std::map<uint32_t, std::vector<AccountTx>>
ledgerTxMap; // ledgerSeq -> vector of transactions
};
Application& app_;
@@ -65,9 +64,12 @@ public:
return {};
std::shared_lock<std::shared_mutex> lock(mutex_);
if (transactionMap_.empty())
return std::nullopt;
return transactionMap_.begin()->second.second->getLgrSeq();
for (const auto& [ledgerSeq, ledgerData] : ledgers_)
{
if (!ledgerData.transactions.empty())
return ledgerSeq;
}
return std::nullopt;
}
std::optional<LedgerIndex>
@@ -163,14 +165,6 @@ public:
{
txIt = accountData.ledgerTxMap.erase(txIt);
}
accountData.transactions.erase(
std::remove_if(
accountData.transactions.begin(),
accountData.transactions.end(),
[ledgerSeq](const AccountTx& tx) {
return tx.second->getLgrSeq() < ledgerSeq;
}),
accountData.transactions.end());
}
}
std::size_t
@@ -193,7 +187,10 @@ public:
std::size_t count = 0;
for (const auto& [_, accountData] : accountTxMap_)
{
count += accountData.transactions.size();
for (const auto& [_, txVector] : accountData.ledgerTxMap)
{
count += txVector.size();
}
}
return count;
}
@@ -293,10 +290,7 @@ public:
accountTxMap_[account] = AccountTxData();
auto& accountData = accountTxMap_[account];
accountData.transactions.push_back(accTx);
accountData
.ledgerTxMap[seq][acceptedLedgerTx->getTxnSeq()] =
accountData.transactions.size() - 1;
accountData.ledgerTxMap[seq].push_back(accTx);
}
app_.getMasterTransaction().inLedger(
@@ -451,59 +445,108 @@ public:
return true; // In-memory database always has space
}
// Red-black tree node overhead per map entry
static constexpr size_t MAP_NODE_OVERHEAD = 40;
private:
std::uint64_t
getBytesUsedLedger_unlocked() const
{
std::uint64_t size = 0;
// Count structural overhead of ledger storage including map node
// overhead Note: sizeof(LedgerData) includes the map container for
// transactions, but not the actual transaction data
size += ledgers_.size() *
(sizeof(LedgerIndex) + sizeof(LedgerData) + MAP_NODE_OVERHEAD);
// Add the transaction map nodes inside each ledger (ledger's view of
// its transactions)
for (const auto& [_, ledgerData] : ledgers_)
{
size += ledgerData.transactions.size() *
(sizeof(uint256) + sizeof(AccountTx) + MAP_NODE_OVERHEAD);
}
// Count the ledger hash to sequence lookup map
size += ledgerHashToSeq_.size() *
(sizeof(uint256) + sizeof(LedgerIndex) + MAP_NODE_OVERHEAD);
return size;
}
std::uint64_t
getBytesUsedTransaction_unlocked() const
{
if (!useTxTables_)
return 0;
std::uint64_t size = 0;
// Count structural overhead of transaction map
// sizeof(AccountTx) is just the size of two shared_ptrs (~32 bytes)
size += transactionMap_.size() *
(sizeof(uint256) + sizeof(AccountTx) + MAP_NODE_OVERHEAD);
// Add actual transaction and metadata data sizes
for (const auto& [_, accountTx] : transactionMap_)
{
if (accountTx.first)
size += accountTx.first->getSTransaction()
->getSerializer()
.peekData()
.size();
if (accountTx.second)
size += accountTx.second->getAsObject()
.getSerializer()
.peekData()
.size();
}
// Count structural overhead of account transaction index
// The actual transaction data is already counted above from
// transactionMap_
for (const auto& [accountId, accountData] : accountTxMap_)
{
size +=
sizeof(accountId) + sizeof(AccountTxData) + MAP_NODE_OVERHEAD;
for (const auto& [ledgerSeq, txVector] : accountData.ledgerTxMap)
{
// Use capacity() to account for actual allocated memory
size += sizeof(ledgerSeq) + MAP_NODE_OVERHEAD;
size += txVector.capacity() * sizeof(AccountTx);
}
}
return size;
}
public:
std::uint32_t
getKBUsedAll() override
{
std::shared_lock<std::shared_mutex> lock(mutex_);
std::uint32_t size = sizeof(*this);
size += ledgers_.size() * (sizeof(LedgerIndex) + sizeof(LedgerData));
size +=
ledgerHashToSeq_.size() * (sizeof(uint256) + sizeof(LedgerIndex));
size += transactionMap_.size() * (sizeof(uint256) + sizeof(AccountTx));
for (const auto& [_, accountData] : accountTxMap_)
{
size += sizeof(AccountID) + sizeof(AccountTxData);
size += accountData.transactions.size() * sizeof(AccountTx);
for (const auto& [_, innerMap] : accountData.ledgerTxMap)
{
size += sizeof(uint32_t) +
innerMap.size() * (sizeof(uint32_t) + sizeof(size_t));
}
}
return size / 1024;
// Total = base object + ledger infrastructure + transaction data
std::uint64_t size = sizeof(*this) + getBytesUsedLedger_unlocked() +
getBytesUsedTransaction_unlocked();
return static_cast<std::uint32_t>(size / 1024);
}
std::uint32_t
getKBUsedLedger() override
{
std::shared_lock<std::shared_mutex> lock(mutex_);
std::uint32_t size = 0;
size += ledgers_.size() * (sizeof(LedgerIndex) + sizeof(LedgerData));
size +=
ledgerHashToSeq_.size() * (sizeof(uint256) + sizeof(LedgerIndex));
return size / 1024;
return static_cast<std::uint32_t>(getBytesUsedLedger_unlocked() / 1024);
}
std::uint32_t
getKBUsedTransaction() override
{
if (!useTxTables_)
return 0;
std::shared_lock<std::shared_mutex> lock(mutex_);
std::uint32_t size = 0;
size += transactionMap_.size() * (sizeof(uint256) + sizeof(AccountTx));
for (const auto& [_, accountData] : accountTxMap_)
{
size += sizeof(AccountID) + sizeof(AccountTxData);
size += accountData.transactions.size() * sizeof(AccountTx);
for (const auto& [_, innerMap] : accountData.ledgerTxMap)
{
size += sizeof(uint32_t) +
innerMap.size() * (sizeof(uint32_t) + sizeof(size_t));
}
}
return size / 1024;
return static_cast<std::uint32_t>(
getBytesUsedTransaction_unlocked() / 1024);
}
void
@@ -605,14 +648,13 @@ public:
(options.bUnlimited || result.size() < options.limit);
++txIt)
{
for (const auto& [txSeq, txIndex] : txIt->second)
for (const auto& accountTx : txIt->second)
{
if (skipped < options.offset)
{
++skipped;
continue;
}
AccountTx const accountTx = accountData.transactions[txIndex];
std::uint32_t const inLedger = rangeCheckedCast<std::uint32_t>(
accountTx.second->getLgrSeq());
accountTx.first->setStatus(COMMITTED);
@@ -657,8 +699,7 @@ public:
++skipped;
continue;
}
AccountTx const accountTx =
accountData.transactions[innerRIt->second];
AccountTx const accountTx = *innerRIt;
std::uint32_t const inLedger = rangeCheckedCast<std::uint32_t>(
accountTx.second->getLgrSeq());
accountTx.first->setLedger(inLedger);
@@ -692,14 +733,14 @@ public:
(options.bUnlimited || result.size() < options.limit);
++txIt)
{
for (const auto& [txSeq, txIndex] : txIt->second)
for (const auto& accountTx : txIt->second)
{
if (skipped < options.offset)
{
++skipped;
continue;
}
const auto& [txn, txMeta] = accountData.transactions[txIndex];
const auto& [txn, txMeta] = accountTx;
result.emplace_back(
txn->getSTransaction()->getSerializer().peekData(),
txMeta->getAsObject().getSerializer().peekData(),
@@ -743,8 +784,7 @@ public:
++skipped;
continue;
}
const auto& [txn, txMeta] =
accountData.transactions[innerRIt->second];
const auto& [txn, txMeta] = *innerRIt;
result.emplace_back(
txn->getSTransaction()->getSerializer().peekData(),
txMeta->getAsObject().getSerializer().peekData(),
@@ -816,11 +856,9 @@ public:
for (; txIt != txEnd; ++txIt)
{
std::uint32_t const ledgerSeq = txIt->first;
for (auto seqIt = txIt->second.begin();
seqIt != txIt->second.end();
++seqIt)
std::uint32_t txnSeq = 0;
for (const auto& accountTx : txIt->second)
{
const auto& [txnSeq, index] = *seqIt;
if (lookingForMarker)
{
if (findLedger == ledgerSeq && findSeq == txnSeq)
@@ -828,7 +866,10 @@ public:
lookingForMarker = false;
}
else
{
++txnSeq;
continue;
}
}
else if (numberOfResults == 0)
{
@@ -837,12 +878,10 @@ public:
return {newmarker, total};
}
Blob rawTxn = accountData.transactions[index]
.first->getSTransaction()
Blob rawTxn = accountTx.first->getSTransaction()
->getSerializer()
.peekData();
Blob rawMeta = accountData.transactions[index]
.second->getAsObject()
Blob rawMeta = accountTx.second->getAsObject()
.getSerializer()
.peekData();
@@ -856,6 +895,7 @@ public:
std::move(rawMeta));
--numberOfResults;
++total;
++txnSeq;
}
}
}
@@ -871,11 +911,11 @@ public:
for (; rtxIt != rtxEnd; ++rtxIt)
{
std::uint32_t const ledgerSeq = rtxIt->first;
std::uint32_t txnSeq = rtxIt->second.size() - 1;
for (auto innerRIt = rtxIt->second.rbegin();
innerRIt != rtxIt->second.rend();
++innerRIt)
{
const auto& [txnSeq, index] = *innerRIt;
if (lookingForMarker)
{
if (findLedger == ledgerSeq && findSeq == txnSeq)
@@ -883,7 +923,10 @@ public:
lookingForMarker = false;
}
else
{
--txnSeq;
continue;
}
}
else if (numberOfResults == 0)
{
@@ -892,12 +935,11 @@ public:
return {newmarker, total};
}
Blob rawTxn = accountData.transactions[index]
.first->getSTransaction()
const auto& accountTx = *innerRIt;
Blob rawTxn = accountTx.first->getSTransaction()
->getSerializer()
.peekData();
Blob rawMeta = accountData.transactions[index]
.second->getAsObject()
Blob rawMeta = accountTx.second->getAsObject()
.getSerializer()
.peekData();
@@ -911,6 +953,7 @@ public:
std::move(rawMeta));
--numberOfResults;
++total;
--txnSeq;
}
}
}
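
The diff above replaces RWDB's per-account transaction index: instead of a flat transactions vector plus a ledgerSeq -> txSeq -> index map, each account now maps a ledger sequence directly to a vector of AccountTx entries appended in apply order. The per-ledger transaction sequence is no longer stored; the paging code recovers it from the position in the vector (the txnSeq counters above). A minimal sketch of that shape, using a hypothetical Tx stand-in for the real transaction/metadata pair:

#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <vector>

struct Tx
{
    std::string id;  // stand-in for the shared_ptr pair in the real AccountTx
};

struct AccountTxData
{
    // ledgerSeq -> transactions for that ledger, in apply order
    std::map<std::uint32_t, std::vector<Tx>> ledgerTxMap;
};

int main()
{
    AccountTxData accountData;

    // Insertion: append in ledger-apply order, as the insertion path above does.
    accountData.ledgerTxMap[3].push_back({"A"});
    accountData.ledgerTxMap[3].push_back({"B"});
    accountData.ledgerTxMap[4].push_back({"C"});

    // Forward paging: the transaction sequence within a ledger is now the
    // position in the vector, counted as we go (the old map carried it
    // explicitly as the key).
    for (auto const& [ledgerSeq, txs] : accountData.ledgerTxMap)
    {
        std::uint32_t txnSeq = 0;
        for (auto const& tx : txs)
        {
            std::cout << ledgerSeq << "/" << txnSeq << ": " << tx.id << "\n";
            ++txnSeq;
        }
    }
}

This keeps forward and reverse account_tx paging ordered by (ledgerSeq, position) and sidesteps keeping a separate index in sync when entries for older ledgers are deleted.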

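The getKBUsed* figures above are structural estimates rather than measurements: each std::map entry is costed as key size + value size + the fixed 40-byte MAP_NODE_OVERHEAD, vectors are costed by capacity(), and only the transaction map additionally adds the serialized transaction and metadata bytes. A small sketch of that arithmetic (the element sizes below are illustrative assumptions, not values taken from the code):

#include <cstddef>
#include <iostream>

// Assumed red-black tree node cost per map entry, matching the constant above.
constexpr std::size_t MAP_NODE_OVERHEAD = 40;

constexpr std::size_t
mapBytes(std::size_t entries, std::size_t keySize, std::size_t valueSize)
{
    // Each entry: key + mapped value + node bookkeeping.
    return entries * (keySize + valueSize + MAP_NODE_OVERHEAD);
}

int main()
{
    // e.g. 1000 ledgers indexed by a 32-byte hash to a 4-byte sequence:
    std::cout << mapBytes(1000, 32, 4) << " bytes for a hash->seq map\n";

    // A vector holding 256 AccountTx-sized entries (two shared_ptrs ~ 32 bytes),
    // counted by capacity rather than size:
    std::size_t const vectorBytes = 256 * 32;
    std::cout << vectorBytes << " bytes for one ledger's AccountTx vector\n";
}

Because the totals are divided by 1024, a near-empty database legitimately reports 0 KB, which the RelationalDatabase tests below rely on.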
View File

@@ -19,7 +19,6 @@
#include <ripple/app/main/Application.h>
#include <ripple/app/rdb/RelationalDatabase.h>
#include <ripple/app/rdb/backend/FlatmapDatabase.h>
#include <ripple/app/rdb/backend/RWDBDatabase.h>
#include <ripple/core/ConfigSections.h>
#include <ripple/nodestore/DatabaseShard.h>
@@ -41,7 +40,6 @@ RelationalDatabase::init(
bool use_sqlite = false;
bool use_postgres = false;
bool use_rwdb = false;
bool use_flatmap = false;
if (config.reporting())
{
@@ -60,10 +58,6 @@ RelationalDatabase::init(
{
use_rwdb = true;
}
else if (boost::iequals(get(rdb_section, "backend"), "flatmap"))
{
use_flatmap = true;
}
else
{
Throw<std::runtime_error>(
@@ -89,10 +83,6 @@ RelationalDatabase::init(
{
return getRWDBDatabase(app, config, jobQueue);
}
else if (use_flatmap)
{
return getFlatmapDatabase(app, config, jobQueue);
}
return std::unique_ptr<RelationalDatabase>();
}

View File

@@ -361,9 +361,7 @@ public:
boost::beast::iequals(
get(section(SECTION_RELATIONAL_DB), "backend"), "rwdb")) ||
(!section("node_db").empty() &&
(boost::beast::iequals(get(section("node_db"), "type"), "rwdb") ||
boost::beast::iequals(
get(section("node_db"), "type"), "flatmap")));
boost::beast::iequals(get(section("node_db"), "type"), "rwdb"));
// RHNOTE: memory type is not selected for here because it breaks
// tests
return isMem;

View File

@@ -45,7 +45,6 @@
namespace ripple {
namespace detail {
[[nodiscard]] std::uint64_t
getMemorySize()
{
@@ -54,7 +53,6 @@ getMemorySize()
return 0;
}
} // namespace detail
} // namespace ripple
#endif
@@ -64,7 +62,6 @@ getMemorySize()
namespace ripple {
namespace detail {
[[nodiscard]] std::uint64_t
getMemorySize()
{
@@ -73,7 +70,6 @@ getMemorySize()
return 0;
}
} // namespace detail
} // namespace ripple
@@ -85,7 +81,6 @@ getMemorySize()
namespace ripple {
namespace detail {
[[nodiscard]] std::uint64_t
getMemorySize()
{
@@ -98,13 +93,11 @@ getMemorySize()
return 0;
}
} // namespace detail
} // namespace ripple
#endif
namespace ripple {
// clang-format off
// The configurable node sizes are "tiny", "small", "medium", "large", "huge"
inline constexpr std::array<std::pair<SizedItem, std::array<int, 5>>, 13>
@@ -1007,6 +1000,23 @@ Config::loadFromString(std::string const& fileContents)
"the maximum number of allowed peers (peers_max)");
}
}
if (!RUN_STANDALONE)
{
auto db_section = section(ConfigSection::nodeDatabase());
if (auto type = get(db_section, "type", ""); type == "rwdb")
{
if (auto delete_interval = get(db_section, "online_delete", 0);
delete_interval == 0)
{
Throw<std::runtime_error>(
"RWDB (in-memory backend) requires online_delete to "
"prevent OOM "
"Exception: standalone mode (used by tests) doesn't need "
"online_delete");
}
}
}
}
boost::filesystem::path
@@ -1071,5 +1081,4 @@ setup_FeeVote(Section const& section)
}
return setup;
}
} // namespace ripple
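
With the Config.cpp validation above, a non-standalone node that selects the in-memory RWDB backend must also configure online_delete. A stanza that passes the check might look like the following (the 256-ledger interval is illustrative and mirrors the value used in the Config tests below):

[node_db]
type=rwdb
path=main
online_delete=256

Standalone mode (as used by the unit tests) is exempt, so test configs without online_delete continue to load.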

View File

@@ -1,235 +0,0 @@
#include <ripple/basics/contract.h>
#include <ripple/nodestore/Factory.h>
#include <ripple/nodestore/Manager.h>
#include <ripple/nodestore/impl/DecodedBlob.h>
#include <ripple/nodestore/impl/EncodedBlob.h>
#include <ripple/nodestore/impl/codec.h>
#include <boost/beast/core/string.hpp>
#include <boost/core/ignore_unused.hpp>
#include <boost/unordered/concurrent_flat_map.hpp>
#include <memory>
#include <mutex>
namespace ripple {
namespace NodeStore {
class FlatmapBackend : public Backend
{
private:
std::string name_;
beast::Journal journal_;
bool isOpen_{false};
struct base_uint_hasher
{
using result_type = std::size_t;
result_type
operator()(base_uint<256> const& value) const
{
return hardened_hash<>{}(value);
}
};
using DataStore = boost::unordered::concurrent_flat_map<
uint256,
std::vector<std::uint8_t>, // Store compressed blob data
base_uint_hasher>;
DataStore table_;
public:
FlatmapBackend(
size_t keyBytes,
Section const& keyValues,
beast::Journal journal)
: name_(get(keyValues, "path")), journal_(journal)
{
boost::ignore_unused(journal_);
if (name_.empty())
name_ = "node_db";
}
~FlatmapBackend() override
{
close();
}
std::string
getName() override
{
return name_;
}
void
open(bool createIfMissing) override
{
if (isOpen_)
Throw<std::runtime_error>("already open");
isOpen_ = true;
}
bool
isOpen() override
{
return isOpen_;
}
void
close() override
{
table_.clear();
isOpen_ = false;
}
Status
fetch(void const* key, std::shared_ptr<NodeObject>* pObject) override
{
if (!isOpen_)
return notFound;
uint256 const hash(uint256::fromVoid(key));
bool found = table_.visit(hash, [&](const auto& key_value_pair) {
nudb::detail::buffer bf;
auto const result = nodeobject_decompress(
key_value_pair.second.data(), key_value_pair.second.size(), bf);
DecodedBlob decoded(hash.data(), result.first, result.second);
if (!decoded.wasOk())
{
*pObject = nullptr;
return;
}
*pObject = decoded.createObject();
});
return found ? (*pObject ? ok : dataCorrupt) : notFound;
}
std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
fetchBatch(std::vector<uint256 const*> const& hashes) override
{
std::vector<std::shared_ptr<NodeObject>> results;
results.reserve(hashes.size());
for (auto const& h : hashes)
{
std::shared_ptr<NodeObject> nObj;
Status status = fetch(h->begin(), &nObj);
if (status != ok)
results.push_back({});
else
results.push_back(nObj);
}
return {results, ok};
}
void
store(std::shared_ptr<NodeObject> const& object) override
{
if (!isOpen_)
return;
if (!object)
return;
EncodedBlob encoded(object);
nudb::detail::buffer bf;
auto const result =
nodeobject_compress(encoded.getData(), encoded.getSize(), bf);
std::vector<std::uint8_t> compressed(
static_cast<const std::uint8_t*>(result.first),
static_cast<const std::uint8_t*>(result.first) + result.second);
table_.insert_or_assign(object->getHash(), std::move(compressed));
}
void
storeBatch(Batch const& batch) override
{
for (auto const& e : batch)
store(e);
}
void
sync() override
{
}
void
for_each(std::function<void(std::shared_ptr<NodeObject>)> f) override
{
if (!isOpen_)
return;
table_.visit_all([&f](const auto& entry) {
nudb::detail::buffer bf;
auto const result = nodeobject_decompress(
entry.second.data(), entry.second.size(), bf);
DecodedBlob decoded(
entry.first.data(), result.first, result.second);
if (decoded.wasOk())
f(decoded.createObject());
});
}
int
getWriteLoad() override
{
return 0;
}
void
setDeletePath() override
{
close();
}
int
fdRequired() const override
{
return 0;
}
private:
size_t
size() const
{
return table_.size();
}
};
class FlatmapFactory : public Factory
{
public:
FlatmapFactory()
{
Manager::instance().insert(*this);
}
~FlatmapFactory() override
{
Manager::instance().erase(*this);
}
std::string
getName() const override
{
return "Flatmap";
}
std::unique_ptr<Backend>
createInstance(
size_t keyBytes,
Section const& keyValues,
std::size_t burstSize,
Scheduler& scheduler,
beast::Journal journal) override
{
return std::make_unique<FlatmapBackend>(keyBytes, keyValues, journal);
}
};
static FlatmapFactory flatmapFactory;
} // namespace NodeStore
} // namespace ripple

View File

@@ -216,6 +216,10 @@ public:
}
BEAST_EXPECT(store.getLastRotated() == lastRotated);
SQLiteDatabase* const db =
dynamic_cast<SQLiteDatabase*>(&env.app().getRelationalDatabase());
BEAST_EXPECT(*db->getTransactionsMinLedgerSeq() == 3);
for (auto i = 3; i < deleteInterval + lastRotated; ++i)
{
ledgers.emplace(

View File

@@ -1206,6 +1206,97 @@ r.ripple.com:51235
}
}
void
testRWDBOnlineDelete()
{
testcase("RWDB online_delete validation");
// Test 1: RWDB without online_delete in standalone mode (should
// succeed)
{
Config c;
std::string toLoad =
"[node_db]\n"
"type=rwdb\n"
"path=main\n";
c.setupControl(true, true, true); // standalone = true
try
{
c.loadFromString(toLoad);
pass(); // Should succeed
}
catch (std::runtime_error const& e)
{
fail("Should not throw in standalone mode");
}
}
// Test 2: RWDB without online_delete NOT in standalone mode (should
// throw)
{
Config c;
std::string toLoad =
"[node_db]\n"
"type=rwdb\n"
"path=main\n";
c.setupControl(true, true, false); // standalone = false
try
{
c.loadFromString(toLoad);
fail("Expected exception for RWDB without online_delete");
}
catch (std::runtime_error const& e)
{
BEAST_EXPECT(
std::string(e.what()).find(
"RWDB (in-memory backend) requires online_delete") !=
std::string::npos);
pass();
}
}
// Test 3: RWDB with online_delete NOT in standalone mode (should
// succeed)
{
Config c;
std::string toLoad =
"[node_db]\n"
"type=rwdb\n"
"path=main\n"
"online_delete=256\n";
c.setupControl(true, true, false); // standalone = false
try
{
c.loadFromString(toLoad);
pass(); // Should succeed
}
catch (std::runtime_error const& e)
{
fail("Should not throw when online_delete is configured");
}
}
// Test 4: Non-RWDB without online_delete NOT in standalone mode (should
// succeed)
{
Config c;
std::string toLoad =
"[node_db]\n"
"type=NuDB\n"
"path=main\n";
c.setupControl(true, true, false); // standalone = false
try
{
c.loadFromString(toLoad);
pass(); // Should succeed
}
catch (std::runtime_error const& e)
{
fail("Should not throw for non-RWDB backends");
}
}
}
void
testOverlay()
{
@@ -1295,6 +1386,7 @@ r.ripple.com:51235
testComments();
testGetters();
testAmendment();
testRWDBOnlineDelete();
testOverlay();
testNetworkID();
}

View File

@@ -0,0 +1,756 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2025 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/app/rdb/RelationalDatabase.h>
#include <ripple/app/rdb/backend/SQLiteDatabase.h>
#include <ripple/core/ConfigSections.h>
#include <boost/filesystem.hpp>
#include <chrono>
#include <test/jtx.h>
#include <test/jtx/envconfig.h>
namespace ripple {
namespace test {
class RelationalDatabase_test : public beast::unit_test::suite
{
private:
// Helper to get SQLiteDatabase* (works for both SQLite and RWDB since RWDB
// inherits from SQLiteDatabase)
static SQLiteDatabase*
getInterface(Application& app)
{
return dynamic_cast<SQLiteDatabase*>(&app.getRelationalDatabase());
}
static SQLiteDatabase*
getInterface(RelationalDatabase& db)
{
return dynamic_cast<SQLiteDatabase*>(&db);
}
static std::unique_ptr<Config>
makeConfig(std::string const& backend)
{
auto config = test::jtx::envconfig();
// Sqlite backend doesn't need a database_path as it will just use
// in-memory databases when in standalone mode anyway.
config->overwrite(SECTION_RELATIONAL_DB, "backend", backend);
return config;
}
public:
RelationalDatabase_test() = default;
void
testBasicInitialization(
std::string const& backend,
std::unique_ptr<Config> config)
{
testcase("Basic initialization and empty database - " + backend);
using namespace test::jtx;
Env env(*this, std::move(config));
auto& db = env.app().getRelationalDatabase();
// Test empty database state
BEAST_EXPECT(db.getMinLedgerSeq() == 2);
BEAST_EXPECT(db.getMaxLedgerSeq() == 2);
BEAST_EXPECT(db.getNewestLedgerInfo()->seq == 2);
auto* sqliteDb = getInterface(db);
BEAST_EXPECT(sqliteDb != nullptr);
if (sqliteDb)
{
BEAST_EXPECT(!sqliteDb->getTransactionsMinLedgerSeq().has_value());
BEAST_EXPECT(
!sqliteDb->getAccountTransactionsMinLedgerSeq().has_value());
auto ledgerCount = sqliteDb->getLedgerCountMinMax();
BEAST_EXPECT(ledgerCount.numberOfRows == 1);
BEAST_EXPECT(ledgerCount.minLedgerSequence == 2);
BEAST_EXPECT(ledgerCount.maxLedgerSequence == 2);
}
}
void
testLedgerSequenceOperations(
std::string const& backend,
std::unique_ptr<Config> config)
{
testcase("Ledger sequence operations - " + backend);
using namespace test::jtx;
config->LEDGER_HISTORY = 1000;
Env env(*this, std::move(config));
auto& db = env.app().getRelationalDatabase();
// Create initial ledger
Account alice("alice");
env.fund(XRP(10000), alice);
env.close();
// Test basic sequence operations
auto minSeq = db.getMinLedgerSeq();
auto maxSeq = db.getMaxLedgerSeq();
BEAST_EXPECT(minSeq.has_value());
BEAST_EXPECT(maxSeq.has_value());
BEAST_EXPECT(*minSeq == 2);
BEAST_EXPECT(*maxSeq == 3);
// Create more ledgers
env(pay(alice, Account("bob"), XRP(1000)));
env.close();
env(pay(alice, Account("carol"), XRP(500)));
env.close();
// Verify sequence updates
minSeq = db.getMinLedgerSeq();
maxSeq = db.getMaxLedgerSeq();
BEAST_EXPECT(*minSeq == 2);
BEAST_EXPECT(*maxSeq == 5);
auto* sqliteDb = getInterface(db);
if (sqliteDb)
{
auto ledgerCount = sqliteDb->getLedgerCountMinMax();
BEAST_EXPECT(ledgerCount.numberOfRows == 4);
BEAST_EXPECT(ledgerCount.minLedgerSequence == 2);
BEAST_EXPECT(ledgerCount.maxLedgerSequence == 5);
}
}
void
testLedgerInfoOperations(
std::string const& backend,
std::unique_ptr<Config> config)
{
testcase("Ledger info retrieval operations - " + backend);
using namespace test::jtx;
config->LEDGER_HISTORY = 1000;
Env env(*this, std::move(config));
auto* db = getInterface(env.app());
Account alice("alice");
env.fund(XRP(10000), alice);
env.close();
// Test getNewestLedgerInfo
auto newestLedger = db->getNewestLedgerInfo();
BEAST_EXPECT(newestLedger.has_value());
BEAST_EXPECT(newestLedger->seq == 3);
// Test getLedgerInfoByIndex
auto ledgerByIndex = db->getLedgerInfoByIndex(3);
BEAST_EXPECT(ledgerByIndex.has_value());
BEAST_EXPECT(ledgerByIndex->seq == 3);
BEAST_EXPECT(ledgerByIndex->hash == newestLedger->hash);
// Test getLedgerInfoByHash
auto ledgerByHash = db->getLedgerInfoByHash(newestLedger->hash);
BEAST_EXPECT(ledgerByHash.has_value());
BEAST_EXPECT(ledgerByHash->seq == 3);
BEAST_EXPECT(ledgerByHash->hash == newestLedger->hash);
// Test getLimitedOldestLedgerInfo
auto oldestLedger = db->getLimitedOldestLedgerInfo(2);
BEAST_EXPECT(oldestLedger.has_value());
BEAST_EXPECT(oldestLedger->seq == 2);
// Test getLimitedNewestLedgerInfo
auto limitedNewest = db->getLimitedNewestLedgerInfo(2);
BEAST_EXPECT(limitedNewest.has_value());
BEAST_EXPECT(limitedNewest->seq == 3);
// Test invalid queries
auto invalidLedger = db->getLedgerInfoByIndex(999);
BEAST_EXPECT(!invalidLedger.has_value());
uint256 invalidHash;
auto invalidHashLedger = db->getLedgerInfoByHash(invalidHash);
BEAST_EXPECT(!invalidHashLedger.has_value());
}
void
testHashOperations(
std::string const& backend,
std::unique_ptr<Config> config)
{
testcase("Hash retrieval operations - " + backend);
using namespace test::jtx;
config->LEDGER_HISTORY = 1000;
Env env(*this, std::move(config));
auto& db = env.app().getRelationalDatabase();
Account alice("alice");
env.fund(XRP(10000), alice);
env.close();
env(pay(alice, Account("bob"), XRP(1000)));
env.close();
// Test getHashByIndex
auto hash1 = db.getHashByIndex(3);
auto hash2 = db.getHashByIndex(4);
BEAST_EXPECT(hash1 != uint256());
BEAST_EXPECT(hash2 != uint256());
BEAST_EXPECT(hash1 != hash2);
// Test getHashesByIndex (single)
auto hashPair = db.getHashesByIndex(4);
BEAST_EXPECT(hashPair.has_value());
BEAST_EXPECT(hashPair->ledgerHash == hash2);
BEAST_EXPECT(hashPair->parentHash == hash1);
// Test getHashesByIndex (range)
auto hashRange = db.getHashesByIndex(3, 4);
BEAST_EXPECT(hashRange.size() == 2);
BEAST_EXPECT(hashRange[3].ledgerHash == hash1);
BEAST_EXPECT(hashRange[4].ledgerHash == hash2);
BEAST_EXPECT(hashRange[4].parentHash == hash1);
// Test invalid hash queries
auto invalidHash = db.getHashByIndex(999);
BEAST_EXPECT(invalidHash == uint256());
auto invalidHashPair = db.getHashesByIndex(999);
BEAST_EXPECT(!invalidHashPair.has_value());
auto emptyRange = db.getHashesByIndex(10, 5); // max < min
BEAST_EXPECT(emptyRange.empty());
}
void
testTransactionOperations(
std::string const& backend,
std::unique_ptr<Config> config)
{
testcase("Transaction storage and retrieval - " + backend);
using namespace test::jtx;
config->LEDGER_HISTORY = 1000;
Env env(*this, std::move(config));
auto& db = env.app().getRelationalDatabase();
Account alice("alice");
Account bob("bob");
env.fund(XRP(10000), alice, bob);
env.close();
auto* sqliteDb = getInterface(db);
BEAST_EXPECT(sqliteDb != nullptr);
if (!sqliteDb)
return;
// Test initial transaction counts after funding
auto initialTxCount = sqliteDb->getTransactionCount();
auto initialAcctTxCount = sqliteDb->getAccountTransactionCount();
BEAST_EXPECT(initialTxCount == 4);
BEAST_EXPECT(initialAcctTxCount == 6);
// Create transactions
env(pay(alice, bob, XRP(1000)));
env.close();
env(pay(bob, alice, XRP(500)));
env.close();
// Test transaction counts after creation
auto txCount = sqliteDb->getTransactionCount();
auto acctTxCount = sqliteDb->getAccountTransactionCount();
BEAST_EXPECT(txCount == 6);
BEAST_EXPECT(acctTxCount == 10);
// Test transaction retrieval
uint256 invalidTxId;
error_code_i ec;
auto invalidTxResult =
sqliteDb->getTransaction(invalidTxId, std::nullopt, ec);
BEAST_EXPECT(std::holds_alternative<TxSearched>(invalidTxResult));
// Test transaction history
auto txHistory = db.getTxHistory(0);
BEAST_EXPECT(!txHistory.empty());
BEAST_EXPECT(txHistory.size() == 6);
// Test with valid transaction range
auto minSeq = sqliteDb->getTransactionsMinLedgerSeq();
auto maxSeq = db.getMaxLedgerSeq();
if (minSeq && maxSeq)
{
ClosedInterval<std::uint32_t> range(*minSeq, *maxSeq);
auto rangeResult = sqliteDb->getTransaction(invalidTxId, range, ec);
auto searched = std::get<TxSearched>(rangeResult);
BEAST_EXPECT(
searched == TxSearched::all || searched == TxSearched::some);
}
}
void
testAccountTransactionOperations(
std::string const& backend,
std::unique_ptr<Config> config)
{
testcase("Account transaction operations - " + backend);
using namespace test::jtx;
config->LEDGER_HISTORY = 1000;
Env env(*this, std::move(config));
auto& db = env.app().getRelationalDatabase();
Account alice("alice");
Account bob("bob");
Account carol("carol");
env.fund(XRP(10000), alice, bob, carol);
env.close();
auto* sqliteDb = getInterface(db);
BEAST_EXPECT(sqliteDb != nullptr);
if (!sqliteDb)
return;
// Create multiple transactions involving alice
env(pay(alice, bob, XRP(1000)));
env.close();
env(pay(bob, alice, XRP(500)));
env.close();
env(pay(alice, carol, XRP(250)));
env.close();
auto minSeq = db.getMinLedgerSeq();
auto maxSeq = db.getMaxLedgerSeq();
if (!minSeq || !maxSeq)
return;
// Test getOldestAccountTxs
RelationalDatabase::AccountTxOptions options{
alice.id(), *minSeq, *maxSeq, 0, 10, false};
auto oldestTxs = sqliteDb->getOldestAccountTxs(options);
BEAST_EXPECT(oldestTxs.size() == 5);
// Test getNewestAccountTxs
auto newestTxs = sqliteDb->getNewestAccountTxs(options);
BEAST_EXPECT(newestTxs.size() == 5);
// Test binary format versions
auto oldestTxsB = sqliteDb->getOldestAccountTxsB(options);
BEAST_EXPECT(oldestTxsB.size() == 5);
auto newestTxsB = sqliteDb->getNewestAccountTxsB(options);
BEAST_EXPECT(newestTxsB.size() == 5);
// Test with limit
options.limit = 1;
auto limitedTxs = sqliteDb->getOldestAccountTxs(options);
BEAST_EXPECT(limitedTxs.size() == 1);
// Test with offset
options.limit = 10;
options.offset = 1;
auto offsetTxs = sqliteDb->getOldestAccountTxs(options);
BEAST_EXPECT(offsetTxs.size() == 4);
// Test with invalid account
{
Account invalidAccount("invalid");
RelationalDatabase::AccountTxOptions invalidOptions{
invalidAccount.id(), *minSeq, *maxSeq, 0, 10, false};
auto invalidAccountTxs =
sqliteDb->getOldestAccountTxs(invalidOptions);
BEAST_EXPECT(invalidAccountTxs.empty());
}
}
void
testAccountTransactionPaging(
std::string const& backend,
std::unique_ptr<Config> config)
{
testcase("Account transaction paging operations - " + backend);
using namespace test::jtx;
config->LEDGER_HISTORY = 1000;
Env env(*this, std::move(config));
auto& db = env.app().getRelationalDatabase();
Account alice("alice");
Account bob("bob");
env.fund(XRP(10000), alice, bob);
env.close();
auto* sqliteDb = getInterface(db);
BEAST_EXPECT(sqliteDb != nullptr);
if (!sqliteDb)
return;
// Create multiple transactions for paging
for (int i = 0; i < 5; ++i)
{
env(pay(alice, bob, XRP(100 + i)));
env.close();
}
auto minSeq = db.getMinLedgerSeq();
auto maxSeq = db.getMaxLedgerSeq();
if (!minSeq || !maxSeq)
return;
RelationalDatabase::AccountTxPageOptions pageOptions{
alice.id(), *minSeq, *maxSeq, std::nullopt, 2, false};
// Test oldestAccountTxPage
auto [oldestPage, oldestMarker] =
sqliteDb->oldestAccountTxPage(pageOptions);
BEAST_EXPECT(oldestPage.size() == 2);
BEAST_EXPECT(oldestMarker.has_value() == true);
// Test newestAccountTxPage
auto [newestPage, newestMarker] =
sqliteDb->newestAccountTxPage(pageOptions);
BEAST_EXPECT(newestPage.size() == 2);
BEAST_EXPECT(newestMarker.has_value() == true);
// Test binary versions
auto [oldestPageB, oldestMarkerB] =
sqliteDb->oldestAccountTxPageB(pageOptions);
BEAST_EXPECT(oldestPageB.size() == 2);
auto [newestPageB, newestMarkerB] =
sqliteDb->newestAccountTxPageB(pageOptions);
BEAST_EXPECT(newestPageB.size() == 2);
// Test with marker continuation
if (oldestMarker.has_value())
{
pageOptions.marker = oldestMarker;
auto [continuedPage, continuedMarker] =
sqliteDb->oldestAccountTxPage(pageOptions);
BEAST_EXPECT(continuedPage.size() == 2);
}
}
void
testDeletionOperations(
std::string const& backend,
std::unique_ptr<Config> config)
{
testcase("Deletion operations - " + backend);
using namespace test::jtx;
config->LEDGER_HISTORY = 1000;
Env env(*this, std::move(config));
auto& db = env.app().getRelationalDatabase();
Account alice("alice");
Account bob("bob");
env.fund(XRP(10000), alice, bob);
env.close();
auto* sqliteDb = getInterface(db);
BEAST_EXPECT(sqliteDb != nullptr);
if (!sqliteDb)
return;
// Create multiple ledgers and transactions
for (int i = 0; i < 3; ++i)
{
env(pay(alice, bob, XRP(100 + i)));
env.close();
}
auto initialTxCount = sqliteDb->getTransactionCount();
BEAST_EXPECT(initialTxCount == 7);
auto initialAcctTxCount = sqliteDb->getAccountTransactionCount();
BEAST_EXPECT(initialAcctTxCount == 12);
auto initialLedgerCount = sqliteDb->getLedgerCountMinMax();
BEAST_EXPECT(initialLedgerCount.numberOfRows == 5);
auto maxSeq = db.getMaxLedgerSeq();
if (!maxSeq || *maxSeq <= 2)
return;
// Test deleteTransactionByLedgerSeq
sqliteDb->deleteTransactionByLedgerSeq(*maxSeq);
auto txCountAfterDelete = sqliteDb->getTransactionCount();
BEAST_EXPECT(txCountAfterDelete == 6);
// Test deleteTransactionsBeforeLedgerSeq
sqliteDb->deleteTransactionsBeforeLedgerSeq(*maxSeq - 1);
auto txCountAfterBulkDelete = sqliteDb->getTransactionCount();
BEAST_EXPECT(txCountAfterBulkDelete == 1);
// Test deleteAccountTransactionsBeforeLedgerSeq
sqliteDb->deleteAccountTransactionsBeforeLedgerSeq(*maxSeq - 1);
auto acctTxCountAfterDelete = sqliteDb->getAccountTransactionCount();
BEAST_EXPECT(acctTxCountAfterDelete == 4);
// Test deleteBeforeLedgerSeq
auto minSeq = db.getMinLedgerSeq();
if (minSeq)
{
sqliteDb->deleteBeforeLedgerSeq(*minSeq + 1);
auto ledgerCountAfterDelete = sqliteDb->getLedgerCountMinMax();
BEAST_EXPECT(ledgerCountAfterDelete.numberOfRows == 4);
}
}
void
testDatabaseSpaceOperations(
std::string const& backend,
std::unique_ptr<Config> config)
{
testcase("Database space and size operations - " + backend);
using namespace test::jtx;
Env env(*this, std::move(config));
auto& db = env.app().getRelationalDatabase();
auto* sqliteDb = getInterface(db);
BEAST_EXPECT(sqliteDb != nullptr);
if (!sqliteDb)
return;
// Test size queries
auto allKB = sqliteDb->getKBUsedAll();
auto ledgerKB = sqliteDb->getKBUsedLedger();
auto txKB = sqliteDb->getKBUsedTransaction();
if (backend == "rwdb")
{
// RWDB reports actual data memory (rounded down to KB)
// Initially should be < 1KB, so rounds down to 0
// Note: These are 0 due to rounding, not because there's literally
// no data
BEAST_EXPECT(allKB == 0); // < 1024 bytes rounds to 0 KB
BEAST_EXPECT(ledgerKB == 0); // < 1024 bytes rounds to 0 KB
BEAST_EXPECT(txKB == 0); // < 1024 bytes rounds to 0 KB
}
else
{
// SQLite reports cache/engine memory, which has overhead even when
// empty. Just verify the functions return reasonable values
BEAST_EXPECT(allKB >= 0);
BEAST_EXPECT(ledgerKB >= 0);
BEAST_EXPECT(txKB >= 0);
}
// Create some data and verify size increases
Account alice("alice");
env.fund(XRP(10000), alice);
env.close();
auto newAllKB = sqliteDb->getKBUsedAll();
auto newLedgerKB = sqliteDb->getKBUsedLedger();
auto newTxKB = sqliteDb->getKBUsedTransaction();
if (backend == "rwdb")
{
// RWDB reports actual data memory
// After adding data, should see some increase
BEAST_EXPECT(newAllKB >= 1); // Should have at least 1KB total
BEAST_EXPECT(
newTxKB >= 0); // Transactions added (might still be < 1KB)
BEAST_EXPECT(
newLedgerKB >= 0); // Ledger data (might still be < 1KB)
// Key relationships
BEAST_EXPECT(newAllKB >= newLedgerKB + newTxKB); // Total >= parts
BEAST_EXPECT(newAllKB >= allKB); // Should increase or stay same
BEAST_EXPECT(newTxKB >= txKB); // Should increase or stay same
}
else
{
// SQLite: Memory usage should not decrease after adding data
// Values might increase due to cache growth
BEAST_EXPECT(newAllKB >= allKB);
BEAST_EXPECT(newLedgerKB >= ledgerKB);
BEAST_EXPECT(newTxKB >= txKB);
// SQLite's getKBUsedAll is global memory, should be >= parts
BEAST_EXPECT(newAllKB >= newLedgerKB);
BEAST_EXPECT(newAllKB >= newTxKB);
}
// Test space availability
// Both SQLite and RWDB use in-memory databases in standalone mode,
// so file-based space checks don't apply to either backend.
// Skip these checks for both.
// if (backend == "rwdb")
// {
// BEAST_EXPECT(db.ledgerDbHasSpace(env.app().config()));
// BEAST_EXPECT(db.transactionDbHasSpace(env.app().config()));
// }
// Test database closure operations (should not throw)
try
{
sqliteDb->closeLedgerDB();
sqliteDb->closeTransactionDB();
}
catch (std::exception const& e)
{
BEAST_EXPECT(false); // Should not throw
}
}
void
testTransactionMinLedgerSeq(
std::string const& backend,
std::unique_ptr<Config> config)
{
testcase("Transaction minimum ledger sequence tracking - " + backend);
using namespace test::jtx;
config->LEDGER_HISTORY = 1000;
Env env(*this, std::move(config));
auto& db = env.app().getRelationalDatabase();
auto* sqliteDb = getInterface(db);
BEAST_EXPECT(sqliteDb != nullptr);
if (!sqliteDb)
return;
// Initially should have no transactions
BEAST_EXPECT(!sqliteDb->getTransactionsMinLedgerSeq().has_value());
BEAST_EXPECT(
!sqliteDb->getAccountTransactionsMinLedgerSeq().has_value());
Account alice("alice");
Account bob("bob");
env.fund(XRP(10000), alice, bob);
env.close();
// Create first transaction
env(pay(alice, bob, XRP(1000)));
env.close();
auto txMinSeq = sqliteDb->getTransactionsMinLedgerSeq();
auto acctTxMinSeq = sqliteDb->getAccountTransactionsMinLedgerSeq();
BEAST_EXPECT(txMinSeq.has_value());
BEAST_EXPECT(acctTxMinSeq.has_value());
BEAST_EXPECT(*txMinSeq == 3);
BEAST_EXPECT(*acctTxMinSeq == 3);
// Create more transactions
env(pay(bob, alice, XRP(500)));
env.close();
env(pay(alice, bob, XRP(250)));
env.close();
// Min sequences should remain the same (first transaction ledger)
auto newTxMinSeq = sqliteDb->getTransactionsMinLedgerSeq();
auto newAcctTxMinSeq = sqliteDb->getAccountTransactionsMinLedgerSeq();
BEAST_EXPECT(newTxMinSeq == txMinSeq);
BEAST_EXPECT(newAcctTxMinSeq == acctTxMinSeq);
}
std::vector<std::string> static getBackends(std::string const& unittest_arg)
{
// Valid backends
static const std::set<std::string> validBackends = {"sqlite", "rwdb"};
// Default to all valid backends if no arg specified
if (unittest_arg.empty())
return {validBackends.begin(), validBackends.end()};
std::set<std::string> backends; // Use set to avoid duplicates
std::stringstream ss(unittest_arg);
std::string backend;
while (std::getline(ss, backend, ','))
{
if (!backend.empty())
{
// Validate backend
if (validBackends.contains(backend))
{
backends.insert(backend);
}
}
}
// Return as vector (sorted due to set)
return {backends.begin(), backends.end()};
}
void
run() override
{
auto backends = getBackends(arg());
if (backends.empty())
{
fail("no valid backend specified: '" + arg() + "'");
}
for (auto const& backend : backends)
{
testBasicInitialization(backend, makeConfig(backend));
testLedgerSequenceOperations(backend, makeConfig(backend));
testLedgerInfoOperations(backend, makeConfig(backend));
testHashOperations(backend, makeConfig(backend));
testTransactionOperations(backend, makeConfig(backend));
testAccountTransactionOperations(backend, makeConfig(backend));
testAccountTransactionPaging(backend, makeConfig(backend));
testDeletionOperations(backend, makeConfig(backend));
testDatabaseSpaceOperations(backend, makeConfig(backend));
testTransactionMinLedgerSeq(backend, makeConfig(backend));
}
}
};
BEAST_DEFINE_TESTSUITE(RelationalDatabase, rdb, ripple);
} // namespace test
} // namespace ripple

View File

@@ -19,9 +19,9 @@
#ifndef TEST_UNIT_TEST_SUITE_JOURNAL_H
#define TEST_UNIT_TEST_SUITE_JOURNAL_H
#include <ripple/beast/unit_test.h>
#include <ripple/beast/utility/Journal.h>
#include <mutex>
namespace ripple {
namespace test {
@@ -82,7 +82,13 @@ SuiteJournalSink::write(
// Only write the string if the level at least equals the threshold.
if (level >= threshold())
{
// std::endl flushes → sync() → str()/str("") race in shared buffer →
// crashes
static std::mutex log_mutex;
std::lock_guard lock(log_mutex);
suite_.log << s << partition_ << text << std::endl;
}
}
class SuiteJournal