Mirror of https://github.com/Xahau/xahaud.git (synced 2025-11-18 09:35:49 +00:00)

Compare commits: nd-migrate...fix-online (9 commits)
Commits: 4089b7c4ba, 61b364ec82, 7fae6c3eb7, 70dd2a0f0e, d15063bca4, 998ae5535b, 4c41d32276, a2137d5436, a84d72a7f7
.github/actions/xahau-configure-ccache/action.yml

@@ -14,18 +14,6 @@ inputs:
    description: 'How to check compiler for changes'
    required: false
    default: 'content'
  is_main_branch:
    description: 'Whether the current branch is the main branch'
    required: false
    default: 'false'
  main_cache_dir:
    description: 'Path to the main branch cache directory'
    required: false
    default: '~/.ccache-main'
  current_cache_dir:
    description: 'Path to the current branch cache directory'
    required: false
    default: '~/.ccache-current'

runs:
  using: 'composite'
@@ -33,31 +21,11 @@ runs:
  - name: Configure ccache
    shell: bash
    run: |
      # Create cache directories
      mkdir -p ${{ inputs.main_cache_dir }} ${{ inputs.current_cache_dir }}

      # Set compiler check globally
      ccache -o compiler_check=${{ inputs.compiler_check }}

      # Use a single config file location
      mkdir -p ~/.ccache
      export CONF_PATH="$HOME/.ccache/ccache.conf"

      # Apply common settings
      export CONF_PATH="${CCACHE_CONFIGPATH:-${CCACHE_DIR:-$HOME/.ccache}/ccache.conf}"
      mkdir -p $(dirname "$CONF_PATH")
      echo "max_size = ${{ inputs.max_size }}" > "$CONF_PATH"
      echo "hash_dir = ${{ inputs.hash_dir }}" >> "$CONF_PATH"
      echo "compiler_check = ${{ inputs.compiler_check }}" >> "$CONF_PATH"

      if [ "${{ inputs.is_main_branch }}" == "true" ]; then
        # Main branch: use main branch cache
        ccache --set-config=cache_dir="${{ inputs.main_cache_dir }}"
        echo "CCACHE_DIR=${{ inputs.main_cache_dir }}" >> $GITHUB_ENV
      else
        # Feature branch: use current branch cache with main as secondary
        ccache --set-config=cache_dir="${{ inputs.current_cache_dir }}"
        ccache --set-config=secondary_storage="file:${{ inputs.main_cache_dir }}"
        echo "CCACHE_DIR=${{ inputs.current_cache_dir }}" >> $GITHUB_ENV
      fi

      ccache -p  # Print config for verification
      ccache -z  # Zero statistics before the build
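The net effect of the rewritten step is one shared ccache configuration file instead of per-branch cache directories. A minimal sketch of verifying the result locally, assuming the defaults shown above (max_size 2G, hash_dir true, compiler_check content) and no CCACHE_CONFIGPATH override:

```
# Hedged sketch: inspect the config the step writes (paths per the step above)
CONF_PATH="${CCACHE_CONFIGPATH:-${CCACHE_DIR:-$HOME/.ccache}/ccache.conf}"
cat "$CONF_PATH"
# max_size = 2G
# hash_dir = true
# compiler_check = content
ccache -p   # print the merged configuration, as the step itself does
```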
.github/actions/xahau-ga-build/action.yml (36 lines changed)
@@ -48,28 +48,18 @@ runs:
      SAFE_BRANCH=$(echo "${{ github.ref_name }}" | tr -c 'a-zA-Z0-9_.-' '-')
      echo "name=${SAFE_BRANCH}" >> $GITHUB_OUTPUT

  - name: Restore ccache directory for default branch
  - name: Restore ccache directory
    if: inputs.ccache_enabled == 'true'
    id: ccache-restore
    uses: actions/cache/restore@v4
    with:
      path: ~/.ccache-main
      key: ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-${{ inputs.main_branch }}
      restore-keys: |
        ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-
        ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-

  - name: Restore ccache directory for current branch
    if: inputs.ccache_enabled == 'true' && steps.safe-branch.outputs.name != inputs.main_branch
    id: ccache-restore-current-branch
    uses: actions/cache/restore@v4
    with:
      path: ~/.ccache-current
      path: ~/.ccache
      key: ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-${{ steps.safe-branch.outputs.name }}
      restore-keys: |
        ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-${{ inputs.main_branch }}
        ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ inputs.configuration }}-
        ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-
        ${{ runner.os }}-ccache-v${{ inputs.cache_version }}-

  - name: Configure project
    shell: bash
@@ -86,7 +76,6 @@ runs:
        export CXX="${{ inputs.cxx }}"
      fi

      # Configure ccache launcher args
      CCACHE_ARGS=""
      if [ "${{ inputs.ccache_enabled }}" = "true" ]; then
@@ -94,10 +83,6 @@ runs:
      fi

      # Run CMake configure
      # Note: conanfile.py hardcodes 'build/generators' as the output path.
      # If we're in a 'build' folder, Conan detects this and uses just 'generators/'
      # If we're in '.build' (non-standard), Conan adds the full 'build/generators/'
      # So we get: .build/build/generators/ with our non-standard folder name
      cmake .. \
        -G "${{ inputs.generator }}" \
        $CCACHE_ARGS \
@@ -115,16 +100,9 @@ runs:
    shell: bash
    run: ccache -s

  - name: Save ccache directory for default branch
    if: always() && inputs.ccache_enabled == 'true' && steps.safe-branch.outputs.name == inputs.main_branch
  - name: Save ccache directory
    if: inputs.ccache_enabled == 'true'
    uses: actions/cache/save@v4
    with:
      path: ~/.ccache-main
      key: ${{ steps.ccache-restore.outputs.cache-primary-key }}

  - name: Save ccache directory for current branch
    if: always() && inputs.ccache_enabled == 'true' && steps.safe-branch.outputs.name != inputs.main_branch
    uses: actions/cache/save@v4
    with:
      path: ~/.ccache-current
      key: ${{ steps.ccache-restore-current-branch.outputs.cache-primary-key }}
      path: ~/.ccache
      key: ${{ steps.ccache-restore.outputs.cache-primary-key }}
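For orientation, actions/cache tries the exact `key` first and then each `restore-keys` prefix in order, so a feature branch falls back to the main-branch cache before the broader prefixes. An illustrative resolution with made-up values (runner Linux, cache_version 1, compiler gcc-11, configuration Debug, main branch dev):

```
# Illustrative key resolution only; real values come from the inputs above.
# 1. Linux-ccache-v1-gcc-11-Debug-my-feature     (exact key)
# 2. Linux-ccache-v1-gcc-11-Debug-dev            (main-branch fallback)
# 3. Linux-ccache-v1-gcc-11-Debug-
# 4. Linux-ccache-v1-gcc-11-
# 5. Linux-ccache-v1-
echo "actions/cache tries these keys top to bottom and restores the first hit"
```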
.github/actions/xahau-ga-dependencies/action.yml (28 lines changed)
@@ -42,26 +42,6 @@ runs:
      SAFE_BRANCH=$(echo "${{ github.ref_name }}" | tr -c 'a-zA-Z0-9_.-' '-')
      echo "name=${SAFE_BRANCH}" >> $GITHUB_OUTPUT

  - name: Check conanfile changes
    if: inputs.cache_enabled == 'true'
    id: check-conanfile-changes
    shell: bash
    run: |
      # Check if we're on the main branch
      if [ "${{ github.ref_name }}" == "${{ inputs.main_branch }}" ]; then
        echo "should-save-conan-cache=true" >> $GITHUB_OUTPUT
      else
        # Fetch main branch for comparison
        git fetch origin ${{ inputs.main_branch }}

        # Check if conanfile.txt or conanfile.py has changed compared to main branch
        if git diff --quiet origin/${{ inputs.main_branch }}..HEAD -- '**/conanfile.txt' '**/conanfile.py'; then
          echo "should-save-conan-cache=false" >> $GITHUB_OUTPUT
        else
          echo "should-save-conan-cache=true" >> $GITHUB_OUTPUT
        fi
      fi

  - name: Restore Conan cache
    if: inputs.cache_enabled == 'true'
    id: cache-restore-conan
@@ -74,13 +54,13 @@ runs:
      restore-keys: |
        ${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-${{ hashFiles('**/conanfile.txt', '**/conanfile.py') }}-
        ${{ runner.os }}-conan-v${{ inputs.cache_version }}-${{ inputs.compiler-id }}-
        ${{ runner.os }}-conan-v${{ inputs.cache_version }}-

  - name: Export custom recipes
    shell: bash
    run: |
      conan export external/snappy --version 1.1.10 --user xahaud --channel stable
      conan export external/soci --version 4.0.3 --user xahaud --channel stable
      conan export external/wasmedge --version 0.11.2 --user xahaud --channel stable
      conan export external/snappy snappy/1.1.10@xahaud/stable
      conan export external/soci soci/4.0.3@xahaud/stable

  - name: Install dependencies
    shell: bash
@@ -97,7 +77,7 @@ runs:
      ..

  - name: Save Conan cache
    if: always() && inputs.cache_enabled == 'true' && steps.cache-restore-conan.outputs.cache-hit != 'true' && steps.check-conanfile-changes.outputs.should-save-conan-cache == 'true'
    if: inputs.cache_enabled == 'true' && steps.cache-restore-conan.outputs.cache-hit != 'true'
    uses: actions/cache/save@v4
    with:
      path: |
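The two export syntaxes above are the crux of the change: Conan 2 passes the reference parts as flags, while Conan 1 takes a positional name/version@user/channel reference. Side by side, for the same recipe:

```
# Conan 1 style (the form used after this change):
conan export external/snappy snappy/1.1.10@xahaud/stable
# Conan 2 style (the form being replaced):
conan export external/snappy --version 1.1.10 --user xahaud --channel stable
```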
.github/workflows/xahau-ga-macos.yml (57 lines changed)
@@ -5,8 +5,6 @@ on:
    branches: ["dev", "candidate", "release"]
  pull_request:
    branches: ["dev", "candidate", "release"]
  schedule:
    - cron: '0 0 * * *'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
@@ -32,9 +30,9 @@ jobs:

      - name: Install Conan
        run: |
          brew install conan
          # Verify Conan 2 is installed
          conan --version
          brew install conan@1
          # Add Conan 1 to the PATH for this job
          echo "$(brew --prefix conan@1)/bin" >> $GITHUB_PATH

      - name: Install Coreutils
        run: |
@@ -60,20 +58,12 @@ jobs:

      - name: Install CMake
        run: |
          # Install CMake 3.x to match local dev environments
          # With Conan 2 and the policy args passed to CMake, newer versions
          # can have issues with dependencies that require cmake_minimum_required < 3.5
          brew uninstall cmake --ignore-dependencies 2>/dev/null || true

          # Download and install CMake 3.31.7 directly
          curl -L https://github.com/Kitware/CMake/releases/download/v3.31.7/cmake-3.31.7-macos-universal.tar.gz -o cmake.tar.gz
          tar -xzf cmake.tar.gz

          # Move the entire CMake.app to /Applications
          sudo mv cmake-3.31.7-macos-universal/CMake.app /Applications/

          echo "/Applications/CMake.app/Contents/bin" >> $GITHUB_PATH
          /Applications/CMake.app/Contents/bin/cmake --version
          if which cmake > /dev/null 2>&1; then
            echo "cmake executable exists"
            cmake --version
          else
            brew install cmake
          fi

      - name: Install ccache
        run: brew install ccache
@@ -84,7 +74,6 @@ jobs:
          max_size: 2G
          hash_dir: true
          compiler_check: content
          is_main_branch: ${{ github.ref_name == env.MAIN_BRANCH_NAME }}

      - name: Check environment
        run: |
@@ -100,30 +89,8 @@ jobs:

      - name: Configure Conan
        run: |
          # Create the default profile directory if it doesn't exist
          mkdir -p ~/.conan2/profiles

          # Detect compiler version
          COMPILER_VERSION=$(clang --version | grep -oE 'version [0-9]+' | grep -oE '[0-9]+')

          # Create profile with our specific settings
          cat > ~/.conan2/profiles/default <<EOF
          [settings]
          arch=armv8
          build_type=Release
          compiler=apple-clang
          compiler.cppstd=20
          compiler.libcxx=libc++
          compiler.version=${COMPILER_VERSION}
          os=Macos

          [conf]
          # Workaround for gRPC with newer Apple Clang
          tools.build:cxxflags=["-Wno-missing-template-arg-list-after-template-kw"]
          EOF

          # Display profile for verification
          conan profile show
          conan profile new default --detect || true  # Ignore error if profile exists
          conan profile update settings.compiler.cppstd=20 default

      - name: Install dependencies
        uses: ./.github/actions/xahau-ga-dependencies
@@ -146,4 +113,4 @@ jobs:

      - name: Test
        run: |
          ${{ env.build_dir }}/rippled --unittest --unittest-jobs $(nproc)
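A local reproduction of the job's Conan setup is straightforward; a hedged sketch using only the commands from the steps above (brew prefix paths may differ per machine):

```
brew install conan@1
export PATH="$(brew --prefix conan@1)/bin:$PATH"
conan --version                               # expect 1.x
conan profile new default --detect || true    # ignore error if profile exists
conan profile update settings.compiler.cppstd=20 default
```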
.github/workflows/xahau-ga-nix.yml (54 lines changed)
@@ -5,8 +5,6 @@ on:
    branches: ["dev", "candidate", "release"]
  pull_request:
    branches: ["dev", "candidate", "release"]
  schedule:
    - cron: '0 0 * * *'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
@@ -24,14 +22,13 @@ jobs:
        configuration: [Debug]
        include:
          - compiler: gcc
            cc: gcc-13
            cxx: g++-13
            compiler_id: gcc-13
            compiler_version: 13
            cc: gcc-11
            cxx: g++-11
            compiler_id: gcc-11
    env:
      build_dir: .build
      # Bump this number to invalidate all caches globally.
      CACHE_VERSION: 2
      CACHE_VERSION: 1
      MAIN_BRANCH_NAME: dev
    steps:
      - name: Checkout
@@ -41,8 +38,8 @@ jobs:
        run: |
          sudo apt-get update
          sudo apt-get install -y ninja-build ${{ matrix.cc }} ${{ matrix.cxx }} ccache
          # Install Conan 2
          pip install --upgrade "conan>=2.0"
          # Install specific Conan version needed
          pip install --upgrade "conan<2"

      - name: Configure ccache
        uses: ./.github/actions/xahau-configure-ccache
@@ -50,34 +47,25 @@ jobs:
          max_size: 2G
          hash_dir: true
          compiler_check: content
          is_main_branch: ${{ github.ref_name == env.MAIN_BRANCH_NAME }}

      - name: Configure Conan
        run: |
          # Create the default profile directory if it doesn't exist
          mkdir -p ~/.conan2/profiles

          # Create profile with our specific settings
          cat > ~/.conan2/profiles/default <<EOF
          [settings]
          arch=x86_64
          build_type=Release
          compiler=${{ matrix.compiler }}
          compiler.cppstd=20
          compiler.libcxx=libstdc++11
          compiler.version=${{ matrix.compiler_version }}
          os=Linux

          [buildenv]
          CC=/usr/bin/${{ matrix.cc }}
          CXX=/usr/bin/${{ matrix.cxx }}

          [conf]
          tools.build:compiler_executables={"c": "/usr/bin/${{ matrix.cc }}", "cpp": "/usr/bin/${{ matrix.cxx }}"}
          EOF
          conan profile new default --detect || true  # Ignore error if profile exists
          conan profile update settings.compiler.cppstd=20 default
          conan profile update settings.compiler=${{ matrix.compiler }} default
          conan profile update settings.compiler.libcxx=libstdc++11 default
          conan profile update env.CC=/usr/bin/${{ matrix.cc }} default
          conan profile update env.CXX=/usr/bin/${{ matrix.cxx }} default
          conan profile update conf.tools.build:compiler_executables='{"c": "/usr/bin/${{ matrix.cc }}", "cpp": "/usr/bin/${{ matrix.cxx }}"}' default

          # Set correct compiler version based on matrix.compiler
          if [ "${{ matrix.compiler }}" = "gcc" ]; then
            conan profile update settings.compiler.version=11 default
          elif [ "${{ matrix.compiler }}" = "clang" ]; then
            conan profile update settings.compiler.version=14 default
          fi
          # Display profile for verification
          conan profile show
          conan profile show default

      - name: Check environment
        run: |
@@ -132,4 +120,4 @@ jobs:
          else
            echo "Error: rippled executable not found in ${{ env.build_dir }}"
            exit 1
          fi
          fi
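After the sequence of `conan profile update` calls above, the Conan 1 default profile should end up roughly as follows for the gcc leg of the matrix (Conan 1 keeps it at ~/.conan/profiles/default; exact detected values may differ):

```
cat ~/.conan/profiles/default
# [settings]
# ...
# compiler=gcc
# compiler.version=11
# compiler.libcxx=libstdc++11
# compiler.cppstd=20
# [env]
# CC=/usr/bin/gcc-11
# CXX=/usr/bin/g++-11
```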
BUILD.md (72 lines changed)
@@ -33,7 +33,7 @@ git checkout develop
## Minimum Requirements

- [Python 3.7](https://www.python.org/downloads/)
- [Conan 2.x](https://conan.io/downloads)
- [Conan 1.55](https://conan.io/downloads.html)
- [CMake 3.16](https://cmake.org/download/)

`rippled` is written in the C++20 dialect and includes the `<concepts>` header.
@@ -65,24 +65,13 @@ can't build earlier Boost versions.
1. (Optional) If you've never used Conan, use autodetect to set up a default profile.

   ```
   conan profile detect --force
   conan profile new default --detect
   ```

2. Update the compiler settings.

   For Conan 2, you can edit the profile directly at `~/.conan2/profiles/default`,
   or use the Conan CLI. Ensure C++20 is set:

   ```
   conan profile show
   ```

   Look for `compiler.cppstd=20` in the output. If it's not set, edit the profile:

   ```
   # Edit ~/.conan2/profiles/default and ensure these settings exist:
   [settings]
   compiler.cppstd=20
   conan profile update settings.compiler.cppstd=20 default
   ```

   Linux developers will commonly have a default Conan [profile][] that compiles
@@ -91,9 +80,7 @@ can't build earlier Boost versions.
   then you will need to choose the `libstdc++11` ABI.

   ```
   # In ~/.conan2/profiles/default, ensure:
   [settings]
   compiler.libcxx=libstdc++11
   conan profile update settings.compiler.libcxx=libstdc++11 default
   ```

   On Windows, you should use the x64 native build tools.
@@ -104,9 +91,7 @@ can't build earlier Boost versions.
   architecture.

   ```
   # In ~/.conan2/profiles/default, ensure:
   [settings]
   arch=x86_64
   conan profile update settings.arch=x86_64 default
   ```

3. (Optional) If you have multiple compilers installed on your platform,
@@ -115,18 +100,16 @@ can't build earlier Boost versions.
   in the generated CMake toolchain file.

   ```
   # In ~/.conan2/profiles/default, add under [conf] section:
   [conf]
   tools.build:compiler_executables={"c": "<path>", "cpp": "<path>"}
   conan profile update 'conf.tools.build:compiler_executables={"c": "<path>", "cpp": "<path>"}' default
   ```

   For setting environment variables for dependencies:
   It should choose the compiler for dependencies as well,
   but not all of them have a Conan recipe that respects this setting (yet).
   For the rest, you can set these environment variables:

   ```
   # In ~/.conan2/profiles/default, add under [buildenv] section:
   [buildenv]
   CC=<path>
   CXX=<path>
   conan profile update env.CC=<path> default
   conan profile update env.CXX=<path> default
   ```

4. Export our [Conan recipe for Snappy](./external/snappy).
@@ -134,20 +117,14 @@ can't build earlier Boost versions.
   which allows you to statically link it with GCC, if you want.

   ```
   conan export external/snappy --version 1.1.10 --user xahaud --channel stable
   conan export external/snappy snappy/1.1.10@xahaud/stable
   ```

5. Export our [Conan recipe for SOCI](./external/soci).
   It patches their CMake to correctly import its dependencies.

   ```
   conan export external/soci --version 4.0.3 --user xahaud --channel stable
   ```

6. Export our [Conan recipe for WasmEdge](./external/wasmedge).

   ```
   conan export external/wasmedge --version 0.11.2 --user xahaud --channel stable
   conan export external/soci soci/4.0.3@xahaud/stable
   ```

### Build and Test
@@ -282,26 +259,23 @@ and can be helpful for detecting `#include` omissions.
If you have trouble building dependencies after changing Conan settings,
try removing the Conan cache.

For Conan 2:
```
rm -rf ~/.conan2/p
```

Or clear the entire Conan 2 cache:
```
conan cache clean "*"
rm -rf ~/.conan/data
```

### macOS compilation with Apple Clang 17+
### no std::result_of

If you're on macOS with Apple Clang 17 or newer, you need to add a compiler flag to work around a compilation error in gRPC dependencies.

Edit `~/.conan2/profiles/default` and add under the `[conf]` section:
If your compiler version is recent enough to have removed `std::result_of` as
part of C++20, e.g. Apple Clang 15.0, then you might need to add a preprocessor
definition to your build.

```
[conf]
tools.build:cxxflags=["-Wno-missing-template-arg-list-after-template-kw"]
conan profile update 'options.boost:extra_b2_flags="define=BOOST_ASIO_HAS_STD_INVOKE_RESULT"' default
conan profile update 'env.CFLAGS="-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"' default
conan profile update 'env.CXXFLAGS="-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"' default
conan profile update 'conf.tools.build:cflags+=["-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"]' default
conan profile update 'conf.tools.build:cxxflags+=["-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"]' default
```
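A quick way to confirm the updates landed is to look for the define in the profile; a hedged check assuming the Conan 1 profile location:

```
grep BOOST_ASIO_HAS_STD_INVOKE_RESULT ~/.conan/profiles/default
# expect matches under [env] (CFLAGS/CXXFLAGS) and the boost options line
```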
@@ -994,11 +994,6 @@ if (tests)
  subdir: resource
  #]===============================]
  src/test/resource/Logic_test.cpp
  #[===============================[
  test sources:
  subdir: rdb
  #]===============================]
  src/test/rdb/RelationalDatabase_test.cpp
  #[===============================[
  test sources:
  subdir: rpc
@@ -186,10 +186,6 @@ test.protocol > ripple.crypto
test.protocol > ripple.json
test.protocol > ripple.protocol
test.protocol > test.toplevel
test.rdb > ripple.app
test.rdb > ripple.core
test.rdb > test.jtx
test.rdb > test.toplevel
test.resource > ripple.basics
test.resource > ripple.beast
test.resource > ripple.resource
@@ -12,13 +12,6 @@ echo "-- GITHUB_REPOSITORY: $1"
echo "-- GITHUB_SHA: $2"
echo "-- GITHUB_RUN_NUMBER: $4"

# Use mounted filesystem for temp files to avoid container space limits
export TMPDIR=/io/tmp
export TEMP=/io/tmp
export TMP=/io/tmp
mkdir -p /io/tmp
echo "=== Using temp directory: /io/tmp ==="

umask 0000;

cd /io/ &&
@@ -50,17 +43,10 @@ export LDFLAGS="-static-libstdc++"
git config --global --add safe.directory /io &&
git checkout src/ripple/protocol/impl/BuildInfo.cpp &&
sed -i s/\"0.0.0\"/\"$(date +%Y).$(date +%-m).$(date +%-d)-$(git rev-parse --abbrev-ref HEAD)$(if [ -n "$4" ]; then echo "+$4"; fi)\"/g src/ripple/protocol/impl/BuildInfo.cpp &&
conan export external/snappy --version 1.1.10 --user xahaud --channel stable &&
conan export external/soci --version 4.0.3 --user xahaud --channel stable &&
conan export external/wasmedge --version 0.11.2 --user xahaud --channel stable &&
conan export external/snappy snappy/1.1.10@xahaud/stable &&
conan export external/soci soci/4.0.3@xahaud/stable &&
cd release-build &&
# Install dependencies - tool_requires in conanfile.py handles glibc 2.28 compatibility
# for build tools (protoc, grpc plugins, b2) in HBB environment
# The tool_requires('b2/5.3.2') in conanfile.py should force b2 to build from source
# with the correct toolchain, avoiding the GLIBCXX_3.4.29 issue
echo "=== Installing dependencies ===" &&
conan install .. --output-folder . --build missing --settings build_type=$BUILD_TYPE \
  -o with_wasmedge=False -o tool_requires_b2=True &&
conan install .. --output-folder . --build missing --settings build_type=$BUILD_TYPE &&
cmake .. -G Ninja \
  -DCMAKE_BUILD_TYPE=$BUILD_TYPE \
  -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
@@ -70,13 +56,10 @@ cmake .. -G Ninja \
  -Dxrpld=TRUE \
  -Dtests=TRUE &&
ccache -z &&
ninja -j $3 && echo "=== Re-running final link with verbose output ===" && rm -f rippled && ninja -v rippled &&
ninja -j $3 &&
ccache -s &&
strip -s rippled &&
mv rippled xahaud &&
echo "=== Full ldd output ===" &&
ldd xahaud &&
echo "=== Running libcheck ===" &&
libcheck xahaud &&
echo "Build host: `hostname`" > release.info &&
echo "Build date: `date`" >> release.info &&
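The sed above stamps BuildInfo.cpp with a date-and-branch version; a small sketch of the string it produces (date and branch are illustrative; $4 is the run number passed to the script):

```
# Reproduces the version string built by the sed above
ver="$(date +%Y).$(date +%-m).$(date +%-d)-$(git rev-parse --abbrev-ref HEAD)"
[ -n "$4" ] && ver="${ver}+$4"
echo "$ver"     # e.g. 2025.11.18-dev+42
```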
@@ -1063,16 +1063,14 @@
#   RWDB is recommended for Validator and Peer nodes that are not required to
#   store history.
#
#   Required keys for NuDB and RocksDB:
#   RWDB maintains its high speed regardless of the amount of history
#   stored. Online delete should NOT be used; instead, RWDB will use the
#   ledger_history config value to determine how many ledgers to keep in memory.
#
#   Required keys for NuDB, RWDB and RocksDB:
#
#   path                Location to store the database
#
#   Required keys for RWDB:
#
#   online_delete       Required. RWDB stores data in memory and will
#                       grow unbounded without online_delete. See the
#                       online_delete section below.
#
#   Required keys for Cassandra:
#
#   contact_points      IP of a node in the Cassandra cluster
@@ -1112,17 +1110,7 @@
#                       if sufficient IOPS capacity is available.
#                       Default 0.
#
#   online_delete for RWDB, NuDB and RocksDB:
#
#   online_delete       Minimum value of 256. Enable automatic purging
#                       of older ledger information. Maintain at least this
#                       number of ledger records online. Must be greater
#                       than or equal to ledger_history.
#
#                       REQUIRED for RWDB to prevent out-of-memory errors.
#                       Optional for NuDB and RocksDB.
#
#   Optional keys for NuDB and RocksDB:
#   Optional keys for NuDB or RocksDB:
#
#   earliest_seq        The default is 32570 to match the XRP ledger
#                       network's earliest allowed sequence. Alternate
@@ -1132,7 +1120,12 @@
#                       it must be defined with the same value in both
#                       sections.
#
#   online_delete       Minimum value of 256. Enable automatic purging
#                       of older ledger information. Maintain at least this
#                       number of ledger records online. Must be greater
#                       than or equal to ledger_history. If using RWDB
#                       this value is ignored.
#
#   These keys modify the behavior of online_delete, and thus are only
#   relevant if online_delete is defined and non-zero:
#
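Putting the revised guidance together, an illustrative RWDB stanza omits online_delete and relies on ledger_history for retention (values here are examples, not recommendations):

```
# Illustrative rippled.cfg fragment reflecting the text above:
cat >> rippled.cfg <<'EOF'
[node_db]
type=rwdb
path=main

[ledger_history]
256
EOF
```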
conanfile.py (103 lines changed)
@@ -21,20 +21,22 @@ class Xrpl(ConanFile):
        'static': [True, False],
        'tests': [True, False],
        'unity': [True, False],
        'with_wasmedge': [True, False],
        'tool_requires_b2': [True, False],
    }

    requires = [
        'boost/1.86.0',
        'date/3.0.1',
        'libarchive/3.6.0',
        'lz4/1.9.4',
        'lz4/1.9.3',
        'grpc/1.50.1',
        'nudb/2.0.8',
        'openssl/1.1.1u',
        'protobuf/3.21.12',
        'protobuf/3.21.9',
        'snappy/1.1.10@xahaud/stable',
        'soci/4.0.3@xahaud/stable',
        'zlib/1.3.1',
        'sqlite3/3.42.0',
        'zlib/1.2.13',
        'wasmedge/0.11.2',
    ]

    default_options = {
@@ -48,44 +50,42 @@ class Xrpl(ConanFile):
        'static': True,
        'tests': True,
        'unity': False,
        'with_wasmedge': True,
        'tool_requires_b2': False,

        'cassandra-cpp-driver/*:shared': False,
        'date/*:header_only': True,
        'grpc/*:shared': False,
        'grpc/*:secure': True,
        'libarchive/*:shared': False,
        'libarchive/*:with_acl': False,
        'libarchive/*:with_bzip2': False,
        'libarchive/*:with_cng': False,
        'libarchive/*:with_expat': False,
        'libarchive/*:with_iconv': False,
        'libarchive/*:with_libxml2': False,
        'libarchive/*:with_lz4': True,
        'libarchive/*:with_lzma': False,
        'libarchive/*:with_lzo': False,
        'libarchive/*:with_nettle': False,
        'libarchive/*:with_openssl': False,
        'libarchive/*:with_pcreposix': False,
        'libarchive/*:with_xattr': False,
        'libarchive/*:with_zlib': False,
        'libpq/*:shared': False,
        'lz4/*:shared': False,
        'openssl/*:shared': False,
        'protobuf/*:shared': False,
        'protobuf/*:with_zlib': True,
        'rocksdb/*:enable_sse': False,
        'rocksdb/*:lite': False,
        'rocksdb/*:shared': False,
        'rocksdb/*:use_rtti': True,
        'rocksdb/*:with_jemalloc': False,
        'rocksdb/*:with_lz4': True,
        'rocksdb/*:with_snappy': True,
        'snappy/*:shared': False,
        'soci/*:shared': False,
        'soci/*:with_sqlite3': True,
        'soci/*:with_boost': True,
        'cassandra-cpp-driver:shared': False,
        'date:header_only': True,
        'grpc:shared': False,
        'grpc:secure': True,
        'libarchive:shared': False,
        'libarchive:with_acl': False,
        'libarchive:with_bzip2': False,
        'libarchive:with_cng': False,
        'libarchive:with_expat': False,
        'libarchive:with_iconv': False,
        'libarchive:with_libxml2': False,
        'libarchive:with_lz4': True,
        'libarchive:with_lzma': False,
        'libarchive:with_lzo': False,
        'libarchive:with_nettle': False,
        'libarchive:with_openssl': False,
        'libarchive:with_pcreposix': False,
        'libarchive:with_xattr': False,
        'libarchive:with_zlib': False,
        'libpq:shared': False,
        'lz4:shared': False,
        'openssl:shared': False,
        'protobuf:shared': False,
        'protobuf:with_zlib': True,
        'rocksdb:enable_sse': False,
        'rocksdb:lite': False,
        'rocksdb:shared': False,
        'rocksdb:use_rtti': True,
        'rocksdb:with_jemalloc': False,
        'rocksdb:with_lz4': True,
        'rocksdb:with_snappy': True,
        'snappy:shared': False,
        'soci:shared': False,
        'soci:with_sqlite3': True,
        'soci:with_boost': True,
    }

    def set_version(self):
@@ -96,28 +96,11 @@ class Xrpl(ConanFile):
        match = next(m for m in matches if m)
        self.version = match.group(1)

    def build_requirements(self):
        # These provide build tools (protoc, grpc plugins) that run during build
        self.tool_requires('protobuf/3.21.12')
        self.tool_requires('grpc/1.50.1')
        # Explicitly require b2 (e.g. for building from source for glibc compatibility)
        if self.options.tool_requires_b2:
            self.tool_requires('b2/5.3.2')

    def configure(self):
        if self.settings.compiler == 'apple-clang':
            self.options['boost/*'].visibility = 'global'
            self.options['boost'].visibility = 'global'

    def requirements(self):
        # Force sqlite3 version to avoid conflicts with soci
        self.requires('sqlite3/3.42.0', override=True)
        # Force our custom snappy build for all dependencies
        self.requires('snappy/1.1.10@xahaud/stable', override=True)
        # Force boost version for all dependencies to avoid conflicts
        self.requires('boost/1.86.0', override=True)

        if self.options.with_wasmedge:
            self.requires('wasmedge/0.11.2@xahaud/stable')
        if self.options.jemalloc:
            self.requires('jemalloc/5.2.1')
        if self.options.reporting:
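The bulk of this hunk is the option-pattern change: Conan 2 scopes options as `pkg/*:option`, while Conan 1 uses bare `pkg:option`. The same Conan 1 pattern works on the command line, e.g.:

```
# Conan 1 option syntax, matching the default_options keys above:
conan install .. -o snappy:shared=False -o soci:with_sqlite3=True
```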
external/wasmedge/conanfile.py (11 lines changed)
@@ -38,15 +38,8 @@ class WasmedgeConan(ConanFile):
            raise ConanInvalidConfiguration("Binaries for this combination of version/os/arch/compiler are not available")

    def package_id(self):
        # Make binary compatible across compiler versions (since we're downloading prebuilt)
        self.info.settings.rm_safe("compiler.version")
        # Group compilers by their binary compatibility
        # Note: We must use self.info.settings here, not self.settings (forbidden in Conan 2)
        compiler_name = str(self.info.settings.compiler)
        if compiler_name in ["Visual Studio", "msvc"]:
            self.info.settings.compiler = "Visual Studio"
        else:
            self.info.settings.compiler = "gcc"
        del self.info.settings.compiler.version
        self.info.settings.compiler = self._compiler_alias

    def build(self):
        # This is packaging binaries so the download needs to be in build
@@ -1,11 +1,9 @@
#!/bin/bash
# We use set -e and bash with -u to bail on first non zero exit code of any
# processes launched or upon any unbound variable.
# We use set -x to print commands before running them to help with
# debugging.

set -ex

echo "START BUILDING (HOST)"

echo "Cleaning previously built binary"
@@ -92,37 +90,29 @@ RUN /hbb_exe/activate-exec bash -c "dnf install -y epel-release && \
    llvm14-static llvm14-devel && \
    dnf clean all"

# Install Conan 2 and CMake
RUN /hbb_exe/activate-exec pip3 install "conan>=2.0,<3.0" && \
# Install Conan and CMake
RUN /hbb_exe/activate-exec pip3 install "conan==1.66.0" && \
    /hbb_exe/activate-exec wget -q https://github.com/Kitware/CMake/releases/download/v3.23.1/cmake-3.23.1-linux-x86_64.tar.gz -O cmake.tar.gz && \
    mkdir cmake && \
    tar -xzf cmake.tar.gz --strip-components=1 -C cmake && \
    rm cmake.tar.gz

# Dual Boost configuration in HBB environment:
# - Manual Boost in /usr/local (minimal: for WasmEdge which is pre-built in Docker)
# - Conan Boost (full: for the application and all dependencies via toolchain)
#
# Install minimal Boost 1.86.0 for WasmEdge only (filesystem and its dependencies)
# The main application will use Conan-provided Boost for all other components
# IMPORTANT: Understanding Boost linking options:
# - link=static: Creates static Boost libraries (.a files) instead of shared (.so files)
# - runtime-link=shared: Links Boost libraries against shared libc (glibc)
# WasmEdge only needs boost::filesystem and boost::system
RUN /hbb_exe/activate-exec bash -c "echo 'Boost cache bust: v5-minimal' && \
    rm -rf /usr/local/lib/libboost* /usr/local/include/boost && \
    cd /tmp && \
# Install Boost 1.86.0
RUN /hbb_exe/activate-exec bash -c "cd /tmp && \
    wget -q https://archives.boost.io/release/1.86.0/source/boost_1_86_0.tar.gz -O boost.tar.gz && \
    mkdir boost && \
    tar -xzf boost.tar.gz --strip-components=1 -C boost && \
    cd boost && \
    ./bootstrap.sh && \
    ./b2 install \
    link=static runtime-link=shared -j${BUILD_CORES} \
    --with-filesystem --with-system && \
    ./b2 link=static -j${BUILD_CORES} && \
    ./b2 install && \
    cd /tmp && \
    rm -rf boost boost.tar.gz"

ENV BOOST_ROOT=/usr/local/src/boost_1_86_0
ENV Boost_LIBRARY_DIRS=/usr/local/lib
ENV BOOST_INCLUDEDIR=/usr/local/src/boost_1_86_0

ENV CMAKE_EXE_LINKER_FLAGS="-static-libstdc++"

ENV LLVM_DIR=/usr/lib64/llvm14/lib/cmake/llvm
@@ -165,10 +155,6 @@ RUN cd /tmp && \
    cd build && \
    /hbb_exe/activate-exec bash -c "source /opt/rh/gcc-toolset-11/enable && \
    ln -sf /opt/rh/gcc-toolset-11/root/usr/bin/ar /usr/bin/ar && \
    ln -sf /opt/rh/gcc-toolset-11/root/usr/bin/ranlib /usr/bin/ranlib && \
    echo '=== Binutils version check ===' && \
    ar --version | head -1 && \
    ranlib --version | head -1 && \
    cmake .. \
    -DCMAKE_BUILD_TYPE=Release \
    -DCMAKE_INSTALL_PREFIX=/usr/local \
@@ -190,28 +176,14 @@ RUN cd /tmp && \
# Set environment variables
ENV PATH=/usr/local/bin:$PATH

# Configure ccache and Conan 2
# NOTE: Using echo commands instead of heredocs because heredocs in Docker RUN commands are finnicky
# Configure ccache and Conan
RUN /hbb_exe/activate-exec bash -c "ccache -M 10G && \
    ccache -o cache_dir=/cache/ccache && \
    ccache -o compiler_check=content && \
    mkdir -p ~/.conan2 /cache/conan2 /cache/conan2_download /cache/conan2_sources && \
    echo 'core.cache:storage_path=/cache/conan2' > ~/.conan2/global.conf && \
    echo 'core.download:download_cache=/cache/conan2_download' >> ~/.conan2/global.conf && \
    echo 'core.sources:download_cache=/cache/conan2_sources' >> ~/.conan2/global.conf && \
    conan profile detect --force && \
    echo '[settings]' > ~/.conan2/profiles/default && \
    echo 'arch=x86_64' >> ~/.conan2/profiles/default && \
    echo 'build_type=Release' >> ~/.conan2/profiles/default && \
    echo 'compiler=gcc' >> ~/.conan2/profiles/default && \
    echo 'compiler.cppstd=20' >> ~/.conan2/profiles/default && \
    echo 'compiler.libcxx=libstdc++11' >> ~/.conan2/profiles/default && \
    echo 'compiler.version=11' >> ~/.conan2/profiles/default && \
    echo 'os=Linux' >> ~/.conan2/profiles/default && \
    echo '' >> ~/.conan2/profiles/default && \
    echo '[conf]' >> ~/.conan2/profiles/default && \
    echo '# Force building from source for packages with binary compatibility issues' >> ~/.conan2/profiles/default && \
    echo '*:tools.system.package_manager:mode=build' >> ~/.conan2/profiles/default"
    conan config set storage.path=/cache/conan && \
    (conan profile new default --detect || true) && \
    conan profile update settings.compiler.libcxx=libstdc++11 default && \
    conan profile update settings.compiler.cppstd=20 default"

DOCKERFILE_EOF
)
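Because the image sets CMAKE_EXE_LINKER_FLAGS=-static-libstdc++ (and the release script exports the matching LDFLAGS), the produced binary should carry no dynamic libstdc++ dependency. A hedged post-build check in the spirit of the script's own `ldd xahaud` step:

```
# Run from the release-build directory after the build completes:
if ldd xahaud | grep -q 'libstdc++'; then
    echo "WARNING: libstdc++ is still dynamically linked"
else
    echo "libstdc++ linked statically, as intended"
fi
```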
@@ -1,4 +1,3 @@
#include <cstdint>
#include <map>
#include <set>
#include <string>
@@ -3150,15 +3150,15 @@ DEFINE_HOOK_FUNCTION(
    if (a == 0 || b == 0 || c == 0 || d == 0 || e == 0 || f == 0)
        return INVALID_ARGUMENT;

    uint32_t acc1_ptr = a, acc1_len = b, acc2_ptr = c, acc2_len = d,
    uint32_t hi_ptr = a, hi_len = b, lo_ptr = c, lo_len = d,
             cu_ptr = e, cu_len = f;

    if (NOT_IN_BOUNDS(acc1_ptr, acc1_len, memory_length) ||
        NOT_IN_BOUNDS(acc2_ptr, acc2_len, memory_length) ||
    if (NOT_IN_BOUNDS(hi_ptr, hi_len, memory_length) ||
        NOT_IN_BOUNDS(lo_ptr, lo_len, memory_length) ||
        NOT_IN_BOUNDS(cu_ptr, cu_len, memory_length))
        return OUT_OF_BOUNDS;

    if (acc1_len != 20 || acc2_len != 20)
    if (hi_len != 20 || lo_len != 20)
        return INVALID_ARGUMENT;

    std::optional<Currency> cur =
@@ -3167,8 +3167,8 @@ DEFINE_HOOK_FUNCTION(
        return INVALID_ARGUMENT;

    auto kl = ripple::keylet::line(
        AccountID::fromVoid(memory + acc1_ptr),
        AccountID::fromVoid(memory + acc2_ptr),
        AccountID::fromVoid(memory + hi_ptr),
        AccountID::fromVoid(memory + lo_ptr),
        *cur);
    return serialize_keylet(kl, memory, write_ptr, write_len);
}
@@ -28,8 +28,8 @@ private:

    struct AccountTxData
    {
        std::map<uint32_t, std::vector<AccountTx>>
            ledgerTxMap;  // ledgerSeq -> vector of transactions
        std::map<uint32_t, AccountTxs>
            ledgerTxMap;  // ledgerSeq -> vector of AccountTx
    };

    Application& app_;
@@ -304,6 +304,46 @@ public:
        // Overwrite Current Ledger
        ledgers_[seq] = std::move(ledgerData);
        ledgerHashToSeq_[ledger->info().hash] = seq;

        // Automatic cleanup based on LEDGER_HISTORY (ported from
        // FlatmapDatabase)
        if (current)
        {
            auto const cutoffSeq =
                ledger->info().seq > app_.config().LEDGER_HISTORY
                ? ledger->info().seq - app_.config().LEDGER_HISTORY
                : 0;

            if (cutoffSeq > 0)
            {
                // Delete old ledgers before cutoff
                auto it = ledgers_.begin();
                while (it != ledgers_.end() && it->first < cutoffSeq)
                {
                    // Clean up transactions from this ledger
                    for (const auto& [txHash, _] : it->second.transactions)
                    {
                        transactionMap_.erase(txHash);
                    }
                    ledgerHashToSeq_.erase(it->second.info.hash);
                    it = ledgers_.erase(it);
                }

                // Clean up account transactions before cutoff
                for (auto& [_, accountData] : accountTxMap_)
                {
                    auto txIt = accountData.ledgerTxMap.begin();
                    while (txIt != accountData.ledgerTxMap.end() &&
                           txIt->first < cutoffSeq)
                    {
                        txIt = accountData.ledgerTxMap.erase(txIt);
                    }
                }

                app_.getLedgerMaster().clearPriorLedgers(cutoffSeq);
            }
        }

        return true;
    }
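The cutoff computed above is simply `seq - LEDGER_HISTORY`, floored at zero; everything strictly below it is purged. Sketching the arithmetic in shell with illustrative numbers:

```
seq=5000000; LEDGER_HISTORY=256
cutoff=$(( seq > LEDGER_HISTORY ? seq - LEDGER_HISTORY : 0 ))
echo "$cutoff"   # 4999744: ledgers with index < cutoff are erased
```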
@@ -445,108 +485,55 @@ public:
        return true;  // In-memory database always has space
    }

    // Red-black tree node overhead per map entry
    static constexpr size_t MAP_NODE_OVERHEAD = 40;

private:
    std::uint64_t
    getBytesUsedLedger_unlocked() const
    {
        std::uint64_t size = 0;

        // Count structural overhead of ledger storage including map node
        // overhead. Note: sizeof(LedgerData) includes the map container for
        // transactions, but not the actual transaction data
        size += ledgers_.size() *
            (sizeof(LedgerIndex) + sizeof(LedgerData) + MAP_NODE_OVERHEAD);

        // Add the transaction map nodes inside each ledger (ledger's view of
        // its transactions)
        for (const auto& [_, ledgerData] : ledgers_)
        {
            size += ledgerData.transactions.size() *
                (sizeof(uint256) + sizeof(AccountTx) + MAP_NODE_OVERHEAD);
        }

        // Count the ledger hash to sequence lookup map
        size += ledgerHashToSeq_.size() *
            (sizeof(uint256) + sizeof(LedgerIndex) + MAP_NODE_OVERHEAD);

        return size;
    }

    std::uint64_t
    getBytesUsedTransaction_unlocked() const
    {
        if (!useTxTables_)
            return 0;

        std::uint64_t size = 0;

        // Count structural overhead of transaction map
        // sizeof(AccountTx) is just the size of two shared_ptrs (~32 bytes)
        size += transactionMap_.size() *
            (sizeof(uint256) + sizeof(AccountTx) + MAP_NODE_OVERHEAD);

        // Add actual transaction and metadata data sizes
        for (const auto& [_, accountTx] : transactionMap_)
        {
            if (accountTx.first)
                size += accountTx.first->getSTransaction()
                            ->getSerializer()
                            .peekData()
                            .size();
            if (accountTx.second)
                size += accountTx.second->getAsObject()
                            .getSerializer()
                            .peekData()
                            .size();
        }

        // Count structural overhead of account transaction index
        // The actual transaction data is already counted above from
        // transactionMap_
        for (const auto& [accountId, accountData] : accountTxMap_)
        {
            size +=
                sizeof(accountId) + sizeof(AccountTxData) + MAP_NODE_OVERHEAD;
            for (const auto& [ledgerSeq, txVector] : accountData.ledgerTxMap)
            {
                // Use capacity() to account for actual allocated memory
                size += sizeof(ledgerSeq) + MAP_NODE_OVERHEAD;
                size += txVector.capacity() * sizeof(AccountTx);
            }
        }

        return size;
    }

public:
    std::uint32_t
    getKBUsedAll() override
    {
        std::shared_lock<std::shared_mutex> lock(mutex_);

        // Total = base object + ledger infrastructure + transaction data
        std::uint64_t size = sizeof(*this) + getBytesUsedLedger_unlocked() +
            getBytesUsedTransaction_unlocked();

        return static_cast<std::uint32_t>(size / 1024);
        std::uint32_t size = sizeof(*this);
        size += ledgers_.size() * (sizeof(LedgerIndex) + sizeof(LedgerData));
        size +=
            ledgerHashToSeq_.size() * (sizeof(uint256) + sizeof(LedgerIndex));
        size += transactionMap_.size() * (sizeof(uint256) + sizeof(AccountTx));
        for (const auto& [_, accountData] : accountTxMap_)
        {
            size += sizeof(AccountID) + sizeof(AccountTxData);
            for (const auto& [_, txVector] : accountData.ledgerTxMap)
            {
                size += sizeof(uint32_t) + txVector.size() * sizeof(AccountTx);
            }
        }
        return size / 1024;
    }

    std::uint32_t
    getKBUsedLedger() override
    {
        std::shared_lock<std::shared_mutex> lock(mutex_);
        return static_cast<std::uint32_t>(getBytesUsedLedger_unlocked() / 1024);
        std::uint32_t size = 0;
        size += ledgers_.size() * (sizeof(LedgerIndex) + sizeof(LedgerData));
        size +=
            ledgerHashToSeq_.size() * (sizeof(uint256) + sizeof(LedgerIndex));
        return size / 1024;
    }

    std::uint32_t
    getKBUsedTransaction() override
    {
        if (!useTxTables_)
            return 0;

        std::shared_lock<std::shared_mutex> lock(mutex_);
        return static_cast<std::uint32_t>(
            getBytesUsedTransaction_unlocked() / 1024);
        std::uint32_t size = 0;
        size += transactionMap_.size() * (sizeof(uint256) + sizeof(AccountTx));
        for (const auto& [_, accountData] : accountTxMap_)
        {
            size += sizeof(AccountID) + sizeof(AccountTxData);
            for (const auto& [_, txVector] : accountData.ledgerTxMap)
            {
                size += sizeof(uint32_t) + txVector.size() * sizeof(AccountTx);
            }
        }
        return size / 1024;
    }
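The structural estimate above charges each map entry for its key, its value, and MAP_NODE_OVERHEAD (40 bytes of red-black tree bookkeeping). Back-of-envelope for the transaction map alone, with assumed 64-bit sizes (sizeof(uint256) = 32; AccountTx = two shared_ptrs = 32):

```
entries=100000; key=32; value=32; node=40
echo "$(( entries * (key + value + node) / 1024 )) KB"   # ~10156 KB structural
```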
    void
@@ -699,7 +686,7 @@ public:
                ++skipped;
                continue;
            }
            AccountTx const accountTx = *innerRIt;
            const AccountTx& accountTx = *innerRIt;
            std::uint32_t const inLedger = rangeCheckedCast<std::uint32_t>(
                accountTx.second->getLgrSeq());
            accountTx.first->setLedger(inLedger);
@@ -856,9 +843,9 @@ public:
            for (; txIt != txEnd; ++txIt)
            {
                std::uint32_t const ledgerSeq = txIt->first;
                std::uint32_t txnSeq = 0;
                for (const auto& accountTx : txIt->second)
                for (size_t txnSeq = 0; txnSeq < txIt->second.size(); ++txnSeq)
                {
                    const auto& accountTx = txIt->second[txnSeq];
                    if (lookingForMarker)
                    {
                        if (findLedger == ledgerSeq && findSeq == txnSeq)
@@ -866,15 +853,13 @@ public:
                            lookingForMarker = false;
                        }
                        else
                        {
                            ++txnSeq;
                            continue;
                        }
                    }
                    else if (numberOfResults == 0)
                    {
                        newmarker = {
                            rangeCheckedCast<std::uint32_t>(ledgerSeq), txnSeq};
                            rangeCheckedCast<std::uint32_t>(ledgerSeq),
                            static_cast<std::uint32_t>(txnSeq)};
                        return {newmarker, total};
                    }

@@ -895,7 +880,6 @@ public:
                        std::move(rawMeta));
                    --numberOfResults;
                    ++total;
                    ++txnSeq;
                }
            }
        }
@@ -911,11 +895,10 @@ public:
            for (; rtxIt != rtxEnd; ++rtxIt)
            {
                std::uint32_t const ledgerSeq = rtxIt->first;
                std::uint32_t txnSeq = rtxIt->second.size() - 1;
                for (auto innerRIt = rtxIt->second.rbegin();
                     innerRIt != rtxIt->second.rend();
                     ++innerRIt)
                for (int txnSeq = rtxIt->second.size() - 1; txnSeq >= 0;
                     --txnSeq)
                {
                    const auto& accountTx = rtxIt->second[txnSeq];
                    if (lookingForMarker)
                    {
                        if (findLedger == ledgerSeq && findSeq == txnSeq)
@@ -923,19 +906,16 @@ public:
                            lookingForMarker = false;
                        }
                        else
                        {
                            --txnSeq;
                            continue;
                        }
                    }
                    else if (numberOfResults == 0)
                    {
                        newmarker = {
                            rangeCheckedCast<std::uint32_t>(ledgerSeq), txnSeq};
                            rangeCheckedCast<std::uint32_t>(ledgerSeq),
                            static_cast<std::uint32_t>(txnSeq)};
                        return {newmarker, total};
                    }

                    const auto& accountTx = *innerRIt;
                    Blob rawTxn = accountTx.first->getSTransaction()
                                      ->getSerializer()
                                      .peekData();
@@ -953,7 +933,6 @@ public:
                        std::move(rawMeta));
                    --numberOfResults;
                    ++total;
                    --txnSeq;
                }
            }
        }
@@ -45,6 +45,7 @@

namespace ripple {
namespace detail {

[[nodiscard]] std::uint64_t
getMemorySize()
{
@@ -53,6 +54,7 @@ getMemorySize()

    return 0;
}

}  // namespace detail
}  // namespace ripple
#endif
@@ -62,6 +64,7 @@ getMemorySize()

namespace ripple {
namespace detail {

[[nodiscard]] std::uint64_t
getMemorySize()
{
@@ -70,6 +73,7 @@ getMemorySize()

    return 0;
}

}  // namespace detail
}  // namespace ripple

@@ -81,6 +85,7 @@ getMemorySize()

namespace ripple {
namespace detail {

[[nodiscard]] std::uint64_t
getMemorySize()
{
@@ -93,11 +98,13 @@ getMemorySize()

    return 0;
}

}  // namespace detail
}  // namespace ripple
#endif

namespace ripple {

// clang-format off
// The configurable node sizes are "tiny", "small", "medium", "large", "huge"
inline constexpr std::array<std::pair<SizedItem, std::array<int, 5>>, 13>
@@ -1000,23 +1007,6 @@ Config::loadFromString(std::string const& fileContents)
                "the maximum number of allowed peers (peers_max)");
        }
    }

    if (!RUN_STANDALONE)
    {
        auto db_section = section(ConfigSection::nodeDatabase());
        if (auto type = get(db_section, "type", ""); type == "rwdb")
        {
            if (auto delete_interval = get(db_section, "online_delete", 0);
                delete_interval == 0)
            {
                Throw<std::runtime_error>(
                    "RWDB (in-memory backend) requires online_delete to "
                    "prevent OOM "
                    "Exception: standalone mode (used by tests) doesn't need "
                    "online_delete");
            }
        }
    }
}

boost::filesystem::path
@@ -1081,4 +1071,5 @@ setup_FeeVote(Section const& section)
    }
    return setup;
}

}  // namespace ripple
@@ -321,7 +321,7 @@ public:
            HSFEE,
            ter(temMALFORMED));

        env(ripple::test::jtx::hook(alice, std::vector<Json::Value>{}, 0),
        env(ripple::test::jtx::hook(alice, {{}}, 0),
            M("Must have a non-empty hooks field"),
            HSFEE,
            ter(temMALFORMED));
@@ -1206,97 +1206,6 @@ r.ripple.com:51235
        }
    }

    void
    testRWDBOnlineDelete()
    {
        testcase("RWDB online_delete validation");

        // Test 1: RWDB without online_delete in standalone mode (should
        // succeed)
        {
            Config c;
            std::string toLoad =
                "[node_db]\n"
                "type=rwdb\n"
                "path=main\n";
            c.setupControl(true, true, true);  // standalone = true
            try
            {
                c.loadFromString(toLoad);
                pass();  // Should succeed
            }
            catch (std::runtime_error const& e)
            {
                fail("Should not throw in standalone mode");
            }
        }

        // Test 2: RWDB without online_delete NOT in standalone mode (should
        // throw)
        {
            Config c;
            std::string toLoad =
                "[node_db]\n"
                "type=rwdb\n"
                "path=main\n";
            c.setupControl(true, true, false);  // standalone = false
            try
            {
                c.loadFromString(toLoad);
                fail("Expected exception for RWDB without online_delete");
            }
            catch (std::runtime_error const& e)
            {
                BEAST_EXPECT(
                    std::string(e.what()).find(
                        "RWDB (in-memory backend) requires online_delete") !=
                    std::string::npos);
                pass();
            }
        }

        // Test 3: RWDB with online_delete NOT in standalone mode (should
        // succeed)
        {
            Config c;
            std::string toLoad =
                "[node_db]\n"
                "type=rwdb\n"
                "path=main\n"
                "online_delete=256\n";
            c.setupControl(true, true, false);  // standalone = false
            try
            {
                c.loadFromString(toLoad);
                pass();  // Should succeed
            }
            catch (std::runtime_error const& e)
            {
                fail("Should not throw when online_delete is configured");
            }
        }

        // Test 4: Non-RWDB without online_delete NOT in standalone mode (should
        // succeed)
        {
            Config c;
            std::string toLoad =
                "[node_db]\n"
                "type=NuDB\n"
                "path=main\n";
            c.setupControl(true, true, false);  // standalone = false
            try
            {
                c.loadFromString(toLoad);
                pass();  // Should succeed
            }
            catch (std::runtime_error const& e)
            {
                fail("Should not throw for non-RWDB backends");
            }
        }
    }

    void
    testOverlay()
    {
@@ -1386,7 +1295,6 @@ r.ripple.com:51235
        testComments();
        testGetters();
        testAmendment();
        testRWDBOnlineDelete();
        testOverlay();
        testNetworkID();
    }
@@ -1,756 +0,0 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of rippled: https://github.com/ripple/rippled
|
||||
Copyright (c) 2025 Ripple Labs Inc.
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <ripple/app/rdb/RelationalDatabase.h>
|
||||
#include <ripple/app/rdb/backend/SQLiteDatabase.h>
|
||||
#include <ripple/core/ConfigSections.h>
|
||||
#include <boost/filesystem.hpp>
|
||||
#include <chrono>
|
||||
#include <test/jtx.h>
|
||||
#include <test/jtx/envconfig.h>
|
||||
|
||||
namespace ripple {
|
||||
namespace test {
|
||||
|
||||
class RelationalDatabase_test : public beast::unit_test::suite
|
||||
{
|
||||
private:
|
||||
// Helper to get SQLiteDatabase* (works for both SQLite and RWDB since RWDB
|
||||
// inherits from SQLiteDatabase)
|
||||
static SQLiteDatabase*
|
||||
getInterface(Application& app)
|
||||
{
|
||||
return dynamic_cast<SQLiteDatabase*>(&app.getRelationalDatabase());
|
||||
}
|
||||
|
||||
static SQLiteDatabase*
|
||||
getInterface(RelationalDatabase& db)
|
||||
{
|
||||
return dynamic_cast<SQLiteDatabase*>(&db);
|
||||
}
|
||||
|
||||
static std::unique_ptr<Config>
|
||||
makeConfig(std::string const& backend)
|
||||
{
|
||||
auto config = test::jtx::envconfig();
|
||||
// Sqlite backend doesn't need a database_path as it will just use
|
||||
// in-memory databases when in standalone mode anyway.
|
||||
config->overwrite(SECTION_RELATIONAL_DB, "backend", backend);
|
||||
return config;
|
||||
}
|
||||
|
||||
public:
|
||||
RelationalDatabase_test() = default;
|
||||
|
||||
void
|
||||
testBasicInitialization(
|
||||
std::string const& backend,
|
||||
std::unique_ptr<Config> config)
|
||||
{
|
||||
testcase("Basic initialization and empty database - " + backend);
|
||||
|
||||
using namespace test::jtx;
|
||||
Env env(*this, std::move(config));
|
||||
auto& db = env.app().getRelationalDatabase();
|
||||
|
||||
// Test empty database state
|
||||
BEAST_EXPECT(db.getMinLedgerSeq() == 2);
|
||||
BEAST_EXPECT(db.getMaxLedgerSeq() == 2);
|
||||
BEAST_EXPECT(db.getNewestLedgerInfo()->seq == 2);
|
||||
|
||||
auto* sqliteDb = getInterface(db);
|
||||
BEAST_EXPECT(sqliteDb != nullptr);
|
||||
|
||||
if (sqliteDb)
|
||||
{
|
||||
BEAST_EXPECT(!sqliteDb->getTransactionsMinLedgerSeq().has_value());
|
||||
BEAST_EXPECT(
|
||||
!sqliteDb->getAccountTransactionsMinLedgerSeq().has_value());
|
||||
|
||||
auto ledgerCount = sqliteDb->getLedgerCountMinMax();
|
||||
BEAST_EXPECT(ledgerCount.numberOfRows == 1);
|
||||
BEAST_EXPECT(ledgerCount.minLedgerSequence == 2);
|
||||
BEAST_EXPECT(ledgerCount.maxLedgerSequence == 2);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
testLedgerSequenceOperations(
|
||||
std::string const& backend,
|
||||
std::unique_ptr<Config> config)
|
||||
{
|
||||
testcase("Ledger sequence operations - " + backend);
|
||||
|
||||
using namespace test::jtx;
|
||||
config->LEDGER_HISTORY = 1000;
|
||||
|
||||
Env env(*this, std::move(config));
|
||||
auto& db = env.app().getRelationalDatabase();
|
||||
|
||||
// Create initial ledger
|
||||
Account alice("alice");
|
||||
env.fund(XRP(10000), alice);
|
||||
env.close();

        // Test basic sequence operations
        auto minSeq = db.getMinLedgerSeq();
        auto maxSeq = db.getMaxLedgerSeq();

        BEAST_EXPECT(minSeq.has_value());
        BEAST_EXPECT(maxSeq.has_value());
        BEAST_EXPECT(*minSeq == 2);
        BEAST_EXPECT(*maxSeq == 3);

        // Create more ledgers
        env(pay(alice, Account("bob"), XRP(1000)));
        env.close();

        env(pay(alice, Account("carol"), XRP(500)));
        env.close();

        // Verify sequence updates
        minSeq = db.getMinLedgerSeq();
        maxSeq = db.getMaxLedgerSeq();

        BEAST_EXPECT(*minSeq == 2);
        BEAST_EXPECT(*maxSeq == 5);

        auto* sqliteDb = getInterface(db);
        if (sqliteDb)
        {
            auto ledgerCount = sqliteDb->getLedgerCountMinMax();
            BEAST_EXPECT(ledgerCount.numberOfRows == 4);
            BEAST_EXPECT(ledgerCount.minLedgerSequence == 2);
            BEAST_EXPECT(ledgerCount.maxLedgerSequence == 5);
        }
    }

    void
    testLedgerInfoOperations(
        std::string const& backend,
        std::unique_ptr<Config> config)
    {
        testcase("Ledger info retrieval operations - " + backend);

        using namespace test::jtx;
        config->LEDGER_HISTORY = 1000;

        Env env(*this, std::move(config));
        auto* db = getInterface(env.app());

        Account alice("alice");
        env.fund(XRP(10000), alice);
        env.close();

        // Test getNewestLedgerInfo
        auto newestLedger = db->getNewestLedgerInfo();
        BEAST_EXPECT(newestLedger.has_value());
        BEAST_EXPECT(newestLedger->seq == 3);

        // Test getLedgerInfoByIndex
        auto ledgerByIndex = db->getLedgerInfoByIndex(3);
        BEAST_EXPECT(ledgerByIndex.has_value());
        BEAST_EXPECT(ledgerByIndex->seq == 3);
        BEAST_EXPECT(ledgerByIndex->hash == newestLedger->hash);

        // Test getLedgerInfoByHash
        auto ledgerByHash = db->getLedgerInfoByHash(newestLedger->hash);
        BEAST_EXPECT(ledgerByHash.has_value());
        BEAST_EXPECT(ledgerByHash->seq == 3);
        BEAST_EXPECT(ledgerByHash->hash == newestLedger->hash);
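
        // The "limited" variants take a lower bound: they only consider
        // ledgers whose sequence is at or above the given value.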

        // Test getLimitedOldestLedgerInfo
        auto oldestLedger = db->getLimitedOldestLedgerInfo(2);
        BEAST_EXPECT(oldestLedger.has_value());
        BEAST_EXPECT(oldestLedger->seq == 2);

        // Test getLimitedNewestLedgerInfo
        auto limitedNewest = db->getLimitedNewestLedgerInfo(2);
        BEAST_EXPECT(limitedNewest.has_value());
        BEAST_EXPECT(limitedNewest->seq == 3);

        // Test invalid queries
        auto invalidLedger = db->getLedgerInfoByIndex(999);
        BEAST_EXPECT(!invalidLedger.has_value());

        uint256 invalidHash;
        auto invalidHashLedger = db->getLedgerInfoByHash(invalidHash);
        BEAST_EXPECT(!invalidHashLedger.has_value());
    }

    void
    testHashOperations(
        std::string const& backend,
        std::unique_ptr<Config> config)
    {
        testcase("Hash retrieval operations - " + backend);

        using namespace test::jtx;
        config->LEDGER_HISTORY = 1000;

        Env env(*this, std::move(config));
        auto& db = env.app().getRelationalDatabase();

        Account alice("alice");
        env.fund(XRP(10000), alice);
        env.close();

        env(pay(alice, Account("bob"), XRP(1000)));
        env.close();
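
        // Ledgers 3 and 4 are now on record. Each ledger stores its parent's
        // hash, so the pair/range queries below can verify the chain.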

        // Test getHashByIndex
        auto hash1 = db.getHashByIndex(3);
        auto hash2 = db.getHashByIndex(4);

        BEAST_EXPECT(hash1 != uint256());
        BEAST_EXPECT(hash2 != uint256());
        BEAST_EXPECT(hash1 != hash2);

        // Test getHashesByIndex (single)
        auto hashPair = db.getHashesByIndex(4);
        BEAST_EXPECT(hashPair.has_value());
        BEAST_EXPECT(hashPair->ledgerHash == hash2);
        BEAST_EXPECT(hashPair->parentHash == hash1);

        // Test getHashesByIndex (range)
        auto hashRange = db.getHashesByIndex(3, 4);
        BEAST_EXPECT(hashRange.size() == 2);
        BEAST_EXPECT(hashRange[3].ledgerHash == hash1);
        BEAST_EXPECT(hashRange[4].ledgerHash == hash2);
        BEAST_EXPECT(hashRange[4].parentHash == hash1);

        // Test invalid hash queries
        auto invalidHash = db.getHashByIndex(999);
        BEAST_EXPECT(invalidHash == uint256());

        auto invalidHashPair = db.getHashesByIndex(999);
        BEAST_EXPECT(!invalidHashPair.has_value());

        auto emptyRange = db.getHashesByIndex(10, 5);  // max < min
        BEAST_EXPECT(emptyRange.empty());
    }

    void
    testTransactionOperations(
        std::string const& backend,
        std::unique_ptr<Config> config)
    {
        testcase("Transaction storage and retrieval - " + backend);

        using namespace test::jtx;
        config->LEDGER_HISTORY = 1000;

        Env env(*this, std::move(config));
        auto& db = env.app().getRelationalDatabase();

        Account alice("alice");
        Account bob("bob");

        env.fund(XRP(10000), alice, bob);
        env.close();
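
        // jtx::fund submits two transactions per account (a funding Payment
        // from the master account plus an account-setup transaction), so
        // funding alice and bob yields 4 transactions. Each Payment touches
        // two accounts and each setup transaction touches one, giving the 6
        // AccountTransactions rows expected below.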

        auto* sqliteDb = getInterface(db);
        BEAST_EXPECT(sqliteDb != nullptr);

        if (!sqliteDb)
            return;

        // Test initial transaction counts after funding
        auto initialTxCount = sqliteDb->getTransactionCount();
        auto initialAcctTxCount = sqliteDb->getAccountTransactionCount();

        BEAST_EXPECT(initialTxCount == 4);
        BEAST_EXPECT(initialAcctTxCount == 6);

        // Create transactions
        env(pay(alice, bob, XRP(1000)));
        env.close();

        env(pay(bob, alice, XRP(500)));
        env.close();

        // Test transaction counts after creation
        auto txCount = sqliteDb->getTransactionCount();
        auto acctTxCount = sqliteDb->getAccountTransactionCount();

        BEAST_EXPECT(txCount == 6);
        BEAST_EXPECT(acctTxCount == 10);

        // Test transaction retrieval
        uint256 invalidTxId;
        error_code_i ec;
        auto invalidTxResult =
            sqliteDb->getTransaction(invalidTxId, std::nullopt, ec);
        BEAST_EXPECT(std::holds_alternative<TxSearched>(invalidTxResult));

        // Test transaction history
        auto txHistory = db.getTxHistory(0);

        BEAST_EXPECT(!txHistory.empty());
        BEAST_EXPECT(txHistory.size() == 6);

        // Test with valid transaction range
        auto minSeq = sqliteDb->getTransactionsMinLedgerSeq();
        auto maxSeq = db.getMaxLedgerSeq();
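
        // Searching a ledger range for an id that is not present reports how
        // completely the range could be searched: TxSearched::all when every
        // ledger in the range was available, TxSearched::some otherwise.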

        if (minSeq && maxSeq)
        {
            ClosedInterval<std::uint32_t> range(*minSeq, *maxSeq);
            auto rangeResult = sqliteDb->getTransaction(invalidTxId, range, ec);
            auto searched = std::get<TxSearched>(rangeResult);
            BEAST_EXPECT(
                searched == TxSearched::all || searched == TxSearched::some);
        }
    }

    void
    testAccountTransactionOperations(
        std::string const& backend,
        std::unique_ptr<Config> config)
    {
        testcase("Account transaction operations - " + backend);

        using namespace test::jtx;
        config->LEDGER_HISTORY = 1000;

        Env env(*this, std::move(config));
        auto& db = env.app().getRelationalDatabase();

        Account alice("alice");
        Account bob("bob");
        Account carol("carol");

        env.fund(XRP(10000), alice, bob, carol);
        env.close();

        auto* sqliteDb = getInterface(db);
        BEAST_EXPECT(sqliteDb != nullptr);

        if (!sqliteDb)
            return;

        // Create multiple transactions involving alice
        env(pay(alice, bob, XRP(1000)));
        env.close();

        env(pay(bob, alice, XRP(500)));
        env.close();

        env(pay(alice, carol, XRP(250)));
        env.close();
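
        // Alice now appears in five AccountTransactions rows: two from
        // jtx::fund (the funding Payment and the account-setup transaction)
        // plus the three Payments above.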

        auto minSeq = db.getMinLedgerSeq();
        auto maxSeq = db.getMaxLedgerSeq();

        if (!minSeq || !maxSeq)
            return;

        // Test getOldestAccountTxs
        RelationalDatabase::AccountTxOptions options{
            alice.id(), *minSeq, *maxSeq, 0, 10, false};

        auto oldestTxs = sqliteDb->getOldestAccountTxs(options);
        BEAST_EXPECT(oldestTxs.size() == 5);

        // Test getNewestAccountTxs
        auto newestTxs = sqliteDb->getNewestAccountTxs(options);
        BEAST_EXPECT(newestTxs.size() == 5);

        // Test binary format versions
        auto oldestTxsB = sqliteDb->getOldestAccountTxsB(options);
        BEAST_EXPECT(oldestTxsB.size() == 5);

        auto newestTxsB = sqliteDb->getNewestAccountTxsB(options);
        BEAST_EXPECT(newestTxsB.size() == 5);

        // Test with limit
        options.limit = 1;
        auto limitedTxs = sqliteDb->getOldestAccountTxs(options);
        BEAST_EXPECT(limitedTxs.size() == 1);

        // Test with offset
        options.limit = 10;
        options.offset = 1;
        auto offsetTxs = sqliteDb->getOldestAccountTxs(options);
        BEAST_EXPECT(offsetTxs.size() == 4);

        // Test with invalid account
        {
            Account invalidAccount("invalid");
            RelationalDatabase::AccountTxOptions invalidOptions{
                invalidAccount.id(), *minSeq, *maxSeq, 0, 10, false};
            auto invalidAccountTxs =
                sqliteDb->getOldestAccountTxs(invalidOptions);
            BEAST_EXPECT(invalidAccountTxs.empty());
        }
    }

    void
    testAccountTransactionPaging(
        std::string const& backend,
        std::unique_ptr<Config> config)
    {
        testcase("Account transaction paging operations - " + backend);

        using namespace test::jtx;
        config->LEDGER_HISTORY = 1000;

        Env env(*this, std::move(config));
        auto& db = env.app().getRelationalDatabase();

        Account alice("alice");
        Account bob("bob");

        env.fund(XRP(10000), alice, bob);
        env.close();

        auto* sqliteDb = getInterface(db);
        BEAST_EXPECT(sqliteDb != nullptr);
        if (!sqliteDb)
            return;

        // Create multiple transactions for paging
        for (int i = 0; i < 5; ++i)
        {
            env(pay(alice, bob, XRP(100 + i)));
            env.close();
        }

        auto minSeq = db.getMinLedgerSeq();
        auto maxSeq = db.getMaxLedgerSeq();

        if (!minSeq || !maxSeq)
            return;

        RelationalDatabase::AccountTxPageOptions pageOptions{
            alice.id(), *minSeq, *maxSeq, std::nullopt, 2, false};
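
        // With a page size of 2 against alice's seven rows, each page
        // requested below should come back full and carry a continuation
        // marker.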

        // Test oldestAccountTxPage
        auto [oldestPage, oldestMarker] =
            sqliteDb->oldestAccountTxPage(pageOptions);

        BEAST_EXPECT(oldestPage.size() == 2);
        BEAST_EXPECT(oldestMarker.has_value());

        // Test newestAccountTxPage
        auto [newestPage, newestMarker] =
            sqliteDb->newestAccountTxPage(pageOptions);

        BEAST_EXPECT(newestPage.size() == 2);
        BEAST_EXPECT(newestMarker.has_value());

        // Test binary versions
        auto [oldestPageB, oldestMarkerB] =
            sqliteDb->oldestAccountTxPageB(pageOptions);
        BEAST_EXPECT(oldestPageB.size() == 2);

        auto [newestPageB, newestMarkerB] =
            sqliteDb->newestAccountTxPageB(pageOptions);
        BEAST_EXPECT(newestPageB.size() == 2);

        // Test with marker continuation
        if (oldestMarker.has_value())
        {
            pageOptions.marker = oldestMarker;
            auto [continuedPage, continuedMarker] =
                sqliteDb->oldestAccountTxPage(pageOptions);
            BEAST_EXPECT(continuedPage.size() == 2);
        }
    }

    void
    testDeletionOperations(
        std::string const& backend,
        std::unique_ptr<Config> config)
    {
        testcase("Deletion operations - " + backend);

        using namespace test::jtx;
        config->LEDGER_HISTORY = 1000;

        Env env(*this, std::move(config));
        auto& db = env.app().getRelationalDatabase();

        Account alice("alice");
        Account bob("bob");

        env.fund(XRP(10000), alice, bob);
        env.close();

        auto* sqliteDb = getInterface(db);
        BEAST_EXPECT(sqliteDb != nullptr);
        if (!sqliteDb)
            return;

        // Create multiple ledgers and transactions
        for (int i = 0; i < 3; ++i)
        {
            env(pay(alice, bob, XRP(100 + i)));
            env.close();
        }
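
        // Expected state: 4 funding transactions + 3 Payments = 7 rows in
        // Transactions; 6 funding rows + 3 Payments x 2 accounts = 12 rows
        // in AccountTransactions; ledgers 2 through 6 = 5 rows in Ledgers.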

        auto initialTxCount = sqliteDb->getTransactionCount();
        BEAST_EXPECT(initialTxCount == 7);
        auto initialAcctTxCount = sqliteDb->getAccountTransactionCount();
        BEAST_EXPECT(initialAcctTxCount == 12);
        auto initialLedgerCount = sqliteDb->getLedgerCountMinMax();
        BEAST_EXPECT(initialLedgerCount.numberOfRows == 5);

        auto maxSeq = db.getMaxLedgerSeq();
        if (!maxSeq || *maxSeq <= 2)
            return;

        // Test deleteTransactionByLedgerSeq
        sqliteDb->deleteTransactionByLedgerSeq(*maxSeq);
        auto txCountAfterDelete = sqliteDb->getTransactionCount();
        BEAST_EXPECT(txCountAfterDelete == 6);

        // Test deleteTransactionsBeforeLedgerSeq
        sqliteDb->deleteTransactionsBeforeLedgerSeq(*maxSeq - 1);
        auto txCountAfterBulkDelete = sqliteDb->getTransactionCount();
        BEAST_EXPECT(txCountAfterBulkDelete == 1);

        // Test deleteAccountTransactionsBeforeLedgerSeq
        sqliteDb->deleteAccountTransactionsBeforeLedgerSeq(*maxSeq - 1);
        auto acctTxCountAfterDelete = sqliteDb->getAccountTransactionCount();
        BEAST_EXPECT(acctTxCountAfterDelete == 4);

        // Test deleteBeforeLedgerSeq
        auto minSeq = db.getMinLedgerSeq();
        if (minSeq)
        {
            sqliteDb->deleteBeforeLedgerSeq(*minSeq + 1);
            auto ledgerCountAfterDelete = sqliteDb->getLedgerCountMinMax();
            BEAST_EXPECT(ledgerCountAfterDelete.numberOfRows == 4);
        }
    }

    void
    testDatabaseSpaceOperations(
        std::string const& backend,
        std::unique_ptr<Config> config)
    {
        testcase("Database space and size operations - " + backend);

        using namespace test::jtx;
        Env env(*this, std::move(config));
        auto& db = env.app().getRelationalDatabase();

        auto* sqliteDb = getInterface(db);
        BEAST_EXPECT(sqliteDb != nullptr);
        if (!sqliteDb)
            return;

        // Test size queries
        auto allKB = sqliteDb->getKBUsedAll();
        auto ledgerKB = sqliteDb->getKBUsedLedger();
        auto txKB = sqliteDb->getKBUsedTransaction();

        if (backend == "rwdb")
        {
            // RWDB reports actual data memory (rounded down to KB).
            // Initially this should be < 1KB, so it rounds down to 0.
            // Note: these are 0 due to rounding, not because there's
            // literally no data.
            BEAST_EXPECT(allKB == 0);     // < 1024 bytes rounds to 0 KB
            BEAST_EXPECT(ledgerKB == 0);  // < 1024 bytes rounds to 0 KB
            BEAST_EXPECT(txKB == 0);      // < 1024 bytes rounds to 0 KB
        }
        else
        {
            // SQLite reports cache/engine memory, which has overhead even
            // when empty. Just verify the functions return reasonable values.
            BEAST_EXPECT(allKB >= 0);
            BEAST_EXPECT(ledgerKB >= 0);
            BEAST_EXPECT(txKB >= 0);
        }

        // Create some data and verify size increases
        Account alice("alice");
        env.fund(XRP(10000), alice);
        env.close();

        auto newAllKB = sqliteDb->getKBUsedAll();
        auto newLedgerKB = sqliteDb->getKBUsedLedger();
        auto newTxKB = sqliteDb->getKBUsedTransaction();

        if (backend == "rwdb")
        {
            // RWDB reports actual data memory; after adding data we should
            // see some increase.
            BEAST_EXPECT(newAllKB >= 1);  // Should have at least 1KB total
            BEAST_EXPECT(
                newTxKB >= 0);  // Transactions added (might still be < 1KB)
            BEAST_EXPECT(
                newLedgerKB >= 0);  // Ledger data (might still be < 1KB)

            // Key relationships
            BEAST_EXPECT(newAllKB >= newLedgerKB + newTxKB);  // Total >= parts
            BEAST_EXPECT(newAllKB >= allKB);  // Should increase or stay same
            BEAST_EXPECT(newTxKB >= txKB);    // Should increase or stay same
        }
        else
        {
            // SQLite: memory usage should not decrease after adding data;
            // values might increase due to cache growth.
            BEAST_EXPECT(newAllKB >= allKB);
            BEAST_EXPECT(newLedgerKB >= ledgerKB);
            BEAST_EXPECT(newTxKB >= txKB);

            // SQLite's getKBUsedAll is global memory, so it should be >= its
            // parts.
            BEAST_EXPECT(newAllKB >= newLedgerKB);
            BEAST_EXPECT(newAllKB >= newTxKB);
        }

        // Test space availability.
        // Both SQLite and RWDB use in-memory databases in standalone mode,
        // so file-based space checks don't apply to either backend.
        // Skip these checks for both.

        // if (backend == "rwdb")
        // {
        //     BEAST_EXPECT(db.ledgerDbHasSpace(env.app().config()));
        //     BEAST_EXPECT(db.transactionDbHasSpace(env.app().config()));
        // }

        // Test database closure operations (should not throw)
        try
        {
            sqliteDb->closeLedgerDB();
            sqliteDb->closeTransactionDB();
        }
        catch (std::exception const&)
        {
            BEAST_EXPECT(false);  // Should not throw
        }
    }

    void
    testTransactionMinLedgerSeq(
        std::string const& backend,
        std::unique_ptr<Config> config)
    {
        testcase("Transaction minimum ledger sequence tracking - " + backend);

        using namespace test::jtx;
        config->LEDGER_HISTORY = 1000;

        Env env(*this, std::move(config));
        auto& db = env.app().getRelationalDatabase();

        auto* sqliteDb = getInterface(db);
        BEAST_EXPECT(sqliteDb != nullptr);
        if (!sqliteDb)
            return;

        // Initially there should be no transactions
        BEAST_EXPECT(!sqliteDb->getTransactionsMinLedgerSeq().has_value());
        BEAST_EXPECT(
            !sqliteDb->getAccountTransactionsMinLedgerSeq().has_value());

        Account alice("alice");
        Account bob("bob");

        env.fund(XRP(10000), alice, bob);
        env.close();

        // Create first transaction
        env(pay(alice, bob, XRP(1000)));
        env.close();

        auto txMinSeq = sqliteDb->getTransactionsMinLedgerSeq();
        auto acctTxMinSeq = sqliteDb->getAccountTransactionsMinLedgerSeq();
        BEAST_EXPECT(txMinSeq.has_value());
        BEAST_EXPECT(acctTxMinSeq.has_value());
        BEAST_EXPECT(*txMinSeq == 3);
        BEAST_EXPECT(*acctTxMinSeq == 3);

        // Create more transactions
        env(pay(bob, alice, XRP(500)));
        env.close();

        env(pay(alice, bob, XRP(250)));
        env.close();

        // Min sequences should remain the same (first transaction ledger)
        auto newTxMinSeq = sqliteDb->getTransactionsMinLedgerSeq();
        auto newAcctTxMinSeq = sqliteDb->getAccountTransactionsMinLedgerSeq();
        BEAST_EXPECT(newTxMinSeq == txMinSeq);
        BEAST_EXPECT(newAcctTxMinSeq == acctTxMinSeq);
    }

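    // Parse the comma-separated backend list supplied as the suite's unit
    // test argument (e.g. --unittest-arg=sqlite,rwdb); unknown names are
    // ignored.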
    static std::vector<std::string>
    getBackends(std::string const& unittest_arg)
    {
        // Valid backends
        static const std::set<std::string> validBackends = {"sqlite", "rwdb"};

        // Default to all valid backends if no arg is specified
        if (unittest_arg.empty())
            return {validBackends.begin(), validBackends.end()};

        std::set<std::string> backends;  // Use a set to avoid duplicates
        std::stringstream ss(unittest_arg);
        std::string backend;

        while (std::getline(ss, backend, ','))
        {
            if (!backend.empty())
            {
                // Validate the backend name
                if (validBackends.contains(backend))
                {
                    backends.insert(backend);
                }
            }
        }

        // Return as a vector (already sorted, since it comes from a set)
        return {backends.begin(), backends.end()};
    }

    void
    run() override
    {
        auto backends = getBackends(arg());

        if (backends.empty())
        {
            fail("no valid backend specified: '" + arg() + "'");
        }

        for (auto const& backend : backends)
        {
            testBasicInitialization(backend, makeConfig(backend));
            testLedgerSequenceOperations(backend, makeConfig(backend));
            testLedgerInfoOperations(backend, makeConfig(backend));
            testHashOperations(backend, makeConfig(backend));
            testTransactionOperations(backend, makeConfig(backend));
            testAccountTransactionOperations(backend, makeConfig(backend));
            testAccountTransactionPaging(backend, makeConfig(backend));
            testDeletionOperations(backend, makeConfig(backend));
            testDatabaseSpaceOperations(backend, makeConfig(backend));
            testTransactionMinLedgerSeq(backend, makeConfig(backend));
        }
    }
};

BEAST_DEFINE_TESTSUITE(RelationalDatabase, rdb, ripple);

}  // namespace test
}  // namespace ripple
@@ -19,9 +19,9 @@

#ifndef TEST_UNIT_TEST_SUITE_JOURNAL_H
#define TEST_UNIT_TEST_SUITE_JOURNAL_H

#include <ripple/beast/unit_test.h>
#include <ripple/beast/utility/Journal.h>
#include <mutex>

namespace ripple {
namespace test {
@@ -82,13 +82,7 @@ SuiteJournalSink::write(

    // Only write the string if the level at least equals the threshold.
    if (level >= threshold())
    {
        // std::endl flushes, which triggers sync(); concurrent str()/str("")
        // calls on the shared buffer race and can crash, so serialize writes.
        static std::mutex log_mutex;
        std::lock_guard lock(log_mutex);
        suite_.log << s << partition_ << text << std::endl;
    }
}

class SuiteJournal