Compare commits

..

6 Commits

Author | SHA1 | Message | Date
Alex Kremer | d053a1f8c8 | Merge branch 'develop' into release/2.6.0 | 2025-10-20 14:05:37 +01:00
Alex Kremer | c12601c0f5 | Merge branch 'develop' into release/2.6.0 | 2025-10-09 16:58:32 +01:00
Alex Kremer | 381009807f | Merge remote-tracking branch 'origin/develop' into release/2.6.0 | 2025-10-08 13:52:49 +01:00
Alex Kremer | f530216688 | Merge branch 'develop' into release/2.6.0 | 2025-10-03 16:30:41 +01:00
Alex Kremer | 0cccb1a2c0 | Merge branch 'develop' into release/2.6.0 | 2025-10-01 13:51:22 +01:00
Alex Kremer | 1803990aa8 | Use xrpl/2.6.1-rc1 | 2025-09-18 17:07:59 +01:00
364 changed files with 10412 additions and 16675 deletions


@@ -49,7 +49,6 @@ IndentFunctionDeclarationAfterType: false
IndentWidth: 4
IndentWrappedFunctionNames: false
IndentRequiresClause: true
InsertNewlineAtEOF: true
RequiresClausePosition: OwnLine
KeepEmptyLinesAtTheStartOfBlocks: false
MaxEmptyLinesToKeep: 1


@@ -54,7 +54,7 @@ format:
_help_max_pargs_hwrap:
- If a positional argument group contains more than this many
- arguments, then force it to a vertical layout.
max_pargs_hwrap: 5
max_pargs_hwrap: 6
_help_max_rows_cmdline:
- If a cmdline positional group consumes more than this many
- lines without nesting, then invalidate the layout (and nest)


@@ -5,27 +5,25 @@ inputs:
targets:
description: Space-separated build target names
default: all
nproc_subtract:
description: The number of processors to subtract when calculating parallelism.
subtract_threads:
description: An option for the action get-threads-number.
required: true
default: "0"
runs:
using: composite
steps:
- name: Get number of processors
uses: XRPLF/actions/get-nproc@cf0433aa74563aead044a1e395610c96d65a37cf
id: nproc
- name: Get number of threads
uses: ./.github/actions/get-threads-number
id: number_of_threads
with:
subtract: ${{ inputs.nproc_subtract }}
subtract_threads: ${{ inputs.subtract_threads }}
- name: Build targets
shell: bash
env:
CMAKE_TARGETS: ${{ inputs.targets }}
run: |
cd build
cmake \
--build . \
--parallel "${{ steps.nproc.outputs.nproc }}" \
--target ${CMAKE_TARGETS}
--parallel "${{ steps.number_of_threads.outputs.threads_number }}" \
--target ${{ inputs.targets }}


@@ -47,12 +47,12 @@ runs:
username: ${{ github.repository_owner }}
password: ${{ env.GITHUB_TOKEN }}
- uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0
- uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
with:
cache-image: false
- uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
- uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
- uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0
- uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v5.8.0
id: meta
with:
images: ${{ inputs.images }}


@@ -1,41 +0,0 @@
name: Cache key
description: Generate cache key for ccache
inputs:
conan_profile:
description: Conan profile name
required: true
build_type:
description: Current build type (e.g. Release, Debug)
required: true
default: Release
code_coverage:
description: Whether code coverage is on
required: true
default: "false"
outputs:
key:
description: Generated cache key for ccache
value: ${{ steps.key_without_commit.outputs.key }}-${{ steps.git_common_ancestor.outputs.commit }}
restore_keys:
description: Cache restore keys for fallback
value: ${{ steps.key_without_commit.outputs.key }}
runs:
using: composite
steps:
- name: Find common commit
id: git_common_ancestor
uses: ./.github/actions/git-common-ancestor
- name: Set cache key without commit
id: key_without_commit
shell: bash
env:
RUNNER_OS: ${{ runner.os }}
BUILD_TYPE: ${{ inputs.build_type }}
CODE_COVERAGE: ${{ inputs.code_coverage == 'true' && '-code_coverage' || '' }}
CONAN_PROFILE: ${{ inputs.conan_profile }}
run: |
echo "key=clio-ccache-${RUNNER_OS}-${BUILD_TYPE}${CODE_COVERAGE}-${CONAN_PROFILE}-develop" >> "${GITHUB_OUTPUT}"


@@ -37,10 +37,6 @@ inputs:
description: Whether to generate Debian package
required: true
default: "false"
version:
description: Version of the clio_server binary
required: false
default: ""
runs:
using: composite
@@ -48,7 +44,6 @@ runs:
- name: Run cmake
shell: bash
env:
BUILD_DIR: "${{ inputs.build_dir }}"
BUILD_TYPE: "${{ inputs.build_type }}"
SANITIZER_OPTION: |-
${{ endsWith(inputs.conan_profile, '.asan') && '-Dsan=address' ||
@@ -61,22 +56,9 @@ runs:
STATIC: "${{ inputs.static == 'true' && 'ON' || 'OFF' }}"
TIME_TRACE: "${{ inputs.time_trace == 'true' && 'ON' || 'OFF' }}"
PACKAGE: "${{ inputs.package == 'true' && 'ON' || 'OFF' }}"
# GitHub creates a merge commit for a PR
# https://www.kenmuse.com/blog/the-many-shas-of-a-github-pull-request/
#
# We:
# - explicitly provide branch name
# - use `github.head_ref` to get the SHA of last commit in the PR branch
#
# This way it works both for PRs and pushes to branches.
GITHUB_BRANCH_NAME: "${{ github.head_ref || github.ref_name }}"
GITHUB_HEAD_SHA: "${{ github.event.pull_request.head.sha || github.sha }}"
#
# If tag is being pushed, or it's a nightly release, we use that version.
FORCE_CLIO_VERSION: ${{ inputs.version }}
run: |
cmake \
-B "${BUILD_DIR}" \
-B ${{inputs.build_dir}} \
-S . \
-G Ninja \
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \


@@ -24,7 +24,7 @@ runs:
-j8 --exclude-throw-branches
- name: Archive coverage report
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
uses: actions/upload-artifact@v4
with:
name: coverage-report.xml
path: build/coverage_report.xml


@@ -21,17 +21,18 @@ inputs:
runs:
using: composite
steps:
- name: Create build directory
shell: bash
run: mkdir -p "${{ inputs.build_dir }}"
- name: Run conan
shell: bash
env:
BUILD_DIR: "${{ inputs.build_dir }}"
CONAN_BUILD_OPTION: "${{ inputs.force_conan_source_build == 'true' && '*' || 'missing' }}"
BUILD_TYPE: "${{ inputs.build_type }}"
CONAN_PROFILE: "${{ inputs.conan_profile }}"
run: |
conan \
install . \
-of "${BUILD_DIR}" \
-b "${CONAN_BUILD_OPTION}" \
-s "build_type=${BUILD_TYPE}" \
--profile:all "${CONAN_PROFILE}"
-of build \
-b "$CONAN_BUILD_OPTION" \
-s "build_type=${{ inputs.build_type }}" \
--profile:all "${{ inputs.conan_profile }}"


@@ -28,17 +28,12 @@ runs:
- name: Create an issue
id: create_issue
shell: bash
env:
ISSUE_BODY: ${{ inputs.body }}
ISSUE_ASSIGNEES: ${{ inputs.assignees }}
ISSUE_LABELS: ${{ inputs.labels }}
ISSUE_TITLE: ${{ inputs.title }}
run: |
echo -e "${ISSUE_BODY}" > issue.md
echo -e '${{ inputs.body }}' > issue.md
gh issue create \
--assignee "${ISSUE_ASSIGNEES}" \
--label "${ISSUE_LABELS}" \
--title "${ISSUE_TITLE}" \
--assignee '${{ inputs.assignees }}' \
--label '${{ inputs.labels }}' \
--title '${{ inputs.title }}' \
--body-file ./issue.md \
> create_issue.log
created_issue="$(sed 's|.*/||' create_issue.log)"


@@ -0,0 +1,36 @@
name: Get number of threads
description: Determines number of threads to use on macOS and Linux
inputs:
subtract_threads:
description: How many threads to subtract from the calculated number
required: true
default: "0"
outputs:
threads_number:
description: Number of threads to use
value: ${{ steps.number_of_threads_export.outputs.num }}
runs:
using: composite
steps:
- name: Get number of threads on mac
id: mac_threads
if: ${{ runner.os == 'macOS' }}
shell: bash
run: echo "num=$(($(sysctl -n hw.logicalcpu) - 2))" >> $GITHUB_OUTPUT
- name: Get number of threads on Linux
id: linux_threads
if: ${{ runner.os == 'Linux' }}
shell: bash
run: echo "num=$(($(nproc) - 2))" >> $GITHUB_OUTPUT
- name: Shift and export number of threads
id: number_of_threads_export
shell: bash
run: |
num_of_threads="${{ steps.mac_threads.outputs.num || steps.linux_threads.outputs.num }}"
shift_by="${{ inputs.subtract_threads }}"
shifted="$((num_of_threads - shift_by))"
echo "num=$(( shifted > 1 ? shifted : 1 ))" >> $GITHUB_OUTPUT


@@ -0,0 +1,38 @@
name: Restore cache
description: Find and restore the ccache cache
inputs:
conan_profile:
description: Conan profile name
required: true
ccache_dir:
description: Path to .ccache directory
required: true
build_type:
description: Current build type (e.g. Release, Debug)
required: true
default: Release
code_coverage:
description: Whether code coverage is on
required: true
default: "false"
outputs:
ccache_cache_hit:
description: True if ccache cache has been downloaded
value: ${{ steps.ccache_cache.outputs.cache-hit }}
runs:
using: composite
steps:
- name: Find common commit
id: git_common_ancestor
uses: ./.github/actions/git-common-ancestor
- name: Restore ccache cache
uses: actions/cache/restore@v4
id: ccache_cache
if: ${{ env.CCACHE_DISABLE != '1' }}
with:
path: ${{ inputs.ccache_dir }}
key: clio-ccache-${{ runner.os }}-${{ inputs.build_type }}${{ inputs.code_coverage == 'true' && '-code_coverage' || '' }}-${{ inputs.conan_profile }}-develop-${{ steps.git_common_ancestor.outputs.commit }}

.github/actions/save-cache/action.yml

@@ -0,0 +1,38 @@
name: Save cache
description: Save ccache cache for develop branch
inputs:
conan_profile:
description: Conan profile name
required: true
ccache_dir:
description: Path to .ccache directory
required: true
build_type:
description: Current build type (e.g. Release, Debug)
required: true
default: Release
code_coverage:
description: Whether code coverage is on
required: true
default: "false"
ccache_cache_hit:
description: Whether ccache cache has been downloaded
required: true
ccache_cache_miss_rate:
description: The ccache cache miss rate (percentage)
runs:
using: composite
steps:
- name: Find common commit
id: git_common_ancestor
uses: ./.github/actions/git-common-ancestor
- name: Save ccache cache
if: ${{ inputs.ccache_cache_hit != 'true' || inputs.ccache_cache_miss_rate == '100.0' }}
uses: actions/cache/save@v4
with:
path: ${{ inputs.ccache_dir }}
key: clio-ccache-${{ runner.os }}-${{ inputs.build_type }}${{ inputs.code_coverage == 'true' && '-code_coverage' || '' }}-${{ inputs.conan_profile }}-develop-${{ steps.git_common_ancestor.outputs.commit }}
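
The if: expression above encodes a simple decision: save only when nothing was restored, or when the restored cache produced a 100% miss rate and was therefore useless for this build. A bash sketch of the same logic (both arguments are stand-ins for the action inputs):

#!/usr/bin/env bash
# Sketch: the save/skip decision behind the "Save ccache cache" step.
set -euo pipefail

ccache_cache_hit="${1:-false}"       # stand-in for inputs.ccache_cache_hit
ccache_cache_miss_rate="${2:-0.0}"   # stand-in for inputs.ccache_cache_miss_rate

if [[ "${ccache_cache_hit}" != "true" || "${ccache_cache_miss_rate}" == "100.0" ]]; then
    echo "saving ccache cache"
else
    echo "restored cache is still useful; skipping save"
fi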


@@ -91,6 +91,19 @@ updates:
prefix: "ci: [DEPENDABOT] "
target-branch: develop
- package-ecosystem: github-actions
directory: .github/actions/get-threads-number/
schedule:
interval: weekly
day: monday
time: "04:00"
timezone: Etc/GMT
reviewers:
- XRPLF/clio-dev-team
commit-message:
prefix: "ci: [DEPENDABOT] "
target-branch: develop
- package-ecosystem: github-actions
directory: .github/actions/git-common-ancestor/
schedule:
@@ -105,7 +118,20 @@ updates:
target-branch: develop
- package-ecosystem: github-actions
directory: .github/actions/cache-key/
directory: .github/actions/restore-cache/
schedule:
interval: weekly
day: monday
time: "04:00"
timezone: Etc/GMT
reviewers:
- XRPLF/clio-dev-team
commit-message:
prefix: "ci: [DEPENDABOT] "
target-branch: develop
- package-ecosystem: github-actions
directory: .github/actions/save-cache/
schedule:
interval: weekly
day: monday


@@ -4,7 +4,7 @@ build_type=Release
compiler=apple-clang
compiler.cppstd=20
compiler.libcxx=libc++
compiler.version=17.0
compiler.version=17
os=Macos
[conf]


@@ -4,7 +4,7 @@ import json
LINUX_OS = ["heavy", "heavy-arm64"]
LINUX_CONTAINERS = [
'{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }'
'{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
]
LINUX_COMPILERS = ["gcc", "clang"]


@@ -40,9 +40,9 @@ mkdir -p "$PROFILES_DIR"
if [[ "$(uname)" == "Darwin" ]]; then
create_profile_with_sanitizers "apple-clang" "$APPLE_CLANG_PROFILE"
echo "include(apple-clang)" >"$PROFILES_DIR/default"
echo "include(apple-clang)" > "$PROFILES_DIR/default"
else
create_profile_with_sanitizers "clang" "$CLANG_PROFILE"
create_profile_with_sanitizers "gcc" "$GCC_PROFILE"
echo "include(gcc)" >"$PROFILES_DIR/default"
echo "include(gcc)" > "$PROFILES_DIR/default"
fi


@@ -1,25 +0,0 @@
#!/usr/bin/env bash
set -ex
TEMP_DIR=$(mktemp -d)
trap "rm -rf $TEMP_DIR" EXIT
echo "Using temporary CONAN_HOME: $TEMP_DIR"
# We use a temporary Conan home to avoid polluting the user's existing Conan
# configuration and to avoid using the local cache (which leads to non-reproducible lockfiles).
export CONAN_HOME="$TEMP_DIR"
# Ensure that the xrplf remote is the first to be consulted, so any recipes we
# patched are used. We also add it there to avoid creating a huge diff when the
# official Conan Center Index is updated.
conan remote add --force --index 0 xrplf https://conan.ripplex.io
# Delete any existing lockfile.
rm -f conan.lock
# Create a new lockfile that is compatible with macOS.
# It should also work on Linux.
conan lock create . \
--profile:all=.github/scripts/conan/apple-clang-17.profile


@@ -22,8 +22,8 @@ fi
TEST_BINARY=$1
if [[ ! -f "$TEST_BINARY" ]]; then
echo "Test binary not found: $TEST_BINARY"
exit 1
echo "Test binary not found: $TEST_BINARY"
exit 1
fi
TESTS=$($TEST_BINARY --gtest_list_tests | awk '/^ / {print suite $1} !/^ / {suite=$1}')
@@ -31,16 +31,15 @@ TESTS=$($TEST_BINARY --gtest_list_tests | awk '/^ / {print suite $1} !/^ / {su
OUTPUT_DIR="./.sanitizer-report"
mkdir -p "$OUTPUT_DIR"
export TSAN_OPTIONS="die_after_fork=0"
export MallocNanoZone='0' # for MacOSX
for TEST in $TESTS; do
OUTPUT_FILE="$OUTPUT_DIR/${TEST//\//_}.log"
$TEST_BINARY --gtest_filter="$TEST" >"$OUTPUT_FILE" 2>&1
OUTPUT_FILE="$OUTPUT_DIR/${TEST//\//_}"
export TSAN_OPTIONS="log_path=\"$OUTPUT_FILE\" die_after_fork=0"
export ASAN_OPTIONS="log_path=\"$OUTPUT_FILE\""
export UBSAN_OPTIONS="log_path=\"$OUTPUT_FILE\""
export MallocNanoZone='0' # for MacOSX
$TEST_BINARY --gtest_filter="$TEST" > /dev/null 2>&1
if [ $? -ne 0 ]; then
echo "'$TEST' failed a sanitizer check."
else
rm "$OUTPUT_FILE"
fi
if [ $? -ne 0 ]; then
echo "'$TEST' failed a sanitizer check."
fi
done
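
Condensed, the per-test flow above amounts to the following sketch (assuming a gtest binary; each sanitizer's log_path option routes its findings to a per-test file):

#!/usr/bin/env bash
# Sketch: run each gtest case in its own process and collect sanitizer logs.
set -e

TEST_BINARY="${1:?usage: $0 <test-binary>}"
OUTPUT_DIR="./.sanitizer-report"
mkdir -p "$OUTPUT_DIR"

# --gtest_list_tests prints suites unindented and cases indented;
# the awk program glues them back together as Suite.Case.
TESTS=$("$TEST_BINARY" --gtest_list_tests | awk '/^ / {print suite $1} !/^ / {suite=$1}')

for TEST in $TESTS; do
    OUTPUT_FILE="$OUTPUT_DIR/${TEST//\//_}"
    export TSAN_OPTIONS="log_path=\"$OUTPUT_FILE\" die_after_fork=0"
    export ASAN_OPTIONS="log_path=\"$OUTPUT_FILE\""
    export UBSAN_OPTIONS="log_path=\"$OUTPUT_FILE\""
    if ! "$TEST_BINARY" --gtest_filter="$TEST" > /dev/null 2>&1; then
        echo "'$TEST' failed a sanitizer check."
    fi
done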


@@ -20,5 +20,5 @@ for artifact_name in $(ls); do
rm "${artifact_name}/${BINARY_NAME}"
rm -r "${artifact_name}"
sha256sum "./${artifact_name}.zip" >"./${artifact_name}.zip.sha256sum"
sha256sum "./${artifact_name}.zip" > "./${artifact_name}.zip.sha256sum"
done


@@ -38,37 +38,32 @@ on:
description: Whether to strip clio binary
default: true
defaults:
run:
shell: bash
jobs:
build_and_publish_image:
name: Build and publish image
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@v4
- name: Download Clio binary from artifact
if: ${{ inputs.artifact_name != null }}
uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
uses: actions/download-artifact@v5
with:
name: ${{ inputs.artifact_name }}
path: ./docker/clio/artifact/
- name: Download Clio binary from url
if: ${{ inputs.clio_server_binary_url != null }}
env:
BINARY_URL: ${{ inputs.clio_server_binary_url }}
BINARY_SHA256: ${{ inputs.binary_sha256 }}
shell: bash
run: |
wget "${BINARY_URL}" -P ./docker/clio/artifact/
if [ "$(sha256sum ./docker/clio/clio_server | awk '{print $1}')" != "${BINARY_SHA256}" ]; then
wget "${{inputs.clio_server_binary_url}}" -P ./docker/clio/artifact/
if [ "$(sha256sum ./docker/clio/clio_server | awk '{print $1}')" != "${{inputs.binary_sha256}}" ]; then
echo "Binary sha256 sum doesn't match"
exit 1
fi
- name: Unpack binary
shell: bash
run: |
sudo apt update && sudo apt install -y tar unzip
cd docker/clio/artifact
@@ -85,6 +80,7 @@ jobs:
- name: Strip binary
if: ${{ inputs.strip_binary }}
shell: bash
run: strip ./docker/clio/clio_server
- name: Set GHCR_REPO
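
The download-and-verify step boils down to this sketch (BINARY_URL and BINARY_SHA256 are hypothetical placeholders for the workflow inputs):

#!/usr/bin/env bash
# Sketch: fetch a binary and check it against an expected SHA-256.
set -euo pipefail

BINARY_URL="https://example.com/clio_server"  # hypothetical
BINARY_SHA256="expected-sha256-goes-here"     # hypothetical

wget "${BINARY_URL}" -O ./clio_server
actual="$(sha256sum ./clio_server | awk '{print $1}')"
if [[ "${actual}" != "${BINARY_SHA256}" ]]; then
    echo "Binary sha256 sum doesn't match"
    exit 1
fi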


@@ -23,7 +23,6 @@ on:
- "cmake/**"
- "src/**"
- "tests/**"
- "benchmarks/**"
- docs/config-description.md
workflow_dispatch:
@@ -34,10 +33,6 @@ concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-${{ github.ref == 'refs/heads/develop' && github.run_number || 'branch' }}
cancel-in-progress: true
defaults:
run:
shell: bash
jobs:
build-and-test:
name: Build and Test
@@ -50,7 +45,7 @@ jobs:
build_type: [Release, Debug]
container:
[
'{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }',
'{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }',
]
static: [true]
@@ -80,11 +75,11 @@ jobs:
uses: ./.github/workflows/reusable-build.yml
with:
runs_on: heavy
container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }'
container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
conan_profile: gcc
build_type: Debug
download_ccache: true
upload_ccache: true
upload_ccache: false
code_coverage: true
static: true
upload_clio_server: false
@@ -93,21 +88,40 @@ jobs:
secrets:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
package:
name: Build packages
uses: ./.github/workflows/reusable-build.yml
with:
runs_on: heavy
container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
conan_profile: gcc
build_type: Release
download_ccache: true
upload_ccache: false
code_coverage: false
static: true
upload_clio_server: false
package: true
targets: package
analyze_build_time: false
check_config:
name: Check Config Description
needs: build-and-test
runs-on: heavy
container:
image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696
image: ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@v4
- uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
- uses: actions/download-artifact@v5
with:
name: clio_server_Linux_Release_gcc
- name: Compare Config Description
shell: bash
run: |
repoConfigFile=docs/config-description.md
configDescriptionFile=config_description_new.md


@@ -12,33 +12,31 @@ concurrency:
env:
CONAN_PROFILE: gcc
defaults:
run:
shell: bash
jobs:
build:
name: Build Clio / `libXRPL ${{ github.event.client_payload.version }}`
runs-on: heavy
container:
image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696
image: ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Prepare runner
uses: XRPLF/actions/prepare-runner@e8d2d2a546a03e1d161dca52890705f3bc641215
uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
with:
enable_ccache: false
disable_ccache: true
- name: Update libXRPL version requirement
shell: bash
run: |
sed -i.bak -E "s|'xrpl/[a-zA-Z0-9\\.\\-]+'|'xrpl/${{ github.event.client_payload.conan_ref }}'|g" conanfile.py
rm -f conanfile.py.bak
- name: Update conan lockfile
shell: bash
run: |
conan lock create . --profile:all ${{ env.CONAN_PROFILE }}
@@ -59,7 +57,7 @@ jobs:
run: strip build/clio_tests
- name: Upload clio_tests
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
uses: actions/upload-artifact@v4
with:
name: clio_tests_check_libxrpl
path: build/clio_tests
@@ -69,10 +67,10 @@ jobs:
needs: build
runs-on: heavy
container:
image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696
image: ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d
steps:
- uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
- uses: actions/download-artifact@v5
with:
name: clio_tests_check_libxrpl
@@ -92,7 +90,7 @@ jobs:
issues: write
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@v4
- name: Create an issue
uses: ./.github/actions/create-issue


@@ -5,26 +5,20 @@ on:
types: [opened, edited, reopened, synchronize]
branches: [develop]
defaults:
run:
shell: bash
jobs:
check_title:
runs-on: ubuntu-latest
steps:
- uses: ytanikin/pr-conventional-commits@fda730cb152c05a849d6d84325e50c6182d9d1e9 # 1.5.1
- uses: ytanikin/pr-conventional-commits@b72758283dcbee706975950e96bc4bf323a8d8c0 # v1.4.2
with:
task_types: '["build","feat","fix","docs","test","ci","style","refactor","perf","chore"]'
add_label: false
custom_labels: '{"build":"build", "feat":"enhancement", "fix":"bug", "docs":"documentation", "test":"testability", "ci":"ci", "style":"refactoring", "refactor":"refactoring", "perf":"performance", "chore":"tooling"}'
- name: Check if message starts with upper-case letter
env:
PR_TITLE: ${{ github.event.pull_request.title }}
run: |
if [[ ! "${PR_TITLE}" =~ ^[a-z]+:\ [\[A-Z] ]]; then
if [[ ! "${{ github.event.pull_request.title }}" =~ ^[a-z]+:\ [\[A-Z] ]]; then
echo "Error: PR title must start with an upper-case letter."
exit 1
fi
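
The regex accepts a lower-case conventional-commit type, then ": ", then an upper-case letter or an opening bracket. A quick bash illustration (the example titles are hypothetical):

#!/usr/bin/env bash
# Sketch: the title rule enforced by the check above.
check_title() {
    local title="$1"
    if [[ ! "$title" =~ ^[a-z]+:\ [\[A-Z] ]]; then
        echo "rejected: $title"
    else
        echo "accepted: $title"
    fi
}

check_title "feat: Add new RPC handler"     # accepted
check_title "fix: [CI] Pin action versions" # accepted ("[" allowed)
check_title "feat: add new RPC handler"     # rejected: lower-case after colon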


@@ -22,16 +22,12 @@ env:
CONAN_PROFILE: clang
LLVM_TOOLS_VERSION: 20
defaults:
run:
shell: bash
jobs:
clang_tidy:
if: github.event_name != 'push' || contains(github.event.head_commit.message, 'clang-tidy auto fixes')
runs-on: heavy
container:
image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696
image: ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d
permissions:
contents: write
@@ -39,14 +35,21 @@ jobs:
pull-requests: write
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Prepare runner
uses: XRPLF/actions/prepare-runner@e8d2d2a546a03e1d161dca52890705f3bc641215
uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
with:
enable_ccache: false
disable_ccache: true
- name: Restore cache
uses: ./.github/actions/restore-cache
id: restore_cache
with:
conan_profile: ${{ env.CONAN_PROFILE }}
ccache_dir: ${{ env.CCACHE_DIR }}
- name: Run conan
uses: ./.github/actions/conan
@@ -58,34 +61,34 @@ jobs:
with:
conan_profile: ${{ env.CONAN_PROFILE }}
- name: Get number of processors
uses: XRPLF/actions/get-nproc@cf0433aa74563aead044a1e395610c96d65a37cf
id: nproc
- name: Get number of threads
uses: ./.github/actions/get-threads-number
id: number_of_threads
- name: Run clang-tidy (several times)
- name: Run clang-tidy
continue-on-error: true
id: clang_tidy
shell: bash
id: run_clang_tidy
run: |
# We run clang-tidy several times, because some fixes may enable new fixes in subsequent runs.
CLANG_TIDY_COMMAND="run-clang-tidy-${{ env.LLVM_TOOLS_VERSION }} -p build -j ${{ steps.nproc.outputs.nproc }} -fix -quiet"
${CLANG_TIDY_COMMAND} ||
${CLANG_TIDY_COMMAND} ||
${CLANG_TIDY_COMMAND}
- name: Check for changes
id: files_changed
continue-on-error: true
run: |
git diff --exit-code
run-clang-tidy-${{ env.LLVM_TOOLS_VERSION }} -p build -j "${{ steps.number_of_threads.outputs.threads_number }}" -fix -quiet 1>output.txt
- name: Fix local includes and clang-format style
if: ${{ steps.files_changed.outcome != 'success' }}
if: ${{ steps.run_clang_tidy.outcome != 'success' }}
shell: bash
run: |
pre-commit run --all-files fix-local-includes || true
pre-commit run --all-files clang-format || true
- name: Print issues found
if: ${{ steps.run_clang_tidy.outcome != 'success' }}
shell: bash
run: |
sed -i '/error\||/!d' ./output.txt
cat output.txt
rm output.txt
- name: Create an issue
if: ${{ (steps.clang_tidy.outcome != 'success' || steps.files_changed.outcome != 'success') && github.event_name != 'pull_request' }}
if: ${{ steps.run_clang_tidy.outcome != 'success' && github.event_name != 'pull_request' }}
id: create_issue
uses: ./.github/actions/create-issue
env:
@@ -98,7 +101,7 @@ jobs:
List of the issues found: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}/
- uses: crazy-max/ghaction-import-gpg@e89d40939c28e39f97cf32126055eeae86ba74ec # v6.3.0
if: ${{ steps.files_changed.outcome != 'success' && github.event_name != 'pull_request' }}
if: ${{ steps.run_clang_tidy.outcome != 'success' && github.event_name != 'pull_request' }}
with:
gpg_private_key: ${{ secrets.ACTIONS_GPG_PRIVATE_KEY }}
passphrase: ${{ secrets.ACTIONS_GPG_PASSPHRASE }}
@@ -106,8 +109,8 @@ jobs:
git_commit_gpgsign: true
- name: Create PR with fixes
if: ${{ steps.files_changed.outcome != 'success' && github.event_name != 'pull_request' }}
uses: peter-evans/create-pull-request@98357b18bf14b5342f975ff684046ec3b2a07725 # v8.0.0
if: ${{ steps.run_clang_tidy.outcome != 'success' && github.event_name != 'pull_request' }}
uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
env:
GH_REPO: ${{ github.repository }}
GH_TOKEN: ${{ github.token }}
@@ -122,5 +125,6 @@ jobs:
reviewers: "godexsoft,kuznetsss,PeterChen13579,mathbunnyru"
- name: Fail the job
if: ${{ steps.clang_tidy.outcome != 'success' || steps.files_changed.outcome != 'success' }}
if: ${{ steps.run_clang_tidy.outcome != 'success' }}
shell: bash
run: exit 1


@@ -10,26 +10,22 @@ concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
defaults:
run:
shell: bash
jobs:
build:
runs-on: ubuntu-latest
container:
image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696
image: ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d
steps:
- name: Checkout
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@v4
with:
lfs: true
- name: Prepare runner
uses: XRPLF/actions/prepare-runner@e8d2d2a546a03e1d161dca52890705f3bc641215
uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
with:
enable_ccache: false
disable_ccache: true
- name: Create build directory
run: mkdir build_docs
@@ -43,10 +39,10 @@ jobs:
run: cmake --build . --target docs
- name: Setup Pages
uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b # v5.0.0
uses: actions/configure-pages@v5
- name: Upload artifact
uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b # v4.0.0
uses: actions/upload-pages-artifact@v4
with:
path: build_docs/html
name: docs-develop
@@ -66,6 +62,6 @@ jobs:
steps:
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e # v4.0.5
uses: actions/deploy-pages@v4
with:
artifact_name: docs-develop


@@ -23,25 +23,9 @@ concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
defaults:
run:
shell: bash
jobs:
get_date:
name: Get Date
runs-on: ubuntu-latest
outputs:
date: ${{ steps.get_date.outputs.date }}
steps:
- name: Get current date
id: get_date
run: |
echo "date=$(date +'%Y%m%d')" >> $GITHUB_OUTPUT
build-and-test:
name: Build and Test
needs: get_date
strategy:
fail-fast: false
@@ -55,17 +39,17 @@ jobs:
conan_profile: gcc
build_type: Release
static: true
container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }'
container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
- os: heavy
conan_profile: gcc
build_type: Debug
static: true
container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }'
container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
- os: heavy
conan_profile: gcc.ubsan
build_type: Release
static: false
container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }'
container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
uses: ./.github/workflows/reusable-build-test.yml
with:
@@ -79,31 +63,9 @@ jobs:
upload_clio_server: true
download_ccache: false
upload_ccache: false
version: nightly-${{ needs.get_date.outputs.date }}
package:
name: Build debian package
needs: get_date
uses: ./.github/workflows/reusable-build.yml
with:
runs_on: heavy
container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }'
conan_profile: gcc
build_type: Release
download_ccache: false
upload_ccache: false
code_coverage: false
static: true
upload_clio_server: false
package: true
version: nightly-${{ needs.get_date.outputs.date }}
targets: package
analyze_build_time: false
analyze_build_time:
name: Analyze Build Time
needs: get_date
strategy:
fail-fast: false
@@ -111,7 +73,7 @@ jobs:
include:
- os: heavy
conan_profile: clang
container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }'
container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
static: true
- os: macos15
conan_profile: apple-clang
@@ -130,20 +92,19 @@ jobs:
upload_clio_server: false
targets: all
analyze_build_time: true
version: nightly-${{ needs.get_date.outputs.date }}
nightly_release:
needs: [build-and-test, package, get_date]
needs: build-and-test
uses: ./.github/workflows/reusable-release.yml
with:
delete_pattern: "nightly-*"
overwrite_release: true
prerelease: true
title: "Clio development build (nightly-${{ needs.get_date.outputs.date }})"
version: nightly-${{ needs.get_date.outputs.date }}
title: "Clio development (nightly) build"
version: nightly
header: >
> **Note:** Please remember that this is a development release and it is not recommended for production use.
Changelog (including previous releases): <https://github.com/XRPLF/clio/commits/nightly-${{ needs.get_date.outputs.date }}>
Changelog (including previous releases): <https://github.com/XRPLF/clio/commits/nightly>
generate_changelog: false
draft: false
@@ -169,7 +130,7 @@ jobs:
issues: write
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@v4
- name: Create an issue
uses: ./.github/actions/create-issue


@@ -1,8 +1,8 @@
name: Pre-commit auto-update
on:
# every first day of the month
schedule:
# every first day of the month
- cron: "0 0 1 * *"
pull_request:
branches: [release/*, develop]
@@ -12,7 +12,7 @@ on:
jobs:
auto-update:
uses: XRPLF/actions/.github/workflows/pre-commit-autoupdate.yml@ad4ab1ae5a54a4bab0e87294c31fc0729f788b2b
uses: XRPLF/actions/.github/workflows/pre-commit-autoupdate.yml@afbcbdafbe0ce5439492fb87eda6441371086386
with:
sign_commit: true
committer: "Clio CI <skuznetsov@ripple.com>"


@@ -8,7 +8,7 @@ on:
jobs:
run-hooks:
uses: XRPLF/actions/.github/workflows/pre-commit.yml@01163508e81d7dd63d4601d4090b297a260b18c2
uses: XRPLF/actions/.github/workflows/pre-commit.yml@a8d7472b450eb53a1e5228f64552e5974457a21a
with:
runs_on: heavy
container: '{ "image": "ghcr.io/xrplf/clio-pre-commit:14342e087ceb8b593027198bf9ef06a43833c696" }'
container: '{ "image": "ghcr.io/xrplf/clio-pre-commit:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'


@@ -29,7 +29,7 @@ jobs:
conan_profile: gcc
build_type: Release
static: true
container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }'
container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
uses: ./.github/workflows/reusable-build-test.yml
with:
@@ -43,32 +43,13 @@ jobs:
upload_clio_server: true
download_ccache: false
upload_ccache: false
version: ${{ github.event_name == 'push' && github.ref_name || '' }}
package:
name: Build debian package
uses: ./.github/workflows/reusable-build.yml
with:
runs_on: heavy
container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }'
conan_profile: gcc
build_type: Release
download_ccache: false
upload_ccache: false
code_coverage: false
static: true
upload_clio_server: false
package: true
version: ${{ github.event_name == 'push' && github.ref_name || '' }}
targets: package
analyze_build_time: false
expected_version: ${{ github.event_name == 'push' && github.ref_name || '' }}
release:
needs: [build-and-test, package]
needs: build-and-test
uses: ./.github/workflows/reusable-release.yml
with:
delete_pattern: ""
overwrite_release: false
prerelease: ${{ contains(github.ref_name, '-') }}
title: "${{ github.ref_name }}"
version: "${{ github.ref_name }}"


@@ -63,18 +63,18 @@ on:
type: string
default: all
expected_version:
description: Expected version of the clio_server binary
required: false
type: string
default: ""
package:
description: Whether to generate Debian package
required: false
type: boolean
default: false
version:
description: Version of the clio_server binary
required: false
type: string
default: ""
jobs:
build:
uses: ./.github/workflows/reusable-build.yml
@@ -90,8 +90,8 @@ jobs:
upload_clio_server: ${{ inputs.upload_clio_server }}
targets: ${{ inputs.targets }}
analyze_build_time: false
expected_version: ${{ inputs.expected_version }}
package: ${{ inputs.package }}
version: ${{ inputs.version }}
test:
needs: build


@@ -60,25 +60,21 @@ on:
required: true
type: boolean
expected_version:
description: Expected version of the clio_server binary
required: false
type: string
default: ""
package:
description: Whether to generate Debian package
required: false
type: boolean
version:
description: Version of the clio_server binary
required: false
type: string
default: ""
secrets:
CODECOV_TOKEN:
required: false
defaults:
run:
shell: bash
jobs:
build:
name: Build
@@ -88,38 +84,36 @@ jobs:
steps:
- name: Cleanup workspace
if: ${{ runner.os == 'macOS' }}
uses: XRPLF/actions/cleanup-workspace@cf0433aa74563aead044a1e395610c96d65a37cf
uses: XRPLF/actions/.github/actions/cleanup-workspace@ea9970b7c211b18f4c8bcdb28c29f5711752029f
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@v4
with:
fetch-depth: 0
# We need to fetch tags to have correct version in the release
# The workaround is based on https://github.com/actions/checkout/issues/1467
fetch-tags: true
ref: ${{ github.ref }}
- name: Prepare runner
uses: XRPLF/actions/prepare-runner@e8d2d2a546a03e1d161dca52890705f3bc641215
uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
with:
enable_ccache: ${{ inputs.download_ccache }}
disable_ccache: ${{ !inputs.download_ccache }}
- name: Setup conan on macOS
if: ${{ runner.os == 'macOS' }}
shell: bash
run: ./.github/scripts/conan/init.sh
- name: Generate cache key
uses: ./.github/actions/cache-key
id: cache_key
- name: Restore cache
if: ${{ inputs.download_ccache }}
uses: ./.github/actions/restore-cache
id: restore_cache
with:
conan_profile: ${{ inputs.conan_profile }}
ccache_dir: ${{ env.CCACHE_DIR }}
build_type: ${{ inputs.build_type }}
code_coverage: ${{ inputs.code_coverage }}
- name: Restore ccache cache
if: ${{ inputs.download_ccache && github.ref != 'refs/heads/develop' }}
uses: actions/cache/restore@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v5.0.2
with:
path: ${{ env.CCACHE_DIR }}
key: ${{ steps.cache_key.outputs.key }}
restore-keys: |
${{ steps.cache_key.outputs.restore_keys }}
- name: Run conan
uses: ./.github/actions/conan
with:
@@ -135,7 +129,6 @@ jobs:
static: ${{ inputs.static }}
time_trace: ${{ inputs.analyze_build_time }}
package: ${{ inputs.package }}
version: ${{ inputs.version }}
- name: Build Clio
uses: ./.github/actions/build-clio
@@ -148,26 +141,24 @@ jobs:
ClangBuildAnalyzer --all build/ build_time_report.bin
ClangBuildAnalyzer --analyze build_time_report.bin > build_time_report.txt
cat build_time_report.txt
shell: bash
- name: Upload build time analyze report
if: ${{ inputs.analyze_build_time }}
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
uses: actions/upload-artifact@v4
with:
name: build_time_report_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
path: build_time_report.txt
- name: Show ccache's statistics and zero it
- name: Show ccache's statistics
if: ${{ inputs.download_ccache }}
shell: bash
id: ccache_stats
run: |
ccache --show-stats -vv
ccache --zero-stats
- name: Save ccache cache
if: ${{ inputs.upload_ccache && github.ref == 'refs/heads/develop' }}
uses: actions/cache/save@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v5.0.2
with:
path: ${{ env.CCACHE_DIR }}
key: ${{ steps.cache_key.outputs.key }}
ccache -s > /tmp/ccache.stats
miss_rate=$(cat /tmp/ccache.stats | grep 'Misses' | head -n1 | sed 's/.*(\(.*\)%).*/\1/')
echo "miss_rate=${miss_rate}" >> $GITHUB_OUTPUT
cat /tmp/ccache.stats
- name: Strip unit_tests
if: ${{ !endsWith(inputs.conan_profile, 'san') && !inputs.code_coverage && !inputs.analyze_build_time }}
@@ -179,32 +170,44 @@ jobs:
- name: Upload clio_server
if: ${{ inputs.upload_clio_server && !inputs.code_coverage && !inputs.analyze_build_time }}
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
uses: actions/upload-artifact@v4
with:
name: clio_server_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
path: build/clio_server
- name: Upload clio_tests
if: ${{ !inputs.code_coverage && !inputs.analyze_build_time && !inputs.package }}
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
uses: actions/upload-artifact@v4
with:
name: clio_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
path: build/clio_tests
- name: Upload clio_integration_tests
if: ${{ !inputs.code_coverage && !inputs.analyze_build_time && !inputs.package }}
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
uses: actions/upload-artifact@v4
with:
name: clio_integration_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
path: build/clio_integration_tests
- name: Upload Clio Linux package
if: ${{ inputs.package }}
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
uses: actions/upload-artifact@v4
with:
name: clio_deb_package_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
path: build/*.deb
- name: Save cache
if: ${{ inputs.upload_ccache && github.ref == 'refs/heads/develop' }}
uses: ./.github/actions/save-cache
with:
conan_profile: ${{ inputs.conan_profile }}
ccache_dir: ${{ env.CCACHE_DIR }}
build_type: ${{ inputs.build_type }}
code_coverage: ${{ inputs.code_coverage }}
ccache_cache_hit: ${{ steps.restore_cache.outputs.ccache_cache_hit }}
ccache_cache_miss_rate: ${{ steps.ccache_stats.outputs.miss_rate }}
# This is run as part of the build job, because it requires the following:
# - source code
# - conan packages
@@ -215,21 +218,15 @@ jobs:
if: ${{ inputs.code_coverage }}
uses: ./.github/actions/code-coverage
- name: Verify version is expected
if: ${{ inputs.version != '' }}
env:
INPUT_VERSION: ${{ inputs.version }}
BUILD_TYPE: ${{ inputs.build_type }}
- name: Verify expected version
if: ${{ inputs.expected_version != '' }}
shell: bash
run: |
set -e
EXPECTED_VERSION="clio-${INPUT_VERSION}"
if [[ "${BUILD_TYPE}" == "Debug" ]]; then
EXPECTED_VERSION="${EXPECTED_VERSION}+DEBUG"
fi
actual_version=$(./build/clio_server --version | head -n 1)
if [[ "${actual_version}" != "${EXPECTED_VERSION}" ]]; then
echo "Expected version '${EXPECTED_VERSION}', but got '${actual_version}'"
EXPECTED_VERSION="clio-${{ inputs.expected_version }}"
actual_version=$(./build/clio_server --version)
if [[ "$actual_version" != "$EXPECTED_VERSION" ]]; then
echo "Expected version '$EXPECTED_VERSION', but got '$actual_version'"
exit 1
fi
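
Taken together, the removed verification steps amount to the following sketch (assuming clio_server prints "clio-<version>" as the first line of its --version output, with "+DEBUG" appended on Debug builds, as the removed code indicates):

#!/usr/bin/env bash
# Sketch: verify that the built binary reports the expected version.
set -euo pipefail

INPUT_VERSION="${1:?usage: $0 <version> [build-type]}"
BUILD_TYPE="${2:-Release}"

EXPECTED_VERSION="clio-${INPUT_VERSION}"
if [[ "${BUILD_TYPE}" == "Debug" ]]; then
    EXPECTED_VERSION="${EXPECTED_VERSION}+DEBUG"
fi

actual_version="$(./build/clio_server --version | head -n 1)"
if [[ "${actual_version}" != "${EXPECTED_VERSION}" ]]; then
    echo "Expected version '${EXPECTED_VERSION}', but got '${actual_version}'"
    exit 1
fi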


@@ -3,10 +3,10 @@ name: Make release
on:
workflow_call:
inputs:
delete_pattern:
description: "Pattern to delete previous releases"
overwrite_release:
description: "Overwrite the current release and tag"
required: true
type: string
type: boolean
prerelease:
description: "Create a prerelease"
@@ -38,15 +38,11 @@ on:
required: true
type: boolean
defaults:
run:
shell: bash
jobs:
release:
runs-on: heavy
container:
image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696
image: ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d
env:
GH_REPO: ${{ github.repository }}
GH_TOKEN: ${{ github.token }}
@@ -55,75 +51,62 @@ jobs:
contents: write
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Prepare runner
uses: XRPLF/actions/prepare-runner@e8d2d2a546a03e1d161dca52890705f3bc641215
uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
with:
enable_ccache: false
disable_ccache: true
- uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
- uses: actions/download-artifact@v5
with:
path: release_artifacts
pattern: clio_server_*
- name: Prepare release artifacts
run: .github/scripts/prepare-release-artifacts.sh release_artifacts
- uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
with:
path: release_artifacts
pattern: clio_deb_package_*
- name: Create release notes
env:
RELEASE_HEADER: ${{ inputs.header }}
shell: bash
run: |
echo "# Release notes" > "${RUNNER_TEMP}/release_notes.md"
echo "" >> "${RUNNER_TEMP}/release_notes.md"
printf '%s\n' "${RELEASE_HEADER}" >> "${RUNNER_TEMP}/release_notes.md"
printf '%s\n' "${{ inputs.header }}" >> "${RUNNER_TEMP}/release_notes.md"
- name: Generate changelog
shell: bash
if: ${{ inputs.generate_changelog }}
run: |
LAST_TAG="$(gh release view --json tagName -q .tagName --repo XRPLF/clio)"
LAST_TAG_COMMIT="$(git rev-parse $LAST_TAG)"
BASE_COMMIT="$(git merge-base HEAD $LAST_TAG_COMMIT)"
git-cliff "${BASE_COMMIT}..HEAD" --ignore-tags "nightly|-b|-rc" >> "${RUNNER_TEMP}/release_notes.md"
git-cliff "${BASE_COMMIT}..HEAD" --ignore-tags "nightly|-b|-rc"
cat CHANGELOG.md >> "${RUNNER_TEMP}/release_notes.md"
- name: Prepare release artifacts
shell: bash
run: .github/scripts/prepare-release-artifacts.sh release_artifacts
- name: Upload release notes
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
uses: actions/upload-artifact@v4
with:
name: release_notes_${{ inputs.version }}
path: "${RUNNER_TEMP}/release_notes.md"
- name: Remove previous release with a pattern
if: ${{ github.event_name != 'pull_request' && inputs.delete_pattern != '' }}
env:
DELETE_PATTERN: ${{ inputs.delete_pattern }}
- name: Remove current release and tag
if: ${{ github.event_name != 'pull_request' && inputs.overwrite_release }}
shell: bash
run: |
RELEASES_TO_DELETE=$(gh release list --limit 50 --repo "${GH_REPO}" | grep -E "${DELETE_PATTERN}" | awk -F'\t' '{print $3}' || true)
if [ -n "$RELEASES_TO_DELETE" ]; then
for RELEASE in $RELEASES_TO_DELETE; do
echo "Deleting release: $RELEASE"
gh release delete "$RELEASE" --repo "${GH_REPO}" --yes --cleanup-tag
done
fi
gh release delete ${{ inputs.version }} --yes || true
git push origin :${{ inputs.version }} || true
- name: Publish release
if: ${{ github.event_name != 'pull_request' }}
env:
RELEASE_VERSION: ${{ inputs.version }}
PRERELEASE_OPTION: ${{ inputs.prerelease && '--prerelease' || '' }}
RELEASE_TITLE: ${{ inputs.title }}
DRAFT_OPTION: ${{ inputs.draft && '--draft' || '' }}
shell: bash
run: |
gh release create "${RELEASE_VERSION}" \
${PRERELEASE_OPTION} \
--title "${RELEASE_TITLE}" \
gh release create "${{ inputs.version }}" \
${{ inputs.prerelease && '--prerelease' || '' }} \
--title "${{ inputs.title }}" \
--target "${GITHUB_SHA}" \
${DRAFT_OPTION} \
${{ inputs.draft && '--draft' || '' }} \
--notes-file "${RUNNER_TEMP}/release_notes.md" \
./release_artifacts/clio_*
./release_artifacts/clio_server*
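
The pattern-based cleanup step is equivalent to this sketch (requires the gh CLI with GH_REPO and GH_TOKEN set; the pattern argument, e.g. "nightly-*", is interpreted by grep -E):

#!/usr/bin/env bash
# Sketch: delete all releases whose entries match a pattern, tags included.
set -euo pipefail

DELETE_PATTERN="${1:?usage: $0 <pattern>}"

# Column 3 of `gh release list` is the tag name; tolerate zero matches.
RELEASES_TO_DELETE=$(gh release list --limit 50 --repo "${GH_REPO}" \
    | grep -E "${DELETE_PATTERN}" | awk -F'\t' '{print $3}' || true)

for RELEASE in $RELEASES_TO_DELETE; do
    echo "Deleting release: $RELEASE"
    gh release delete "$RELEASE" --repo "${GH_REPO}" --yes --cleanup-tag
done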


@@ -33,10 +33,6 @@ on:
required: true
type: boolean
defaults:
run:
shell: bash
jobs:
unit_tests:
name: Unit testing
@@ -47,22 +43,23 @@ jobs:
env:
# TODO: remove completely when we have fixed all currently existing issues with sanitizers
SANITIZER_IGNORE_ERRORS: ${{ endsWith(inputs.conan_profile, '.tsan') }}
SANITIZER_IGNORE_ERRORS: ${{ endsWith(inputs.conan_profile, '.tsan') || inputs.conan_profile == 'clang.asan' || (inputs.conan_profile == 'gcc.asan' && inputs.build_type == 'Release') }}
steps:
- name: Cleanup workspace
if: ${{ runner.os == 'macOS' }}
uses: XRPLF/actions/cleanup-workspace@cf0433aa74563aead044a1e395610c96d65a37cf
uses: XRPLF/actions/.github/actions/cleanup-workspace@ea9970b7c211b18f4c8bcdb28c29f5711752029f
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
- uses: actions/download-artifact@v5
with:
name: clio_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
- name: Make clio_tests executable
shell: bash
run: chmod +x ./clio_tests
- name: Run clio_tests (regular)
@@ -71,10 +68,11 @@ jobs:
- name: Run clio_tests (sanitizer errors ignored)
if: ${{ env.SANITIZER_IGNORE_ERRORS == 'true' }}
run: ./.github/scripts/execute-tests-under-sanitizer.sh ./clio_tests
run: ./.github/scripts/execute-tests-under-sanitizer ./clio_tests
- name: Check for sanitizer report
if: ${{ env.SANITIZER_IGNORE_ERRORS == 'true' }}
shell: bash
id: check_report
run: |
if ls .sanitizer-report/* 1> /dev/null 2>&1; then
@@ -85,7 +83,7 @@ jobs:
- name: Upload sanitizer report
if: ${{ env.SANITIZER_IGNORE_ERRORS == 'true' && steps.check_report.outputs.found_report == 'true' }}
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
uses: actions/upload-artifact@v4
with:
name: sanitizer_report_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
path: .sanitizer-report/*
@@ -124,19 +122,13 @@ jobs:
steps:
- name: Cleanup workspace
if: ${{ runner.os == 'macOS' }}
uses: XRPLF/actions/cleanup-workspace@cf0433aa74563aead044a1e395610c96d65a37cf
uses: XRPLF/actions/.github/actions/cleanup-workspace@ea9970b7c211b18f4c8bcdb28c29f5711752029f
- name: Delete and start colima (macOS)
# This is a temporary workaround for colima issues on macOS runners
- name: Spin up scylladb
if: ${{ runner.os == 'macOS' }}
timeout-minutes: 3
run: |
colima delete --force
colima start
- name: Spin up scylladb (macOS)
if: ${{ runner.os == 'macOS' }}
timeout-minutes: 1
run: |
docker rm --force scylladb || true
docker run \
--detach \
--name scylladb \
@@ -148,15 +140,11 @@ jobs:
--memory 16G \
scylladb/scylla
- name: Wait for scylladb container to be healthy (macOS)
if: ${{ runner.os == 'macOS' }}
timeout-minutes: 1
run: |
until [ "$(docker inspect -f '{{.State.Health.Status}}' scylladb)" == "healthy" ]; do
sleep 1
sleep 5
done
- uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
- uses: actions/download-artifact@v5
with:
name: clio_integration_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}


@@ -6,29 +6,25 @@ on:
CODECOV_TOKEN:
required: true
defaults:
run:
shell: bash
jobs:
upload_report:
name: Upload report
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Download report artifact
uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
uses: actions/download-artifact@v5
with:
name: coverage-report.xml
path: build
- name: Upload coverage report
if: ${{ hashFiles('build/coverage_report.xml') != '' }}
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
with:
files: build/coverage_report.xml
fail_ci_if_error: true


@@ -15,7 +15,7 @@ on:
- ".github/actions/**"
- "!.github/actions/build-docker-image/**"
- "!.github/actions/create-issue/**"
- .github/scripts/execute-tests-under-sanitizer.sh
- .github/scripts/execute-tests-under-sanitizer
- CMakeLists.txt
- conanfile.py
@@ -44,13 +44,14 @@ jobs:
uses: ./.github/workflows/reusable-build-test.yml
with:
runs_on: heavy
container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }'
container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
download_ccache: false
upload_ccache: false
conan_profile: ${{ matrix.compiler }}${{ matrix.sanitizer_ext }}
build_type: ${{ matrix.build_type }}
static: false
run_unit_tests: true
# Currently, both gcc.tsan and clang.tsan unit tests hang
run_unit_tests: ${{ matrix.sanitizer_ext != '.tsan' }}
run_integration_tests: false
upload_clio_server: false
targets: clio_tests clio_integration_tests


@@ -33,10 +33,6 @@ env:
GCC_MAJOR_VERSION: 15
GCC_VERSION: 15.2.0
defaults:
run:
shell: bash
jobs:
repo:
name: Calculate repo name
@@ -56,11 +52,11 @@ jobs:
needs: repo
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@v4
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files: "docker/compilers/gcc/**"
@@ -94,11 +90,11 @@ jobs:
needs: repo
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@v4
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
with:
files: "docker/compilers/gcc/**"
@@ -132,16 +128,16 @@ jobs:
needs: [repo, gcc-amd64, gcc-arm64]
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@v4
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files: "docker/compilers/gcc/**"
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Container Registry
if: ${{ github.event_name != 'pull_request' }}
@@ -183,11 +179,11 @@ jobs:
needs: repo
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@v4
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files: "docker/compilers/clang/**"
@@ -219,11 +215,11 @@ jobs:
needs: [repo, gcc-merge]
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@v4
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files: "docker/tools/**"
@@ -250,11 +246,11 @@ jobs:
needs: [repo, gcc-merge]
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@v4
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
with:
files: "docker/tools/**"
@@ -281,16 +277,16 @@ jobs:
needs: [repo, tools-amd64, tools-arm64]
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@v4
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files: "docker/tools/**"
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Container Registry
if: ${{ github.event_name != 'pull_request' }}
@@ -316,7 +312,7 @@ jobs:
needs: [repo, tools-merge]
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@v4
- uses: ./.github/actions/build-docker-image
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -338,7 +334,7 @@ jobs:
needs: [repo, gcc-merge, clang, tools-merge]
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@v4
- uses: ./.github/actions/build-docker-image
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}


@@ -22,7 +22,6 @@ on:
- .github/actions/conan/action.yml
- ".github/scripts/conan/**"
- "!.github/scripts/conan/regenerate_lockfile.sh"
- conanfile.py
- conan.lock
@@ -33,7 +32,6 @@ on:
- .github/actions/conan/action.yml
- ".github/scripts/conan/**"
- "!.github/scripts/conan/regenerate_lockfile.sh"
- conanfile.py
- conan.lock
@@ -42,17 +40,13 @@ concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
defaults:
run:
shell: bash
jobs:
generate-matrix:
runs-on: ubuntu-latest
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@v4
- name: Calculate conan matrix
id: set-matrix
@@ -75,15 +69,16 @@ jobs:
CONAN_PROFILE: ${{ matrix.compiler }}${{ matrix.sanitizer_ext }}
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/checkout@v4
- name: Prepare runner
uses: XRPLF/actions/prepare-runner@e8d2d2a546a03e1d161dca52890705f3bc641215
uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
with:
enable_ccache: false
disable_ccache: true
- name: Setup conan on macOS
if: ${{ runner.os == 'macOS' }}
shell: bash
run: ./.github/scripts/conan/init.sh
- name: Show conan profile
@@ -104,6 +99,4 @@ jobs:
- name: Upload Conan packages
if: ${{ github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' && github.event_name != 'schedule' }}
env:
FORCE_OPTION: ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }}
run: conan upload "*" -r=xrplf --confirm ${FORCE_OPTION}
run: conan upload "*" -r=xrplf --confirm ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }}

.gitignore

@@ -4,7 +4,6 @@
.build
.cache
.vscode
.zed
.python-version
.DS_Store
.sanitizer-report


@@ -11,10 +11,7 @@
#
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
exclude: |
(?x)^(
docs/doxygen-awesome-theme/.*
)$
exclude: ^(docs/doxygen-awesome-theme/|conan\.lock$)
repos:
# `pre-commit sample-config` default hooks
@@ -29,12 +26,12 @@ repos:
# Autoformat: YAML, JSON, Markdown, etc.
- repo: https://github.com/rbubley/mirrors-prettier
rev: 14abee445aea04b39069c19b4bd54efff6775819 # frozen: v3.7.4
rev: 5ba47274f9b181bce26a5150a725577f3c336011 # frozen: v3.6.2
hooks:
- id: prettier
- repo: https://github.com/igorshubovych/markdownlint-cli
rev: 76b3d32d3f4b965e1d6425253c59407420ae2c43 # frozen: v0.47.0
rev: 192ad822316c3a22fb3d3cc8aa6eafa0b8488360 # frozen: v0.45.0
hooks:
- id: markdownlint-fix
exclude: LICENSE.md
@@ -58,17 +55,6 @@ repos:
--ignore-words=pre-commit-hooks/codespell_ignore.txt,
]
- repo: https://github.com/psf/black-pre-commit-mirror
rev: 831207fd435b47aeffdf6af853097e64322b4d44 # frozen: 25.12.0
hooks:
- id: black
- repo: https://github.com/scop/pre-commit-shfmt
rev: 2a30809d16bc7a60d9b97353c797f42b510d3368 # frozen: v3.12.0-2
hooks:
- id: shfmt
args: ["-i", "4", "--write"]
# Running some C++ hooks before clang-format
# to ensure that the style is consistent.
- repo: local
@@ -94,7 +80,7 @@ repos:
language: script
- repo: https://github.com/pre-commit/mirrors-clang-format
rev: 75ca4ad908dc4a99f57921f29b7e6c1521e10b26 # frozen: v21.1.8
rev: 719856d56a62953b8d2839fb9e851f25c3cfeef8 # frozen: v21.1.2
hooks:
- id: clang-format
args: [--style=file]


@@ -75,6 +75,10 @@ if (san)
endif ()
target_compile_options(clio_options INTERFACE ${SAN_OPTIMIZATION_FLAG} ${SAN_FLAG} -fno-omit-frame-pointer)
target_compile_definitions(
clio_options INTERFACE $<$<STREQUAL:${san},address>:SANITIZER=ASAN> $<$<STREQUAL:${san},thread>:SANITIZER=TSAN>
$<$<STREQUAL:${san},memory>:SANITIZER=MSAN> $<$<STREQUAL:${san},undefined>:SANITIZER=UBSAN>
)
target_link_libraries(clio_options INTERFACE ${SAN_FLAG} ${SAN_LIB})
endif ()


@@ -180,7 +180,6 @@ Existing maintainers can resign, or be subject to a vote for removal at the behe
- [kuznetsss](https://github.com/kuznetsss) (Ripple)
- [legleux](https://github.com/legleux) (Ripple)
- [PeterChen13579](https://github.com/PeterChen13579) (Ripple)
- [mathbunnyru](https://github.com/mathbunnyru) (Ripple)
### Honorable ex-Maintainers


@@ -34,6 +34,7 @@ Below are some useful docs to learn more about Clio.
- [How to configure Clio and rippled](./docs/configure-clio.md)
- [How to run Clio](./docs/run-clio.md)
- [Logging](./docs/logging.md)
- [Troubleshooting guide](./docs/trouble_shooting.md)
**General reference material:**


@@ -9,12 +9,10 @@ target_sources(
util/async/ExecutionContextBenchmarks.cpp
# Logger
util/log/LoggerBenchmark.cpp
# WorkQueue
rpc/WorkQueueBenchmarks.cpp
)
include(deps/gbench)
target_include_directories(clio_benchmark PRIVATE .)
target_link_libraries(clio_benchmark PRIVATE clio_rpc clio_util benchmark::benchmark_main spdlog::spdlog)
target_link_libraries(clio_benchmark PUBLIC clio_util benchmark::benchmark_main spdlog::spdlog)
set_target_properties(clio_benchmark PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})


@@ -1,145 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include "rpc/WorkQueue.hpp"
#include "util/Assert.hpp"
#include "util/config/Array.hpp"
#include "util/config/ConfigConstraints.hpp"
#include "util/config/ConfigDefinition.hpp"
#include "util/config/ConfigValue.hpp"
#include "util/config/Types.hpp"
#include "util/log/Logger.hpp"
#include "util/prometheus/Prometheus.hpp"
#include <benchmark/benchmark.h>
#include <boost/asio/steady_timer.hpp>
#include <algorithm>
#include <atomic>
#include <cassert>
#include <chrono>
#include <cstddef>
#include <cstdint>
#include <mutex>
#include <thread>
#include <vector>
using namespace rpc;
using namespace util::config;
namespace {
auto const kCONFIG = ClioConfigDefinition{
{"prometheus.compress_reply", ConfigValue{ConfigType::Boolean}.defaultValue(true)},
{"prometheus.enabled", ConfigValue{ConfigType::Boolean}.defaultValue(true)},
{"log.channels.[].channel", Array{ConfigValue{ConfigType::String}}},
{"log.channels.[].level", Array{ConfigValue{ConfigType::String}}},
{"log.level", ConfigValue{ConfigType::String}.defaultValue("info")},
{"log.format", ConfigValue{ConfigType::String}.defaultValue(R"(%Y-%m-%d %H:%M:%S.%f %^%3!l:%n%$ - %v)")},
{"log.is_async", ConfigValue{ConfigType::Boolean}.defaultValue(false)},
{"log.enable_console", ConfigValue{ConfigType::Boolean}.defaultValue(false)},
{"log.directory", ConfigValue{ConfigType::String}.optional()},
{"log.rotation_size", ConfigValue{ConfigType::Integer}.defaultValue(2048).withConstraint(gValidateUint32)},
{"log.directory_max_files", ConfigValue{ConfigType::Integer}.defaultValue(25).withConstraint(gValidateUint32)},
{"log.tag_style", ConfigValue{ConfigType::String}.defaultValue("none")},
};
// this should be a fixture but it did not work with Args very well
void
init()
{
static std::once_flag kONCE;
std::call_once(kONCE, [] {
PrometheusService::init(kCONFIG);
(void)util::LogService::init(kCONFIG);
});
}
} // namespace
static void
benchmarkWorkQueue(benchmark::State& state)
{
init();
auto const wqThreads = static_cast<uint32_t>(state.range(0));
auto const maxQueueSize = static_cast<uint32_t>(state.range(1));
auto const clientThreads = static_cast<uint32_t>(state.range(2));
auto const itemsPerClient = static_cast<uint32_t>(state.range(3));
auto const clientProcessingMs = static_cast<uint32_t>(state.range(4));
for (auto _ : state) {
std::atomic_size_t totalExecuted = 0uz;
std::atomic_size_t totalQueued = 0uz;
state.PauseTiming();
WorkQueue queue(wqThreads, maxQueueSize);
state.ResumeTiming();
std::vector<std::thread> threads;
threads.reserve(clientThreads);
for (auto t = 0uz; t < clientThreads; ++t) {
threads.emplace_back([&] {
for (auto i = 0uz; i < itemsPerClient; ++i) {
totalQueued += static_cast<std::size_t>(queue.postCoro(
[&clientProcessingMs, &totalExecuted](auto yield) {
++totalExecuted;
boost::asio::steady_timer timer(
yield.get_executor(), std::chrono::milliseconds{clientProcessingMs}
);
timer.async_wait(yield);
std::this_thread::sleep_for(std::chrono::microseconds{10});
},
/* isWhiteListed = */ false
));
}
});
}
for (auto& t : threads)
t.join();
queue.stop();
ASSERT(totalExecuted == totalQueued, "Totals don't match");
ASSERT(totalQueued <= itemsPerClient * clientThreads, "Queued more than requested");
if (maxQueueSize == 0) {
ASSERT(totalQueued == itemsPerClient * clientThreads, "Did not queue exactly the expected amount");
} else {
ASSERT(totalQueued >= std::min(maxQueueSize, itemsPerClient * clientThreads), "Queued less than expected");
}
}
}
// Usage example:
/*
./clio_benchmark \
--benchmark_repetitions=10 \
--benchmark_display_aggregates_only=true \
--benchmark_min_time=1x \
--benchmark_filter="WorkQueue"
*/
// TODO: figure out what happens on 1 thread
BENCHMARK(benchmarkWorkQueue)
->ArgsProduct({{2, 4, 8, 16}, {0, 5'000}, {4, 8, 16}, {1'000, 10'000}, {10, 100, 250}})
->Unit(benchmark::kMillisecond);

View File

@@ -49,6 +49,8 @@ postprocessors = [
]
# render body even when there are no releases to process
# render_always = true
# output file path
output = "CHANGELOG.md"
[git]
# parse the commits based on https://www.conventionalcommits.org

View File

@@ -1,42 +1,42 @@
find_package(Git REQUIRED)
if (DEFINED ENV{GITHUB_BRANCH_NAME})
set(GIT_BUILD_BRANCH $ENV{GITHUB_BRANCH_NAME})
set(GIT_COMMIT_HASH $ENV{GITHUB_HEAD_SHA})
else ()
set(GIT_COMMAND branch --show-current)
execute_process(
COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE GIT_BUILD_BRANCH
OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY
)
set(GIT_COMMAND rev-parse HEAD)
execute_process(
COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE GIT_COMMIT_HASH
OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY
)
endif ()
set(GIT_COMMAND describe --tags --exact-match)
execute_process(
COMMAND date +%Y%m%d%H%M%S WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE BUILD_DATE
OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY
COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND}
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
OUTPUT_VARIABLE TAG
RESULT_VARIABLE RC
ERROR_VARIABLE ERR
OUTPUT_STRIP_TRAILING_WHITESPACE ERROR_STRIP_TRAILING_WHITESPACE
)
message(STATUS "Git branch: ${GIT_BUILD_BRANCH}")
message(STATUS "Git commit hash: ${GIT_COMMIT_HASH}")
message(STATUS "Build date: ${BUILD_DATE}")
if (DEFINED ENV{FORCE_CLIO_VERSION} AND NOT "$ENV{FORCE_CLIO_VERSION}" STREQUAL "")
message(STATUS "Using explicitly provided '${FORCE_CLIO_VERSION}' as Clio version")
set(CLIO_VERSION "$ENV{FORCE_CLIO_VERSION}")
set(DOC_CLIO_VERSION "$ENV{FORCE_CLIO_VERSION}")
if (RC EQUAL 0)
message(STATUS "Found tag '${TAG}' in git. Will use it as Clio version")
set(CLIO_VERSION "${TAG}")
set(DOC_CLIO_VERSION "${TAG}")
else ()
message(STATUS "Using 'YYYYMMDDHMS-<branch>-<git short rev>' as Clio version")
message(STATUS "Error finding tag in git: ${ERR}")
message(STATUS "Will use 'YYYYMMDDHMS-<branch>-<git-rev>' as Clio version")
string(SUBSTRING ${GIT_COMMIT_HASH} 0 7 GIT_COMMIT_HASH_SHORT)
set(GIT_COMMAND show -s --date=format:%Y%m%d%H%M%S --format=%cd)
execute_process(
COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE DATE
OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY
)
set(CLIO_VERSION "${BUILD_DATE}-${GIT_BUILD_BRANCH}-${GIT_COMMIT_HASH_SHORT}")
set(GIT_COMMAND branch --show-current)
execute_process(
COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE BRANCH
OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY
)
set(GIT_COMMAND rev-parse --short HEAD)
execute_process(
COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE REV
OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY
)
set(CLIO_VERSION "${DATE}-${BRANCH}-${REV}")
set(DOC_CLIO_VERSION "develop")
endif ()

View File

@@ -1,17 +0,0 @@
[Unit]
Description=Clio XRPL API server
Documentation=https://github.com/XRPLF/clio.git
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
ExecStart=@CLIO_INSTALL_DIR@/bin/clio_server @CLIO_INSTALL_DIR@/etc/config.json
Restart=on-failure
User=clio
Group=clio
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target

View File

@@ -11,6 +11,3 @@ file(READ docs/examples/config/example-config.json config)
string(REGEX REPLACE "./clio_log" "/var/log/clio/" config "${config}")
file(WRITE ${CMAKE_BINARY_DIR}/install-config.json "${config}")
install(FILES ${CMAKE_BINARY_DIR}/install-config.json DESTINATION etc RENAME config.json)
configure_file("${CMAKE_SOURCE_DIR}/cmake/install/clio.service.in" "${CMAKE_BINARY_DIR}/clio.service")
install(FILES "${CMAKE_BINARY_DIR}/clio.service" DESTINATION /lib/systemd/system)

View File

@@ -10,36 +10,37 @@ CLIO_BIN="$CLIO_PREFIX/bin/${CLIO_EXECUTABLE}"
CLIO_CONFIG="$CLIO_PREFIX/etc/config.json"
case "$1" in
configure)
if ! id -u "$USER_NAME" >/dev/null 2>&1; then
# Users who should not have a home directory should have their home directory set to /nonexistent
# https://www.debian.org/doc/debian-policy/ch-opersys.html#non-existent-home-directories
useradd \
--system \
--home-dir /nonexistent \
--no-create-home \
--shell /usr/sbin/nologin \
--comment "system user for ${CLIO_EXECUTABLE}" \
--user-group \
${USER_NAME}
fi
configure)
if ! id -u "$USER_NAME" >/dev/null 2>&1; then
# Users who should not have a home directory should have their home directory set to /nonexistent
# https://www.debian.org/doc/debian-policy/ch-opersys.html#non-existent-home-directories
useradd \
--system \
--home-dir /nonexistent \
--no-create-home \
--shell /usr/sbin/nologin \
--comment "system user for ${CLIO_EXECUTABLE}" \
--user-group \
${USER_NAME}
fi
install -d -o "$USER_NAME" -g "$GROUP_NAME" /var/log/clio
install -d -o "$USER_NAME" -g "$GROUP_NAME" /var/log/clio
if [ -f "$CLIO_CONFIG" ]; then
chown "$USER_NAME:$GROUP_NAME" "$CLIO_CONFIG"
fi
if [ -f "$CLIO_CONFIG" ]; then
chown "$USER_NAME:$GROUP_NAME" "$CLIO_CONFIG"
fi
chown -R "$USER_NAME:$GROUP_NAME" "$CLIO_PREFIX"
chown -R "$USER_NAME:$GROUP_NAME" "$CLIO_PREFIX"
ln -sf "$CLIO_BIN" "/usr/bin/${CLIO_EXECUTABLE}"
ln -sf "$CLIO_BIN" "/usr/bin/${CLIO_EXECUTABLE}"
;;
abort-upgrade | abort-remove | abort-deconfigure) ;;
*)
echo "postinst called with unknown argument \`$1'" >&2
exit 1
;;
;;
abort-upgrade|abort-remove|abort-deconfigure)
;;
*)
echo "postinst called with unknown argument \`$1'" >&2
exit 1
;;
esac
exit 0

View File

@@ -1,52 +1,51 @@
{
"version": "0.5",
"requires": [
"zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1765850150.075",
"xxhash/0.8.3#681d36a0a6111fc56e5e45ea182c19cc%1765850149.987",
"xrpl/3.0.0#534d3f65a336109eee929b88962bae4e%1765375071.547",
"sqlite3/3.49.1#8631739a4c9b93bd3d6b753bac548a63%1765850149.926",
"spdlog/1.17.0#bcbaaf7147bda6ad24ffbd1ac3d7142c%1767636069.964",
"soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1765850149.46",
"re2/20230301#ca3b241baec15bd31ea9187150e0b333%1765850148.103",
"zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1756234269.497",
"xxhash/0.8.3#681d36a0a6111fc56e5e45ea182c19cc%1756234289.683",
"xrpl/2.6.1#973af2bf9631f239941dd9f5a100bb84%1759275059.342",
"sqlite3/3.49.1#8631739a4c9b93bd3d6b753bac548a63%1756234266.869",
"spdlog/1.15.3#3ca0e9e6b83af4d0151e26541d140c86%1754401846.61",
"soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1756234262.318",
"re2/20230301#dfd6e2bf050eb90ddd8729cfb4c844a4%1756234257.976",
"rapidjson/cci.20220822#1b9d8c2256876a154172dc5cfbe447c6%1754325007.656",
"protobuf/3.21.12#44ee56c0a6eea0c19aeeaca680370b88%1764175361.456",
"protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1756234251.614",
"openssl/1.1.1w#a8f0792d7c5121b954578a7149d23e03%1756223730.729",
"nudb/2.0.9#fb8dfd1a5557f5e0528114c2da17721e%1765850143.957",
"nudb/2.0.9#c62cfd501e57055a7e0d8ee3d5e5427d%1756234237.107",
"minizip/1.2.13#9e87d57804bd372d6d1e32b1871517a3%1754325004.374",
"lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1765850143.914",
"lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1756234228.999",
"libuv/1.46.0#dc28c1f653fa197f00db5b577a6f6011%1754325003.592",
"libiconv/1.17#1e65319e945f2d31941a9d28cc13c058%1765842973.492",
"libbacktrace/cci.20210118#a7691bfccd8caaf66309df196790a5a1%1765842973.03",
"libarchive/3.8.1#ffee18995c706e02bf96e7a2f7042e0d%1765850144.736",
"libiconv/1.17#1e65319e945f2d31941a9d28cc13c058%1756223727.64",
"libbacktrace/cci.20210118#a7691bfccd8caaf66309df196790a5a1%1756230911.03",
"libarchive/3.8.1#5cf685686322e906cb42706ab7e099a8%1756234256.696",
"http_parser/2.9.4#98d91690d6fd021e9e624218a85d9d97%1754325001.385",
"gtest/1.17.0#5224b3b3ff3b4ce1133cbdd27d53ee7d%1755784855.585",
"gtest/1.14.0#f8f0757a574a8dd747d16af62d6eb1b7%1754325000.842",
"grpc/1.50.1#02291451d1e17200293a409410d1c4e1%1756234248.958",
"fmt/12.1.0#50abab23274d56bb8f42c94b3b9a40c7%1763984116.926",
"fmt/11.2.0#579bb2cdf4a7607621beea4eb4651e0f%1754324999.086",
"doctest/2.4.11#a4211dfc329a16ba9f280f9574025659%1756234220.819",
"date/3.0.4#862e11e80030356b53c2c38599ceb32b%1765850143.772",
"cassandra-cpp-driver/2.17.0#bd3934138689482102c265d01288a316%1764175359.611",
"c-ares/1.34.5#5581c2b62a608b40bb85d965ab3ec7c8%1765850144.336",
"bzip2/1.0.8#c470882369c2d95c5c77e970c0c7e321%1765850143.837",
"boost/1.83.0#91d8b1572534d2c334d6790e3c34d0c1%1764175359.61",
"date/3.0.4#f74bbba5a08fa388256688743136cb6f%1756234217.493",
"cassandra-cpp-driver/2.17.0#e50919efac8418c26be6671fd702540a%1754324997.363",
"c-ares/1.34.5#b78b91e7cfb1f11ce777a285bbf169c6%1756234217.915",
"bzip2/1.0.8#00b4a4658791c1f06914e087f0e792f5%1756234261.716",
"boost/1.83.0#5d975011d65b51abb2d2f6eb8386b368%1754325043.336",
"benchmark/1.9.4#ce4403f7a24d3e1f907cd9da4b678be4%1754578869.672",
"abseil/20230802.1#90ba607d4ee8fb5fb157c3db540671fc%1764175359.429"
"abseil/20230802.1#f0f91485b111dc9837a68972cb19ca7b%1756234220.907"
],
"build_requires": [
"zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1765850150.075",
"protobuf/3.21.12#44ee56c0a6eea0c19aeeaca680370b88%1764175361.456",
"cmake/4.2.0#ae0a44f44a1ef9ab68fd4b3e9a1f8671%1765850153.937",
"cmake/3.31.10#313d16a1aa16bbdb2ca0792467214b76%1765850153.479",
"b2/5.3.3#107c15377719889654eb9a162a673975%1765850144.355"
"zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1756234269.497",
"protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1756234251.614",
"cmake/3.31.8#dde3bde00bb843687e55aea5afa0e220%1756234232.89",
"b2/5.3.3#107c15377719889654eb9a162a673975%1756234226.28"
],
"python_requires": [],
"overrides": {
"boost/1.83.0": [
null,
"boost/1.83.0#91d8b1572534d2c334d6790e3c34d0c1"
"boost/1.83.0#5d975011d65b51abb2d2f6eb8386b368"
],
"protobuf/3.21.12": [
null,
"protobuf/3.21.12#44ee56c0a6eea0c19aeeaca680370b88"
"protobuf/3.21.12"
],
"lz4/1.9.4": [
"lz4/1.10.0"

View File

@@ -3,60 +3,62 @@ from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
class ClioConan(ConanFile):
name = "clio"
license = "ISC"
author = "Alex Kremer <akremer@ripple.com>, John Freeman <jfreeman@ripple.com>, Ayaz Salikhov <asalikhov@ripple.com>"
url = "https://github.com/xrplf/clio"
description = "Clio RPC server"
settings = "os", "compiler", "build_type", "arch"
name = 'clio'
license = 'ISC'
author = 'Alex Kremer <akremer@ripple.com>, John Freeman <jfreeman@ripple.com>, Ayaz Salikhov <asalikhov@ripple.com>'
url = 'https://github.com/xrplf/clio'
description = 'Clio RPC server'
settings = 'os', 'compiler', 'build_type', 'arch'
options = {}
requires = [
"boost/1.83.0",
"cassandra-cpp-driver/2.17.0",
"fmt/12.1.0",
"grpc/1.50.1",
"libbacktrace/cci.20210118",
"openssl/1.1.1w",
"protobuf/3.21.12",
"spdlog/1.17.0",
"xrpl/3.0.0",
"zlib/1.3.1",
'boost/1.83.0',
'cassandra-cpp-driver/2.17.0',
'fmt/11.2.0',
'protobuf/3.21.12',
'grpc/1.50.1',
'openssl/1.1.1w',
'xrpl/2.6.1',
'zlib/1.3.1',
'libbacktrace/cci.20210118',
'spdlog/1.15.3',
]
default_options = {
"cassandra-cpp-driver/*:shared": False,
"date/*:header_only": True,
"grpc/*:secure": True,
"grpc/*:shared": False,
"gtest/*:no_main": True,
"libpq/*:shared": False,
"lz4/*:shared": False,
"openssl/*:shared": False,
"protobuf/*:shared": False,
"protobuf/*:with_zlib": True,
"snappy/*:shared": False,
"xrpl/*:rocksdb": False,
"xrpl/*:tests": False,
'xrpl/*:tests': False,
'xrpl/*:rocksdb': False,
'cassandra-cpp-driver/*:shared': False,
'date/*:header_only': True,
'grpc/*:shared': False,
'grpc/*:secure': True,
'libpq/*:shared': False,
'lz4/*:shared': False,
'openssl/*:shared': False,
'protobuf/*:shared': False,
'protobuf/*:with_zlib': True,
'snappy/*:shared': False,
'gtest/*:no_main': True,
}
exports_sources = ("CMakeLists.txt", "cmake/*", "src/*")
exports_sources = (
'CMakeLists.txt', 'cmake/*', 'src/*'
)
def requirements(self):
self.requires("gtest/1.17.0")
self.requires("benchmark/1.9.4")
self.requires('gtest/1.14.0')
self.requires('benchmark/1.9.4')
def configure(self):
if self.settings.compiler == "apple-clang":
self.options["boost"].visibility = "global"
if self.settings.compiler == 'apple-clang':
self.options['boost'].visibility = 'global'
def layout(self):
cmake_layout(self)
# Fix this setting to follow the default introduced in Conan 1.48
# to align with our build instructions.
self.folders.generators = "build/generators"
self.folders.generators = 'build/generators'
generators = "CMakeDeps"
generators = 'CMakeDeps'
def generate(self):
tc = CMakeToolchain(self)

View File

@@ -36,6 +36,7 @@ RUN apt-get update \
libmpfr-dev \
libncurses-dev \
make \
ninja-build \
wget \
zip \
&& apt-get clean \
@@ -54,11 +55,8 @@ RUN pip install -q --no-cache-dir \
# lxml 6.0.0 is not compatible with our image
'lxml<6.0.0' \
cmake \
conan==2.24.0 \
gcovr \
# We're adding pre-commit to this image as well,
# because the clang-tidy workflow requires it
pre-commit
conan==2.20.1 \
gcovr
# Install LLVM tools
ARG LLVM_TOOLS_VERSION=20
@@ -106,7 +104,6 @@ COPY --from=clio-tools \
/usr/local/bin/git-cliff \
/usr/local/bin/gh \
/usr/local/bin/gdb \
/usr/local/bin/ninja \
/usr/local/bin/
WORKDIR /root

View File

@@ -5,18 +5,17 @@ It is used in [Clio Github Actions](https://github.com/XRPLF/clio/actions) but c
The image is based on Ubuntu 20.04 and contains:
- ccache 4.12.2
- ccache 4.11.3
- Clang 19
- ClangBuildAnalyzer 1.6.0
- Conan 2.24.0
- Doxygen 1.16.1
- Conan 2.20.1
- Doxygen 1.14
- GCC 15.2.0
- GDB 17.1
- gh 2.83.2
- git-cliff 2.11.0
- mold 2.40.4
- Ninja 1.13.2
- Python 3.8
- GDB 16.3
- gh 2.74
- git-cliff 2.9.1
- mold 2.40.1
- Python 3.13
- and some other useful tools
Conan is set up to build Clio without any additional steps.

View File

@@ -3,13 +3,6 @@
{% set sanitizer_opt_map = {"asan": "address", "tsan": "thread", "ubsan": "undefined"} %}
{% set sanitizer = sanitizer_opt_map[sani] %}
{% set sanitizer_b2_flags_map = {
"address": "context-impl=ucontext address-sanitizer=norecover",
"thread": "context-impl=ucontext thread-sanitizer=norecover",
"undefined": "undefined-sanitizer=norecover"
} %}
{% set sanitizer_b2_flags_str = sanitizer_b2_flags_map[sanitizer] %}
{% set sanitizer_build_flags_str = "-fsanitize=" ~ sanitizer ~ " -g -O1 -fno-omit-frame-pointer" %}
{% set sanitizer_build_flags = sanitizer_build_flags_str.split(' ') %}
{% set sanitizer_link_flags_str = "-fsanitize=" ~ sanitizer %}
@@ -18,8 +11,7 @@
include({{ compiler }})
[options]
boost/*:extra_b2_flags="{{ sanitizer_b2_flags_str }}"
boost/*:without_context=False
boost/*:extra_b2_flags="cxxflags=\"{{ sanitizer_build_flags_str }}\" linkflags=\"{{ sanitizer_link_flags_str }}\""
boost/*:without_stacktrace=True
[conf]
@@ -28,10 +20,4 @@ tools.build:cxxflags+={{ sanitizer_build_flags }}
tools.build:exelinkflags+={{ sanitizer_link_flags }}
tools.build:sharedlinkflags+={{ sanitizer_link_flags }}
{% if sanitizer == "address" %}
tools.build:defines+=["BOOST_USE_ASAN", "BOOST_USE_UCONTEXT"]
{% elif sanitizer == "thread" %}
tools.build:defines+=["BOOST_USE_TSAN", "BOOST_USE_UCONTEXT"]
{% endif %}
tools.info.package_id:confs+=["tools.build:cflags", "tools.build:cxxflags", "tools.build:exelinkflags", "tools.build:sharedlinkflags", "tools.build:defines"]
tools.info.package_id:confs+=["tools.build:cflags", "tools.build:cxxflags", "tools.build:exelinkflags", "tools.build:sharedlinkflags"]

View File

@@ -8,7 +8,7 @@ ARG UBUNTU_VERSION
ARG GCC_MAJOR_VERSION
ARG BUILD_VERSION=0
ARG BUILD_VERSION=1
ARG DEBIAN_FRONTEND=noninteractive
ARG TARGETARCH
@@ -34,7 +34,6 @@ RUN wget --progress=dot:giga https://gcc.gnu.org/pub/gcc/releases/gcc-$GCC_VERSI
WORKDIR /gcc-$GCC_VERSION
RUN ./contrib/download_prerequisites
# hadolint ignore=DL3059
RUN mkdir /gcc-build
WORKDIR /gcc-build
RUN /gcc-$GCC_VERSION/configure \

View File

@@ -1,6 +1,6 @@
services:
clio_develop:
image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696
image: ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d
volumes:
- clio_develop_conan_data:/root/.conan2/p
- clio_develop_ccache:/root/.ccache

View File

@@ -2,7 +2,7 @@
script_dir=$(dirname $0)
pushd $script_dir >/dev/null
pushd $script_dir > /dev/null
function start_container {
if [ -z "$(docker ps -q -f name=clio_develop)" ]; then
@@ -41,26 +41,21 @@ EOF
}
case $1 in
-h | --help)
print_help
;;
-h|--help)
print_help ;;
-t | --terminal)
open_terminal
;;
-t|--terminal)
open_terminal ;;
-s | --stop)
stop_container
;;
-s|--stop)
stop_container ;;
-*)
echo "Unknown option: $1"
print_help
;;
-*)
echo "Unknown option: $1"
print_help ;;
*)
run "$@"
;;
*)
run "$@" ;;
esac
popd >/dev/null
popd > /dev/null

View File

@@ -8,10 +8,11 @@ ARG TARGETARCH
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
ARG BUILD_VERSION=0
ARG BUILD_VERSION=2
RUN apt-get update \
&& apt-get install -y --no-install-recommends --no-install-suggests \
ninja-build \
python3 \
python3-pip \
software-properties-common \
@@ -23,16 +24,7 @@ RUN apt-get update \
WORKDIR /tmp
ARG NINJA_VERSION=1.13.2
RUN wget --progress=dot:giga "https://github.com/ninja-build/ninja/archive/refs/tags/v${NINJA_VERSION}.tar.gz" \
&& tar xf "v${NINJA_VERSION}.tar.gz" \
&& cd "ninja-${NINJA_VERSION}" \
&& ./configure.py --bootstrap \
&& mv ninja /usr/local/bin/ninja \
&& rm -rf /tmp/* /var/tmp/*
ARG MOLD_VERSION=2.40.4
ARG MOLD_VERSION=2.40.1
RUN wget --progress=dot:giga "https://github.com/rui314/mold/archive/refs/tags/v${MOLD_VERSION}.tar.gz" \
&& tar xf "v${MOLD_VERSION}.tar.gz" \
&& cd "mold-${MOLD_VERSION}" \
@@ -42,7 +34,7 @@ RUN wget --progress=dot:giga "https://github.com/rui314/mold/archive/refs/tags/v
&& ninja install \
&& rm -rf /tmp/* /var/tmp/*
ARG CCACHE_VERSION=4.12.2
ARG CCACHE_VERSION=4.11.3
RUN wget --progress=dot:giga "https://github.com/ccache/ccache/releases/download/v${CCACHE_VERSION}/ccache-${CCACHE_VERSION}.tar.gz" \
&& tar xf "ccache-${CCACHE_VERSION}.tar.gz" \
&& cd "ccache-${CCACHE_VERSION}" \
@@ -59,7 +51,7 @@ RUN apt-get update \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
ARG DOXYGEN_VERSION=1.16.1
ARG DOXYGEN_VERSION=1.14.0
RUN wget --progress=dot:giga "https://github.com/doxygen/doxygen/releases/download/Release_${DOXYGEN_VERSION//./_}/doxygen-${DOXYGEN_VERSION}.src.tar.gz" \
&& tar xf "doxygen-${DOXYGEN_VERSION}.src.tar.gz" \
&& cd "doxygen-${DOXYGEN_VERSION}" \
@@ -79,13 +71,13 @@ RUN wget --progress=dot:giga "https://github.com/aras-p/ClangBuildAnalyzer/archi
&& ninja install \
&& rm -rf /tmp/* /var/tmp/*
ARG GIT_CLIFF_VERSION=2.11.0
ARG GIT_CLIFF_VERSION=2.9.1
RUN wget --progress=dot:giga "https://github.com/orhun/git-cliff/releases/download/v${GIT_CLIFF_VERSION}/git-cliff-${GIT_CLIFF_VERSION}-x86_64-unknown-linux-musl.tar.gz" \
&& tar xf git-cliff-${GIT_CLIFF_VERSION}-x86_64-unknown-linux-musl.tar.gz \
&& mv git-cliff-${GIT_CLIFF_VERSION}/git-cliff /usr/local/bin/git-cliff \
&& rm -rf /tmp/* /var/tmp/*
ARG GH_VERSION=2.83.2
ARG GH_VERSION=2.74.0
RUN wget --progress=dot:giga "https://github.com/cli/cli/releases/download/v${GH_VERSION}/gh_${GH_VERSION}_linux_${TARGETARCH}.tar.gz" \
&& tar xf gh_${GH_VERSION}_linux_${TARGETARCH}.tar.gz \
&& mv gh_${GH_VERSION}_linux_${TARGETARCH}/bin/gh /usr/local/bin/gh \
@@ -100,7 +92,7 @@ RUN apt-get update \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
ARG GDB_VERSION=17.1
ARG GDB_VERSION=16.3
RUN wget --progress=dot:giga "https://sourceware.org/pub/gdb/releases/gdb-${GDB_VERSION}.tar.gz" \
&& tar xf "gdb-${GDB_VERSION}.tar.gz" \
&& cd "gdb-${GDB_VERSION}" \

View File

@@ -97,14 +97,30 @@ Now you should be able to download the prebuilt dependencies (including `xrpl` p
#### Conan lockfile
To achieve reproducible dependencies, we use a [Conan lockfile](https://docs.conan.io/2/tutorial/versioning/lockfiles.html).
To achieve reproducible dependencies, we use [Conan lockfile](https://docs.conan.io/2/tutorial/versioning/lockfiles.html).
The `conan.lock` file in the repository contains a "snapshot" of the current dependencies.
It is implicitly used when running `conan` commands, you don't need to specify it.
You have to update this file every time you add a new dependency or change a revision or version of an existing dependency.
To update a lockfile, run from the repository root: `./.github/scripts/conan/regenerate_lockfile.sh`
> [!NOTE]
> Conan uses the local cache by default when creating a lockfile.
>
> To ensure that lockfile creation works the same way on all developer machines, clear the local cache before creating a new lockfile.
To create a new lockfile, run the following commands in the repository root:
```bash
conan remove '*' --confirm
rm conan.lock
# This ensures that the xrplf remote is the first to be consulted
conan remote add --force --index 0 xrplf https://conan.ripplex.io
conan lock create .
```
> [!NOTE]
> If some dependencies are exclusive for some OS, you may need to run the last command for them adding `--profile:all <PROFILE>`.
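For example, a minimal sketch of that extra step (the `clang` profile name here is only a placeholder, not a project convention):

```bash
# Re-create lockfile entries for an OS-specific dependency set.
# "clang" is an illustrative profile name; substitute your own.
conan lock create . --profile:all clang
```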
## Building Clio
@@ -175,7 +191,7 @@ Open the `index.html` file in your browser to see the documentation pages.
It is also possible to build Clio using [Docker](https://www.docker.com/) if you don't want to install all the dependencies on your machine.
```sh
docker run -it ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696
docker run -it ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d
git clone https://github.com/XRPLF/clio
cd clio
```

View File

@@ -293,7 +293,7 @@ This document provides a list of all available Clio configuration properties in
- **Required**: True
- **Type**: int
- **Default value**: `1000`
- **Default value**: `1`
- **Constraints**: The minimum value is `1`. The maximum value is `4294967295`.
- **Description**: The maximum size of the server's request queue. If set to `0`, there is no queue size limit.
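As a quick illustration, a sketch of the setting in context (the surrounding `server` block follows the example config further down this page; `0` disables the limit):

```json
"server": {
    "ip": "0.0.0.0",
    "port": 51233,
    "max_queue_size": 0
}
```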
@@ -391,7 +391,7 @@ This document provides a list of all available Clio configuration properties in
- **Type**: double
- **Default value**: `10`
- **Constraints**: The value must be a positive double number.
- **Description**: The number of seconds the server waits to shut down gracefully. If Clio does not shut down gracefully after the specified value, it will be killed instead.
- **Description**: The number of milliseconds the server waits to shut down gracefully. If Clio does not shut down gracefully after the specified value, it will be killed instead.
### cache.num_diffs
@@ -441,30 +441,6 @@ This document provides a list of all available Clio configuration properties in
- **Constraints**: The value must be one of the following: `sync`, `async`, `none`.
- **Description**: The strategy used for Cache loading.
### cache.file.path
- **Required**: False
- **Type**: string
- **Default value**: None
- **Constraints**: None
- **Description**: The path to a file where the cache is saved on shutdown and loaded from on startup. If the file can't be read, Clio will load the cache as usual (from DB or from rippled).
### cache.file.max_sequence_age
- **Required**: True
- **Type**: int
- **Default value**: `5000`
- **Constraints**: None
- **Description**: The maximum allowed difference between the latest sequence in the DB and in the cache file. If the cache file is too old (its latest sequence is too low), Clio will reject it.
### cache.file.async_save
- **Required**: True
- **Type**: boolean
- **Default value**: `False`
- **Constraints**: None
- **Description**: When false, Clio waits for cache saving to finish before shutting down. When true, cache saving runs in parallel with other shutdown operations.
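Put together, a sketch of how these three keys nest in the config (values are the documented defaults plus the example path from further down this page; illustrative only):

```json
"cache": {
    "file": {
        "path": "./cache.bin",
        "max_sequence_age": 5000,
        "async_save": false
    }
}
```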
### log.channels.[].channel
- **Required**: False

View File

@@ -61,7 +61,7 @@
"ip": "0.0.0.0",
"port": 51233,
// Max number of requests to queue up before rejecting further requests.
// Defaults to 1000 (use 0 to make the queue unbounded).
// Defaults to 0, which disables the limit.
"max_queue_size": 500,
// If request contains header with authorization, Clio will check if it matches the prefix 'Password ' + this value's sha256 hash
// If matches, the request will be considered as admin request
@@ -137,11 +137,7 @@
// "num_cursors_from_account": 3200, // Read the cursors from the account table until we have enough cursors to partition the ledger to load concurrently.
"num_markers": 48, // The number of markers is the number of coroutines to load the cache concurrently.
"page_fetch_size": 512, // The number of rows to load for each page.
"load": "async", // "sync" to load cache synchronously or "async" to load cache asynchronously or "none"/"no" to turn off the cache.
"file": {
"path": "./cache.bin",
"max_sequence_age": 5000
}
"load": "async" // "sync" to load cache synchronously or "async" to load cache asynchronously or "none"/"no" to turn off the cache.
},
"prometheus": {
"enabled": true,

View File

@@ -45,7 +45,7 @@ if [[ "1.14.0" > "$version" ]]; then
ERROR
-----------------------------------------------------------------------------
A minimum of version 1.14 of $(which doxygen) is required.
A minimum of version 1.14 of `which doxygen` is required.
Your version is $version. Please upgrade it.
Your changes may fail CI checks.
@@ -55,26 +55,26 @@ EOF
exit 0
fi
mkdir -p ${DOCDIR} >/dev/null 2>&1
pushd ${DOCDIR} >/dev/null 2>&1
mkdir -p ${DOCDIR} > /dev/null 2>&1
pushd ${DOCDIR} > /dev/null 2>&1
cat ${ROOT}/docs/Doxyfile |
sed \
-e "s/\${LINT}/YES/" \
-e "s/\${WARN_AS_ERROR}/NO/" \
-e "s!\${SOURCE}!${ROOT}!" \
-e "s/\${USE_DOT}/NO/" \
-e "s/\${EXCLUDES}/impl/" |
${DOXYGEN} - 2>${TMPFILE} 1>/dev/null
cat ${ROOT}/docs/Doxyfile | \
sed \
-e "s/\${LINT}/YES/" \
-e "s/\${WARN_AS_ERROR}/NO/" \
-e "s!\${SOURCE}!${ROOT}!" \
-e "s/\${USE_DOT}/NO/" \
-e "s/\${EXCLUDES}/impl/" \
| ${DOXYGEN} - 2> ${TMPFILE} 1> /dev/null
# We don't want to check default values, typedefs, or member variables
OUT=$(cat ${TMPFILE} |
grep -v "=default" |
grep -v "\(variable\)" |
grep -v "\(typedef\)")
OUT=$(cat ${TMPFILE} \
| grep -v "=default" \
| grep -v "\(variable\)" \
| grep -v "\(typedef\)")
rm -rf ${TMPFILE} >/dev/null 2>&1
popd >/dev/null 2>&1
rm -rf ${TMPFILE} > /dev/null 2>&1
popd > /dev/null 2>&1
if [[ ! -z "$OUT" ]]; then
cat <<EOF

View File

@@ -23,10 +23,10 @@ fix_includes() {
file_path_fixed="${file_path}.tmp.fixed"
# Make all includes to be <...> style
sed -E 's|#include "(.*)"|#include <\1>|g' "$file_path" >"$file_path_all_global"
sed -E 's|#include "(.*)"|#include <\1>|g' "$file_path" > "$file_path_all_global"
# Make local includes to be "..." style
sed -E "s|#include <(($main_src_dirs)/.*)>|#include \"\1\"|g" "$file_path_all_global" >"$file_path_fixed"
sed -E "s|#include <(($main_src_dirs)/.*)>|#include \"\1\"|g" "$file_path_all_global" > "$file_path_fixed"
rm "$file_path_all_global"
# Check if the temporary file is different from the original file

View File

@@ -4,6 +4,7 @@ import argparse
import re
from pathlib import Path
PATTERN = r'R"JSON\((.*?)\)JSON"'
@@ -39,22 +40,16 @@ def fix_colon_spacing(cpp_content: str) -> str:
raw_json = match.group(1)
raw_json = re.sub(r'":\n\s*(\[|\{)', r'": \1', raw_json)
return f'R"JSON({raw_json})JSON"'
return re.sub(PATTERN, replace_json, cpp_content, flags=re.DOTALL)
def fix_indentation(cpp_content: str) -> str:
if "JSON(" not in cpp_content:
return cpp_content
lines = cpp_content.splitlines()
ends_with_newline = cpp_content.endswith("\n")
def find_indentation(line: str) -> int:
return len(line) - len(line.lstrip())
for line_num, (line, next_line) in enumerate(zip(lines[:-1], lines[1:])):
for (line_num, (line, next_line)) in enumerate(zip(lines[:-1], lines[1:])):
if "JSON(" in line and ")JSON" not in line:
indent = find_indentation(line)
next_indent = find_indentation(next_line)
@@ -69,17 +64,9 @@ def fix_indentation(cpp_content: str) -> str:
if ")JSON" in lines[i]:
lines[i] = " " * indent + lines[i].lstrip()
break
lines[i] = (
lines[i][by_how_much:]
if by_how_much > 0
else " " * (-by_how_much) + lines[i]
)
lines[i] = lines[i][by_how_much:] if by_how_much > 0 else " " * (-by_how_much) + lines[i]
result = "\n".join(lines)
if ends_with_newline:
result += "\n"
return result
return "\n".join(lines) + "\n"
def process_file(file_path: Path, dry_run: bool) -> bool:

View File

@@ -4,7 +4,7 @@
#
set -e -o pipefail
if ! command -v gofmt &>/dev/null; then
if ! command -v gofmt &> /dev/null ; then
echo "gofmt not installed or available in the PATH" >&2
exit 1
fi

View File

@@ -1,4 +1,5 @@
#!/bin/bash
#!/bin/sh
# git for-each-ref refs/tags # see which tags are annotated and which are lightweight. Annotated tags are "tag" objects.
# # Set these so your commits and tags are always signed
@@ -6,7 +7,7 @@
# git config tag.gpgsign true
verify_commit_signed() {
if git verify-commit HEAD &>/dev/null; then
if git verify-commit HEAD &> /dev/null; then
:
# echo "HEAD commit seems signed..."
else
@@ -16,7 +17,7 @@ verify_commit_signed() {
}
verify_tag() {
if git describe --exact-match --tags HEAD &>/dev/null; then
if git describe --exact-match --tags HEAD &> /dev/null; then
: # You might be ok to push
# echo "Tag is annotated."
return 0
@@ -27,7 +28,7 @@ verify_tag() {
}
verify_tag_signed() {
if git verify-tag "$version" &>/dev/null; then
if git verify-tag "$version" &> /dev/null ; then
: # ok, I guess we'll let you push
# echo "Tag appears signed"
return 0
@@ -39,11 +40,11 @@ verify_tag_signed() {
}
# Check some things if we're pushing a branch called "release/"
if echo "$PRE_COMMIT_REMOTE_BRANCH" | grep ^refs\/heads\/release\/ &>/dev/null; then
if echo "$PRE_COMMIT_REMOTE_BRANCH" | grep ^refs\/heads\/release\/ &> /dev/null ; then
version=$(git tag --points-at HEAD)
echo "Looks like you're trying to push a $version release..."
echo "Making sure you've signed and tagged it."
if verify_commit_signed && verify_tag && verify_tag_signed; then
if verify_commit_signed && verify_tag && verify_tag_signed ; then
: # Ok, I guess you can push
else
exit 1

View File

@@ -2,6 +2,7 @@ add_subdirectory(util)
add_subdirectory(data)
add_subdirectory(cluster)
add_subdirectory(etl)
add_subdirectory(etlng)
add_subdirectory(feed)
add_subdirectory(rpc)
add_subdirectory(web)

View File

@@ -5,9 +5,10 @@ target_link_libraries(
clio_app
PUBLIC clio_cluster
clio_etl
clio_etlng
clio_feed
clio_migration
clio_rpc
clio_web
clio_rpc
clio_migration
PRIVATE Boost::program_options
)

View File

@@ -77,10 +77,7 @@ CliArgs::parse(int argc, char const* argv[])
}
if (parsed.contains("version")) {
std::cout << util::build::getClioFullVersionString() << '\n'
<< "Git commit hash: " << util::build::getGitCommitHash() << '\n'
<< "Git build branch: " << util::build::getGitBuildBranch() << '\n'
<< "Build date: " << util::build::getBuildDate() << '\n';
std::cout << util::build::getClioFullVersionString() << '\n';
return Action{Action::Exit{EXIT_SUCCESS}};
}

View File

@@ -25,12 +25,11 @@
#include "data/AmendmentCenter.hpp"
#include "data/BackendFactory.hpp"
#include "data/LedgerCache.hpp"
#include "data/LedgerCacheSaver.hpp"
#include "etl/ETLService.hpp"
#include "etl/LoadBalancer.hpp"
#include "etl/NetworkValidatedLedgers.hpp"
#include "etl/SystemState.hpp"
#include "etl/WriterState.hpp"
#include "etlng/LoadBalancer.hpp"
#include "etlng/LoadBalancerInterface.hpp"
#include "feed/SubscriptionManager.hpp"
#include "migration/MigrationInspectorFactory.hpp"
#include "rpc/Counters.hpp"
@@ -58,7 +57,6 @@
#include <cstdlib>
#include <memory>
#include <optional>
#include <string>
#include <thread>
#include <utility>
#include <vector>
@@ -93,7 +91,6 @@ ClioApplication::ClioApplication(util::config::ClioConfigDefinition const& confi
{
LOG(util::LogService::info()) << "Clio version: " << util::build::getClioFullVersionString();
signalsHandler_.subscribeToStop([this]() { appStopper_.stop(); });
appStopper_.setOnComplete([this]() { signalsHandler_.notifyGracefulShutdownComplete(); });
}
int
@@ -102,32 +99,25 @@ ClioApplication::run(bool const useNgWebServer)
auto const threads = config_.get<uint16_t>("io_threads");
LOG(util::LogService::info()) << "Number of io threads = " << threads;
// Similarly we need a context to run ETL on
// In the future we can remove the raw ioc and use ctx instead
// This context should be declared above ioc because references to it are captured by tasks inside ioc
util::async::CoroExecutionContext ctx{threads};
// IO context to handle all incoming requests, as well as other things.
// This is not the only io context in the application.
boost::asio::io_context ioc{threads};
// Similarly we need a context to run ETLng on
// In the future we can remove the raw ioc and use ctx instead
util::async::CoroExecutionContext ctx{threads};
// Rate limiter, to prevent abuse
auto whitelistHandler = web::dosguard::WhitelistHandler{config_};
auto const dosguardWeights = web::dosguard::Weights::make(config_);
auto dosGuard = web::dosguard::DOSGuard{config_, whitelistHandler, dosguardWeights};
auto sweepHandler = web::dosguard::IntervalSweepHandler{config_, ioc, dosGuard};
auto cache = data::LedgerCache{};
auto cacheSaver = data::LedgerCacheSaver{config_, cache};
// Interface to the database
auto backend = data::makeBackend(config_, cache);
auto systemState = etl::SystemState::makeSystemState(config_);
cluster::ClusterCommunicationService clusterCommunicationService{
backend, std::make_unique<etl::WriterState>(systemState)
};
cluster::ClusterCommunicationService clusterCommunicationService{backend};
clusterCommunicationService.run();
auto const amendmentCenter = std::make_shared<data::AmendmentCenter const>(backend);
@@ -152,14 +142,20 @@ ClioApplication::run(bool const useNgWebServer)
// ETL uses the balancer to extract data.
// The server uses the balancer to forward RPCs to a rippled node.
// The balancer itself publishes to streams (transactions_proposed and accounts_proposed)
auto balancer = etl::LoadBalancer::makeLoadBalancer(
config_, ioc, backend, subscriptions, std::make_unique<util::MTRandomGenerator>(), ledgers
);
auto balancer = [&] -> std::shared_ptr<etlng::LoadBalancerInterface> {
if (config_.get<bool>("__ng_etl")) {
return etlng::LoadBalancer::makeLoadBalancer(
config_, ioc, backend, subscriptions, std::make_unique<util::MTRandomGenerator>(), ledgers
);
}
return etl::LoadBalancer::makeLoadBalancer(
config_, ioc, backend, subscriptions, std::make_unique<util::MTRandomGenerator>(), ledgers
);
}();
// ETL is responsible for writing and publishing to streams. In read-only mode, ETL only publishes
auto etl = etl::ETLService::makeETLService(
config_, std::move(systemState), ctx, backend, subscriptions, balancer, ledgers
);
auto etl = etl::ETLService::makeETLService(config_, ioc, ctx, backend, subscriptions, balancer, ledgers);
auto workQueue = rpc::WorkQueue::makeWorkQueue(config_);
auto counters = rpc::Counters::makeCounters(workQueue);
@@ -191,7 +187,7 @@ ClioApplication::run(bool const useNgWebServer)
return EXIT_FAILURE;
}
httpServer->onGet("/metrics", MetricsHandler{adminVerifier, workQueue});
httpServer->onGet("/metrics", MetricsHandler{adminVerifier});
httpServer->onGet("/health", HealthCheckHandler{});
httpServer->onGet("/cache_state", CacheStateHandler{cache});
auto requestHandler = RequestHandler{adminVerifier, handler};
@@ -205,16 +201,7 @@ ClioApplication::run(bool const useNgWebServer)
}
appStopper_.setOnStop(
Stopper::makeOnStopCallback(
httpServer.value(),
*balancer,
*etl,
*subscriptions,
*backend,
cacheSaver,
clusterCommunicationService,
ioc
)
Stopper::makeOnStopCallback(httpServer.value(), *balancer, *etl, *subscriptions, *backend, ioc)
);
// Blocks until stopped.
@@ -229,11 +216,6 @@ ClioApplication::run(bool const useNgWebServer)
auto handler = std::make_shared<web::RPCServerHandler<RPCEngineType>>(config_, backend, rpcEngine, etl, dosGuard);
auto const httpServer = web::makeHttpServer(config_, ioc, dosGuard, handler, cache);
appStopper_.setOnStop(
Stopper::makeOnStopCallback(
*httpServer, *balancer, *etl, *subscriptions, *backend, cacheSaver, clusterCommunicationService, ioc
)
);
// Blocks until stopped.
// When stopped, shared_ptrs fall out of scope

View File

@@ -38,18 +38,7 @@ Stopper::~Stopper()
void
Stopper::setOnStop(std::function<void(boost::asio::yield_context)> cb)
{
util::spawn(ctx_, [this, cb = std::move(cb)](auto yield) {
cb(yield);
if (onCompleteCallback_)
onCompleteCallback_();
});
}
void
Stopper::setOnComplete(std::function<void()> cb)
{
onCompleteCallback_ = std::move(cb);
util::spawn(ctx_, std::move(cb));
}
void

View File

@@ -19,15 +19,13 @@
#pragma once
#include "cluster/Concepts.hpp"
#include "data/BackendInterface.hpp"
#include "data/LedgerCacheSaver.hpp"
#include "etl/ETLServiceInterface.hpp"
#include "etl/LoadBalancerInterface.hpp"
#include "etlng/ETLServiceInterface.hpp"
#include "etlng/LoadBalancerInterface.hpp"
#include "feed/SubscriptionManagerInterface.hpp"
#include "util/CoroutineGroup.hpp"
#include "util/log/Logger.hpp"
#include "web/interface/Concepts.hpp"
#include "web/ng/Server.hpp"
#include <boost/asio/executor_work_guard.hpp>
#include <boost/asio/io_context.hpp>
@@ -44,7 +42,6 @@ namespace app {
class Stopper {
boost::asio::io_context ctx_;
std::thread worker_;
std::function<void()> onCompleteCallback_;
public:
/**
@@ -60,14 +57,6 @@ public:
void
setOnStop(std::function<void(boost::asio::yield_context)> cb);
/**
* @brief Set the callback to be called when graceful shutdown completes.
*
* @param cb The callback to be called when shutdown completes.
*/
void
setOnComplete(std::function<void()> cb);
/**
* @brief Stop the application and run the shutdown tasks.
*/
@@ -82,30 +71,21 @@ public:
* @param etl The ETL service to stop.
* @param subscriptions The subscription manager to stop.
* @param backend The backend to stop.
* @param cacheSaver The ledger cache saver
* @param clusterCommunicationService The cluster communication service to stop.
* @param ioc The io_context to stop.
* @return The callback to be called on application stop.
*/
template <
web::SomeServer ServerType,
data::SomeLedgerCacheSaver LedgerCacheSaverType,
cluster::SomeClusterCommunicationService ClusterCommunicationServiceType>
template <web::ng::SomeServer ServerType>
static std::function<void(boost::asio::yield_context)>
makeOnStopCallback(
ServerType& server,
etl::LoadBalancerInterface& balancer,
etl::ETLServiceInterface& etl,
etlng::LoadBalancerInterface& balancer,
etlng::ETLServiceInterface& etl,
feed::SubscriptionManagerInterface& subscriptions,
data::BackendInterface& backend,
LedgerCacheSaverType& cacheSaver,
ClusterCommunicationServiceType& clusterCommunicationService,
boost::asio::io_context& ioc
)
{
return [&](boost::asio::yield_context yield) {
cacheSaver.save();
util::CoroutineGroup coroutineGroup{yield};
coroutineGroup.spawn(yield, [&server](auto innerYield) {
server.stop(innerYield);
@@ -117,8 +97,6 @@ public:
});
coroutineGroup.asyncWait(yield);
clusterCommunicationService.stop();
etl.stop();
LOG(util::LogService::info()) << "ETL stopped";
@@ -128,8 +106,6 @@ public:
backend.waitForWritesToFinish();
LOG(util::LogService::info()) << "Backend writes finished";
cacheSaver.waitToFinish();
ioc.stop();
LOG(util::LogService::info()) << "io_context stopped";

View File

@@ -19,10 +19,7 @@
#include "app/WebHandlers.hpp"
#include "rpc/Errors.hpp"
#include "rpc/WorkQueue.hpp"
#include "util/Assert.hpp"
#include "util/CoroutineGroup.hpp"
#include "util/prometheus/Http.hpp"
#include "web/AdminVerificationStrategy.hpp"
#include "web/SubscriptionContextInterface.hpp"
@@ -34,7 +31,6 @@
#include <boost/asio/spawn.hpp>
#include <boost/beast/http/status.hpp>
#include <functional>
#include <memory>
#include <optional>
#include <string>
@@ -80,8 +76,8 @@ DisconnectHook::operator()(web::ng::Connection const& connection)
dosguard_.get().decrement(connection.ip());
}
MetricsHandler::MetricsHandler(std::shared_ptr<web::AdminVerificationStrategy> adminVerifier, rpc::WorkQueue& workQueue)
: adminVerifier_{std::move(adminVerifier)}, workQueue_{std::ref(workQueue)}
MetricsHandler::MetricsHandler(std::shared_ptr<web::AdminVerificationStrategy> adminVerifier)
: adminVerifier_{std::move(adminVerifier)}
{
}
@@ -90,45 +86,19 @@ MetricsHandler::operator()(
web::ng::Request const& request,
web::ng::ConnectionMetadata& connectionMetadata,
web::SubscriptionContextPtr,
boost::asio::yield_context yield
boost::asio::yield_context
)
{
std::optional<web::ng::Response> response;
util::CoroutineGroup coroutineGroup{yield, 1};
auto const onTaskComplete = coroutineGroup.registerForeign(yield);
ASSERT(onTaskComplete.has_value(), "Coroutine group can't be full");
auto const maybeHttpRequest = request.asHttpRequest();
ASSERT(maybeHttpRequest.has_value(), "Expected an HTTP request in Get");
auto const& httpRequest = maybeHttpRequest->get();
bool const postSuccessful = workQueue_.get().postCoro(
[this, &request, &response, &onTaskComplete = onTaskComplete.value(), &connectionMetadata](
boost::asio::yield_context
) mutable {
auto const maybeHttpRequest = request.asHttpRequest();
ASSERT(maybeHttpRequest.has_value(), "Expected an HTTP request in Get");
auto const& httpRequest = maybeHttpRequest->get();
auto maybeResponse = util::prometheus::handlePrometheusRequest(
httpRequest, adminVerifier_->isAdmin(httpRequest, connectionMetadata.ip())
);
ASSERT(maybeResponse.has_value(), "Got unexpected request for Prometheus");
response = web::ng::Response{std::move(maybeResponse).value(), request};
// notify the coroutine group that the foreign task is done
onTaskComplete();
},
/* isWhiteListed= */ true,
rpc::WorkQueue::Priority::High
// FIXME(#1702): Using web server thread to handle prometheus request. Better to post on work queue.
auto maybeResponse = util::prometheus::handlePrometheusRequest(
httpRequest, adminVerifier_->isAdmin(httpRequest, connectionMetadata.ip())
);
if (!postSuccessful) {
return web::ng::Response{
boost::beast::http::status::too_many_requests, rpc::makeError(rpc::RippledError::rpcTOO_BUSY), request
};
}
// Put the coroutine to sleep until the foreign task is done
coroutineGroup.asyncWait(yield);
ASSERT(response.has_value(), "Woke up coroutine without setting response");
return std::move(response).value();
ASSERT(maybeResponse.has_value(), "Got unexpected request for Prometheus");
return web::ng::Response{std::move(maybeResponse).value(), request};
}
web::ng::Response

View File

@@ -21,7 +21,6 @@
#include "data/LedgerCacheInterface.hpp"
#include "rpc/Errors.hpp"
#include "rpc/WorkQueue.hpp"
#include "util/log/Logger.hpp"
#include "web/AdminVerificationStrategy.hpp"
#include "web/SubscriptionContextInterface.hpp"
@@ -120,23 +119,20 @@ public:
*/
class MetricsHandler {
std::shared_ptr<web::AdminVerificationStrategy> adminVerifier_;
std::reference_wrapper<rpc::WorkQueue> workQueue_;
public:
/**
* @brief Construct a new MetricsHandler object
*
* @param adminVerifier The AdminVerificationStrategy to use for verifying the connection for admin access.
* @param workQueue The WorkQueue to use for handling the request.
*/
MetricsHandler(std::shared_ptr<web::AdminVerificationStrategy> adminVerifier, rpc::WorkQueue& workQueue);
MetricsHandler(std::shared_ptr<web::AdminVerificationStrategy> adminVerifier);
/**
* @brief The call of the function object.
*
* @param request The request to handle.
* @param connectionMetadata The connection metadata.
* @param yield The yield context.
* @return The response to the request.
*/
web::ng::Response
@@ -144,7 +140,7 @@ public:
web::ng::Request const& request,
web::ng::ConnectionMetadata& connectionMetadata,
web::SubscriptionContextPtr,
boost::asio::yield_context yield
boost::asio::yield_context
);
};

View File

@@ -1,134 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include "cluster/Backend.hpp"
#include "cluster/ClioNode.hpp"
#include "data/BackendInterface.hpp"
#include "etl/WriterState.hpp"
#include <boost/asio/spawn.hpp>
#include <boost/asio/thread_pool.hpp>
#include <boost/json/parse.hpp>
#include <boost/json/serialize.hpp>
#include <boost/json/value.hpp>
#include <boost/json/value_from.hpp>
#include <boost/json/value_to.hpp>
#include <boost/uuid/random_generator.hpp>
#include <boost/uuid/uuid.hpp>
#include <fmt/format.h>
#include <chrono>
#include <memory>
#include <utility>
#include <vector>
namespace cluster {
Backend::Backend(
boost::asio::thread_pool& ctx,
std::shared_ptr<data::BackendInterface> backend,
std::unique_ptr<etl::WriterStateInterface const> writerState,
std::chrono::steady_clock::duration readInterval,
std::chrono::steady_clock::duration writeInterval
)
: backend_(std::move(backend))
, writerState_(std::move(writerState))
, readerTask_(readInterval, ctx)
, writerTask_(writeInterval, ctx)
, selfUuid_(std::make_shared<boost::uuids::uuid>(boost::uuids::random_generator{}()))
{
}
void
Backend::run()
{
readerTask_.run([this](boost::asio::yield_context yield) {
auto clusterData = doRead(yield);
onNewState_(selfUuid_, std::make_shared<ClusterData>(std::move(clusterData)));
});
writerTask_.run([this]() { doWrite(); });
}
Backend::~Backend()
{
stop();
}
void
Backend::stop()
{
readerTask_.stop();
writerTask_.stop();
}
ClioNode::CUuid
Backend::selfId() const
{
return selfUuid_;
}
Backend::ClusterData
Backend::doRead(boost::asio::yield_context yield)
{
BackendInterface::ClioNodesDataFetchResult expectedResult;
try {
expectedResult = backend_->fetchClioNodesData(yield);
} catch (...) {
expectedResult = std::unexpected{"Failed to fetch Clio nodes data"};
}
if (!expectedResult.has_value()) {
return std::unexpected{std::move(expectedResult).error()};
}
std::vector<ClioNode> otherNodesData;
for (auto const& [uuid, nodeDataStr] : expectedResult.value()) {
if (uuid == *selfUuid_) {
continue;
}
boost::system::error_code errorCode;
auto const json = boost::json::parse(nodeDataStr, errorCode);
if (errorCode.failed()) {
return std::unexpected{fmt::format("Error parsing json from DB: {}", nodeDataStr)};
}
auto expectedNodeData = boost::json::try_value_to<ClioNode>(json);
if (expectedNodeData.has_error()) {
return std::unexpected{fmt::format("Error converting json to ClioNode: {}", nodeDataStr)};
}
*expectedNodeData->uuid = uuid;
otherNodesData.push_back(std::move(expectedNodeData).value());
}
otherNodesData.push_back(ClioNode::from(selfUuid_, *writerState_));
return otherNodesData;
}
void
Backend::doWrite()
{
auto const selfData = ClioNode::from(selfUuid_, *writerState_);
boost::json::value jsonValue{};
boost::json::value_from(selfData, jsonValue);
backend_->writeNodeMessage(*selfData.uuid, boost::json::serialize(jsonValue.as_object()));
}
} // namespace cluster

View File

@@ -1,147 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include "cluster/ClioNode.hpp"
#include "cluster/impl/RepeatedTask.hpp"
#include "data/BackendInterface.hpp"
#include "etl/WriterState.hpp"
#include "util/log/Logger.hpp"
#include <boost/asio/any_io_executor.hpp>
#include <boost/asio/cancellation_signal.hpp>
#include <boost/asio/execution_context.hpp>
#include <boost/asio/executor.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/strand.hpp>
#include <boost/asio/thread_pool.hpp>
#include <boost/signals2/connection.hpp>
#include <boost/signals2/signal.hpp>
#include <boost/signals2/variadic_signal.hpp>
#include <boost/uuid/uuid.hpp>
#include <chrono>
#include <concepts>
#include <memory>
#include <string>
#include <vector>
namespace cluster {
/**
* @brief Backend communication handler for cluster state synchronization.
*
* This class manages reading and writing cluster state information to/from the backend database.
* It periodically reads the state of other nodes in the cluster and writes the current node's state,
* enabling cluster-wide coordination and awareness.
*/
class Backend {
public:
/** @brief Type representing cluster data result - either a vector of nodes or an error message */
using ClusterData = std::expected<std::vector<ClioNode>, std::string>;
private:
util::Logger log_{"ClusterCommunication"};
std::shared_ptr<data::BackendInterface> backend_;
std::unique_ptr<etl::WriterStateInterface const> writerState_;
impl::RepeatedTask<boost::asio::thread_pool> readerTask_;
impl::RepeatedTask<boost::asio::thread_pool> writerTask_;
ClioNode::Uuid selfUuid_;
boost::signals2::signal<void(ClioNode::CUuid, std::shared_ptr<ClusterData const>)> onNewState_;
public:
/**
* @brief Construct a Backend communication handler.
*
* @param ctx The execution context for asynchronous operations
* @param backend Interface to the backend database
* @param writerState State indicating whether this node is writing to the database
* @param readInterval How often to read cluster state from the backend
* @param writeInterval How often to write this node's state to the backend
*/
Backend(
boost::asio::thread_pool& ctx,
std::shared_ptr<data::BackendInterface> backend,
std::unique_ptr<etl::WriterStateInterface const> writerState,
std::chrono::steady_clock::duration readInterval,
std::chrono::steady_clock::duration writeInterval
);
~Backend();
Backend(Backend&&) = delete;
Backend&
operator=(Backend&&) = delete;
Backend(Backend const&) = delete;
Backend&
operator=(Backend const&) = delete;
/**
* @brief Start the backend read and write tasks.
*
* Begins periodic reading of cluster state from the backend and writing of this node's state.
*/
void
run();
/**
* @brief Stop the backend read and write tasks.
*
* Stops all periodic tasks and waits for them to complete.
*/
void
stop();
/**
* @brief Subscribe to new cluster state notifications.
*
* @tparam S Callable type accepting (ClioNode::cUUID, ClusterData)
* @param s Subscriber callback to be invoked when new cluster state is available
* @return A connection object that can be used to unsubscribe
*/
template <typename S>
requires std::invocable<S, ClioNode::CUuid, std::shared_ptr<ClusterData const>>
boost::signals2::connection
subscribeToNewState(S&& s)
{
return onNewState_.connect(s);
}
/**
* @brief Get the UUID of this node in the cluster.
*
* @return The UUID of this node.
*/
ClioNode::CUuid
selfId() const;
private:
ClusterData
doRead(boost::asio::yield_context yield);
void
doWrite();
};
} // namespace cluster
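For orientation, below is a rough usage sketch of the `cluster::Backend` API removed in this diff. The `FixedWriterState` stub and its method signatures are assumptions inferred from `ClioNode::from` earlier in this diff, not declarations taken from the sources:

```cpp
// Illustrative sketch only; Backend's signatures come from the removed header
// above, while FixedWriterState is an assumed stub of etl::WriterStateInterface.
#include "cluster/Backend.hpp"

#include <boost/asio/thread_pool.hpp>

#include <chrono>
#include <iostream>
#include <memory>
#include <utility>

struct FixedWriterState : etl::WriterStateInterface {
    // Method names inferred from ClioNode::from in this diff; assumed virtual.
    bool isReadOnly() const override { return true; }
    bool isFallback() const override { return false; }
    bool isLoadingCache() const override { return false; }
    bool isWriting() const override { return false; }
};

void
runClusterBackend(std::shared_ptr<data::BackendInterface> db)
{
    using namespace std::chrono_literals;
    boost::asio::thread_pool pool{1};

    cluster::Backend backend{pool, std::move(db), std::make_unique<FixedWriterState const>(), 1s, 1s};

    // Log how many cluster nodes (including self) each successful read returned.
    backend.subscribeToNewState([](cluster::ClioNode::CUuid, auto data) {
        if (data->has_value())
            std::cout << "cluster nodes: " << (*data)->size() << '\n';
    });

    backend.run();  // periodic reads/writes start here...
}  // ...and stop on scope exit, since ~Backend() calls stop()
```

The one-second intervals and single-threaded pool are arbitrary choices for the sketch; in the real service they would presumably come from configuration.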

View File

@@ -1,7 +1,5 @@
add_library(clio_cluster)
target_sources(
clio_cluster PRIVATE Backend.cpp ClioNode.cpp ClusterCommunicationService.cpp Metrics.cpp WriterDecider.cpp
)
target_sources(clio_cluster PRIVATE ClioNode.cpp ClusterCommunicationService.cpp)
target_link_libraries(clio_cluster PRIVATE clio_util clio_data)

View File

@@ -19,7 +19,6 @@
#include "cluster/ClioNode.hpp"
#include "etl/WriterState.hpp"
#include "util/TimeUtils.hpp"
#include <boost/json/conversion.hpp>
@@ -27,72 +26,39 @@
#include <boost/json/value.hpp>
#include <boost/uuid/uuid.hpp>
#include <chrono>
#include <cstdint>
#include <memory>
#include <stdexcept>
#include <string>
#include <string_view>
#include <utility>
namespace cluster {
namespace {
struct JsonFields {
struct Fields {
static constexpr std::string_view const kUPDATE_TIME = "update_time";
static constexpr std::string_view const kDB_ROLE = "db_role";
};
} // namespace
ClioNode
ClioNode::from(ClioNode::Uuid uuid, etl::WriterStateInterface const& writerState)
{
auto const dbRole = [&writerState]() {
if (writerState.isReadOnly()) {
return ClioNode::DbRole::ReadOnly;
}
if (writerState.isFallback()) {
return ClioNode::DbRole::Fallback;
}
if (writerState.isLoadingCache()) {
return ClioNode::DbRole::LoadingCache;
}
return writerState.isWriting() ? ClioNode::DbRole::Writer : ClioNode::DbRole::NotWriter;
}();
return ClioNode{.uuid = std::move(uuid), .updateTime = std::chrono::system_clock::now(), .dbRole = dbRole};
}
void
tag_invoke(boost::json::value_from_tag, boost::json::value& jv, ClioNode const& node)
{
jv = {
{JsonFields::kUPDATE_TIME, util::systemTpToUtcStr(node.updateTime, ClioNode::kTIME_FORMAT)},
{JsonFields::kDB_ROLE, static_cast<int64_t>(node.dbRole)}
{Fields::kUPDATE_TIME, util::systemTpToUtcStr(node.updateTime, ClioNode::kTIME_FORMAT)},
};
}
ClioNode
tag_invoke(boost::json::value_to_tag<ClioNode>, boost::json::value const& jv)
{
auto const& updateTimeStr = jv.as_object().at(JsonFields::kUPDATE_TIME).as_string();
auto const& updateTimeStr = jv.as_object().at(Fields::kUPDATE_TIME).as_string();
auto const updateTime = util::systemTpFromUtcStr(std::string(updateTimeStr), ClioNode::kTIME_FORMAT);
if (!updateTime.has_value()) {
throw std::runtime_error("Failed to parse update time");
}
auto const dbRoleValue = jv.as_object().at(JsonFields::kDB_ROLE).as_int64();
if (dbRoleValue > static_cast<int64_t>(ClioNode::DbRole::MAX))
throw std::runtime_error("Invalid db_role value");
return ClioNode{
// JSON data doesn't contain the UUID, so we leave it empty here; it is filled in after parsing
.uuid = std::make_shared<boost::uuids::uuid>(),
.updateTime = updateTime.value(),
.dbRole = static_cast<ClioNode::DbRole>(dbRoleValue)
};
return ClioNode{.uuid = std::make_shared<boost::uuids::uuid>(), .updateTime = updateTime.value()};
}
} // namespace cluster
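
A round-trip sketch for the conversions above, illustrative only; since, as noted in the parser, the UUID is not part of the JSON payload, the sketch assumes the caller restores it.
#include "cluster/ClioNode.hpp"
#include <boost/json/value_from.hpp>
#include <boost/json/value_to.hpp>
void
roundTrip(cluster::ClioNode const& node)
{
    // Serialization picks up tag_invoke(value_from_tag, ...) via ADL
    auto const jv = boost::json::value_from(node);
    // Parsing picks up tag_invoke(value_to_tag<ClioNode>, ...); throws on malformed input
    auto restored = boost::json::value_to<cluster::ClioNode>(jv);
    // The UUID is not serialized, so it must be restored by the caller
    *restored.uuid = *node.uuid;
}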

View File

@@ -19,8 +19,6 @@
#pragma once
#include "etl/WriterState.hpp"
#include <boost/json/conversion.hpp>
#include <boost/json/value.hpp>
#include <boost/uuid/uuid.hpp>
@@ -39,37 +37,16 @@ struct ClioNode {
*/
static constexpr char const* kTIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ";
/**
* @brief Database role of a node in the cluster.
*
* Roles are used to coordinate which node writes to the database:
* - ReadOnly: Node is configured to never write (strict read-only mode)
* - NotWriter: Node can write but is currently not the designated writer
* - Writer: Node is actively writing to the database
* - Fallback: Node is using the fallback writer decision mechanism
*
* When any node in the cluster is in Fallback mode, the entire cluster switches
* from the cluster communication mechanism to the slower but more reliable
* database-based conflict detection mechanism.
*/
enum class DbRole { ReadOnly = 0, LoadingCache = 1, NotWriter = 2, Writer = 3, Fallback = 4, MAX = 4 };
// enum class WriterRole {
// ReadOnly,
// NotWriter,
// Writer
// };
using Uuid = std::shared_ptr<boost::uuids::uuid>;
using CUuid = std::shared_ptr<boost::uuids::uuid const>;
Uuid uuid; ///< The UUID of the node.
std::shared_ptr<boost::uuids::uuid> uuid; ///< The UUID of the node.
std::chrono::system_clock::time_point updateTime; ///< The time the data about the node was last updated.
DbRole dbRole; ///< The database role of the node
/**
* @brief Create a ClioNode from writer state.
*
* @param uuid The UUID of the node
* @param writerState The writer state to determine the node's database role
* @return A ClioNode with the current time and role derived from writerState
*/
static ClioNode
from(Uuid uuid, etl::WriterStateInterface const& writerState);
// WriterRole writerRole;
};
void

View File

@@ -19,37 +19,98 @@
#include "cluster/ClusterCommunicationService.hpp"
#include "cluster/ClioNode.hpp"
#include "data/BackendInterface.hpp"
#include "etl/WriterState.hpp"
#include "util/Assert.hpp"
#include "util/Spawn.hpp"
#include "util/log/Logger.hpp"
#include <boost/asio/bind_cancellation_slot.hpp>
#include <boost/asio/cancellation_type.hpp>
#include <boost/asio/error.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/use_future.hpp>
#include <boost/json/parse.hpp>
#include <boost/json/serialize.hpp>
#include <boost/json/value.hpp>
#include <boost/json/value_from.hpp>
#include <boost/json/value_to.hpp>
#include <boost/uuid/random_generator.hpp>
#include <boost/uuid/uuid.hpp>
#include <chrono>
#include <ctime>
#include <latch>
#include <memory>
#include <string>
#include <utility>
#include <vector>
namespace {
constexpr auto kTOTAL_WORKERS = 2uz; // 1 reading and 1 writing worker (coroutines)
} // namespace
namespace cluster {
ClusterCommunicationService::ClusterCommunicationService(
std::shared_ptr<data::BackendInterface> backend,
std::unique_ptr<etl::WriterStateInterface> writerState,
std::chrono::steady_clock::duration readInterval,
std::chrono::steady_clock::duration writeInterval
)
: backend_(ctx_, std::move(backend), writerState->clone(), readInterval, writeInterval)
, writerDecider_(ctx_, std::move(writerState))
: backend_(std::move(backend))
, readInterval_(readInterval)
, writeInterval_(writeInterval)
, finishedCountdown_(kTOTAL_WORKERS)
, selfData_{ClioNode{
.uuid = std::make_shared<boost::uuids::uuid>(boost::uuids::random_generator{}()),
.updateTime = std::chrono::system_clock::time_point{}
}}
{
nodesInClusterMetric_.set(1); // The node always sees itself
isHealthy_ = true;
}
void
ClusterCommunicationService::run()
{
backend_.subscribeToNewState([this](auto&&... args) {
metrics_.onNewState(std::forward<decltype(args)>(args)...);
ASSERT(not running_ and not stopped_, "Can only be run once");
running_ = true;
util::spawn(strand_, [this](boost::asio::yield_context yield) {
boost::asio::steady_timer timer(yield.get_executor());
boost::system::error_code ec;
while (running_) {
timer.expires_after(readInterval_);
auto token = cancelSignal_.slot();
timer.async_wait(boost::asio::bind_cancellation_slot(token, yield[ec]));
if (ec == boost::asio::error::operation_aborted or not running_)
break;
doRead(yield);
}
finishedCountdown_.count_down(1);
});
backend_.subscribeToNewState([this](auto&&... args) {
writerDecider_.onNewState(std::forward<decltype(args)>(args)...);
util::spawn(strand_, [this](boost::asio::yield_context yield) {
boost::asio::steady_timer timer(yield.get_executor());
boost::system::error_code ec;
while (running_) {
doWrite();
timer.expires_after(writeInterval_);
auto token = cancelSignal_.slot();
timer.async_wait(boost::asio::bind_cancellation_slot(token, yield[ec]));
if (ec == boost::asio::error::operation_aborted or not running_)
break;
}
finishedCountdown_.count_down(1);
});
backend_.run();
}
ClusterCommunicationService::~ClusterCommunicationService()
@@ -60,7 +121,107 @@ ClusterCommunicationService::~ClusterCommunicationService()
void
ClusterCommunicationService::stop()
{
backend_.stop();
if (stopped_)
return;
stopped_ = true;
// For ASan to track the concurrency correctly we must exit all coroutines before joining the ctx
running_ = false;
// cancelSignal_ is not thread-safe, so we emit on the same strand
boost::asio::spawn(
strand_, [this](auto&&) { cancelSignal_.emit(boost::asio::cancellation_type::all); }, boost::asio::use_future
)
.wait();
finishedCountdown_.wait();
ctx_.join();
}
std::shared_ptr<boost::uuids::uuid>
ClusterCommunicationService::selfUuid() const
{
// The UUID never changes, so it is safe to copy it without going through strand_
return selfData_.uuid;
}
ClioNode
ClusterCommunicationService::selfData() const
{
ClioNode result{};
util::spawn(strand_, [this, &result](boost::asio::yield_context) { result = selfData_; });
return result;
}
std::expected<std::vector<ClioNode>, std::string>
ClusterCommunicationService::clusterData() const
{
if (not isHealthy_) {
return std::unexpected{"Service is not healthy"};
}
std::vector<ClioNode> result;
util::spawn(strand_, [this, &result](boost::asio::yield_context) {
result = otherNodesData_;
result.push_back(selfData_);
});
return result;
}
void
ClusterCommunicationService::doRead(boost::asio::yield_context yield)
{
otherNodesData_.clear();
BackendInterface::ClioNodesDataFetchResult expectedResult;
try {
expectedResult = backend_->fetchClioNodesData(yield);
} catch (...) {
expectedResult = std::unexpected{"Failed to fetch Clio nodes data"};
}
if (!expectedResult.has_value()) {
LOG(log_.error()) << "Failed to fetch nodes data";
isHealthy_ = false;
return;
}
// Build into a new vector so that otherNodesData_ never holds partially parsed data
std::vector<ClioNode> otherNodesData;
for (auto const& [uuid, nodeDataStr] : expectedResult.value()) {
if (uuid == *selfData_.uuid) {
continue;
}
boost::system::error_code errorCode;
auto const json = boost::json::parse(nodeDataStr, errorCode);
if (errorCode.failed()) {
LOG(log_.error()) << "Error parsing json from DB: " << nodeDataStr;
isHealthy_ = false;
return;
}
auto expectedNodeData = boost::json::try_value_to<ClioNode>(json);
if (expectedNodeData.has_error()) {
LOG(log_.error()) << "Error converting json to ClioNode: " << json;
isHealthy_ = false;
return;
}
*expectedNodeData->uuid = uuid;
otherNodesData.push_back(std::move(expectedNodeData).value());
}
otherNodesData_ = std::move(otherNodesData);
nodesInClusterMetric_.set(otherNodesData_.size() + 1);
isHealthy_ = true;
}
void
ClusterCommunicationService::doWrite()
{
selfData_.updateTime = std::chrono::system_clock::now();
boost::json::value jsonValue{};
boost::json::value_from(selfData_, jsonValue);
backend_->writeNodeMessage(*selfData_.uuid, boost::json::serialize(jsonValue.as_object()));
}
} // namespace cluster
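
A lifecycle sketch of the service, illustrative only; db and writerState are assumed to come from the application's setup code, and the constructor is used as declared in the header below.
#include "cluster/ClusterCommunicationService.hpp"
#include <memory>
#include <utility>
void
runClusterService(std::shared_ptr<data::BackendInterface> db, std::unique_ptr<etl::WriterStateInterface> writerState)
{
    cluster::ClusterCommunicationService service{std::move(db), std::move(writerState)};
    service.run();
    if (auto const nodes = service.clusterData(); nodes.has_value()) {
        // nodes->size() includes this node itself
    } else {
        // nodes.error() explains why the service is currently unhealthy
    }
    service.stop();  // joins the internal thread pool
}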

View File

@@ -19,12 +19,13 @@
#pragma once
#include "cluster/Backend.hpp"
#include "cluster/Concepts.hpp"
#include "cluster/Metrics.hpp"
#include "cluster/WriterDecider.hpp"
#include "cluster/ClioNode.hpp"
#include "cluster/ClusterCommunicationServiceInterface.hpp"
#include "data/BackendInterface.hpp"
#include "etl/WriterState.hpp"
#include "util/log/Logger.hpp"
#include "util/prometheus/Bool.hpp"
#include "util/prometheus/Gauge.hpp"
#include "util/prometheus/Prometheus.hpp"
#include <boost/asio/cancellation_signal.hpp>
#include <boost/asio/spawn.hpp>
@@ -32,49 +33,67 @@
#include <boost/asio/thread_pool.hpp>
#include <boost/uuid/uuid.hpp>
#include <atomic>
#include <chrono>
#include <latch>
#include <memory>
#include <string>
#include <vector>
namespace cluster {
/**
* @brief Service to post and read messages to/from the cluster, using a backend for the communication.
*/
class ClusterCommunicationService : public ClusterCommunicationServiceTag {
class ClusterCommunicationService : public ClusterCommunicationServiceInterface {
util::prometheus::GaugeInt& nodesInClusterMetric_ = PrometheusService::gaugeInt(
"cluster_nodes_total_number",
{},
"Total number of nodes this node can detect in the cluster."
);
util::prometheus::Bool isHealthy_ = PrometheusService::boolMetric(
"cluster_communication_is_healthy",
{},
"Whether cluster communication service is operating healthy (1 - healthy, 0 - we have a problem)"
);
// TODO: Use util::async::CoroExecutionContext after https://github.com/XRPLF/clio/issues/1973 is implemented
boost::asio::thread_pool ctx_{1};
Backend backend_;
Metrics metrics_;
WriterDecider writerDecider_;
boost::asio::strand<boost::asio::thread_pool::executor_type> strand_ = boost::asio::make_strand(ctx_);
util::Logger log_{"ClusterCommunication"};
std::shared_ptr<data::BackendInterface> backend_;
std::chrono::steady_clock::duration readInterval_;
std::chrono::steady_clock::duration writeInterval_;
boost::asio::cancellation_signal cancelSignal_;
std::latch finishedCountdown_;
std::atomic_bool running_ = false;
bool stopped_ = false;
ClioNode selfData_;
std::vector<ClioNode> otherNodesData_;
public:
static constexpr std::chrono::milliseconds kDEFAULT_READ_INTERVAL{1000};
static constexpr std::chrono::milliseconds kDEFAULT_WRITE_INTERVAL{1000};
static constexpr std::chrono::milliseconds kDEFAULT_READ_INTERVAL{2100};
static constexpr std::chrono::milliseconds kDEFAULT_WRITE_INTERVAL{1200};
/**
* @brief Construct a new Cluster Communication Service object.
*
* @param backend The backend to use for communication.
* @param writerState The state showing whether clio is writing to the database.
* @param readInterval The interval to read messages from the cluster.
* @param writeInterval The interval to write messages to the cluster.
*/
ClusterCommunicationService(
std::shared_ptr<data::BackendInterface> backend,
std::unique_ptr<etl::WriterStateInterface> writerState,
std::chrono::steady_clock::duration readInterval = kDEFAULT_READ_INTERVAL,
std::chrono::steady_clock::duration writeInterval = kDEFAULT_WRITE_INTERVAL
);
~ClusterCommunicationService() override;
ClusterCommunicationService(ClusterCommunicationService&&) = delete;
ClusterCommunicationService(ClusterCommunicationService const&) = delete;
ClusterCommunicationService&
operator=(ClusterCommunicationService&&) = delete;
ClusterCommunicationService&
operator=(ClusterCommunicationService const&) = delete;
/**
* @brief Start the service.
*/
@@ -86,6 +105,44 @@ public:
*/
void
stop();
ClusterCommunicationService(ClusterCommunicationService&&) = delete;
ClusterCommunicationService(ClusterCommunicationService const&) = delete;
ClusterCommunicationService&
operator=(ClusterCommunicationService&&) = delete;
ClusterCommunicationService&
operator=(ClusterCommunicationService const&) = delete;
/**
* @brief Get the UUID of the current node.
*
* @return The UUID of the current node.
*/
std::shared_ptr<boost::uuids::uuid>
selfUuid() const;
/**
* @brief Get the data of the current node.
*
* @return The data of the current node.
*/
ClioNode
selfData() const override;
/**
* @brief Get the data of all nodes in the cluster (including self).
*
* @return The data of all nodes in the cluster or error if the service is not healthy.
*/
std::expected<std::vector<ClioNode>, std::string>
clusterData() const override;
private:
void
doRead(boost::asio::yield_context yield);
void
doWrite();
};
} // namespace cluster

View File

@@ -17,31 +17,38 @@
*/
//==============================================================================
#include "cluster/Metrics.hpp"
#pragma once
#include "cluster/Backend.hpp"
#include "cluster/ClioNode.hpp"
#include <memory>
#include <expected>
#include <string>
#include <vector>
namespace cluster {
Metrics::Metrics()
{
nodesInClusterMetric_.set(1); // The node always sees itself
isHealthy_ = true;
}
/**
* @brief Interface for the cluster communication service.
*/
class ClusterCommunicationServiceInterface {
public:
virtual ~ClusterCommunicationServiceInterface() = default;
void
Metrics::onNewState(ClioNode::CUuid, std::shared_ptr<Backend::ClusterData const> clusterData)
{
if (clusterData->has_value()) {
isHealthy_ = true;
nodesInClusterMetric_.set(clusterData->value().size());
} else {
isHealthy_ = false;
nodesInClusterMetric_.set(1);
}
}
/**
* @brief Get the data of the current node.
*
* @return The data of the current node.
*/
[[nodiscard]] virtual ClioNode
selfData() const = 0;
/**
* @brief Get the data of all nodes in the cluster (including self).
*
* @return The data of all nodes in the cluster or error if the service is not healthy.
*/
[[nodiscard]] virtual std::expected<std::vector<ClioNode>, std::string>
clusterData() const = 0;
};
} // namespace cluster
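
The interface also makes the service easy to fake in tests. A minimal stub might look like this (illustrative only; StubClusterService is a hypothetical name):
#include "cluster/ClusterCommunicationServiceInterface.hpp"
#include <expected>
#include <string>
#include <vector>
struct StubClusterService : cluster::ClusterCommunicationServiceInterface {
    cluster::ClioNode self{};
    cluster::ClioNode
    selfData() const override
    {
        return self;
    }
    std::expected<std::vector<cluster::ClioNode>, std::string>
    clusterData() const override
    {
        return std::vector{self};  // a single-node "cluster"
    }
};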

View File

@@ -1,76 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include "cluster/Backend.hpp"
#include "cluster/ClioNode.hpp"
#include "util/prometheus/Bool.hpp"
#include "util/prometheus/Gauge.hpp"
#include "util/prometheus/Prometheus.hpp"
#include <memory>
namespace cluster {
/**
* @brief Manages Prometheus metrics for cluster communication and node tracking.
*
* This class tracks cluster-related metrics including:
* - Total number of nodes detected in the cluster
* - Health status of cluster communication
*/
class Metrics {
/** @brief Gauge tracking the total number of nodes visible in the cluster */
util::prometheus::GaugeInt& nodesInClusterMetric_ = PrometheusService::gaugeInt(
"cluster_nodes_total_number",
{},
"Total number of nodes this node can detect in the cluster."
);
/** @brief Boolean metric indicating whether cluster communication is healthy */
util::prometheus::Bool isHealthy_ = PrometheusService::boolMetric(
"cluster_communication_is_healthy",
{},
"Whether cluster communication service is operating healthy (1 - healthy, 0 - we have a problem)"
);
public:
/**
* @brief Constructs a Metrics instance and initializes metrics.
*
* Sets the initial node count to 1 (self) and marks communication as healthy.
*/
Metrics();
/**
* @brief Updates metrics based on new cluster state.
*
* This callback is invoked when cluster state changes. It updates:
* - Health status based on whether cluster data is available
* - Node count to reflect the current cluster size
*
* @param uuid The UUID of the node (unused in current implementation)
* @param clusterData Shared pointer to the current cluster data; may be empty if communication failed
*/
void
onNewState(ClioNode::CUuid uuid, std::shared_ptr<Backend::ClusterData const> clusterData);
};
} // namespace cluster
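
Wiring sketch, mirroring how the service forwards Backend state updates into Metrics (illustrative; wireMetrics is a hypothetical helper):
#include "cluster/Backend.hpp"
#include "cluster/Metrics.hpp"
#include <utility>
void
wireMetrics(cluster::Backend& backend, cluster::Metrics& metrics)
{
    backend.subscribeToNewState([&metrics](auto&&... args) {
        metrics.onNewState(std::forward<decltype(args)>(args)...);
    });
}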

View File

@@ -1,98 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include "cluster/WriterDecider.hpp"
#include "cluster/Backend.hpp"
#include "cluster/ClioNode.hpp"
#include "etl/WriterState.hpp"
#include "util/Assert.hpp"
#include "util/Spawn.hpp"
#include <boost/asio/thread_pool.hpp>
#include <algorithm>
#include <memory>
#include <utility>
#include <vector>
namespace cluster {
WriterDecider::WriterDecider(boost::asio::thread_pool& ctx, std::unique_ptr<etl::WriterStateInterface> writerState)
: ctx_(ctx), writerState_(std::move(writerState))
{
}
void
WriterDecider::onNewState(ClioNode::CUuid selfId, std::shared_ptr<Backend::ClusterData const> clusterData)
{
if (not clusterData->has_value())
return;
util::spawn(
ctx_,
[writerState = writerState_->clone(),
selfId = std::move(selfId),
clusterData = clusterData->value()](auto&&) mutable {
auto const selfData =
std::ranges::find_if(clusterData, [&selfId](ClioNode const& node) { return node.uuid == selfId; });
ASSERT(selfData != clusterData.end(), "Self data should always be in the cluster data");
if (selfData->dbRole == ClioNode::DbRole::Fallback) {
return;
}
if (selfData->dbRole == ClioNode::DbRole::ReadOnly) {
writerState->giveUpWriting();
return;
}
// If any node in the cluster is in Fallback mode, the entire cluster must switch
// to the fallback writer decision mechanism for consistency
if (std::ranges::any_of(clusterData, [](ClioNode const& node) {
return node.dbRole == ClioNode::DbRole::Fallback;
})) {
writerState->setWriterDecidingFallback();
return;
}
// We are not ReadOnly and there is no Fallback in the cluster
std::ranges::sort(clusterData, [](ClioNode const& lhs, ClioNode const& rhs) {
return *lhs.uuid < *rhs.uuid;
});
auto const it = std::ranges::find_if(clusterData, [](ClioNode const& node) {
return node.dbRole == ClioNode::DbRole::NotWriter or node.dbRole == ClioNode::DbRole::Writer;
});
if (it == clusterData.end()) {
// No writer nodes in the cluster yet
return;
}
if (*it->uuid == *selfId) {
writerState->startWriting();
} else {
writerState->giveUpWriting();
}
}
);
}
} // namespace cluster
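
The selection rule above, distilled over plain data as a sketch (Role and Node here are simplified stand-ins for ClioNode::DbRole and ClioNode; illustrative only):
#include <algorithm>
#include <optional>
#include <vector>
enum class Role { ReadOnly, LoadingCache, NotWriter, Writer, Fallback };
struct Node {
    int uuid;
    Role role;
};
// Returns the uuid of the node that should write, if any
std::optional<int>
chooseWriter(std::vector<Node> nodes)
{
    std::ranges::sort(nodes, {}, &Node::uuid);  // deterministic order across the cluster
    auto const it = std::ranges::find_if(nodes, [](Node const& n) {
        return n.role == Role::NotWriter or n.role == Role::Writer;
    });
    if (it == nodes.end())
        return std::nullopt;  // no writer-capable node in the cluster yet
    return it->uuid;
}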

View File

@@ -1,75 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include "cluster/Backend.hpp"
#include "cluster/ClioNode.hpp"
#include "etl/WriterState.hpp"
#include <boost/asio/thread_pool.hpp>
#include <memory>
namespace cluster {
/**
* @brief Decides which node in the cluster should be the writer based on cluster state.
*
* This class monitors cluster state changes and determines whether the current node
* should act as the writer to the database. The decision is made by:
* 1. Sorting all nodes by UUID for deterministic ordering
* 2. Selecting the first node whose role is NotWriter or Writer
* 3. Activating writing if that node is the current node, otherwise deactivating it
*
* This ensures only one node in the cluster actively writes to the database at a time.
*/
class WriterDecider {
/** @brief Thread pool for spawning asynchronous tasks */
boost::asio::thread_pool& ctx_;
/** @brief Interface for controlling the writer state of this node */
std::unique_ptr<etl::WriterStateInterface> writerState_;
public:
/**
* @brief Constructs a WriterDecider.
*
* @param ctx Thread pool for executing asynchronous operations
* @param writerState Writer state interface for controlling write operations
*/
WriterDecider(boost::asio::thread_pool& ctx, std::unique_ptr<etl::WriterStateInterface> writerState);
/**
* @brief Handles cluster state changes and decides whether this node should be the writer.
*
* This method is called when cluster state changes. It asynchronously:
* - Sorts all nodes by UUID to establish a deterministic order
* - Identifies the first node allowed to write (not ReadOnly)
* - Activates writing if this node is selected, otherwise deactivates writing
* - Does nothing if no writer-capable node is present in the cluster yet
*
* @param selfId The UUID of the current node
* @param clusterData Shared pointer to current cluster data; may be empty if communication failed
*/
void
onNewState(ClioNode::CUuid selfId, std::shared_ptr<Backend::ClusterData const> clusterData);
};
} // namespace cluster

View File

@@ -1,104 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include "util/Assert.hpp"
#include "util/Spawn.hpp"
#include <boost/asio/bind_cancellation_slot.hpp>
#include <boost/asio/cancellation_signal.hpp>
#include <boost/asio/cancellation_type.hpp>
#include <boost/asio/error.hpp>
#include <boost/asio/executor.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/strand.hpp>
#include <boost/asio/use_future.hpp>
#include <atomic>
#include <chrono>
#include <concepts>
#include <semaphore>
namespace cluster::impl {
// TODO: Try to replace util::Repeat with this. https://github.com/XRPLF/clio/issues/2926
template <typename Context>
class RepeatedTask {
std::chrono::steady_clock::duration interval_;
boost::asio::strand<typename Context::executor_type> strand_;
enum class State { Running, Stopped };
std::atomic<State> state_ = State::Stopped;
std::binary_semaphore semaphore_{0};
boost::asio::steady_timer timer_;
public:
RepeatedTask(std::chrono::steady_clock::duration interval, Context& ctx)
: interval_(interval), strand_(boost::asio::make_strand(ctx)), timer_(strand_)
{
}
~RepeatedTask()
{
stop();
}
template <typename Fn>
requires std::invocable<Fn, boost::asio::yield_context> or std::invocable<Fn>
void
run(Fn&& f)
{
ASSERT(state_ == State::Stopped, "Can only be run once");
state_ = State::Running;
util::spawn(strand_, [this, f = std::forward<Fn>(f)](boost::asio::yield_context yield) {
boost::system::error_code ec;
while (state_ == State::Running) {
timer_.expires_after(interval_);
timer_.async_wait(yield[ec]);
if (ec or state_ != State::Running)
break;
if constexpr (std::invocable<decltype(f), boost::asio::yield_context>) {
f(yield);
} else {
f();
}
}
semaphore_.release();
});
}
void
stop()
{
if (auto expected = State::Running; not state_.compare_exchange_strong(expected, State::Stopped))
return; // Already stopped or not started
boost::asio::spawn(strand_, [this](auto&&) { timer_.cancel(); }, boost::asio::use_future).wait();
semaphore_.acquire();
}
};
} // namespace cluster::impl
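
Usage sketch for RepeatedTask, illustrative only (assumes the header above is included; tickEveryHalfSecond is a hypothetical function):
#include <boost/asio/thread_pool.hpp>
#include <chrono>
#include <iostream>
void
tickEveryHalfSecond()
{
    boost::asio::thread_pool ctx{1};
    cluster::impl::RepeatedTask<boost::asio::thread_pool> task{std::chrono::milliseconds{500}, ctx};
    task.run([] { std::cout << "tick\n"; });  // also accepts a callable taking a yield_context
    // ...
    task.stop();  // blocks until the coroutine has exited
    ctx.join();
}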

View File

@@ -146,12 +146,9 @@ AmendmentCenter::isEnabled(AmendmentKey const& key, uint32_t seq) const
bool
AmendmentCenter::isEnabled(boost::asio::yield_context yield, AmendmentKey const& key, uint32_t seq) const
{
try {
if (auto const listAmendments = fetchAmendmentsList(yield, seq); listAmendments)
return lookupAmendment(all_, *listAmendments, key);
} catch (std::runtime_error const&) {
return false; // Some old ledgers do not contain the Amendments ledger object, so we do the best we can for now
}
if (auto const listAmendments = fetchAmendmentsList(yield, seq); listAmendments)
return lookupAmendment(all_, *listAmendments, key);
return false;
}
@@ -160,19 +157,13 @@ AmendmentCenter::isEnabled(boost::asio::yield_context yield, std::vector<Amendme
{
namespace rg = std::ranges;
try {
if (auto const listAmendments = fetchAmendmentsList(yield, seq); listAmendments) {
std::vector<bool> out;
rg::transform(keys, std::back_inserter(out), [this, &listAmendments](auto const& key) {
return lookupAmendment(all_, *listAmendments, key);
});
if (auto const listAmendments = fetchAmendmentsList(yield, seq); listAmendments) {
std::vector<bool> out;
rg::transform(keys, std::back_inserter(out), [this, &listAmendments](auto const& key) {
return lookupAmendment(all_, *listAmendments, key);
});
return out;
}
} catch (std::runtime_error const&) {
return std::vector<bool>(
keys.size(), false
); // Some old ledgers do not contain the Amendments ledger object, so we do the best we can for now
return out;
}
return std::vector<bool>(keys.size(), false);

View File

@@ -147,12 +147,6 @@ struct Amendments {
REGISTER(fixAMMClawbackRounding);
REGISTER(fixMPTDeliveredAmount);
REGISTER(fixPriceOracleOrder);
REGISTER(DynamicMPT);
REGISTER(fixDelegateV1_1);
REGISTER(fixDirectoryLimit);
REGISTER(fixIncludeKeyletFields);
REGISTER(fixTokenEscrowV1);
REGISTER(LendingProtocol);
// Obsolete but supported by libxrpl
REGISTER(CryptoConditionsSuite);

View File

@@ -270,7 +270,7 @@ BackendInterface::updateRange(uint32_t newMax)
{
std::scoped_lock const lck(rngMtx_);
if (range_.has_value() and newMax < range_->maxSequence) {
if (range_.has_value() && newMax < range_->maxSequence) {
ASSERT(
false,
"Range shouldn't exist yet or newMax should be at least range->maxSequence. newMax = {}, "
@@ -280,14 +280,11 @@ BackendInterface::updateRange(uint32_t newMax)
);
}
updateRangeImpl(newMax);
}
void
BackendInterface::forceUpdateRange(uint32_t newMax)
{
std::scoped_lock const lck(rngMtx_);
updateRangeImpl(newMax);
if (!range_.has_value()) {
range_ = {.minSequence = newMax, .maxSequence = newMax};
} else {
range_->maxSequence = newMax;
}
}
void
@@ -413,14 +410,4 @@ BackendInterface::fetchFees(std::uint32_t const seq, boost::asio::yield_context
return fees;
}
void
BackendInterface::updateRangeImpl(uint32_t newMax)
{
if (!range_.has_value()) {
range_ = {.minSequence = newMax, .maxSequence = newMax};
} else {
range_->maxSequence = newMax;
}
}
} // namespace data
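
The range-update logic above, distilled into a standalone sketch (Range is a simplified stand-in for data::LedgerRange; illustrative only):
#include <cstdint>
#include <optional>
struct Range {
    std::uint32_t min = 0;
    std::uint32_t max = 0;
};
void
updateRange(std::optional<Range>& range, std::uint32_t newMax)
{
    if (!range.has_value()) {
        range = Range{.min = newMax, .max = newMax};  // the first ledger seen defines both bounds
    } else {
        range->max = newMax;  // afterwards only the upper bound moves forward
    }
}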

View File

@@ -249,15 +249,6 @@ public:
void
updateRange(uint32_t newMax);
/**
* @brief Updates the range of sequences that are stored in the DB without any checks
* @note In the most cases you should use updateRange() instead
*
* @param newMax The new maximum sequence available
*/
void
forceUpdateRange(uint32_t newMax);
/**
* @brief Sets the range of sequences that are stored in the DB.
*
@@ -785,9 +776,6 @@ private:
*/
virtual bool
doFinishWrites() = 0;
void
updateRangeImpl(uint32_t newMax);
};
} // namespace data

View File

@@ -5,7 +5,6 @@ target_sources(
BackendCounters.cpp
BackendInterface.cpp
LedgerCache.cpp
LedgerCacheSaver.cpp
LedgerHeaderCache.cpp
cassandra/impl/Future.cpp
cassandra/impl/Cluster.cpp
@@ -15,9 +14,6 @@ target_sources(
cassandra/impl/SslContext.cpp
cassandra/Handle.cpp
cassandra/SettingsProvider.cpp
impl/InputFile.cpp
impl/LedgerCacheFile.cpp
impl/OutputFile.cpp
)
target_link_libraries(clio_data PUBLIC cassandra-cpp-driver::cassandra-cpp-driver clio_util)

View File

@@ -20,22 +20,16 @@
#include "data/LedgerCache.hpp"
#include "data/Types.hpp"
#include "data/impl/LedgerCacheFile.hpp"
#include "etl/Models.hpp"
#include "etlng/Models.hpp"
#include "util/Assert.hpp"
#include <xrpl/basics/base_uint.h>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <map>
#include <mutex>
#include <optional>
#include <shared_mutex>
#include <string>
#include <utility>
#include <vector>
namespace data {
@@ -95,7 +89,7 @@ LedgerCache::update(std::vector<LedgerObject> const& objs, uint32_t seq, bool is
}
void
LedgerCache::update(std::vector<etl::model::Object> const& objs, uint32_t seq)
LedgerCache::update(std::vector<etlng::model::Object> const& objs, uint32_t seq)
{
if (disabled_)
return;
@@ -257,34 +251,4 @@ LedgerCache::getSuccessorHitRate() const
return static_cast<float>(successorHitCounter_.get().value()) / successorReqCounter_.get().value();
}
std::expected<void, std::string>
LedgerCache::saveToFile(std::string const& path) const
{
if (not isFull()) {
return std::unexpected{"Ledger cache is not full"};
}
impl::LedgerCacheFile file{path};
std::shared_lock const lock{mtx_};
impl::LedgerCacheFile::DataView const data{.latestSeq = latestSeq_, .map = map_, .deleted = deleted_};
return file.write(data);
}
std::expected<void, std::string>
LedgerCache::loadFromFile(std::string const& path, uint32_t minLatestSequence)
{
impl::LedgerCacheFile file{path};
auto data = file.read(minLatestSequence);
if (not data.has_value()) {
return std::unexpected(std::move(data).error());
}
auto [latestSeq, map, deleted] = std::move(data).value();
std::unique_lock const lock{mtx_};
latestSeq_ = latestSeq;
map_ = std::move(map);
deleted_ = std::move(deleted);
full_ = true;
return {};
}
} // namespace data
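
A save/load round-trip sketch for the API shown on the removed side of this diff (the file path and minLatestSequence value are hypothetical):
#include "data/LedgerCache.hpp"
void
persistCache(data::LedgerCache& cache)
{
    if (auto const saved = cache.saveToFile("/var/lib/clio/cache.bin"); not saved.has_value()) {
        // saved.error() holds the reason, e.g. "Ledger cache is not full"
        return;
    }
    // On restart, refuse cache files whose latest sequence is too old
    if (auto const loaded = cache.loadFromFile("/var/lib/clio/cache.bin", 1000); not loaded.has_value()) {
        // fall back to a cold cache and load from the database
    }
}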

View File

@@ -21,7 +21,7 @@
#include "data/LedgerCacheInterface.hpp"
#include "data/Types.hpp"
#include "etl/Models.hpp"
#include "etlng/Models.hpp"
#include "util/prometheus/Bool.hpp"
#include "util/prometheus/Counter.hpp"
#include "util/prometheus/Label.hpp"
@@ -37,7 +37,6 @@
#include <map>
#include <optional>
#include <shared_mutex>
#include <string>
#include <unordered_set>
#include <vector>
@@ -47,16 +46,11 @@ namespace data {
* @brief Cache for an entire ledger.
*/
class LedgerCache : public LedgerCacheInterface {
public:
/** @brief An entry of the cache */
struct CacheEntry {
uint32_t seq = 0;
Blob blob;
};
using CacheMap = std::map<ripple::uint256, CacheEntry>;
private:
// counters for fetchLedgerObject(s) hit rate
std::reference_wrapper<util::prometheus::CounterInt> objectReqCounter_{PrometheusService::counterInt(
"ledger_cache_counter_total_number",
@@ -79,8 +73,8 @@ private:
util::prometheus::Labels({{"type", "cache_hit"}, {"fetch", "successor_key"}})
)};
CacheMap map_;
CacheMap deleted_;
std::map<ripple::uint256, CacheEntry> map_;
std::map<ripple::uint256, CacheEntry> deleted_;
mutable std::shared_mutex mtx_;
std::condition_variable_any cv_;
@@ -104,7 +98,7 @@ public:
update(std::vector<LedgerObject> const& objs, uint32_t seq, bool isBackground) override;
void
update(std::vector<etl::model::Object> const& objs, uint32_t seq) override;
update(std::vector<etlng::model::Object> const& objs, uint32_t seq) override;
std::optional<Blob>
get(ripple::uint256 const& key, uint32_t seq) const override;
@@ -144,12 +138,6 @@ public:
void
waitUntilCacheContainsSeq(uint32_t seq) override;
std::expected<void, std::string>
saveToFile(std::string const& path) const override;
std::expected<void, std::string>
loadFromFile(std::string const& path, uint32_t minLatestSequence) override;
};
} // namespace data

View File

@@ -20,16 +20,14 @@
#pragma once
#include "data/Types.hpp"
#include "etl/Models.hpp"
#include "etlng/Models.hpp"
#include <xrpl/basics/base_uint.h>
#include <xrpl/basics/hardened_hash.h>
#include <cstddef>
#include <cstdint>
#include <expected>
#include <optional>
#include <string>
#include <vector>
namespace data {
@@ -65,7 +63,7 @@ public:
* @param seq The sequence to update cache for
*/
virtual void
update(std::vector<etl::model::Object> const& objs, uint32_t seq) = 0;
update(std::vector<etlng::model::Object> const& objs, uint32_t seq) = 0;
/**
* @brief Fetch a cached object by its key and sequence number.
@@ -170,27 +168,6 @@ public:
*/
virtual void
waitUntilCacheContainsSeq(uint32_t seq) = 0;
/**
* @brief Save the cache to file
* @note This operation takes about 7 seconds and it keeps a shared lock of mtx_
*
* @param path The file path to save the cache to
* @return An error as a string if any
*/
[[nodiscard]] virtual std::expected<void, std::string>
saveToFile(std::string const& path) const = 0;
/**
* @brief Load the cache from file
* @note This operation takes about 7 seconds and it keeps mtx_ exclusively locked
*
* @param path The file path to load data from
* @param minLatestSequence The minimum allowed value of the latestLedgerSequence in cache file
* @return An error as a string if any
*/
[[nodiscard]] virtual std::expected<void, std::string>
loadFromFile(std::string const& path, uint32_t minLatestSequence) = 0;
};
} // namespace data

View File

@@ -1,75 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include "data/LedgerCacheSaver.hpp"
#include "data/LedgerCacheInterface.hpp"
#include "util/Assert.hpp"
#include "util/Profiler.hpp"
#include "util/log/Logger.hpp"
#include <string>
#include <thread>
namespace data {
LedgerCacheSaver::LedgerCacheSaver(util::config::ClioConfigDefinition const& config, LedgerCacheInterface const& cache)
: cacheFilePath_(config.maybeValue<std::string>("cache.file.path"))
, cache_(cache)
, isAsync_(config.get<bool>("cache.file.async_save"))
{
}
LedgerCacheSaver::~LedgerCacheSaver()
{
waitToFinish();
}
void
LedgerCacheSaver::save()
{
ASSERT(not savingThread_.has_value(), "Multiple save() calls are not allowed");
savingThread_ = std::thread([this]() {
if (not cacheFilePath_.has_value()) {
return;
}
LOG(util::LogService::info()) << "Saving ledger cache to " << *cacheFilePath_;
if (auto const [success, durationMs] = util::timed([&]() { return cache_.get().saveToFile(*cacheFilePath_); });
success.has_value()) {
LOG(util::LogService::info()) << "Successfully saved ledger cache in " << durationMs << " ms";
} else {
LOG(util::LogService::error()) << "Error saving LedgerCache to file: " << success.error();
}
});
if (not isAsync_) {
waitToFinish();
}
}
void
LedgerCacheSaver::waitToFinish()
{
if (savingThread_.has_value() and savingThread_->joinable()) {
savingThread_->join();
}
savingThread_.reset();
}
} // namespace data

View File

@@ -1,94 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include "data/LedgerCacheInterface.hpp"
#include "util/config/ConfigDefinition.hpp"
#include <concepts>
#include <functional>
#include <optional>
#include <string>
#include <thread>
namespace data {
/**
* @brief A concept for a class that can save ledger cache asynchronously.
*
* This concept defines the interface requirements for any type that manages
* asynchronous saving of ledger cache to persistent storage.
*/
template <typename T>
concept SomeLedgerCacheSaver = requires(T a) {
{ a.save() } -> std::same_as<void>;
{ a.waitToFinish() } -> std::same_as<void>;
};
/**
* @brief Manages asynchronous saving of ledger cache to a file.
*
* This class provides functionality to save the ledger cache to a file in a separate thread,
* allowing the main application to continue without blocking. The file path is configured
* through the application's configuration system.
*/
class LedgerCacheSaver {
std::optional<std::string> cacheFilePath_;
std::reference_wrapper<LedgerCacheInterface const> cache_;
std::optional<std::thread> savingThread_;
bool isAsync_;
public:
/**
* @brief Constructs a LedgerCacheSaver instance.
*
* @param config The configuration object containing the cache file path setting
* @param cache Reference to the ledger cache interface to be saved
*/
LedgerCacheSaver(util::config::ClioConfigDefinition const& config, LedgerCacheInterface const& cache);
/**
* @brief Destructor that ensures the saving thread is properly joined.
*
* Waits for any ongoing save operation to complete before destruction.
*/
~LedgerCacheSaver();
/**
* @brief Initiates an asynchronous save operation of the ledger cache.
*
* Spawns a new thread that saves the ledger cache to the configured file path.
* If no file path is configured, the operation is skipped. Logs the progress
* and result of the save operation.
*/
void
save();
/**
* @brief Waits for the saving thread to complete.
*
* Blocks until the saving operation finishes if a thread is currently active.
* Safe to call multiple times or when no save operation is in progress.
*/
void
waitToFinish();
};
} // namespace data
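
Usage sketch, illustrative only; the saver reads the "cache.file.path" and "cache.file.async_save" config keys shown in the implementation above, and saveOnShutdown is a hypothetical helper:
#include "data/LedgerCacheSaver.hpp"
void
saveOnShutdown(util::config::ClioConfigDefinition const& config, data::LedgerCacheInterface const& cache)
{
    data::LedgerCacheSaver saver{config, cache};
    saver.save();          // spawns the saving thread, or blocks until done if async_save is false
    saver.waitToFinish();  // idempotent; the destructor also waits
}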

View File

@@ -247,9 +247,6 @@ struct MPTHoldersAndCursor {
struct LedgerRange {
std::uint32_t minSequence = 0;
std::uint32_t maxSequence = 0;
bool
operator==(LedgerRange const&) const = default;
};
/**

View File

@@ -1,58 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include "data/impl/InputFile.hpp"
#include <xrpl/basics/base_uint.h>
#include <cstddef>
#include <cstring>
#include <ios>
#include <iosfwd>
#include <string>
#include <utility>
namespace data::impl {
InputFile::InputFile(std::string const& path) : file_(path, std::ios::binary | std::ios::in)
{
}
bool
InputFile::isOpen() const
{
return file_.is_open();
}
bool
InputFile::readRaw(char* data, size_t size)
{
file_.read(data, size);
shasum_.update(data, size);
return not file_.fail();
}
ripple::uint256
InputFile::hash() const
{
auto sum = shasum_;
return std::move(sum).finalize();
}
} // namespace data::impl

View File

@@ -1,57 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include "util/Shasum.hpp"
#include <xrpl/basics/base_uint.h>
#include <cstddef>
#include <cstring>
#include <fstream>
#include <iosfwd>
#include <string>
namespace data::impl {
class InputFile {
std::ifstream file_;
util::Sha256sum shasum_;
public:
InputFile(std::string const& path);
bool
isOpen() const;
template <typename T>
bool
read(T& t)
{
return readRaw(reinterpret_cast<char*>(&t), sizeof(T));
}
bool
readRaw(char* data, size_t size);
ripple::uint256
hash() const;
};
} // namespace data::impl
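
Reading a fixed-size value and checking integrity with InputFile, as a sketch (assumes the header above is included; readVersion is a hypothetical helper):
#include <cstdint>
#include <string>
void
readVersion(std::string const& path)
{
    data::impl::InputFile file{path};
    if (not file.isOpen())
        return;
    std::uint32_t version{};
    if (not file.read(version))  // wraps readRaw(reinterpret_cast<char*>(&version), sizeof(version))
        return;
    // hash() returns the SHA-256 digest of everything read so far
    auto const digest = file.hash();
    (void)digest;
}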

View File

@@ -1,215 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include "data/impl/LedgerCacheFile.hpp"
#include "data/LedgerCache.hpp"
#include "data/Types.hpp"
#include <fmt/format.h>
#include <xrpl/basics/base_uint.h>
#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <exception>
#include <filesystem>
#include <string>
#include <utility>
namespace data::impl {
using Hash = ripple::uint256;
using Separator = std::array<char, 16>;
static constexpr Separator kSEPARATOR = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
namespace {
std::expected<std::pair<ripple::uint256, LedgerCache::CacheEntry>, std::string>
readCacheEntry(InputFile& file, size_t i)
{
ripple::uint256 key;
if (not file.readRaw(reinterpret_cast<char*>(key.data()), ripple::base_uint<256>::bytes)) {
return std::unexpected(fmt::format("Failed to read key at index {}", i));
}
uint32_t seq{};
if (not file.read(seq)) {
return std::unexpected(fmt::format("Failed to read sequence at index {}", i));
}
size_t blobSize{};
if (not file.read(blobSize)) {
return std::unexpected(fmt::format("Failed to read blob size at index {}", i));
}
Blob blob(blobSize);
if (not file.readRaw(reinterpret_cast<char*>(blob.data()), blobSize)) {
return std::unexpected(fmt::format("Failed to read blob data at index {}", i));
}
return std::make_pair(key, LedgerCache::CacheEntry{.seq = seq, .blob = std::move(blob)});
}
std::expected<void, std::string>
verifySeparator(Separator const& s)
{
if (not std::ranges::all_of(s, [](char c) { return c == 0; })) {
return std::unexpected{"Separator verification failed - data corruption detected"};
}
return {};
}
} // anonymous namespace
LedgerCacheFile::LedgerCacheFile(std::string path) : path_(std::move(path))
{
}
std::expected<void, std::string>
LedgerCacheFile::write(DataView dataView)
{
auto const newFilePath = fmt::format("{}.new", path_);
auto file = OutputFile{newFilePath};
if (not file.isOpen()) {
return std::unexpected{fmt::format("Couldn't open file: {}", newFilePath)};
}
Header const header{
.latestSeq = dataView.latestSeq, .mapSize = dataView.map.size(), .deletedSize = dataView.deleted.size()
};
file.write(header);
file.write(kSEPARATOR);
for (auto const& [k, v] : dataView.map) {
file.write(k.data(), decltype(k)::bytes);
file.write(v.seq);
file.write(v.blob.size());
file.writeRaw(reinterpret_cast<char const*>(v.blob.data()), v.blob.size());
}
file.write(kSEPARATOR);
for (auto const& [k, v] : dataView.deleted) {
file.write(k.data(), decltype(k)::bytes);
file.write(v.seq);
file.write(v.blob.size());
file.writeRaw(reinterpret_cast<char const*>(v.blob.data()), v.blob.size());
}
file.write(kSEPARATOR);
auto const hash = file.hash();
file.write(hash.data(), decltype(hash)::bytes);
// flush internal buffer explicitly before renaming
if (auto const expectedSuccess = file.close(); not expectedSuccess.has_value()) {
return expectedSuccess;
}
try {
std::filesystem::rename(newFilePath, path_);
} catch (std::exception const& e) {
return std::unexpected{fmt::format("Error moving cache file from {} to {}: {}", newFilePath, path_, e.what())};
}
return {};
}
std::expected<LedgerCacheFile::Data, std::string>
LedgerCacheFile::read(uint32_t minLatestSequence)
{
try {
auto file = InputFile{path_};
if (not file.isOpen()) {
return std::unexpected{fmt::format("Couldn't open file: {}", path_)};
}
Data result;
Header header{};
if (not file.read(header)) {
return std::unexpected{"Error reading cache header"};
}
if (header.version != kVERSION) {
return std::unexpected{
fmt::format("Cache has wrong version: expected {} found {}", kVERSION, header.version)
};
}
if (header.latestSeq < minLatestSequence) {
return std::unexpected{fmt::format("Latest sequence ({}) in the cache file is too low.", header.latestSeq)};
}
result.latestSeq = header.latestSeq;
Separator separator{};
if (not file.readRaw(separator.data(), separator.size())) {
return std::unexpected{"Error reading cache header"};
}
if (auto verificationResult = verifySeparator(separator); not verificationResult.has_value()) {
return std::unexpected{std::move(verificationResult).error()};
}
for (size_t i = 0; i < header.mapSize; ++i) {
auto cacheEntryExpected = readCacheEntry(file, i);
if (not cacheEntryExpected.has_value()) {
return std::unexpected{std::move(cacheEntryExpected).error()};
}
// Insert with end() as the hint: entries arrive in key order, so each insert is amortized O(1)
// instead of O(log N)
result.map.insert(result.map.end(), std::move(cacheEntryExpected).value());
}
if (not file.readRaw(separator.data(), separator.size())) {
return std::unexpected{"Error reading separator"};
}
if (auto verificationResult = verifySeparator(separator); not verificationResult.has_value()) {
return std::unexpected{std::move(verificationResult).error()};
}
for (size_t i = 0; i < header.deletedSize; ++i) {
auto cacheEntryExpected = readCacheEntry(file, i);
if (not cacheEntryExpected.has_value()) {
return std::unexpected{std::move(cacheEntryExpected).error()};
}
result.deleted.insert(result.deleted.end(), std::move(cacheEntryExpected).value());
}
if (not file.readRaw(separator.data(), separator.size())) {
return std::unexpected{"Error reading separator"};
}
if (auto verificationResult = verifySeparator(separator); not verificationResult.has_value()) {
return std::unexpected{std::move(verificationResult).error()};
}
auto const dataHash = file.hash();
ripple::uint256 hashFromFile{};
if (not file.readRaw(reinterpret_cast<char*>(hashFromFile.data()), decltype(hashFromFile)::bytes)) {
return std::unexpected{"Error reading hash"};
}
if (dataHash != hashFromFile) {
return std::unexpected{"Hash file corruption detected"};
}
return result;
} catch (std::exception const& e) {
return std::unexpected{fmt::format(" Error reading cache file: {}", e.what())};
} catch (...) {
return std::unexpected{fmt::format(" Error reading cache file")};
}
}
} // namespace data::impl
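
For reference, the on-disk layout produced by write() and consumed by read(), summarized from the code above (record field sizes assume a 64-bit build):
// [Header: version | latestSeq | mapSize | deletedSize]
// [16 zero bytes]                                   <- separator, doubles as a corruption check
// [mapSize entries:     key (32) | seq (4) | blob size (8) | blob bytes]
// [16 zero bytes]
// [deletedSize entries: same record format]
// [16 zero bytes]
// [SHA-256 over everything above]                   <- compared against file.hash() on read
//
// The file is first written as "<path>.new" and then renamed over the final
// path, so a crash mid-write never leaves a truncated cache file behind.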

Some files were not shown because too many files have changed in this diff.