Compare commits


1 commit

Author: mathbunnyru
SHA1: 429753b9bb
Message: style: Update pre-commit hooks
Date: 2025-10-01 01:00:15 +00:00
375 changed files with 10579 additions and 14746 deletions

@@ -49,7 +49,6 @@ IndentFunctionDeclarationAfterType: false
 IndentWidth: 4
 IndentWrappedFunctionNames: false
 IndentRequiresClause: true
-InsertNewlineAtEOF: true
 RequiresClausePosition: OwnLine
 KeepEmptyLinesAtTheStartOfBlocks: false
 MaxEmptyLinesToKeep: 1

@@ -54,7 +54,7 @@ format:
 _help_max_pargs_hwrap:
 - If a positional argument group contains more than this many
 - arguments, then force it to a vertical layout.
-max_pargs_hwrap: 5
+max_pargs_hwrap: 6
 _help_max_rows_cmdline:
 - If a cmdline positional group consumes more than this many
 - lines without nesting, then invalidate the layout (and nest)

@@ -1,31 +0,0 @@
-name: Build clio
-description: Build clio in build directory
-inputs:
-  targets:
-    description: Space-separated build target names
-    default: all
-  nproc_subtract:
-    description: The number of processors to subtract when calculating parallelism.
-    required: true
-    default: "0"
-runs:
-  using: composite
-  steps:
-    - name: Get number of processors
-      uses: XRPLF/actions/get-nproc@cf0433aa74563aead044a1e395610c96d65a37cf
-      id: nproc
-      with:
-        subtract: ${{ inputs.nproc_subtract }}
-    - name: Build targets
-      shell: bash
-      env:
-        CMAKE_TARGETS: ${{ inputs.targets }}
-      run: |
-        cd build
-        cmake \
-          --build . \
-          --parallel "${{ steps.nproc.outputs.nproc }}" \
-          --target ${CMAKE_TARGETS}

.github/actions/build_clio/action.yml

@@ -0,0 +1,29 @@
+name: Build clio
+description: Build clio in build directory
+inputs:
+  targets:
+    description: Space-separated build target names
+    default: all
+  subtract_threads:
+    description: An option for the action get_number_of_threads. See get_number_of_threads
+    required: true
+    default: "0"
+runs:
+  using: composite
+  steps:
+    - name: Get number of threads
+      uses: ./.github/actions/get_number_of_threads
+      id: number_of_threads
+      with:
+        subtract_threads: ${{ inputs.subtract_threads }}
+    - name: Build targets
+      shell: bash
+      run: |
+        cd build
+        cmake \
+          --build . \
+          --parallel "${{ steps.number_of_threads.outputs.threads_number }}" \
+          --target ${{ inputs.targets }}
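For orientation, a caller invokes this composite action from a checkout of the repository, the same way build_impl.yml does later in this compare; a minimal sketch, with an illustrative target list that is not taken from this diff:

    - name: Build Clio
      uses: ./.github/actions/build_clio
      with:
        targets: clio_server clio_tests
        subtract_threads: "2"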

@@ -34,25 +34,25 @@ runs:
   steps:
     - name: Login to DockerHub
       if: ${{ inputs.push_image == 'true' && inputs.dockerhub_repo != '' }}
-      uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
+      uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
       with:
         username: ${{ env.DOCKERHUB_USER }}
         password: ${{ env.DOCKERHUB_PW }}
     - name: Login to GitHub Container Registry
       if: ${{ inputs.push_image == 'true' }}
-      uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
+      uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
       with:
         registry: ghcr.io
         username: ${{ github.repository_owner }}
         password: ${{ env.GITHUB_TOKEN }}
-    - uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0
+    - uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
       with:
         cache-image: false
-    - uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
-    - uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0
+    - uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
+    - uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v5.8.0
       id: meta
       with:
         images: ${{ inputs.images }}

@@ -1,41 +0,0 @@
-name: Cache key
-description: Generate cache key for ccache
-inputs:
-  conan_profile:
-    description: Conan profile name
-    required: true
-  build_type:
-    description: Current build type (e.g. Release, Debug)
-    required: true
-    default: Release
-  code_coverage:
-    description: Whether code coverage is on
-    required: true
-    default: "false"
-outputs:
-  key:
-    description: Generated cache key for ccache
-    value: ${{ steps.key_without_commit.outputs.key }}-${{ steps.git_common_ancestor.outputs.commit }}
-  restore_keys:
-    description: Cache restore keys for fallback
-    value: ${{ steps.key_without_commit.outputs.key }}
-runs:
-  using: composite
-  steps:
-    - name: Find common commit
-      id: git_common_ancestor
-      uses: ./.github/actions/git-common-ancestor
-    - name: Set cache key without commit
-      id: key_without_commit
-      shell: bash
-      env:
-        RUNNER_OS: ${{ runner.os }}
-        BUILD_TYPE: ${{ inputs.build_type }}
-        CODE_COVERAGE: ${{ inputs.code_coverage == 'true' && '-code_coverage' || '' }}
-        CONAN_PROFILE: ${{ inputs.conan_profile }}
-      run: |
-        echo "key=clio-ccache-${RUNNER_OS}-${BUILD_TYPE}${CODE_COVERAGE}-${CONAN_PROFILE}-develop" >> "${GITHUB_OUTPUT}"

@@ -37,10 +37,6 @@ inputs:
     description: Whether to generate Debian package
     required: true
     default: "false"
-  version:
-    description: Version of the clio_server binary
-    required: false
-    default: ""
 runs:
   using: composite
@@ -48,7 +44,6 @@ runs:
     - name: Run cmake
       shell: bash
       env:
-        BUILD_DIR: "${{ inputs.build_dir }}"
         BUILD_TYPE: "${{ inputs.build_type }}"
         SANITIZER_OPTION: |-
           ${{ endsWith(inputs.conan_profile, '.asan') && '-Dsan=address' ||
@@ -61,22 +56,9 @@ runs:
         STATIC: "${{ inputs.static == 'true' && 'ON' || 'OFF' }}"
         TIME_TRACE: "${{ inputs.time_trace == 'true' && 'ON' || 'OFF' }}"
         PACKAGE: "${{ inputs.package == 'true' && 'ON' || 'OFF' }}"
-        # GitHub creates a merge commit for a PR
-        # https://www.kenmuse.com/blog/the-many-shas-of-a-github-pull-request/
-        #
-        # We:
-        # - explicitly provide branch name
-        # - use `github.head_ref` to get the SHA of last commit in the PR branch
-        #
-        # This way it works both for PRs and pushes to branches.
-        GITHUB_BRANCH_NAME: "${{ github.head_ref || github.ref_name }}"
-        GITHUB_HEAD_SHA: "${{ github.event.pull_request.head.sha || github.sha }}"
-        #
-        # If tag is being pushed, or it's a nightly release, we use that version.
-        FORCE_CLIO_VERSION: ${{ inputs.version }}
       run: |
         cmake \
-          -B "${BUILD_DIR}" \
+          -B ${{inputs.build_dir}} \
           -S . \
           -G Ninja \
           -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \

@@ -24,7 +24,7 @@ runs:
           -j8 --exclude-throw-branches
     - name: Archive coverage report
-      uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
+      uses: actions/upload-artifact@v4
       with:
         name: coverage-report.xml
         path: build/coverage_report.xml

@@ -21,17 +21,18 @@ inputs:
 runs:
   using: composite
   steps:
+    - name: Create build directory
+      shell: bash
+      run: mkdir -p "${{ inputs.build_dir }}"
     - name: Run conan
       shell: bash
       env:
-        BUILD_DIR: "${{ inputs.build_dir }}"
         CONAN_BUILD_OPTION: "${{ inputs.force_conan_source_build == 'true' && '*' || 'missing' }}"
-        BUILD_TYPE: "${{ inputs.build_type }}"
-        CONAN_PROFILE: "${{ inputs.conan_profile }}"
       run: |
         conan \
           install . \
-          -of "${BUILD_DIR}" \
-          -b "${CONAN_BUILD_OPTION}" \
-          -s "build_type=${BUILD_TYPE}" \
-          --profile:all "${CONAN_PROFILE}"
+          -of build \
+          -b "$CONAN_BUILD_OPTION" \
+          -s "build_type=${{ inputs.build_type }}" \
+          --profile:all "${{ inputs.conan_profile }}"
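Substituting typical values (build_type Release, profile gcc, force_conan_source_build false), the right-hand invocation reduces to roughly:

    conan install . -of build -b missing -s build_type=Release --profile:all gcc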

@@ -28,17 +28,12 @@ runs:
     - name: Create an issue
       id: create_issue
       shell: bash
-      env:
-        ISSUE_BODY: ${{ inputs.body }}
-        ISSUE_ASSIGNEES: ${{ inputs.assignees }}
-        ISSUE_LABELS: ${{ inputs.labels }}
-        ISSUE_TITLE: ${{ inputs.title }}
       run: |
-        echo -e "${ISSUE_BODY}" > issue.md
+        echo -e '${{ inputs.body }}' > issue.md
         gh issue create \
-          --assignee "${ISSUE_ASSIGNEES}" \
-          --label "${ISSUE_LABELS}" \
-          --title "${ISSUE_TITLE}" \
+          --assignee '${{ inputs.assignees }}' \
+          --label '${{ inputs.labels }}' \
+          --title '${{ inputs.title }}' \
           --body-file ./issue.md \
           > create_issue.log
         created_issue="$(sed 's|.*/||' create_issue.log)"

@@ -0,0 +1,36 @@
+name: Get number of threads
+description: Determines number of threads to use on macOS and Linux
+inputs:
+  subtract_threads:
+    description: How many threads to subtract from the calculated number
+    required: true
+    default: "0"
+outputs:
+  threads_number:
+    description: Number of threads to use
+    value: ${{ steps.number_of_threads_export.outputs.num }}
+runs:
+  using: composite
+  steps:
+    - name: Get number of threads on mac
+      id: mac_threads
+      if: ${{ runner.os == 'macOS' }}
+      shell: bash
+      run: echo "num=$(($(sysctl -n hw.logicalcpu) - 2))" >> $GITHUB_OUTPUT
+    - name: Get number of threads on Linux
+      id: linux_threads
+      if: ${{ runner.os == 'Linux' }}
+      shell: bash
+      run: echo "num=$(($(nproc) - 2))" >> $GITHUB_OUTPUT
+    - name: Shift and export number of threads
+      id: number_of_threads_export
+      shell: bash
+      run: |
+        num_of_threads="${{ steps.mac_threads.outputs.num || steps.linux_threads.outputs.num }}"
+        shift_by="${{ inputs.subtract_threads }}"
+        shifted="$((num_of_threads - shift_by))"
+        echo "num=$(( shifted > 1 ? shifted : 1 ))" >> $GITHUB_OUTPUT

@@ -0,0 +1,38 @@
+name: Restore cache
+description: Find and restores ccache cache
+inputs:
+  conan_profile:
+    description: Conan profile name
+    required: true
+  ccache_dir:
+    description: Path to .ccache directory
+    required: true
+  build_type:
+    description: Current build type (e.g. Release, Debug)
+    required: true
+    default: Release
+  code_coverage:
+    description: Whether code coverage is on
+    required: true
+    default: "false"
+outputs:
+  ccache_cache_hit:
+    description: True if ccache cache has been downloaded
+    value: ${{ steps.ccache_cache.outputs.cache-hit }}
+runs:
+  using: composite
+  steps:
+    - name: Find common commit
+      id: git_common_ancestor
+      uses: ./.github/actions/git_common_ancestor
+    - name: Restore ccache cache
+      uses: actions/cache/restore@v4
+      id: ccache_cache
+      if: ${{ env.CCACHE_DISABLE != '1' }}
+      with:
+        path: ${{ inputs.ccache_dir }}
+        key: clio-ccache-${{ runner.os }}-${{ inputs.build_type }}${{ inputs.code_coverage == 'true' && '-code_coverage' || '' }}-${{ inputs.conan_profile }}-develop-${{ steps.git_common_ancestor.outputs.commit }}
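With the same illustrative inputs as before (Linux, Release, gcc, coverage off), the composed key looks like:

    clio-ccache-Linux-Release-gcc-develop-<ancestor_commit>

Unlike the pinned actions/cache/restore step with restore-keys that appears on the removed side of build_impl.yml below, this lookup supplies only an exact key, so a cache hit requires the same common-ancestor commit.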

.github/actions/save_cache/action.yml

@@ -0,0 +1,38 @@
+name: Save cache
+description: Save ccache cache for develop branch
+inputs:
+  conan_profile:
+    description: Conan profile name
+    required: true
+  ccache_dir:
+    description: Path to .ccache directory
+    required: true
+  build_type:
+    description: Current build type (e.g. Release, Debug)
+    required: true
+    default: Release
+  code_coverage:
+    description: Whether code coverage is on
+    required: true
+    default: "false"
+  ccache_cache_hit:
+    description: Whether ccache cache has been downloaded
+    required: true
+  ccache_cache_miss_rate:
+    description: How many ccache cache misses happened
+runs:
+  using: composite
+  steps:
+    - name: Find common commit
+      id: git_common_ancestor
+      uses: ./.github/actions/git_common_ancestor
+    - name: Save ccache cache
+      if: ${{ inputs.ccache_cache_hit != 'true' || inputs.ccache_cache_miss_rate == '100.0' }}
+      uses: actions/cache/save@v4
+      with:
+        path: ${{ inputs.ccache_dir }}
+        key: clio-ccache-${{ runner.os }}-${{ inputs.build_type }}${{ inputs.code_coverage == 'true' && '-code_coverage' || '' }}-${{ inputs.conan_profile }}-develop-${{ steps.git_common_ancestor.outputs.commit }}

@@ -14,7 +14,7 @@ updates:
     target-branch: develop
   - package-ecosystem: github-actions
-    directory: .github/actions/build-clio/
+    directory: .github/actions/build_clio/
     schedule:
       interval: weekly
       day: monday
@@ -27,7 +27,7 @@ updates:
     target-branch: develop
   - package-ecosystem: github-actions
-    directory: .github/actions/build-docker-image/
+    directory: .github/actions/build_docker_image/
     schedule:
       interval: weekly
       day: monday
@@ -53,7 +53,7 @@ updates:
     target-branch: develop
   - package-ecosystem: github-actions
-    directory: .github/actions/code-coverage/
+    directory: .github/actions/code_coverage/
     schedule:
       interval: weekly
       day: monday
@@ -79,7 +79,7 @@ updates:
     target-branch: develop
   - package-ecosystem: github-actions
-    directory: .github/actions/create-issue/
+    directory: .github/actions/create_issue/
     schedule:
       interval: weekly
       day: monday
@@ -92,7 +92,7 @@ updates:
     target-branch: develop
   - package-ecosystem: github-actions
-    directory: .github/actions/git-common-ancestor/
+    directory: .github/actions/get_number_of_threads/
     schedule:
       interval: weekly
       day: monday
@@ -105,7 +105,33 @@ updates:
     target-branch: develop
   - package-ecosystem: github-actions
-    directory: .github/actions/cache-key/
+    directory: .github/actions/git_common_ancestor/
+    schedule:
+      interval: weekly
+      day: monday
+      time: "04:00"
+      timezone: Etc/GMT
+    reviewers:
+      - XRPLF/clio-dev-team
+    commit-message:
+      prefix: "ci: [DEPENDABOT] "
+    target-branch: develop
+  - package-ecosystem: github-actions
+    directory: .github/actions/restore_cache/
+    schedule:
+      interval: weekly
+      day: monday
+      time: "04:00"
+      timezone: Etc/GMT
+    reviewers:
+      - XRPLF/clio-dev-team
+    commit-message:
+      prefix: "ci: [DEPENDABOT] "
+    target-branch: develop
+  - package-ecosystem: github-actions
+    directory: .github/actions/save_cache/
     schedule:
       interval: weekly
       day: monday

@@ -4,7 +4,7 @@ build_type=Release
 compiler=apple-clang
 compiler.cppstd=20
 compiler.libcxx=libc++
-compiler.version=17.0
+compiler.version=17
 os=Macos
 [conf]

@@ -3,9 +3,7 @@ import itertools
 import json
 LINUX_OS = ["heavy", "heavy-arm64"]
-LINUX_CONTAINERS = [
-    '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }'
-]
+LINUX_CONTAINERS = ['{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }']
 LINUX_COMPILERS = ["gcc", "clang"]
 MACOS_OS = ["macos15"]

@@ -40,9 +40,9 @@ mkdir -p "$PROFILES_DIR"
 if [[ "$(uname)" == "Darwin" ]]; then
     create_profile_with_sanitizers "apple-clang" "$APPLE_CLANG_PROFILE"
-    echo "include(apple-clang)" >"$PROFILES_DIR/default"
+    echo "include(apple-clang)" > "$PROFILES_DIR/default"
 else
     create_profile_with_sanitizers "clang" "$CLANG_PROFILE"
     create_profile_with_sanitizers "gcc" "$GCC_PROFILE"
-    echo "include(gcc)" >"$PROFILES_DIR/default"
+    echo "include(gcc)" > "$PROFILES_DIR/default"
 fi

@@ -1,25 +0,0 @@
-#!/usr/bin/env bash
-set -ex
-TEMP_DIR=$(mktemp -d)
-trap "rm -rf $TEMP_DIR" EXIT
-echo "Using temporary CONAN_HOME: $TEMP_DIR"
-# We use a temporary Conan home to avoid polluting the user's existing Conan
-# configuration and to not use local cache (which leads to non-reproducible lockfiles).
-export CONAN_HOME="$TEMP_DIR"
-# Ensure that the xrplf remote is the first to be consulted, so any recipes we
-# patched are used. We also add it there to not created huge diff when the
-# official Conan Center Index is updated.
-conan remote add --force --index 0 xrplf https://conan.ripplex.io
-# Delete any existing lockfile.
-rm -f conan.lock
-# Create a new lockfile that is compatible with macOS.
-# It should also work on Linux.
-conan lock create . \
-    --profile:all=.github/scripts/conan/apple-clang-17.profile

@@ -22,8 +22,8 @@ fi
 TEST_BINARY=$1
 if [[ ! -f "$TEST_BINARY" ]]; then
     echo "Test binary not found: $TEST_BINARY"
     exit 1
 fi
 TESTS=$($TEST_BINARY --gtest_list_tests | awk '/^ / {print suite $1} !/^ / {suite=$1}')
@@ -31,16 +31,15 @@ TESTS=$($TEST_BINARY --gtest_list_tests | awk '/^ / {print suite $1} !/^ / {suite=$1}')
 OUTPUT_DIR="./.sanitizer-report"
 mkdir -p "$OUTPUT_DIR"
-export TSAN_OPTIONS="die_after_fork=0"
-export MallocNanoZone='0' # for MacOSX
 for TEST in $TESTS; do
-    OUTPUT_FILE="$OUTPUT_DIR/${TEST//\//_}.log"
-    $TEST_BINARY --gtest_filter="$TEST" >"$OUTPUT_FILE" 2>&1
+    OUTPUT_FILE="$OUTPUT_DIR/${TEST//\//_}"
+    export TSAN_OPTIONS="log_path=\"$OUTPUT_FILE\" die_after_fork=0"
+    export ASAN_OPTIONS="log_path=\"$OUTPUT_FILE\""
+    export UBSAN_OPTIONS="log_path=\"$OUTPUT_FILE\""
+    export MallocNanoZone='0' # for MacOSX
+    $TEST_BINARY --gtest_filter="$TEST" > /dev/null 2>&1
     if [ $? -ne 0 ]; then
         echo "'$TEST' failed a sanitizer check."
-    else
-        rm "$OUTPUT_FILE"
     fi
 done
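The rewritten loop relies on the sanitizers' log_path option; as far as I know, the ASan/TSan/UBSan runtimes append the reporting process's PID to that path, so a failing test leaves a report file like (name and PID illustrative):

    .sanitizer-report/SuiteName_TestName.12345

The removed variant instead captured each test's full stdout/stderr into a .log file and deleted it when the test passed.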

@@ -20,5 +20,5 @@ for artifact_name in $(ls); do
     rm "${artifact_name}/${BINARY_NAME}"
     rm -r "${artifact_name}"
-    sha256sum "./${artifact_name}.zip" >"./${artifact_name}.zip.sha256sum"
+    sha256sum "./${artifact_name}.zip" > "./${artifact_name}.zip.sha256sum"
 done

@@ -8,14 +8,14 @@ on:
     paths:
       - .github/workflows/build.yml
-      - .github/workflows/reusable-build-test.yml
-      - .github/workflows/reusable-build.yml
-      - .github/workflows/reusable-test.yml
-      - .github/workflows/reusable-upload-coverage-report.yml
+      - .github/workflows/build_and_test.yml
+      - .github/workflows/build_impl.yml
+      - .github/workflows/test_impl.yml
+      - .github/workflows/upload_coverage_report.yml
       - ".github/actions/**"
-      - "!.github/actions/build-docker-image/**"
-      - "!.github/actions/create-issue/**"
+      - "!.github/actions/build_docker_image/**"
+      - "!.github/actions/create_issue/**"
       - CMakeLists.txt
       - conanfile.py
@@ -23,7 +23,6 @@ on:
       - "cmake/**"
      - "src/**"
       - "tests/**"
-      - "benchmarks/**"
       - docs/config-description.md
   workflow_dispatch:
@@ -34,10 +33,6 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}-${{ github.ref == 'refs/heads/develop' && github.run_number || 'branch' }}
   cancel-in-progress: true
-defaults:
-  run:
-    shell: bash
 jobs:
   build-and-test:
     name: Build and Test
@@ -50,7 +45,7 @@ jobs:
         build_type: [Release, Debug]
         container:
           [
-            '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }',
+            '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }',
           ]
         static: [true]
@@ -61,7 +56,7 @@ jobs:
           container: ""
           static: false
-    uses: ./.github/workflows/reusable-build-test.yml
+    uses: ./.github/workflows/build_and_test.yml
     with:
       runs_on: ${{ matrix.os }}
       container: ${{ matrix.container }}
@@ -77,14 +72,14 @@ jobs:
   code_coverage:
     name: Run Code Coverage
-    uses: ./.github/workflows/reusable-build.yml
+    uses: ./.github/workflows/build_impl.yml
     with:
       runs_on: heavy
-      container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }'
+      container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
       conan_profile: gcc
       build_type: Debug
       download_ccache: true
-      upload_ccache: true
+      upload_ccache: false
       code_coverage: true
       static: true
       upload_clio_server: false
@@ -93,21 +88,40 @@ jobs:
     secrets:
       CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
+  package:
+    name: Build packages
+    uses: ./.github/workflows/build_impl.yml
+    with:
+      runs_on: heavy
+      container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
+      conan_profile: gcc
+      build_type: Release
+      download_ccache: true
+      upload_ccache: false
+      code_coverage: false
+      static: true
+      upload_clio_server: false
+      package: true
+      targets: package
+      analyze_build_time: false
   check_config:
     name: Check Config Description
     needs: build-and-test
     runs-on: heavy
     container:
-      image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696
+      image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
     steps:
-      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
-      - uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
+      - uses: actions/checkout@v4
+      - uses: actions/download-artifact@v5
         with:
           name: clio_server_Linux_Release_gcc
       - name: Compare Config Description
+        shell: bash
         run: |
           repoConfigFile=docs/config-description.md
           configDescriptionFile=config_description_new.md

@@ -63,21 +63,21 @@ on:
       type: string
       default: all
+    expected_version:
+      description: Expected version of the clio_server binary
+      required: false
+      type: string
+      default: ""
     package:
       description: Whether to generate Debian package
       required: false
       type: boolean
       default: false
-    version:
-      description: Version of the clio_server binary
-      required: false
-      type: string
-      default: ""
 jobs:
   build:
-    uses: ./.github/workflows/reusable-build.yml
+    uses: ./.github/workflows/build_impl.yml
     with:
       runs_on: ${{ inputs.runs_on }}
       container: ${{ inputs.container }}
@@ -90,12 +90,12 @@ jobs:
       upload_clio_server: ${{ inputs.upload_clio_server }}
       targets: ${{ inputs.targets }}
       analyze_build_time: false
+      expected_version: ${{ inputs.expected_version }}
       package: ${{ inputs.package }}
-      version: ${{ inputs.version }}
   test:
     needs: build
-    uses: ./.github/workflows/reusable-test.yml
+    uses: ./.github/workflows/test_impl.yml
     with:
       runs_on: ${{ inputs.runs_on }}
       container: ${{ inputs.container }}

@@ -38,37 +38,32 @@ on:
       description: Whether to strip clio binary
       default: true
-defaults:
-  run:
-    shell: bash
 jobs:
   build_and_publish_image:
     name: Build and publish image
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+      - uses: actions/checkout@v4
       - name: Download Clio binary from artifact
         if: ${{ inputs.artifact_name != null }}
-        uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
+        uses: actions/download-artifact@v5
         with:
           name: ${{ inputs.artifact_name }}
           path: ./docker/clio/artifact/
       - name: Download Clio binary from url
         if: ${{ inputs.clio_server_binary_url != null }}
-        env:
-          BINARY_URL: ${{ inputs.clio_server_binary_url }}
-          BINARY_SHA256: ${{ inputs.binary_sha256 }}
+        shell: bash
         run: |
-          wget "${BINARY_URL}" -P ./docker/clio/artifact/
-          if [ "$(sha256sum ./docker/clio/clio_server | awk '{print $1}')" != "${BINARY_SHA256}" ]; then
+          wget "${{inputs.clio_server_binary_url}}" -P ./docker/clio/artifact/
+          if [ "$(sha256sum ./docker/clio/clio_server | awk '{print $1}')" != "${{inputs.binary_sha256}}" ]; then
             echo "Binary sha256 sum doesn't match"
             exit 1
           fi
       - name: Unpack binary
+        shell: bash
         run: |
           sudo apt update && sudo apt install -y tar unzip
           cd docker/clio/artifact
@@ -85,6 +80,7 @@ jobs:
       - name: Strip binary
         if: ${{ inputs.strip_binary }}
+        shell: bash
         run: strip ./docker/clio/clio_server
       - name: Set GHCR_REPO
@@ -93,7 +89,7 @@ jobs:
           echo "GHCR_REPO=$(echo ghcr.io/${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> ${GITHUB_OUTPUT}
       - name: Build Docker image
-        uses: ./.github/actions/build-docker-image
+        uses: ./.github/actions/build_docker_image
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}

@@ -60,25 +60,21 @@ on:
       required: true
       type: boolean
+    expected_version:
+      description: Expected version of the clio_server binary
+      required: false
+      type: string
+      default: ""
     package:
       description: Whether to generate Debian package
       required: false
       type: boolean
-    version:
-      description: Version of the clio_server binary
-      required: false
-      type: string
-      default: ""
   secrets:
     CODECOV_TOKEN:
       required: false
-defaults:
-  run:
-    shell: bash
 jobs:
   build:
     name: Build
@@ -88,38 +84,36 @@ jobs:
     steps:
       - name: Cleanup workspace
         if: ${{ runner.os == 'macOS' }}
-        uses: XRPLF/actions/cleanup-workspace@cf0433aa74563aead044a1e395610c96d65a37cf
+        uses: XRPLF/actions/.github/actions/cleanup-workspace@ea9970b7c211b18f4c8bcdb28c29f5711752029f
-      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0
-          # We need to fetch tags to have correct version in the release
-          # The workaround is based on https://github.com/actions/checkout/issues/1467
-          fetch-tags: true
-          ref: ${{ github.ref }}
       - name: Prepare runner
-        uses: XRPLF/actions/prepare-runner@e8d2d2a546a03e1d161dca52890705f3bc641215
+        uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
         with:
-          enable_ccache: ${{ inputs.download_ccache }}
+          disable_ccache: ${{ !inputs.download_ccache }}
       - name: Setup conan on macOS
         if: ${{ runner.os == 'macOS' }}
+        shell: bash
         run: ./.github/scripts/conan/init.sh
-      - name: Generate cache key
-        uses: ./.github/actions/cache-key
-        id: cache_key
+      - name: Restore cache
+        if: ${{ inputs.download_ccache }}
+        uses: ./.github/actions/restore_cache
+        id: restore_cache
         with:
           conan_profile: ${{ inputs.conan_profile }}
+          ccache_dir: ${{ env.CCACHE_DIR }}
           build_type: ${{ inputs.build_type }}
           code_coverage: ${{ inputs.code_coverage }}
-      - name: Restore ccache cache
-        if: ${{ inputs.download_ccache && github.ref != 'refs/heads/develop' }}
-        uses: actions/cache/restore@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
-        with:
-          path: ${{ env.CCACHE_DIR }}
-          key: ${{ steps.cache_key.outputs.key }}
-          restore-keys: |
-            ${{ steps.cache_key.outputs.restore_keys }}
       - name: Run conan
         uses: ./.github/actions/conan
         with:
@@ -135,10 +129,9 @@ jobs:
           static: ${{ inputs.static }}
           time_trace: ${{ inputs.analyze_build_time }}
           package: ${{ inputs.package }}
-          version: ${{ inputs.version }}
       - name: Build Clio
-        uses: ./.github/actions/build-clio
+        uses: ./.github/actions/build_clio
         with:
           targets: ${{ inputs.targets }}
@@ -148,26 +141,24 @@ jobs:
           ClangBuildAnalyzer --all build/ build_time_report.bin
           ClangBuildAnalyzer --analyze build_time_report.bin > build_time_report.txt
           cat build_time_report.txt
+        shell: bash
       - name: Upload build time analyze report
         if: ${{ inputs.analyze_build_time }}
-        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
+        uses: actions/upload-artifact@v4
         with:
           name: build_time_report_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
           path: build_time_report.txt
-      - name: Show ccache's statistics and zero it
+      - name: Show ccache's statistics
         if: ${{ inputs.download_ccache }}
+        shell: bash
+        id: ccache_stats
         run: |
-          ccache --show-stats -vv
-          ccache --zero-stats
+          ccache -s > /tmp/ccache.stats
+          miss_rate=$(cat /tmp/ccache.stats | grep 'Misses' | head -n1 | sed 's/.*(\(.*\)%).*/\1/')
+          echo "miss_rate=${miss_rate}" >> $GITHUB_OUTPUT
+          cat /tmp/ccache.stats
-      - name: Save ccache cache
-        if: ${{ inputs.upload_ccache && github.ref == 'refs/heads/develop' }}
-        uses: actions/cache/save@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
-        with:
-          path: ${{ env.CCACHE_DIR }}
-          key: ${{ steps.cache_key.outputs.key }}
       - name: Strip unit_tests
         if: ${{ !endsWith(inputs.conan_profile, 'san') && !inputs.code_coverage && !inputs.analyze_build_time }}
@@ -179,32 +170,44 @@ jobs:
       - name: Upload clio_server
         if: ${{ inputs.upload_clio_server && !inputs.code_coverage && !inputs.analyze_build_time }}
-        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
+        uses: actions/upload-artifact@v4
         with:
           name: clio_server_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
           path: build/clio_server
       - name: Upload clio_tests
         if: ${{ !inputs.code_coverage && !inputs.analyze_build_time && !inputs.package }}
-        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
+        uses: actions/upload-artifact@v4
         with:
           name: clio_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
           path: build/clio_tests
       - name: Upload clio_integration_tests
         if: ${{ !inputs.code_coverage && !inputs.analyze_build_time && !inputs.package }}
-        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
+        uses: actions/upload-artifact@v4
         with:
           name: clio_integration_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
           path: build/clio_integration_tests
       - name: Upload Clio Linux package
         if: ${{ inputs.package }}
-        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
+        uses: actions/upload-artifact@v4
         with:
           name: clio_deb_package_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
           path: build/*.deb
+      - name: Save cache
+        if: ${{ inputs.upload_ccache && github.ref == 'refs/heads/develop' }}
+        uses: ./.github/actions/save_cache
+        with:
+          conan_profile: ${{ inputs.conan_profile }}
+          ccache_dir: ${{ env.CCACHE_DIR }}
+          build_type: ${{ inputs.build_type }}
+          code_coverage: ${{ inputs.code_coverage }}
+          ccache_cache_hit: ${{ steps.restore_cache.outputs.ccache_cache_hit }}
+          ccache_cache_miss_rate: ${{ steps.ccache_stats.outputs.miss_rate }}
       # This is run as part of the build job, because it requires the following:
       # - source code
       # - conan packages
@@ -213,23 +216,17 @@ jobs:
       # It's all available in the build job, but not in the test job
       - name: Run code coverage
         if: ${{ inputs.code_coverage }}
-        uses: ./.github/actions/code-coverage
+        uses: ./.github/actions/code_coverage
-      - name: Verify version is expected
-        if: ${{ inputs.version != '' }}
-        env:
-          INPUT_VERSION: ${{ inputs.version }}
-          BUILD_TYPE: ${{ inputs.build_type }}
+      - name: Verify expected version
+        if: ${{ inputs.expected_version != '' }}
+        shell: bash
         run: |
           set -e
-          EXPECTED_VERSION="clio-${INPUT_VERSION}"
-          if [[ "${BUILD_TYPE}" == "Debug" ]]; then
-            EXPECTED_VERSION="${EXPECTED_VERSION}+DEBUG"
-          fi
-          actual_version=$(./build/clio_server --version | head -n 1)
-          if [[ "${actual_version}" != "${EXPECTED_VERSION}" ]]; then
-            echo "Expected version '${EXPECTED_VERSION}', but got '${actual_version}'"
+          EXPECTED_VERSION="clio-${{ inputs.expected_version }}"
+          actual_version=$(./build/clio_server --version)
+          if [[ "$actual_version" != "$EXPECTED_VERSION" ]]; then
+            echo "Expected version '$EXPECTED_VERSION', but got '$actual_version'"
             exit 1
           fi
@@ -241,6 +238,6 @@ jobs:
     if: ${{ inputs.code_coverage }}
     name: Codecov
     needs: build
-    uses: ./.github/workflows/reusable-upload-coverage-report.yml
+    uses: ./.github/workflows/upload_coverage_report.yml
     secrets:
       CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
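For comparison of the two version checks above: the added (right-hand) variant compares the whole `./build/clio_server --version` output with clio-<expected_version>, while the removed (left-hand) variant also appended +DEBUG for Debug builds and trimmed the output to its first line, e.g. (version string illustrative):

    $ ./build/clio_server --version
    clio-2.5.0+DEBUG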

@@ -12,33 +12,31 @@ concurrency:
 env:
   CONAN_PROFILE: gcc
-defaults:
-  run:
-    shell: bash
 jobs:
   build:
     name: Build Clio / `libXRPL ${{ github.event.client_payload.version }}`
     runs-on: heavy
     container:
-      image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696
+      image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
     steps:
-      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0
       - name: Prepare runner
-        uses: XRPLF/actions/prepare-runner@e8d2d2a546a03e1d161dca52890705f3bc641215
+        uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
         with:
-          enable_ccache: false
+          disable_ccache: true
       - name: Update libXRPL version requirement
+        shell: bash
         run: |
           sed -i.bak -E "s|'xrpl/[a-zA-Z0-9\\.\\-]+'|'xrpl/${{ github.event.client_payload.conan_ref }}'|g" conanfile.py
           rm -f conanfile.py.bak
       - name: Update conan lockfile
+        shell: bash
         run: |
           conan lock create . --profile:all ${{ env.CONAN_PROFILE }}
@@ -53,13 +51,13 @@ jobs:
           conan_profile: ${{ env.CONAN_PROFILE }}
       - name: Build Clio
-        uses: ./.github/actions/build-clio
+        uses: ./.github/actions/build_clio
       - name: Strip tests
         run: strip build/clio_tests
       - name: Upload clio_tests
-        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
+        uses: actions/upload-artifact@v4
         with:
           name: clio_tests_check_libxrpl
           path: build/clio_tests
@@ -69,10 +67,10 @@ jobs:
     needs: build
     runs-on: heavy
     container:
-      image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696
+      image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
     steps:
-      - uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
+      - uses: actions/download-artifact@v5
         with:
           name: clio_tests_check_libxrpl
@@ -92,10 +90,10 @@ jobs:
       issues: write
     steps:
-      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+      - uses: actions/checkout@v4
       - name: Create an issue
-        uses: ./.github/actions/create-issue
+        uses: ./.github/actions/create_issue
         env:
           GH_TOKEN: ${{ github.token }}
         with:

@@ -5,26 +5,13 @@ on:
     types: [opened, edited, reopened, synchronize]
     branches: [develop]
-defaults:
-  run:
-    shell: bash
 jobs:
   check_title:
     runs-on: ubuntu-latest
     steps:
-      - uses: ytanikin/pr-conventional-commits@fda730cb152c05a849d6d84325e50c6182d9d1e9 # 1.5.1
+      - uses: ytanikin/pr-conventional-commits@b72758283dcbee706975950e96bc4bf323a8d8c0 # v1.4.2
         with:
           task_types: '["build","feat","fix","docs","test","ci","style","refactor","perf","chore"]'
           add_label: false
           custom_labels: '{"build":"build", "feat":"enhancement", "fix":"bug", "docs":"documentation", "test":"testability", "ci":"ci", "style":"refactoring", "refactor":"refactoring", "perf":"performance", "chore":"tooling"}'
-      - name: Check if message starts with upper-case letter
-        env:
-          PR_TITLE: ${{ github.event.pull_request.title }}
-        run: |
-          if [[ ! "${PR_TITLE}" =~ ^[a-z]+:\ [\[A-Z] ]]; then
-            echo "Error: PR title must start with an upper-case letter."
-            exit 1
-          fi

@@ -22,16 +22,12 @@ env:
   CONAN_PROFILE: clang
   LLVM_TOOLS_VERSION: 20
-defaults:
-  run:
-    shell: bash
 jobs:
   clang_tidy:
     if: github.event_name != 'push' || contains(github.event.head_commit.message, 'clang-tidy auto fixes')
     runs-on: heavy
     container:
-      image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696
+      image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
     permissions:
       contents: write
@@ -39,14 +35,21 @@ jobs:
       pull-requests: write
     steps:
-      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0
       - name: Prepare runner
-        uses: XRPLF/actions/prepare-runner@e8d2d2a546a03e1d161dca52890705f3bc641215
+        uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
         with:
-          enable_ccache: false
+          disable_ccache: true
+      - name: Restore cache
+        uses: ./.github/actions/restore_cache
+        id: restore_cache
+        with:
+          conan_profile: ${{ env.CONAN_PROFILE }}
+          ccache_dir: ${{ env.CCACHE_DIR }}
       - name: Run conan
         uses: ./.github/actions/conan
@@ -58,36 +61,36 @@ jobs:
         with:
           conan_profile: ${{ env.CONAN_PROFILE }}
-      - name: Get number of processors
-        uses: XRPLF/actions/get-nproc@cf0433aa74563aead044a1e395610c96d65a37cf
-        id: nproc
+      - name: Get number of threads
+        uses: ./.github/actions/get_number_of_threads
+        id: number_of_threads
-      - name: Run clang-tidy (several times)
+      - name: Run clang-tidy
         continue-on-error: true
-        id: clang_tidy
+        shell: bash
+        id: run_clang_tidy
         run: |
-          # We run clang-tidy several times, because some fixes may enable new fixes in subsequent runs.
-          CLANG_TIDY_COMMAND="run-clang-tidy-${{ env.LLVM_TOOLS_VERSION }} -p build -j ${{ steps.nproc.outputs.nproc }} -fix -quiet"
-          ${CLANG_TIDY_COMMAND} ||
-          ${CLANG_TIDY_COMMAND} ||
-          ${CLANG_TIDY_COMMAND}
-      - name: Check for changes
-        id: files_changed
-        continue-on-error: true
-        run: |
-          git diff --exit-code
+          run-clang-tidy-${{ env.LLVM_TOOLS_VERSION }} -p build -j "${{ steps.number_of_threads.outputs.threads_number }}" -fix -quiet 1>output.txt
       - name: Fix local includes and clang-format style
-        if: ${{ steps.files_changed.outcome != 'success' }}
+        if: ${{ steps.run_clang_tidy.outcome != 'success' }}
+        shell: bash
         run: |
           pre-commit run --all-files fix-local-includes || true
           pre-commit run --all-files clang-format || true
+      - name: Print issues found
+        if: ${{ steps.run_clang_tidy.outcome != 'success' }}
+        shell: bash
+        run: |
+          sed -i '/error\||/!d' ./output.txt
+          cat output.txt
+          rm output.txt
       - name: Create an issue
-        if: ${{ (steps.clang_tidy.outcome != 'success' || steps.files_changed.outcome != 'success') && github.event_name != 'pull_request' }}
+        if: ${{ steps.run_clang_tidy.outcome != 'success' && github.event_name != 'pull_request' }}
         id: create_issue
-        uses: ./.github/actions/create-issue
+        uses: ./.github/actions/create_issue
         env:
           GH_TOKEN: ${{ github.token }}
         with:
@@ -98,7 +101,7 @@ jobs:
             List of the issues found: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}/
       - uses: crazy-max/ghaction-import-gpg@e89d40939c28e39f97cf32126055eeae86ba74ec # v6.3.0
-        if: ${{ steps.files_changed.outcome != 'success' && github.event_name != 'pull_request' }}
+        if: ${{ steps.run_clang_tidy.outcome != 'success' && github.event_name != 'pull_request' }}
         with:
           gpg_private_key: ${{ secrets.ACTIONS_GPG_PRIVATE_KEY }}
           passphrase: ${{ secrets.ACTIONS_GPG_PASSPHRASE }}
@@ -106,8 +109,8 @@ jobs:
           git_commit_gpgsign: true
       - name: Create PR with fixes
-        if: ${{ steps.files_changed.outcome != 'success' && github.event_name != 'pull_request' }}
-        uses: peter-evans/create-pull-request@98357b18bf14b5342f975ff684046ec3b2a07725 # v8.0.0
+        if: ${{ steps.run_clang_tidy.outcome != 'success' && github.event_name != 'pull_request' }}
+        uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
         env:
           GH_REPO: ${{ github.repository }}
           GH_TOKEN: ${{ github.token }}
@@ -122,5 +125,6 @@ jobs:
           reviewers: "godexsoft,kuznetsss,PeterChen13579,mathbunnyru"
       - name: Fail the job
-        if: ${{ steps.clang_tidy.outcome != 'success' || steps.files_changed.outcome != 'success' }}
+        if: ${{ steps.run_clang_tidy.outcome != 'success' }}
+        shell: bash
         run: exit 1

@@ -10,26 +10,22 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true
-defaults:
-  run:
-    shell: bash
 jobs:
   build:
     runs-on: ubuntu-latest
     container:
-      image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696
+      image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
     steps:
       - name: Checkout
-        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+        uses: actions/checkout@v4
         with:
           lfs: true
       - name: Prepare runner
-        uses: XRPLF/actions/prepare-runner@e8d2d2a546a03e1d161dca52890705f3bc641215
+        uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
         with:
-          enable_ccache: false
+          disable_ccache: true
       - name: Create build directory
         run: mkdir build_docs
@@ -43,10 +39,10 @@ jobs:
         run: cmake --build . --target docs
       - name: Setup Pages
-        uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b # v5.0.0
+        uses: actions/configure-pages@v5
       - name: Upload artifact
-        uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b # v4.0.0
+        uses: actions/upload-pages-artifact@v4
         with:
           path: build_docs/html
           name: docs-develop
@@ -66,6 +62,6 @@ jobs:
     steps:
       - name: Deploy to GitHub Pages
         id: deployment
-        uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e # v4.0.5
+        uses: actions/deploy-pages@v4
         with:
           artifact_name: docs-develop

@@ -8,14 +8,14 @@ on:
     paths:
       - .github/workflows/nightly.yml
-      - .github/workflows/reusable-release.yml
-      - .github/workflows/reusable-build-test.yml
-      - .github/workflows/reusable-build.yml
-      - .github/workflows/reusable-test.yml
-      - .github/workflows/build-clio-docker-image.yml
+      - .github/workflows/release_impl.yml
+      - .github/workflows/build_and_test.yml
+      - .github/workflows/build_impl.yml
+      - .github/workflows/test_impl.yml
+      - .github/workflows/build_clio_docker_image.yml
       - ".github/actions/**"
-      - "!.github/actions/code-coverage/**"
+      - "!.github/actions/code_coverage/**"
       - .github/scripts/prepare-release-artifacts.sh
 concurrency:
@@ -23,25 +23,9 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true
-defaults:
-  run:
-    shell: bash
 jobs:
-  get_date:
-    name: Get Date
-    runs-on: ubuntu-latest
-    outputs:
-      date: ${{ steps.get_date.outputs.date }}
-    steps:
-      - name: Get current date
-        id: get_date
-        run: |
-          echo "date=$(date +'%Y%m%d')" >> $GITHUB_OUTPUT
   build-and-test:
     name: Build and Test
-    needs: get_date
     strategy:
       fail-fast: false
@@ -55,19 +39,19 @@ jobs:
           conan_profile: gcc
           build_type: Release
           static: true
-          container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }'
+          container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
         - os: heavy
           conan_profile: gcc
           build_type: Debug
           static: true
-          container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }'
+          container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
         - os: heavy
           conan_profile: gcc.ubsan
           build_type: Release
           static: false
-          container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }'
+          container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
-    uses: ./.github/workflows/reusable-build-test.yml
+    uses: ./.github/workflows/build_and_test.yml
     with:
       runs_on: ${{ matrix.os }}
       container: ${{ matrix.container }}
@@ -79,31 +63,9 @@ jobs:
       upload_clio_server: true
       download_ccache: false
       upload_ccache: false
-      version: nightly-${{ needs.get_date.outputs.date }}
-  package:
-    name: Build debian package
-    needs: get_date
-    uses: ./.github/workflows/reusable-build.yml
-    with:
-      runs_on: heavy
-      container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }'
-      conan_profile: gcc
-      build_type: Release
-      download_ccache: false
-      upload_ccache: false
-      code_coverage: false
-      static: true
-      upload_clio_server: false
-      package: true
-      version: nightly-${{ needs.get_date.outputs.date }}
-      targets: package
-      analyze_build_time: false
   analyze_build_time:
     name: Analyze Build Time
-    needs: get_date
     strategy:
       fail-fast: false
@@ -111,13 +73,13 @@ jobs:
       include:
         - os: heavy
           conan_profile: clang
-          container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }'
+          container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
           static: true
         - os: macos15
          conan_profile: apple-clang
           container: ""
           static: false
-    uses: ./.github/workflows/reusable-build.yml
+    uses: ./.github/workflows/build_impl.yml
     with:
       runs_on: ${{ matrix.os }}
       container: ${{ matrix.container }}
@@ -130,25 +92,24 @@ jobs:
       upload_clio_server: false
       targets: all
       analyze_build_time: true
-      version: nightly-${{ needs.get_date.outputs.date }}
   nightly_release:
-    needs: [build-and-test, package, get_date]
+    needs: build-and-test
-    uses: ./.github/workflows/reusable-release.yml
+    uses: ./.github/workflows/release_impl.yml
     with:
-      delete_pattern: "nightly-*"
+      overwrite_release: true
       prerelease: true
-      title: "Clio development build (nightly-${{ needs.get_date.outputs.date }})"
-      version: nightly-${{ needs.get_date.outputs.date }}
+      title: "Clio development (nightly) build"
+      version: nightly
       header: >
         > **Note:** Please remember that this is a development release and it is not recommended for production use.
-        Changelog (including previous releases): <https://github.com/XRPLF/clio/commits/nightly-${{ needs.get_date.outputs.date }}>
+        Changelog (including previous releases): <https://github.com/XRPLF/clio/commits/nightly>
       generate_changelog: false
       draft: false
   build_and_publish_docker_image:
-    uses: ./.github/workflows/build-clio-docker-image.yml
+    uses: ./.github/workflows/build_clio_docker_image.yml
     needs: build-and-test
     secrets: inherit
     with:
@@ -169,10 +130,10 @@ jobs:
       issues: write
     steps:
-      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+      - uses: actions/checkout@v4
       - name: Create an issue
-        uses: ./.github/actions/create-issue
+        uses: ./.github/actions/create_issue
         env:
           GH_TOKEN: ${{ github.token }}
         with:

@@ -1,8 +1,8 @@
 name: Pre-commit auto-update
 on:
-  # every first day of the month
   schedule:
+    # every first day of the month
     - cron: "0 0 1 * *"
   pull_request:
     branches: [release/*, develop]
@@ -12,7 +12,7 @@ on:
 jobs:
   auto-update:
-    uses: XRPLF/actions/.github/workflows/pre-commit-autoupdate.yml@ad4ab1ae5a54a4bab0e87294c31fc0729f788b2b
+    uses: XRPLF/actions/.github/workflows/pre-commit-autoupdate.yml@afbcbdafbe0ce5439492fb87eda6441371086386
     with:
       sign_commit: true
       committer: "Clio CI <skuznetsov@ripple.com>"

@@ -8,7 +8,7 @@ on:
 jobs:
   run-hooks:
-    uses: XRPLF/actions/.github/workflows/pre-commit.yml@01163508e81d7dd63d4601d4090b297a260b18c2
+    uses: XRPLF/actions/.github/workflows/pre-commit.yml@afbcbdafbe0ce5439492fb87eda6441371086386
     with:
       runs_on: heavy
-      container: '{ "image": "ghcr.io/xrplf/clio-pre-commit:14342e087ceb8b593027198bf9ef06a43833c696" }'
+      container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'

@@ -29,9 +29,9 @@ jobs:
       conan_profile: gcc
       build_type: Release
       static: true
-      container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }'
+      container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
-    uses: ./.github/workflows/reusable-build-test.yml
+    uses: ./.github/workflows/build_and_test.yml
     with:
       runs_on: ${{ matrix.os }}
       container: ${{ matrix.container }}
@@ -43,36 +43,17 @@ jobs:
       upload_clio_server: true
       download_ccache: false
       upload_ccache: false
-      version: ${{ github.event_name == 'push' && github.ref_name || '' }}
+      expected_version: ${{ github.event_name == 'push' && github.ref_name || '' }}
-  package:
-    name: Build debian package
-    uses: ./.github/workflows/reusable-build.yml
-    with:
-      runs_on: heavy
-      container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }'
-      conan_profile: gcc
-      build_type: Release
-      download_ccache: false
-      upload_ccache: false
-      code_coverage: false
-      static: true
-      upload_clio_server: false
-      package: true
-      version: ${{ github.event_name == 'push' && github.ref_name || '' }}
-      targets: package
-      analyze_build_time: false
   release:
-    needs: [build-and-test, package]
-    uses: ./.github/workflows/reusable-release.yml
+    needs: build-and-test
+    uses: ./.github/workflows/release_impl.yml
     with:
-      delete_pattern: ""
+      overwrite_release: false
       prerelease: ${{ contains(github.ref_name, '-') }}
-      title: "${{ github.ref_name }}"
+      title: "${{ github.ref_name}}"
       version: "${{ github.ref_name }}"
       header: >
        ${{ contains(github.ref_name, '-') && '> **Note:** Please remember that this is a release candidate and it is not recommended for production use.' || '' }}
       generate_changelog: ${{ !contains(github.ref_name, '-') }}
-      draft: ${{ !contains(github.ref_name, '-') }}
+      draft: true
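Note the tag convention both sides of this hunk rely on: `prerelease` (and, on one side, `draft`) key off whether the tag name contains a hyphen. A minimal sketch of that rule (the function name is ours, not part of the workflow):

```python
# Tags like "2.5.0-rc1" are treated as release candidates (prerelease);
# plain tags like "2.5.0" go through the full-release path.
def is_release_candidate(ref_name: str) -> bool:
    return "-" in ref_name

assert is_release_candidate("2.5.0-rc1")
assert not is_release_candidate("2.5.0")
```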

View File

@@ -3,10 +3,10 @@ name: Make release
 on:
   workflow_call:
     inputs:
-      delete_pattern:
-        description: "Pattern to delete previous releases"
+      overwrite_release:
+        description: "Overwrite the current release and tag"
         required: true
-        type: string
+        type: boolean
       prerelease:
         description: "Create a prerelease"
@@ -38,15 +38,11 @@ on:
         required: true
         type: boolean
-defaults:
-  run:
-    shell: bash
 jobs:
   release:
     runs-on: heavy
     container:
-      image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696
+      image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
     env:
       GH_REPO: ${{ github.repository }}
       GH_TOKEN: ${{ github.token }}
@@ -55,75 +51,62 @@ jobs:
       contents: write
     steps:
-      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0
       - name: Prepare runner
-        uses: XRPLF/actions/prepare-runner@e8d2d2a546a03e1d161dca52890705f3bc641215
+        uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
         with:
-          enable_ccache: false
+          disable_ccache: true
-      - uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
+      - uses: actions/download-artifact@v5
         with:
           path: release_artifacts
           pattern: clio_server_*
-      - name: Prepare release artifacts
-        run: .github/scripts/prepare-release-artifacts.sh release_artifacts
-      - uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
-        with:
-          path: release_artifacts
-          pattern: clio_deb_package_*
       - name: Create release notes
-        env:
-          RELEASE_HEADER: ${{ inputs.header }}
+        shell: bash
         run: |
           echo "# Release notes" > "${RUNNER_TEMP}/release_notes.md"
           echo "" >> "${RUNNER_TEMP}/release_notes.md"
-          printf '%s\n' "${RELEASE_HEADER}" >> "${RUNNER_TEMP}/release_notes.md"
+          printf '%s\n' "${{ inputs.header }}" >> "${RUNNER_TEMP}/release_notes.md"
       - name: Generate changelog
+        shell: bash
         if: ${{ inputs.generate_changelog }}
         run: |
           LAST_TAG="$(gh release view --json tagName -q .tagName --repo XRPLF/clio)"
           LAST_TAG_COMMIT="$(git rev-parse $LAST_TAG)"
           BASE_COMMIT="$(git merge-base HEAD $LAST_TAG_COMMIT)"
-          git-cliff "${BASE_COMMIT}..HEAD" --ignore-tags "nightly|-b|-rc" >> "${RUNNER_TEMP}/release_notes.md"
+          git-cliff "${BASE_COMMIT}..HEAD" --ignore-tags "nightly|-b|-rc"
+          cat CHANGELOG.md >> "${RUNNER_TEMP}/release_notes.md"
+      - name: Prepare release artifacts
+        shell: bash
+        run: .github/scripts/prepare-release-artifacts.sh release_artifacts
       - name: Upload release notes
-        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
+        uses: actions/upload-artifact@v4
         with:
           name: release_notes_${{ inputs.version }}
           path: "${RUNNER_TEMP}/release_notes.md"
-      - name: Remove previous release with a pattern
-        if: ${{ github.event_name != 'pull_request' && inputs.delete_pattern != '' }}
-        env:
-          DELETE_PATTERN: ${{ inputs.delete_pattern }}
+      - name: Remove current release and tag
+        if: ${{ github.event_name != 'pull_request' && inputs.overwrite_release }}
+        shell: bash
         run: |
-          RELEASES_TO_DELETE=$(gh release list --limit 50 --repo "${GH_REPO}" | grep -E "${DELETE_PATTERN}" | awk -F'\t' '{print $3}' || true)
-          if [ -n "$RELEASES_TO_DELETE" ]; then
-            for RELEASE in $RELEASES_TO_DELETE; do
-              echo "Deleting release: $RELEASE"
-              gh release delete "$RELEASE" --repo "${GH_REPO}" --yes --cleanup-tag
-            done
-          fi
+          gh release delete ${{ inputs.version }} --yes || true
+          git push origin :${{ inputs.version }} || true
       - name: Publish release
         if: ${{ github.event_name != 'pull_request' }}
-        env:
-          RELEASE_VERSION: ${{ inputs.version }}
-          PRERELEASE_OPTION: ${{ inputs.prerelease && '--prerelease' || '' }}
-          RELEASE_TITLE: ${{ inputs.title }}
-          DRAFT_OPTION: ${{ inputs.draft && '--draft' || '' }}
+        shell: bash
         run: |
-          gh release create "${RELEASE_VERSION}" \
-            ${PRERELEASE_OPTION} \
-            --title "${RELEASE_TITLE}" \
+          gh release create "${{ inputs.version }}" \
+            ${{ inputs.prerelease && '--prerelease' || '' }} \
+            --title "${{ inputs.title }}" \
             --target "${GITHUB_SHA}" \
-            ${DRAFT_OPTION} \
+            ${{ inputs.draft && '--draft' || '' }} \
             --notes-file "${RUNNER_TEMP}/release_notes.md" \
-            ./release_artifacts/clio_*
+            ./release_artifacts/clio_server*
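For reference, the pattern-based cleanup on one side of this hunk (the `gh release list | grep -E | awk` pipeline) reads more clearly written out. A minimal sketch in Python, assuming `gh`'s tab-separated `release list` output with the tag name in the third column; the function name and example pattern are ours:

```python
import re
import subprocess

def delete_matching_releases(pattern: str, repo: str, limit: int = 50) -> None:
    # Equivalent of: gh release list --limit 50 | grep -E "$PATTERN" | awk -F'\t' '{print $3}'
    listing = subprocess.run(
        ["gh", "release", "list", "--limit", str(limit), "--repo", repo],
        capture_output=True, text=True, check=True,
    ).stdout
    for line in listing.splitlines():
        fields = line.split("\t")
        if len(fields) >= 3 and re.search(pattern, line):
            tag = fields[2]
            print(f"Deleting release: {tag}")
            # --cleanup-tag also removes the git tag, like the workflow does
            subprocess.run(
                ["gh", "release", "delete", tag, "--repo", repo, "--yes", "--cleanup-tag"],
                check=True,
            )

# delete_matching_releases(r"^nightly", "XRPLF/clio")  # illustrative pattern
```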

View File

@@ -8,14 +8,14 @@ on:
   paths:
     - .github/workflows/sanitizers.yml
-    - .github/workflows/reusable-build-test.yml
-    - .github/workflows/reusable-build.yml
-    - .github/workflows/reusable-test.yml
+    - .github/workflows/build_and_test.yml
+    - .github/workflows/build_impl.yml
+    - .github/workflows/test_impl.yml
     - ".github/actions/**"
-    - "!.github/actions/build-docker-image/**"
-    - "!.github/actions/create-issue/**"
-    - .github/scripts/execute-tests-under-sanitizer.sh
+    - "!.github/actions/build_docker_image/**"
+    - "!.github/actions/create_issue/**"
+    - .github/scripts/execute-tests-under-sanitizer
     - CMakeLists.txt
     - conanfile.py
@@ -41,16 +41,17 @@ jobs:
       sanitizer_ext: [.asan, .tsan, .ubsan]
       build_type: [Release, Debug]
-    uses: ./.github/workflows/reusable-build-test.yml
+    uses: ./.github/workflows/build_and_test.yml
     with:
       runs_on: heavy
-      container: '{ "image": "ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696" }'
+      container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
       download_ccache: false
       upload_ccache: false
       conan_profile: ${{ matrix.compiler }}${{ matrix.sanitizer_ext }}
       build_type: ${{ matrix.build_type }}
       static: false
-      run_unit_tests: true
+      # Currently, both gcc.tsan and clang.tsan unit tests hang
+      run_unit_tests: ${{ matrix.sanitizer_ext != '.tsan' }}
       run_integration_tests: false
       upload_clio_server: false
       targets: clio_tests clio_integration_tests

View File

@@ -33,10 +33,6 @@ on:
         required: true
         type: boolean
-defaults:
-  run:
-    shell: bash
 jobs:
   unit_tests:
     name: Unit testing
@@ -47,22 +43,23 @@ jobs:
     env:
       # TODO: remove completely when we have fixed all currently existing issues with sanitizers
-      SANITIZER_IGNORE_ERRORS: ${{ endsWith(inputs.conan_profile, '.tsan') }}
+      SANITIZER_IGNORE_ERRORS: ${{ endsWith(inputs.conan_profile, '.tsan') || inputs.conan_profile == 'clang.asan' || (inputs.conan_profile == 'gcc.asan' && inputs.build_type == 'Release') }}
     steps:
       - name: Cleanup workspace
         if: ${{ runner.os == 'macOS' }}
-        uses: XRPLF/actions/cleanup-workspace@cf0433aa74563aead044a1e395610c96d65a37cf
+        uses: XRPLF/actions/.github/actions/cleanup-workspace@ea9970b7c211b18f4c8bcdb28c29f5711752029f
-      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0
-      - uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
+      - uses: actions/download-artifact@v5
         with:
           name: clio_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
       - name: Make clio_tests executable
+        shell: bash
         run: chmod +x ./clio_tests
       - name: Run clio_tests (regular)
@@ -71,10 +68,11 @@ jobs:
       - name: Run clio_tests (sanitizer errors ignored)
         if: ${{ env.SANITIZER_IGNORE_ERRORS == 'true' }}
-        run: ./.github/scripts/execute-tests-under-sanitizer.sh ./clio_tests
+        run: ./.github/scripts/execute-tests-under-sanitizer ./clio_tests
       - name: Check for sanitizer report
         if: ${{ env.SANITIZER_IGNORE_ERRORS == 'true' }}
+        shell: bash
         id: check_report
         run: |
           if ls .sanitizer-report/* 1> /dev/null 2>&1; then
@@ -85,7 +83,7 @@ jobs:
       - name: Upload sanitizer report
         if: ${{ env.SANITIZER_IGNORE_ERRORS == 'true' && steps.check_report.outputs.found_report == 'true' }}
-        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
+        uses: actions/upload-artifact@v4
         with:
           name: sanitizer_report_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
           path: .sanitizer-report/*
@@ -93,7 +91,7 @@ jobs:
       - name: Create an issue
         if: ${{ false && env.SANITIZER_IGNORE_ERRORS == 'true' && steps.check_report.outputs.found_report == 'true' }}
-        uses: ./.github/actions/create-issue
+        uses: ./.github/actions/create_issue
         env:
           GH_TOKEN: ${{ github.token }}
         with:
@@ -124,19 +122,13 @@ jobs:
     steps:
       - name: Cleanup workspace
         if: ${{ runner.os == 'macOS' }}
-        uses: XRPLF/actions/cleanup-workspace@cf0433aa74563aead044a1e395610c96d65a37cf
+        uses: XRPLF/actions/.github/actions/cleanup-workspace@ea9970b7c211b18f4c8bcdb28c29f5711752029f
-      - name: Delete and start colima (macOS)
-        # This is a temporary workaround for colima issues on macOS runners
+      - name: Spin up scylladb
         if: ${{ runner.os == 'macOS' }}
-        timeout-minutes: 3
         run: |
-          colima delete --force
-          colima start
-      - name: Spin up scylladb (macOS)
-        if: ${{ runner.os == 'macOS' }}
-        timeout-minutes: 1
-        run: |
+          docker rm --force scylladb || true
           docker run \
             --detach \
             --name scylladb \
@@ -148,15 +140,11 @@ jobs:
             --memory 16G \
             scylladb/scylla
-      - name: Wait for scylladb container to be healthy (macOS)
-        if: ${{ runner.os == 'macOS' }}
-        timeout-minutes: 1
-        run: |
           until [ "$(docker inspect -f '{{.State.Health.Status}}' scylladb)" == "healthy" ]; do
-            sleep 1
+            sleep 5
           done
-      - uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
+      - uses: actions/download-artifact@v5
        with:
          name: clio_integration_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
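The `until` loop above polls Docker's health status once per interval. The same wait, sketched with an explicit deadline; the `docker inspect` format string is the one the workflow uses, while the function name, container name, and timeout are illustrative:

```python
import subprocess
import time

def wait_until_healthy(container: str = "scylladb", timeout_s: float = 60.0) -> None:
    # Poll `docker inspect -f '{{.State.Health.Status}}' <container>` until "healthy"
    deadline = time.monotonic() + timeout_s
    while time.monotonic() < deadline:
        status = subprocess.run(
            ["docker", "inspect", "-f", "{{.State.Health.Status}}", container],
            capture_output=True, text=True, check=True,
        ).stdout.strip()
        if status == "healthy":
            return
        time.sleep(5)
    raise TimeoutError(f"{container} did not become healthy within {timeout_s}s")
```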

View File

@@ -3,23 +3,23 @@ name: Update CI docker image
 on:
   pull_request:
     paths:
-      - .github/workflows/update-docker-ci.yml
-      - ".github/actions/build-docker-image/**"
-      - "docker/**"
-      - "!docker/clio/**"
-      - "!docker/develop/**"
+      - .github/workflows/update_docker_ci.yml
+      - ".github/actions/build_docker_image/**"
+      - "docker/ci/**"
+      - "docker/compilers/**"
+      - "docker/tools/**"
   push:
     branches: [develop]
     paths:
-      - .github/workflows/update-docker-ci.yml
-      - ".github/actions/build-docker-image/**"
-      - "docker/**"
-      - "!docker/clio/**"
-      - "!docker/develop/**"
+      - .github/workflows/update_docker_ci.yml
+      - ".github/actions/build_docker_image/**"
+      - "docker/ci/**"
+      - "docker/compilers/**"
+      - "docker/tools/**"
   workflow_dispatch:
 concurrency:
@@ -33,10 +33,6 @@
   GCC_MAJOR_VERSION: 15
   GCC_VERSION: 15.2.0
-defaults:
-  run:
-    shell: bash
 jobs:
   repo:
     name: Calculate repo name
@@ -56,15 +52,15 @@ jobs:
     needs: repo
     steps:
-      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+      - uses: actions/checkout@v4
       - name: Get changed files
         id: changed-files
-        uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
         with:
           files: "docker/compilers/gcc/**"
-      - uses: ./.github/actions/build-docker-image
+      - uses: ./.github/actions/build_docker_image
         if: ${{ steps.changed-files.outputs.any_changed == 'true' }}
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -94,15 +90,15 @@ jobs:
     needs: repo
     steps:
-      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+      - uses: actions/checkout@v4
       - name: Get changed files
         id: changed-files
-        uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
         with:
           files: "docker/compilers/gcc/**"
-      - uses: ./.github/actions/build-docker-image
+      - uses: ./.github/actions/build_docker_image
         if: ${{ steps.changed-files.outputs.any_changed == 'true' }}
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -132,20 +128,20 @@ jobs:
     needs: [repo, gcc-amd64, gcc-arm64]
     steps:
-      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+      - uses: actions/checkout@v4
       - name: Get changed files
         id: changed-files
-        uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
         with:
           files: "docker/compilers/gcc/**"
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
+        uses: docker/setup-buildx-action@v3
       - name: Login to GitHub Container Registry
         if: ${{ github.event_name != 'pull_request' }}
-        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
+        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
         with:
           registry: ghcr.io
           username: ${{ github.repository_owner }}
@@ -153,7 +149,7 @@ jobs:
       - name: Login to DockerHub
         if: ${{ github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' }}
-        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
+        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
         with:
           username: ${{ secrets.DOCKERHUB_USER }}
           password: ${{ secrets.DOCKERHUB_PW }}
@@ -183,15 +179,15 @@ jobs:
     needs: repo
     steps:
-      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+      - uses: actions/checkout@v4
       - name: Get changed files
         id: changed-files
-        uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
         with:
           files: "docker/compilers/clang/**"
-      - uses: ./.github/actions/build-docker-image
+      - uses: ./.github/actions/build_docker_image
         if: ${{ steps.changed-files.outputs.any_changed == 'true' }}
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -219,15 +215,15 @@ jobs:
     needs: [repo, gcc-merge]
     steps:
-      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+      - uses: actions/checkout@v4
       - name: Get changed files
         id: changed-files
-        uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
         with:
           files: "docker/tools/**"
-      - uses: ./.github/actions/build-docker-image
+      - uses: ./.github/actions/build_docker_image
         if: ${{ steps.changed-files.outputs.any_changed == 'true' }}
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -250,15 +246,15 @@ jobs:
     needs: [repo, gcc-merge]
     steps:
-      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+      - uses: actions/checkout@v4
       - name: Get changed files
         id: changed-files
-        uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
         with:
           files: "docker/tools/**"
-      - uses: ./.github/actions/build-docker-image
+      - uses: ./.github/actions/build_docker_image
         if: ${{ steps.changed-files.outputs.any_changed == 'true' }}
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -281,20 +277,20 @@ jobs:
     needs: [repo, tools-amd64, tools-arm64]
     steps:
-      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+      - uses: actions/checkout@v4
       - name: Get changed files
         id: changed-files
-        uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
         with:
           files: "docker/tools/**"
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
+        uses: docker/setup-buildx-action@v3
       - name: Login to GitHub Container Registry
         if: ${{ github.event_name != 'pull_request' }}
-        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
+        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
         with:
           registry: ghcr.io
           username: ${{ github.repository_owner }}
@@ -310,36 +306,14 @@ jobs:
             $image:arm64-latest \
             $image:amd64-latest
-  pre-commit:
-    name: Build and push pre-commit docker image
-    runs-on: heavy
-    needs: [repo, tools-merge]
-    steps:
-      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
-      - uses: ./.github/actions/build-docker-image
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          images: |
-            ${{ needs.repo.outputs.GHCR_REPO }}/clio-pre-commit
-          push_image: ${{ github.event_name != 'pull_request' }}
-          directory: docker/pre-commit
-          tags: |
-            type=raw,value=latest
-            type=raw,value=${{ github.sha }}
-          platforms: linux/amd64,linux/arm64
-          build_args: |
-            GHCR_REPO=${{ needs.repo.outputs.GHCR_REPO }}
   ci:
     name: Build and push CI docker image
     runs-on: heavy
     needs: [repo, gcc-merge, clang, tools-merge]
     steps:
-      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
-      - uses: ./.github/actions/build-docker-image
+      - uses: actions/checkout@v4
+      - uses: ./.github/actions/build_docker_image
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}

View File

@@ -18,22 +18,20 @@ on:
   pull_request:
     branches: [develop]
     paths:
-      - .github/workflows/upload-conan-deps.yml
+      - .github/workflows/upload_conan_deps.yml
       - .github/actions/conan/action.yml
       - ".github/scripts/conan/**"
-      - "!.github/scripts/conan/regenerate_lockfile.sh"
       - conanfile.py
       - conan.lock
   push:
     branches: [develop]
     paths:
-      - .github/workflows/upload-conan-deps.yml
+      - .github/workflows/upload_conan_deps.yml
       - .github/actions/conan/action.yml
       - ".github/scripts/conan/**"
-      - "!.github/scripts/conan/regenerate_lockfile.sh"
       - conanfile.py
       - conan.lock
@@ -42,17 +40,13 @@
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true
-defaults:
-  run:
-    shell: bash
 jobs:
   generate-matrix:
     runs-on: ubuntu-latest
     outputs:
       matrix: ${{ steps.set-matrix.outputs.matrix }}
     steps:
-      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+      - uses: actions/checkout@v4
       - name: Calculate conan matrix
         id: set-matrix
@@ -75,15 +69,16 @@ jobs:
       CONAN_PROFILE: ${{ matrix.compiler }}${{ matrix.sanitizer_ext }}
     steps:
-      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+      - uses: actions/checkout@v4
       - name: Prepare runner
-        uses: XRPLF/actions/prepare-runner@e8d2d2a546a03e1d161dca52890705f3bc641215
+        uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
         with:
-          enable_ccache: false
+          disable_ccache: true
       - name: Setup conan on macOS
         if: ${{ runner.os == 'macOS' }}
+        shell: bash
         run: ./.github/scripts/conan/init.sh
       - name: Show conan profile
@@ -104,6 +99,4 @@ jobs:
       - name: Upload Conan packages
         if: ${{ github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' && github.event_name != 'schedule' }}
-        env:
-          FORCE_OPTION: ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }}
-        run: conan upload "*" -r=xrplf --confirm ${FORCE_OPTION}
+        run: conan upload "*" -r=xrplf --confirm ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }}

View File

@@ -1,34 +1,31 @@
 name: Upload report
 on:
-  workflow_dispatch:
   workflow_call:
     secrets:
       CODECOV_TOKEN:
         required: true
-defaults:
-  run:
-    shell: bash
 jobs:
   upload_report:
     name: Upload report
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0
       - name: Download report artifact
-        uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
+        uses: actions/download-artifact@v5
         with:
           name: coverage-report.xml
           path: build
       - name: Upload coverage report
         if: ${{ hashFiles('build/coverage_report.xml') != '' }}
-        uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
+        uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
         with:
           files: build/coverage_report.xml
           fail_ci_if_error: true

.gitignore
View File

@@ -4,7 +4,6 @@
 .build
 .cache
 .vscode
-.zed
 .python-version
 .DS_Store
 .sanitizer-report

View File

@@ -11,10 +11,7 @@
 #
 # See https://pre-commit.com for more information
 # See https://pre-commit.com/hooks.html for more hooks
-exclude: |
-  (?x)^(
-    docs/doxygen-awesome-theme/.*
-  )$
+exclude: ^(docs/doxygen-awesome-theme/|conan\.lock$)
 repos:
   # `pre-commit sample-config` default hooks
@@ -29,12 +26,12 @@ repos:
   # Autoformat: YAML, JSON, Markdown, etc.
   - repo: https://github.com/rbubley/mirrors-prettier
-    rev: 14abee445aea04b39069c19b4bd54efff6775819 # frozen: v3.7.4
+    rev: 5ba47274f9b181bce26a5150a725577f3c336011 # frozen: v3.6.2
     hooks:
       - id: prettier
   - repo: https://github.com/igorshubovych/markdownlint-cli
-    rev: 76b3d32d3f4b965e1d6425253c59407420ae2c43 # frozen: v0.47.0
+    rev: 192ad822316c3a22fb3d3cc8aa6eafa0b8488360 # frozen: v0.45.0
     hooks:
       - id: markdownlint-fix
         exclude: LICENSE.md
@@ -46,7 +43,7 @@ repos:
       # hadolint-docker is a special hook that runs hadolint in a Docker container
       # Docker is not installed in the environment where pre-commit is run
       stages: [manual]
-      entry: hadolint/hadolint:v2.14.0 hadolint
+      entry: hadolint/hadolint:v2.12.1-beta hadolint
   - repo: https://github.com/codespell-project/codespell
     rev: 63c8f8312b7559622c0d82815639671ae42132ac # frozen: v2.4.1
@@ -58,17 +55,6 @@ repos:
           --ignore-words=pre-commit-hooks/codespell_ignore.txt,
         ]
-  - repo: https://github.com/psf/black-pre-commit-mirror
-    rev: 831207fd435b47aeffdf6af853097e64322b4d44 # frozen: 25.12.0
-    hooks:
-      - id: black
-  - repo: https://github.com/scop/pre-commit-shfmt
-    rev: 2a30809d16bc7a60d9b97353c797f42b510d3368 # frozen: v3.12.0-2
-    hooks:
-      - id: shfmt
-        args: ["-i", "4", "--write"]
   # Running some C++ hooks before clang-format
   # to ensure that the style is consistent.
   - repo: local
@@ -94,7 +80,7 @@ repos:
         language: script
   - repo: https://github.com/pre-commit/mirrors-clang-format
-    rev: 75ca4ad908dc4a99f57921f29b7e6c1521e10b26 # frozen: v21.1.8
+    rev: 719856d56a62953b8d2839fb9e851f25c3cfeef8 # frozen: v21.1.2
     hooks:
       - id: clang-format
         args: [--style=file]
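The two `exclude` forms in the first hunk are not equivalent: the verbose multi-line pattern only skips the doxygen-awesome theme, while the single-line one also skips `conan.lock`. A quick check with Python's `re`, which is what pre-commit compiles these patterns with:

```python
import re

multi_line = re.compile(
    r"""(?x)^(
        docs/doxygen-awesome-theme/.*
    )$"""
)
single_line = re.compile(r"^(docs/doxygen-awesome-theme/|conan\.lock$)")

for path in ("docs/doxygen-awesome-theme/theme.css", "conan.lock", "src/main.cpp"):
    print(path, bool(multi_line.search(path)), bool(single_line.search(path)))
# docs/doxygen-awesome-theme/theme.css True True
# conan.lock False True
# src/main.cpp False False
```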

View File

@@ -75,6 +75,10 @@ if (san)
     endif ()
     target_compile_options(clio_options INTERFACE ${SAN_OPTIMIZATION_FLAG} ${SAN_FLAG} -fno-omit-frame-pointer)
+    target_compile_definitions(
+        clio_options INTERFACE $<$<STREQUAL:${san},address>:SANITIZER=ASAN> $<$<STREQUAL:${san},thread>:SANITIZER=TSAN>
+                               $<$<STREQUAL:${san},memory>:SANITIZER=MSAN> $<$<STREQUAL:${san},undefined>:SANITIZER=UBSAN>
+    )
     target_link_libraries(clio_options INTERFACE ${SAN_FLAG} ${SAN_LIB})
 endif ()

View File

@@ -180,7 +180,6 @@ Existing maintainers can resign, or be subject to a vote for removal at the behe
 - [kuznetsss](https://github.com/kuznetsss) (Ripple)
 - [legleux](https://github.com/legleux) (Ripple)
 - [PeterChen13579](https://github.com/PeterChen13579) (Ripple)
-- [mathbunnyru](https://github.com/mathbunnyru) (Ripple)
 ### Honorable ex-Maintainers

View File

@@ -34,6 +34,7 @@ Below are some useful docs to learn more about Clio.
 - [How to configure Clio and rippled](./docs/configure-clio.md)
 - [How to run Clio](./docs/run-clio.md)
+- [Logging](./docs/logging.md)
 - [Troubleshooting guide](./docs/trouble_shooting.md)
 **General reference material:**

View File

@@ -9,12 +9,10 @@ target_sources(
     util/async/ExecutionContextBenchmarks.cpp
     # Logger
     util/log/LoggerBenchmark.cpp
-    # WorkQueue
-    rpc/WorkQueueBenchmarks.cpp
 )
 include(deps/gbench)
 target_include_directories(clio_benchmark PRIVATE .)
-target_link_libraries(clio_benchmark PRIVATE clio_rpc clio_util benchmark::benchmark_main spdlog::spdlog)
+target_link_libraries(clio_benchmark PUBLIC clio_util benchmark::benchmark_main spdlog::spdlog)
 set_target_properties(clio_benchmark PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})

View File

@@ -1,145 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include "rpc/WorkQueue.hpp"
#include "util/Assert.hpp"
#include "util/config/Array.hpp"
#include "util/config/ConfigConstraints.hpp"
#include "util/config/ConfigDefinition.hpp"
#include "util/config/ConfigValue.hpp"
#include "util/config/Types.hpp"
#include "util/log/Logger.hpp"
#include "util/prometheus/Prometheus.hpp"
#include <benchmark/benchmark.h>
#include <boost/asio/steady_timer.hpp>
#include <algorithm>
#include <atomic>
#include <cassert>
#include <chrono>
#include <cstddef>
#include <cstdint>
#include <mutex>
#include <thread>
#include <vector>
using namespace rpc;
using namespace util::config;
namespace {
auto const kCONFIG = ClioConfigDefinition{
{"prometheus.compress_reply", ConfigValue{ConfigType::Boolean}.defaultValue(true)},
{"prometheus.enabled", ConfigValue{ConfigType::Boolean}.defaultValue(true)},
{"log.channels.[].channel", Array{ConfigValue{ConfigType::String}}},
{"log.channels.[].level", Array{ConfigValue{ConfigType::String}}},
{"log.level", ConfigValue{ConfigType::String}.defaultValue("info")},
{"log.format", ConfigValue{ConfigType::String}.defaultValue(R"(%Y-%m-%d %H:%M:%S.%f %^%3!l:%n%$ - %v)")},
{"log.is_async", ConfigValue{ConfigType::Boolean}.defaultValue(false)},
{"log.enable_console", ConfigValue{ConfigType::Boolean}.defaultValue(false)},
{"log.directory", ConfigValue{ConfigType::String}.optional()},
{"log.rotation_size", ConfigValue{ConfigType::Integer}.defaultValue(2048).withConstraint(gValidateUint32)},
{"log.directory_max_files", ConfigValue{ConfigType::Integer}.defaultValue(25).withConstraint(gValidateUint32)},
{"log.tag_style", ConfigValue{ConfigType::String}.defaultValue("none")},
};
// this should be a fixture but it did not work with Args very well
void
init()
{
static std::once_flag kONCE;
std::call_once(kONCE, [] {
PrometheusService::init(kCONFIG);
(void)util::LogService::init(kCONFIG);
});
}
} // namespace
static void
benchmarkWorkQueue(benchmark::State& state)
{
init();
auto const wqThreads = static_cast<uint32_t>(state.range(0));
auto const maxQueueSize = static_cast<uint32_t>(state.range(1));
auto const clientThreads = static_cast<uint32_t>(state.range(2));
auto const itemsPerClient = static_cast<uint32_t>(state.range(3));
auto const clientProcessingMs = static_cast<uint32_t>(state.range(4));
for (auto _ : state) {
std::atomic_size_t totalExecuted = 0uz;
std::atomic_size_t totalQueued = 0uz;
state.PauseTiming();
WorkQueue queue(wqThreads, maxQueueSize);
state.ResumeTiming();
std::vector<std::thread> threads;
threads.reserve(clientThreads);
for (auto t = 0uz; t < clientThreads; ++t) {
threads.emplace_back([&] {
for (auto i = 0uz; i < itemsPerClient; ++i) {
totalQueued += static_cast<std::size_t>(queue.postCoro(
[&clientProcessingMs, &totalExecuted](auto yield) {
++totalExecuted;
boost::asio::steady_timer timer(
yield.get_executor(), std::chrono::milliseconds{clientProcessingMs}
);
timer.async_wait(yield);
std::this_thread::sleep_for(std::chrono::microseconds{10});
},
/* isWhiteListed = */ false
));
}
});
}
for (auto& t : threads)
t.join();
queue.stop();
ASSERT(totalExecuted == totalQueued, "Totals don't match");
ASSERT(totalQueued <= itemsPerClient * clientThreads, "Queued more than requested");
if (maxQueueSize == 0) {
ASSERT(totalQueued == itemsPerClient * clientThreads, "Queued exactly the expected amount");
} else {
ASSERT(totalQueued >= std::min(maxQueueSize, itemsPerClient * clientThreads), "Queued less than expected");
}
}
}
// Usage example:
/*
./clio_benchmark \
--benchmark_repetitions=10 \
--benchmark_display_aggregates_only=true \
--benchmark_min_time=1x \
--benchmark_filter="WorkQueue"
*/
// TODO: figure out what happens on 1 thread
BENCHMARK(benchmarkWorkQueue)
->ArgsProduct({{2, 4, 8, 16}, {0, 5'000}, {4, 8, 16}, {1'000, 10'000}, {10, 100, 250}})
->Unit(benchmark::kMillisecond);

View File

@@ -49,6 +49,8 @@ postprocessors = [
 ]
 # render body even when there are no releases to process
 # render_always = true
+# output file path
+output = "CHANGELOG.md"
 [git]
 # parse the commits based on https://www.conventionalcommits.org
View File

@@ -1,42 +1,42 @@
 find_package(Git REQUIRED)
-if (DEFINED ENV{GITHUB_BRANCH_NAME})
-    set(GIT_BUILD_BRANCH $ENV{GITHUB_BRANCH_NAME})
-    set(GIT_COMMIT_HASH $ENV{GITHUB_HEAD_SHA})
-else ()
-    set(GIT_COMMAND branch --show-current)
-    execute_process(
-        COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE GIT_BUILD_BRANCH
-        OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY
-    )
-    set(GIT_COMMAND rev-parse HEAD)
-    execute_process(
-        COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE GIT_COMMIT_HASH
-        OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY
-    )
-endif ()
-execute_process(
-    COMMAND date +%Y%m%d%H%M%S WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE BUILD_DATE
-    OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY
-)
-message(STATUS "Git branch: ${GIT_BUILD_BRANCH}")
-message(STATUS "Git commit hash: ${GIT_COMMIT_HASH}")
-message(STATUS "Build date: ${BUILD_DATE}")
-if (DEFINED ENV{FORCE_CLIO_VERSION} AND NOT "$ENV{FORCE_CLIO_VERSION}" STREQUAL "")
-    message(STATUS "Using explicitly provided '${FORCE_CLIO_VERSION}' as Clio version")
-    set(CLIO_VERSION "$ENV{FORCE_CLIO_VERSION}")
-    set(DOC_CLIO_VERSION "$ENV{FORCE_CLIO_VERSION}")
+set(GIT_COMMAND describe --tags --exact-match)
+execute_process(
+    COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND}
+    WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
+    OUTPUT_VARIABLE TAG
+    RESULT_VARIABLE RC
+    ERROR_VARIABLE ERR
+    OUTPUT_STRIP_TRAILING_WHITESPACE ERROR_STRIP_TRAILING_WHITESPACE
+)
+if (RC EQUAL 0)
+    message(STATUS "Found tag '${TAG}' in git. Will use it as Clio version")
+    set(CLIO_VERSION "${TAG}")
+    set(DOC_CLIO_VERSION "${TAG}")
 else ()
-    message(STATUS "Using 'YYYYMMDDHMS-<branch>-<git short rev>' as Clio version")
-    string(SUBSTRING ${GIT_COMMIT_HASH} 0 7 GIT_COMMIT_HASH_SHORT)
-    set(CLIO_VERSION "${BUILD_DATE}-${GIT_BUILD_BRANCH}-${GIT_COMMIT_HASH_SHORT}")
+    message(STATUS "Error finding tag in git: ${ERR}")
+    message(STATUS "Will use 'YYYYMMDDHMS-<branch>-<git-rev>' as Clio version")
+    set(GIT_COMMAND show -s --date=format:%Y%m%d%H%M%S --format=%cd)
+    execute_process(
+        COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE DATE
+        OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY
+    )
+    set(GIT_COMMAND branch --show-current)
+    execute_process(
+        COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE BRANCH
+        OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY
+    )
+    set(GIT_COMMAND rev-parse --short HEAD)
+    execute_process(
+        COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE REV
+        OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY
+    )
+    set(CLIO_VERSION "${DATE}-${BRANCH}-${REV}")
     set(DOC_CLIO_VERSION "develop")
 endif ()
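Both variants fall back to a `YYYYMMDDHMS-<branch>-<git short rev>` version string; they differ in that one stamps the build date (`date +%Y%m%d%H%M%S`) while the other uses the commit date (`git show -s --date=format:... --format=%cd`). A sketch of the commit-date flavour; the helper names are ours, the git subcommands are the ones the CMake code runs:

```python
import subprocess

def git(*args: str) -> str:
    result = subprocess.run(["git", *args], capture_output=True, text=True, check=True)
    return result.stdout.strip()

def clio_fallback_version() -> str:
    date = git("show", "-s", "--date=format:%Y%m%d%H%M%S", "--format=%cd")
    branch = git("branch", "--show-current")  # empty on a detached HEAD
    rev = git("rev-parse", "--short", "HEAD")
    return f"{date}-{branch}-{rev}"

print(clio_fallback_version())  # e.g. 20250101120000-develop-abc1234
```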

View File

@@ -1,17 +0,0 @@
[Unit]
Description=Clio XRPL API server
Documentation=https://github.com/XRPLF/clio.git
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
ExecStart=@CLIO_INSTALL_DIR@/bin/clio_server @CLIO_INSTALL_DIR@/etc/config.json
Restart=on-failure
User=clio
Group=clio
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target

View File

@@ -11,6 +11,3 @@ file(READ docs/examples/config/example-config.json config)
 string(REGEX REPLACE "./clio_log" "/var/log/clio/" config "${config}")
 file(WRITE ${CMAKE_BINARY_DIR}/install-config.json "${config}")
 install(FILES ${CMAKE_BINARY_DIR}/install-config.json DESTINATION etc RENAME config.json)
-configure_file("${CMAKE_SOURCE_DIR}/cmake/install/clio.service.in" "${CMAKE_BINARY_DIR}/clio.service")
-install(FILES "${CMAKE_BINARY_DIR}/clio.service" DESTINATION /lib/systemd/system)

View File

@@ -10,36 +10,37 @@ CLIO_BIN="$CLIO_PREFIX/bin/${CLIO_EXECUTABLE}"
 CLIO_CONFIG="$CLIO_PREFIX/etc/config.json"
 case "$1" in
     configure)
         if ! id -u "$USER_NAME" >/dev/null 2>&1; then
             # Users who should not have a home directory should have their home directory set to /nonexistent
             # https://www.debian.org/doc/debian-policy/ch-opersys.html#non-existent-home-directories
             useradd \
                 --system \
                 --home-dir /nonexistent \
                 --no-create-home \
                 --shell /usr/sbin/nologin \
                 --comment "system user for ${CLIO_EXECUTABLE}" \
                 --user-group \
                 ${USER_NAME}
         fi
         install -d -o "$USER_NAME" -g "$GROUP_NAME" /var/log/clio
         if [ -f "$CLIO_CONFIG" ]; then
             chown "$USER_NAME:$GROUP_NAME" "$CLIO_CONFIG"
         fi
         chown -R "$USER_NAME:$GROUP_NAME" "$CLIO_PREFIX"
         ln -sf "$CLIO_BIN" "/usr/bin/${CLIO_EXECUTABLE}"
         ;;
-    abort-upgrade | abort-remove | abort-deconfigure) ;;
-    *)
-        echo "postinst called with unknown argument \`$1'" >&2
-        exit 1
-        ;;
+    abort-upgrade|abort-remove|abort-deconfigure)
+    ;;
+    *)
+        echo "postinst called with unknown argument \`$1'" >&2
+        exit 1
+    ;;
 esac
 exit 0

View File

@@ -1,52 +1,51 @@
 {
     "version": "0.5",
     "requires": [
-        "zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1765850150.075",
+        "zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1756234269.497",
-        "xxhash/0.8.3#681d36a0a6111fc56e5e45ea182c19cc%1765850149.987",
+        "xxhash/0.8.3#681d36a0a6111fc56e5e45ea182c19cc%1756234289.683",
-        "xrpl/3.0.0#534d3f65a336109eee929b88962bae4e%1765375071.547",
+        "xrpl/2.6.1-rc2#c14c6a4092fb2b97d3a93906dcee87b7%1759161400.392",
-        "sqlite3/3.49.1#8631739a4c9b93bd3d6b753bac548a63%1765850149.926",
+        "sqlite3/3.49.1#8631739a4c9b93bd3d6b753bac548a63%1756234266.869",
-        "spdlog/1.17.0#bcbaaf7147bda6ad24ffbd1ac3d7142c%1767636069.964",
+        "spdlog/1.15.3#3ca0e9e6b83af4d0151e26541d140c86%1754401846.61",
-        "soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1765850149.46",
+        "soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1756234262.318",
-        "re2/20230301#ca3b241baec15bd31ea9187150e0b333%1765850148.103",
+        "re2/20230301#dfd6e2bf050eb90ddd8729cfb4c844a4%1756234257.976",
         "rapidjson/cci.20220822#1b9d8c2256876a154172dc5cfbe447c6%1754325007.656",
-        "protobuf/3.21.12#44ee56c0a6eea0c19aeeaca680370b88%1764175361.456",
+        "protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1756234251.614",
         "openssl/1.1.1w#a8f0792d7c5121b954578a7149d23e03%1756223730.729",
-        "nudb/2.0.9#fb8dfd1a5557f5e0528114c2da17721e%1765850143.957",
+        "nudb/2.0.9#c62cfd501e57055a7e0d8ee3d5e5427d%1756234237.107",
         "minizip/1.2.13#9e87d57804bd372d6d1e32b1871517a3%1754325004.374",
-        "lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1765850143.914",
+        "lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1756234228.999",
         "libuv/1.46.0#dc28c1f653fa197f00db5b577a6f6011%1754325003.592",
-        "libiconv/1.17#1e65319e945f2d31941a9d28cc13c058%1765842973.492",
+        "libiconv/1.17#1e65319e945f2d31941a9d28cc13c058%1756223727.64",
-        "libbacktrace/cci.20210118#a7691bfccd8caaf66309df196790a5a1%1765842973.03",
+        "libbacktrace/cci.20210118#a7691bfccd8caaf66309df196790a5a1%1756230911.03",
-        "libarchive/3.8.1#ffee18995c706e02bf96e7a2f7042e0d%1765850144.736",
+        "libarchive/3.8.1#5cf685686322e906cb42706ab7e099a8%1756234256.696",
         "http_parser/2.9.4#98d91690d6fd021e9e624218a85d9d97%1754325001.385",
-        "gtest/1.17.0#5224b3b3ff3b4ce1133cbdd27d53ee7d%1755784855.585",
+        "gtest/1.14.0#f8f0757a574a8dd747d16af62d6eb1b7%1754325000.842",
         "grpc/1.50.1#02291451d1e17200293a409410d1c4e1%1756234248.958",
-        "fmt/12.1.0#50abab23274d56bb8f42c94b3b9a40c7%1763984116.926",
+        "fmt/11.2.0#579bb2cdf4a7607621beea4eb4651e0f%1754324999.086",
         "doctest/2.4.11#a4211dfc329a16ba9f280f9574025659%1756234220.819",
-        "date/3.0.4#862e11e80030356b53c2c38599ceb32b%1765850143.772",
+        "date/3.0.4#f74bbba5a08fa388256688743136cb6f%1756234217.493",
-        "cassandra-cpp-driver/2.17.0#bd3934138689482102c265d01288a316%1764175359.611",
+        "cassandra-cpp-driver/2.17.0#e50919efac8418c26be6671fd702540a%1754324997.363",
-        "c-ares/1.34.5#5581c2b62a608b40bb85d965ab3ec7c8%1765850144.336",
+        "c-ares/1.34.5#b78b91e7cfb1f11ce777a285bbf169c6%1756234217.915",
-        "bzip2/1.0.8#c470882369c2d95c5c77e970c0c7e321%1765850143.837",
+        "bzip2/1.0.8#00b4a4658791c1f06914e087f0e792f5%1756234261.716",
-        "boost/1.83.0#91d8b1572534d2c334d6790e3c34d0c1%1764175359.61",
+        "boost/1.83.0#5d975011d65b51abb2d2f6eb8386b368%1754325043.336",
         "benchmark/1.9.4#ce4403f7a24d3e1f907cd9da4b678be4%1754578869.672",
-        "abseil/20230802.1#90ba607d4ee8fb5fb157c3db540671fc%1764175359.429"
+        "abseil/20230802.1#f0f91485b111dc9837a68972cb19ca7b%1756234220.907"
     ],
     "build_requires": [
-        "zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1765850150.075",
+        "zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1756234269.497",
-        "protobuf/3.21.12#44ee56c0a6eea0c19aeeaca680370b88%1764175361.456",
+        "protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1756234251.614",
-        "cmake/4.2.0#ae0a44f44a1ef9ab68fd4b3e9a1f8671%1765850153.937",
-        "cmake/3.31.10#313d16a1aa16bbdb2ca0792467214b76%1765850153.479",
-        "b2/5.3.3#107c15377719889654eb9a162a673975%1765850144.355"
+        "cmake/3.31.8#dde3bde00bb843687e55aea5afa0e220%1756234232.89",
+        "b2/5.3.3#107c15377719889654eb9a162a673975%1756234226.28"
     ],
     "python_requires": [],
     "overrides": {
         "boost/1.83.0": [
             null,
-            "boost/1.83.0#91d8b1572534d2c334d6790e3c34d0c1"
+            "boost/1.83.0#5d975011d65b51abb2d2f6eb8386b368"
         ],
         "protobuf/3.21.12": [
             null,
-            "protobuf/3.21.12#44ee56c0a6eea0c19aeeaca680370b88"
+            "protobuf/3.21.12"
         ],
         "lz4/1.9.4": [
             "lz4/1.10.0"
@@ -56,4 +55,4 @@
         ]
     },
     "config_requires": []
 }

View File

@@ -3,60 +3,62 @@ from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
 class ClioConan(ConanFile):
-    name = "clio"
-    license = "ISC"
-    author = "Alex Kremer <akremer@ripple.com>, John Freeman <jfreeman@ripple.com>, Ayaz Salikhov <asalikhov@ripple.com>"
-    url = "https://github.com/xrplf/clio"
-    description = "Clio RPC server"
-    settings = "os", "compiler", "build_type", "arch"
+    name = 'clio'
+    license = 'ISC'
+    author = 'Alex Kremer <akremer@ripple.com>, John Freeman <jfreeman@ripple.com>, Ayaz Salikhov <asalikhov@ripple.com>'
+    url = 'https://github.com/xrplf/clio'
+    description = 'Clio RPC server'
+    settings = 'os', 'compiler', 'build_type', 'arch'
     options = {}
     requires = [
-        "boost/1.83.0",
-        "cassandra-cpp-driver/2.17.0",
-        "fmt/12.1.0",
-        "grpc/1.50.1",
-        "libbacktrace/cci.20210118",
-        "openssl/1.1.1w",
-        "protobuf/3.21.12",
-        "spdlog/1.17.0",
-        "xrpl/3.0.0",
-        "zlib/1.3.1",
+        'boost/1.83.0',
+        'cassandra-cpp-driver/2.17.0',
+        'fmt/11.2.0',
+        'protobuf/3.21.12',
+        'grpc/1.50.1',
+        'openssl/1.1.1w',
+        'xrpl/2.6.1-rc2',
+        'zlib/1.3.1',
+        'libbacktrace/cci.20210118',
+        'spdlog/1.15.3',
     ]
     default_options = {
-        "cassandra-cpp-driver/*:shared": False,
-        "date/*:header_only": True,
-        "grpc/*:secure": True,
-        "grpc/*:shared": False,
-        "gtest/*:no_main": True,
-        "libpq/*:shared": False,
-        "lz4/*:shared": False,
-        "openssl/*:shared": False,
-        "protobuf/*:shared": False,
-        "protobuf/*:with_zlib": True,
-        "snappy/*:shared": False,
-        "xrpl/*:rocksdb": False,
-        "xrpl/*:tests": False,
+        'xrpl/*:tests': False,
+        'xrpl/*:rocksdb': False,
+        'cassandra-cpp-driver/*:shared': False,
+        'date/*:header_only': True,
+        'grpc/*:shared': False,
+        'grpc/*:secure': True,
+        'libpq/*:shared': False,
+        'lz4/*:shared': False,
+        'openssl/*:shared': False,
+        'protobuf/*:shared': False,
+        'protobuf/*:with_zlib': True,
+        'snappy/*:shared': False,
+        'gtest/*:no_main': True,
     }
-    exports_sources = ("CMakeLists.txt", "cmake/*", "src/*")
+    exports_sources = (
+        'CMakeLists.txt', 'cmake/*', 'src/*'
+    )
     def requirements(self):
-        self.requires("gtest/1.17.0")
-        self.requires("benchmark/1.9.4")
+        self.requires('gtest/1.14.0')
+        self.requires('benchmark/1.9.4')
     def configure(self):
-        if self.settings.compiler == "apple-clang":
-            self.options["boost"].visibility = "global"
+        if self.settings.compiler == 'apple-clang':
+            self.options['boost'].visibility = 'global'
     def layout(self):
         cmake_layout(self)
         # Fix this setting to follow the default introduced in Conan 1.48
         # to align with our build instructions.
-        self.folders.generators = "build/generators"
-    generators = "CMakeDeps"
+        self.folders.generators = 'build/generators'
+    generators = 'CMakeDeps'
     def generate(self):
         tc = CMakeToolchain(self)

View File

@@ -36,28 +36,32 @@ RUN apt-get update \
     libmpfr-dev \
     libncurses-dev \
     make \
+    ninja-build \
     wget \
     zip \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*
 # Install Python tools
-RUN apt-get update \
+ARG PYTHON_VERSION=3.13
+RUN add-apt-repository ppa:deadsnakes/ppa \
+    && apt-get update \
     && apt-get install -y --no-install-recommends --no-install-suggests \
-    python3 \
-    python3-pip \
+    python${PYTHON_VERSION} \
+    python${PYTHON_VERSION}-venv \
     && apt-get clean \
-    && rm -rf /var/lib/apt/lists/*
+    && rm -rf /var/lib/apt/lists/* \
+    && curl -sS https://bootstrap.pypa.io/get-pip.py | python${PYTHON_VERSION}
+# Create a virtual environment for python tools
+RUN python${PYTHON_VERSION} -m venv /opt/venv
+ENV PATH="/opt/venv/bin:$PATH"
 RUN pip install -q --no-cache-dir \
-    # TODO: Remove this once we switch to newer Ubuntu base image
-    # lxml 6.0.0 is not compatible with our image
-    'lxml<6.0.0' \
     cmake \
-    conan==2.24.0 \
+    conan==2.20.1 \
     gcovr \
-    # We're adding pre-commit to this image as well,
-    # because clang-tidy workflow requires it
     pre-commit
 # Install LLVM tools
@@ -106,7 +110,6 @@ COPY --from=clio-tools \
     /usr/local/bin/git-cliff \
     /usr/local/bin/gh \
     /usr/local/bin/gdb \
-    /usr/local/bin/ninja \
     /usr/local/bin/
 WORKDIR /root

View File

@@ -5,18 +5,17 @@ It is used in [Clio Github Actions](https://github.com/XRPLF/clio/actions) but c
 The image is based on Ubuntu 20.04 and contains:
-- ccache 4.12.2
+- ccache 4.11.3
 - Clang 19
 - ClangBuildAnalyzer 1.6.0
-- Conan 2.24.0
+- Conan 2.20.1
-- Doxygen 1.16.1
+- Doxygen 1.12
 - GCC 15.2.0
-- GDB 17.1
+- GDB 16.3
-- gh 2.83.2
+- gh 2.74
-- git-cliff 2.11.0
+- git-cliff 2.9.1
-- mold 2.40.4
+- mold 2.40.1
-- Ninja 1.13.2
-- Python 3.8
+- Python 3.13
 - and some other useful tools
 Conan is set up to build Clio without any additional steps.

View File

@@ -3,13 +3,6 @@
 {% set sanitizer_opt_map = {"asan": "address", "tsan": "thread", "ubsan": "undefined"} %}
 {% set sanitizer = sanitizer_opt_map[sani] %}
-{% set sanitizer_b2_flags_map = {
-    "address": "context-impl=ucontext address-sanitizer=norecover",
-    "thread": "context-impl=ucontext thread-sanitizer=norecover",
-    "undefined": "undefined-sanitizer=norecover"
-} %}
-{% set sanitizer_b2_flags_str = sanitizer_b2_flags_map[sanitizer] %}
 {% set sanitizer_build_flags_str = "-fsanitize=" ~ sanitizer ~ " -g -O1 -fno-omit-frame-pointer" %}
 {% set sanitizer_build_flags = sanitizer_build_flags_str.split(' ') %}
 {% set sanitizer_link_flags_str = "-fsanitize=" ~ sanitizer %}
@@ -18,8 +11,7 @@
 include({{ compiler }})
 [options]
-boost/*:extra_b2_flags="{{ sanitizer_b2_flags_str }}"
-boost/*:without_context=False
+boost/*:extra_b2_flags="cxxflags=\"{{ sanitizer_build_flags_str }}\" linkflags=\"{{ sanitizer_link_flags_str }}\""
 boost/*:without_stacktrace=True
 [conf]
@@ -28,10 +20,4 @@ tools.build:cxxflags+={{ sanitizer_build_flags }}
 tools.build:exelinkflags+={{ sanitizer_link_flags }}
 tools.build:sharedlinkflags+={{ sanitizer_link_flags }}
-{% if sanitizer == "address" %}
-tools.build:defines+=["BOOST_USE_ASAN", "BOOST_USE_UCONTEXT"]
-{% elif sanitizer == "thread" %}
-tools.build:defines+=["BOOST_USE_TSAN", "BOOST_USE_UCONTEXT"]
-{% endif %}
-tools.info.package_id:confs+=["tools.build:cflags", "tools.build:cxxflags", "tools.build:exelinkflags", "tools.build:sharedlinkflags", "tools.build:defines"]
+tools.info.package_id:confs+=["tools.build:cflags", "tools.build:cxxflags", "tools.build:exelinkflags", "tools.build:sharedlinkflags"]
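As the `{% set %}` lines show, these profiles are Jinja2 templates rendered with `compiler` and `sani` variables. A minimal rendering sketch; the harness and the one-line template body are illustrative stand-ins, not the real profile:

```python
from jinja2 import Template

# Map the short sanitizer name to the -fsanitize value, the same way
# sanitizer_opt_map does in the profile template above.
profile = Template(
    '{% set m = {"asan": "address", "tsan": "thread", "ubsan": "undefined"} %}'
    "-fsanitize={{ m[sani] }} -g -O1 -fno-omit-frame-pointer"
)
print(profile.render(sani="asan"))
# -> -fsanitize=address -g -O1 -fno-omit-frame-pointer
```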

View File

@@ -8,7 +8,7 @@ ARG UBUNTU_VERSION
 ARG GCC_MAJOR_VERSION
-ARG BUILD_VERSION=0
+ARG BUILD_VERSION=1
 ARG DEBIAN_FRONTEND=noninteractive
 ARG TARGETARCH
@@ -34,7 +34,6 @@ RUN wget --progress=dot:giga https://gcc.gnu.org/pub/gcc/releases/gcc-$GCC_VERSI
 WORKDIR /gcc-$GCC_VERSION
 RUN ./contrib/download_prerequisites
-# hadolint ignore=DL3059
 RUN mkdir /gcc-build
 WORKDIR /gcc-build
 RUN /gcc-$GCC_VERSION/configure \

View File

@@ -1,6 +1,6 @@
 services:
   clio_develop:
-    image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696
+    image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
     volumes:
       - clio_develop_conan_data:/root/.conan2/p
       - clio_develop_ccache:/root/.ccache

View File

@@ -2,7 +2,7 @@
 script_dir=$(dirname $0)
-pushd $script_dir >/dev/null
+pushd $script_dir > /dev/null
 function start_container {
     if [ -z "$(docker ps -q -f name=clio_develop)" ]; then
@@ -41,26 +41,21 @@ EOF
 }
 case $1 in
-    -h | --help)
-        print_help
-        ;;
+    -h|--help)
+        print_help ;;
-    -t | --terminal)
-        open_terminal
-        ;;
+    -t|--terminal)
+        open_terminal ;;
-    -s | --stop)
-        stop_container
-        ;;
+    -s|--stop)
+        stop_container ;;
     -*)
         echo "Unknown option: $1"
-        print_help
-        ;;
+        print_help ;;
     *)
-        run "$@"
-        ;;
+        run "$@" ;;
 esac
-popd >/dev/null
+popd > /dev/null

View File

@@ -1,38 +0,0 @@
ARG GHCR_REPO=invalid
FROM ${GHCR_REPO}/clio-tools:latest AS clio-tools
# We're using Ubuntu 24.04 to have a more recent version of Python
FROM ubuntu:24.04
ARG DEBIAN_FRONTEND=noninteractive
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
# hadolint ignore=DL3002
USER root
WORKDIR /root
# Install common tools and dependencies
RUN apt-get update \
&& apt-get install -y --no-install-recommends --no-install-suggests \
curl \
git \
libatomic1 \
software-properties-common \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# Install Python tools
RUN apt-get update \
&& apt-get install -y --no-install-recommends --no-install-suggests \
python3 \
python3-pip \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
RUN pip install -q --no-cache-dir --break-system-packages \
pre-commit
COPY --from=clio-tools \
/usr/local/bin/doxygen \
/usr/local/bin/


@@ -8,10 +8,11 @@ ARG TARGETARCH
SHELL ["/bin/bash", "-o", "pipefail", "-c"] SHELL ["/bin/bash", "-o", "pipefail", "-c"]
ARG BUILD_VERSION=0 ARG BUILD_VERSION=2
RUN apt-get update \ RUN apt-get update \
&& apt-get install -y --no-install-recommends --no-install-suggests \ && apt-get install -y --no-install-recommends --no-install-suggests \
ninja-build \
python3 \ python3 \
python3-pip \ python3-pip \
software-properties-common \ software-properties-common \
@@ -23,16 +24,7 @@ RUN apt-get update \
WORKDIR /tmp WORKDIR /tmp
ARG NINJA_VERSION=1.13.2 ARG MOLD_VERSION=2.40.1
RUN wget --progress=dot:giga "https://github.com/ninja-build/ninja/archive/refs/tags/v${NINJA_VERSION}.tar.gz" \
&& tar xf "v${NINJA_VERSION}.tar.gz" \
&& cd "ninja-${NINJA_VERSION}" \
&& ./configure.py --bootstrap \
&& mv ninja /usr/local/bin/ninja \
&& rm -rf /tmp/* /var/tmp/*
ARG MOLD_VERSION=2.40.4
RUN wget --progress=dot:giga "https://github.com/rui314/mold/archive/refs/tags/v${MOLD_VERSION}.tar.gz" \ RUN wget --progress=dot:giga "https://github.com/rui314/mold/archive/refs/tags/v${MOLD_VERSION}.tar.gz" \
&& tar xf "v${MOLD_VERSION}.tar.gz" \ && tar xf "v${MOLD_VERSION}.tar.gz" \
&& cd "mold-${MOLD_VERSION}" \ && cd "mold-${MOLD_VERSION}" \
@@ -42,7 +34,7 @@ RUN wget --progress=dot:giga "https://github.com/rui314/mold/archive/refs/tags/v
&& ninja install \ && ninja install \
&& rm -rf /tmp/* /var/tmp/* && rm -rf /tmp/* /var/tmp/*
ARG CCACHE_VERSION=4.12.2 ARG CCACHE_VERSION=4.11.3
RUN wget --progress=dot:giga "https://github.com/ccache/ccache/releases/download/v${CCACHE_VERSION}/ccache-${CCACHE_VERSION}.tar.gz" \ RUN wget --progress=dot:giga "https://github.com/ccache/ccache/releases/download/v${CCACHE_VERSION}/ccache-${CCACHE_VERSION}.tar.gz" \
&& tar xf "ccache-${CCACHE_VERSION}.tar.gz" \ && tar xf "ccache-${CCACHE_VERSION}.tar.gz" \
&& cd "ccache-${CCACHE_VERSION}" \ && cd "ccache-${CCACHE_VERSION}" \
@@ -59,7 +51,7 @@ RUN apt-get update \
&& apt-get clean \ && apt-get clean \
&& rm -rf /var/lib/apt/lists/* && rm -rf /var/lib/apt/lists/*
ARG DOXYGEN_VERSION=1.16.1 ARG DOXYGEN_VERSION=1.12.0
RUN wget --progress=dot:giga "https://github.com/doxygen/doxygen/releases/download/Release_${DOXYGEN_VERSION//./_}/doxygen-${DOXYGEN_VERSION}.src.tar.gz" \ RUN wget --progress=dot:giga "https://github.com/doxygen/doxygen/releases/download/Release_${DOXYGEN_VERSION//./_}/doxygen-${DOXYGEN_VERSION}.src.tar.gz" \
&& tar xf "doxygen-${DOXYGEN_VERSION}.src.tar.gz" \ && tar xf "doxygen-${DOXYGEN_VERSION}.src.tar.gz" \
&& cd "doxygen-${DOXYGEN_VERSION}" \ && cd "doxygen-${DOXYGEN_VERSION}" \
@@ -79,13 +71,13 @@ RUN wget --progress=dot:giga "https://github.com/aras-p/ClangBuildAnalyzer/archi
&& ninja install \ && ninja install \
&& rm -rf /tmp/* /var/tmp/* && rm -rf /tmp/* /var/tmp/*
ARG GIT_CLIFF_VERSION=2.11.0 ARG GIT_CLIFF_VERSION=2.9.1
RUN wget --progress=dot:giga "https://github.com/orhun/git-cliff/releases/download/v${GIT_CLIFF_VERSION}/git-cliff-${GIT_CLIFF_VERSION}-x86_64-unknown-linux-musl.tar.gz" \ RUN wget --progress=dot:giga "https://github.com/orhun/git-cliff/releases/download/v${GIT_CLIFF_VERSION}/git-cliff-${GIT_CLIFF_VERSION}-x86_64-unknown-linux-musl.tar.gz" \
&& tar xf git-cliff-${GIT_CLIFF_VERSION}-x86_64-unknown-linux-musl.tar.gz \ && tar xf git-cliff-${GIT_CLIFF_VERSION}-x86_64-unknown-linux-musl.tar.gz \
&& mv git-cliff-${GIT_CLIFF_VERSION}/git-cliff /usr/local/bin/git-cliff \ && mv git-cliff-${GIT_CLIFF_VERSION}/git-cliff /usr/local/bin/git-cliff \
&& rm -rf /tmp/* /var/tmp/* && rm -rf /tmp/* /var/tmp/*
ARG GH_VERSION=2.83.2 ARG GH_VERSION=2.74.0
RUN wget --progress=dot:giga "https://github.com/cli/cli/releases/download/v${GH_VERSION}/gh_${GH_VERSION}_linux_${TARGETARCH}.tar.gz" \ RUN wget --progress=dot:giga "https://github.com/cli/cli/releases/download/v${GH_VERSION}/gh_${GH_VERSION}_linux_${TARGETARCH}.tar.gz" \
&& tar xf gh_${GH_VERSION}_linux_${TARGETARCH}.tar.gz \ && tar xf gh_${GH_VERSION}_linux_${TARGETARCH}.tar.gz \
&& mv gh_${GH_VERSION}_linux_${TARGETARCH}/bin/gh /usr/local/bin/gh \ && mv gh_${GH_VERSION}_linux_${TARGETARCH}/bin/gh /usr/local/bin/gh \
@@ -100,7 +92,7 @@ RUN apt-get update \
&& apt-get clean \ && apt-get clean \
&& rm -rf /var/lib/apt/lists/* && rm -rf /var/lib/apt/lists/*
ARG GDB_VERSION=17.1 ARG GDB_VERSION=16.3
RUN wget --progress=dot:giga "https://sourceware.org/pub/gdb/releases/gdb-${GDB_VERSION}.tar.gz" \ RUN wget --progress=dot:giga "https://sourceware.org/pub/gdb/releases/gdb-${GDB_VERSION}.tar.gz" \
&& tar xf "gdb-${GDB_VERSION}.tar.gz" \ && tar xf "gdb-${GDB_VERSION}.tar.gz" \
&& cd "gdb-${GDB_VERSION}" \ && cd "gdb-${GDB_VERSION}" \


@@ -15,7 +15,6 @@ EXTRACT_ANON_NSPACES = NO
SORT_MEMBERS_CTORS_1ST = YES SORT_MEMBERS_CTORS_1ST = YES
INPUT = ${SOURCE}/src INPUT = ${SOURCE}/src
USE_MDFILE_AS_MAINPAGE = ${SOURCE}/src/README.md
EXCLUDE_SYMBOLS = ${EXCLUDES} EXCLUDE_SYMBOLS = ${EXCLUDES}
RECURSIVE = YES RECURSIVE = YES
HAVE_DOT = ${USE_DOT} HAVE_DOT = ${USE_DOT}


@@ -97,14 +97,30 @@ Now you should be able to download the prebuilt dependencies (including `xrpl` p
#### Conan lockfile #### Conan lockfile
To achieve reproducible dependencies, we use a [Conan lockfile](https://docs.conan.io/2/tutorial/versioning/lockfiles.html). To achieve reproducible dependencies, we use [Conan lockfile](https://docs.conan.io/2/tutorial/versioning/lockfiles.html).
The `conan.lock` file in the repository contains a "snapshot" of the current dependencies. The `conan.lock` file in the repository contains a "snapshot" of the current dependencies.
It is implicitly used when running `conan` commands, you don't need to specify it. It is implicitly used when running `conan` commands, you don't need to specify it.
You have to update this file every time you add a new dependency or change a revision or version of an existing dependency. You have to update this file every time you add a new dependency or change a revision or version of an existing dependency.
To update a lockfile, run from the repository root: `./.github/scripts/conan/regenerate_lockfile.sh` > [!NOTE]
> Conan uses the local cache by default when creating a lockfile.
>
> To ensure that lockfile creation works the same way on all developer machines, you should clear the local cache before creating a new lockfile.
To create a new lockfile, run the following commands in the repository root:
```bash
conan remove '*' --confirm
rm conan.lock
# This ensures that the xrplf remote is the first to be consulted
conan remote add --force --index 0 xrplf https://conan.ripplex.io
conan lock create .
```
> [!NOTE]
> If some dependencies are exclusive to a particular OS, you may need to re-run the last command for them, adding `--profile:all <PROFILE>`.
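For instance, a full regeneration that also covers an OS-specific dependency might look like this (a sketch; the profile name is hypothetical):

```bash
conan remove '*' --confirm            # clear the local cache first
rm conan.lock
conan remote add --force --index 0 xrplf https://conan.ripplex.io
conan lock create .                   # base lockfile
conan lock create . --profile:all my-macos-profile  # hypothetical OS-specific profile
```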
## Building Clio ## Building Clio
@@ -161,7 +177,7 @@ There are several CMake options you can use to customize the build:
### Generating API docs for Clio ### Generating API docs for Clio
The API documentation for Clio is generated by [Doxygen](https://www.doxygen.nl/index.html). If you want to generate the API documentation when building Clio, make sure to install Doxygen 1.14.0 on your system. The API documentation for Clio is generated by [Doxygen](https://www.doxygen.nl/index.html). If you want to generate the API documentation when building Clio, make sure to install Doxygen 1.12.0 on your system.
To generate the API docs, please use CMake option `-Ddocs=ON` as described above and build the `docs` target. To generate the API docs, please use CMake option `-Ddocs=ON` as described above and build the `docs` target.
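In practice this amounts to something like the following (standard CMake invocations; the build directory name is arbitrary):

```bash
cmake -B build -Ddocs=ON          # configure with documentation enabled
cmake --build build --target docs # generate the API docs with Doxygen
```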
@@ -175,7 +191,7 @@ Open the `index.html` file in your browser to see the documentation pages.
It is also possible to build Clio using [Docker](https://www.docker.com/) if you don't want to install all the dependencies on your machine. It is also possible to build Clio using [Docker](https://www.docker.com/) if you don't want to install all the dependencies on your machine.
```sh ```sh
docker run -it ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696 docker run -it ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
git clone https://github.com/XRPLF/clio git clone https://github.com/XRPLF/clio
cd clio cd clio
``` ```


@@ -89,14 +89,6 @@ This document provides a list of all available Clio configuration properties in
- **Constraints**: The minimum value is `1`. The maximum value is `4294967295`. - **Constraints**: The minimum value is `1`. The maximum value is `4294967295`.
- **Description**: Represents the number of threads that will be used for database operations. - **Description**: Represents the number of threads that will be used for database operations.
### database.cassandra.provider
- **Required**: True
- **Type**: string
- **Default value**: `cassandra`
- **Constraints**: The value must be one of the following: `cassandra`, `aws_keyspace`.
- **Description**: The specific database backend provider we are using.
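For reference, a config fragment selecting the Keyspaces provider might look like this (a minimal sketch; other required keys are elided):

```json
{
  "database": {
    "type": "cassandra",
    "cassandra": {
      "provider": "aws_keyspace"
    }
  }
}
```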
### database.cassandra.core_connections_per_host ### database.cassandra.core_connections_per_host
- **Required**: True - **Required**: True
@@ -293,7 +285,7 @@ This document provides a list of all available Clio configuration properties in
- **Required**: True - **Required**: True
- **Type**: int - **Type**: int
- **Default value**: `1000` - **Default value**: `1`
- **Constraints**: The minimum value is `1`. The maximum value is `4294967295`. - **Constraints**: The minimum value is `1`. The maximum value is `4294967295`.
- **Description**: The maximum size of the server's request queue. If set to `0`, this means there is no queue size limit. - **Description**: The maximum size of the server's request queue. If set to `0`, this means there is no queue size limit.
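As a sketch, bounding the request queue in the config (these keys also appear in the example config further below):

```json
{
  "server": {
    "ip": "0.0.0.0",
    "port": 51233,
    "max_queue_size": 1000
  }
}
```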
@@ -391,7 +383,7 @@ This document provides a list of all available Clio configuration properties in
- **Type**: double - **Type**: double
- **Default value**: `10` - **Default value**: `10`
- **Constraints**: The value must be a positive double number. - **Constraints**: The value must be a positive double number.
- **Description**: The number of seconds the server waits to shutdown gracefully. If Clio does not shutdown gracefully after the specified value, it will be killed instead. - **Description**: The number of milliseconds the server waits to shutdown gracefully. If Clio does not shutdown gracefully after the specified value, it will be killed instead.
### cache.num_diffs ### cache.num_diffs
@@ -441,30 +433,6 @@ This document provides a list of all available Clio configuration properties in
- **Constraints**: The value must be one of the following: `sync`, `async`, `none`. - **Constraints**: The value must be one of the following: `sync`, `async`, `none`.
- **Description**: The strategy used for Cache loading. - **Description**: The strategy used for Cache loading.
### cache.file.path
- **Required**: False
- **Type**: string
- **Default value**: None
- **Constraints**: None
- **Description**: The path to a file where the cache will be saved on shutdown and loaded from on startup. If the file can't be read, Clio will load the cache as usual (from the DB or from rippled).
### cache.file.max_sequence_age
- **Required**: True
- **Type**: int
- **Default value**: `5000`
- **Constraints**: None
- **Description**: The maximum allowed difference between the latest sequence in the DB and the one in the cache file. If the cache file is too old (its latest sequence is too low), Clio will refuse to use it.
### cache.file.async_save
- **Required**: True
- **Type**: boolean
- **Default value**: `False`
- **Constraints**: None
- **Description**: When false, Clio waits for cache saving to finish before shutting down. When true, cache saving runs in parallel with other shutdown operations.
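Putting the three `cache.file.*` properties together, a fragment might look like this (the values are the documented defaults, with the path taken from the example config):

```json
{
  "cache": {
    "file": {
      "path": "./cache.bin",
      "max_sequence_age": 5000,
      "async_save": false
    }
  }
}
```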
### log.channels.[].channel ### log.channels.[].channel
- **Required**: False - **Required**: False


@@ -951,7 +951,7 @@ span.arrowhead {
border-color: var(--primary-color); border-color: var(--primary-color);
} }
#nav-tree-contents > ul > li:first-child > div > a { #nav-tree ul li:first-child > div > a {
opacity: 0; opacity: 0;
pointer-events: none; pointer-events: none;
} }


@@ -61,7 +61,7 @@
"ip": "0.0.0.0", "ip": "0.0.0.0",
"port": 51233, "port": 51233,
// Max number of requests to queue up before rejecting further requests. // Max number of requests to queue up before rejecting further requests.
// Defaults to 1000 (use 0 to make the queue unbound). // Defaults to 0, which disables the limit.
"max_queue_size": 500, "max_queue_size": 500,
// If request contains header with authorization, Clio will check if it matches the prefix 'Password ' + this value's sha256 hash // If request contains header with authorization, Clio will check if it matches the prefix 'Password ' + this value's sha256 hash
// If matches, the request will be considered as admin request // If matches, the request will be considered as admin request
@@ -137,11 +137,7 @@
// "num_cursors_from_account": 3200, // Read the cursors from the account table until we have enough cursors to partition the ledger to load concurrently. // "num_cursors_from_account": 3200, // Read the cursors from the account table until we have enough cursors to partition the ledger to load concurrently.
"num_markers": 48, // The number of markers is the number of coroutines to load the cache concurrently. "num_markers": 48, // The number of markers is the number of coroutines to load the cache concurrently.
"page_fetch_size": 512, // The number of rows to load for each page. "page_fetch_size": 512, // The number of rows to load for each page.
"load": "async", // "sync" to load cache synchronously or "async" to load cache asynchronously or "none"/"no" to turn off the cache. "load": "async" // "sync" to load cache synchronously or "async" to load cache asynchronously or "none"/"no" to turn off the cache.
"file": {
"path": "./cache.bin",
"max_sequence_age": 5000
}
}, },
"prometheus": { "prometheus": {
"enabled": true, "enabled": true,


@@ -77,7 +77,7 @@ It's possible to configure `minimum`, `maximum` and `default` version like so:
All of the above are optional. All of the above are optional.
Clio will fallback to hardcoded defaults when these values are not specified in the config file, or if the configured values are outside of the minimum and maximum supported versions hardcoded in [src/rpc/common/APIVersion.hpp](../src/rpc/common/APIVersion.hpp). Clio will fallback to hardcoded defaults when these values are not specified in the config file, or if the configured values are outside of the minimum and maximum supported versions hardcoded in [src/rpc/common/APIVersion.h](../src/rpc/common/APIVersion.hpp).
> [!TIP] > [!TIP]
> See the [example-config.json](../docs/examples/config/example-config.json) for more details. > See the [example-config.json](../docs/examples/config/example-config.json) for more details.
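As an illustration, such a fragment could look like the following (the key names and values here are assumptions; the example config linked above is authoritative):

```json
{
  "api_version": {
    "min": 1,
    "max": 3,
    "default": 1
  }
}
```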


@@ -36,45 +36,45 @@ EOF
exit 0 exit 0
fi fi
# Check version of doxygen is at least 1.14 # Check version of doxygen is at least 1.12
version=$($DOXYGEN --version | grep -o '[0-9\.]*') version=$($DOXYGEN --version | grep -o '[0-9\.]*')
if [[ "1.14.0" > "$version" ]]; then if [[ "1.12.0" > "$version" ]]; then
# No hard error if doxygen version is not the one we want - let CI deal with it # No hard error if doxygen version is not the one we want - let CI deal with it
cat <<EOF cat <<EOF
ERROR ERROR
----------------------------------------------------------------------------- -----------------------------------------------------------------------------
A minimum of version 1.14 of $(which doxygen) is required. A minimum of version 1.12 of `which doxygen` is required.
Your version is $version. Please upgrade it. Your version is $version. Please upgrade it for next time.
Your changes may fail CI checks. Your changes may fail to pass CI once pushed.
----------------------------------------------------------------------------- -----------------------------------------------------------------------------
EOF EOF
exit 0 exit 0
fi fi
mkdir -p ${DOCDIR} >/dev/null 2>&1 mkdir -p ${DOCDIR} > /dev/null 2>&1
pushd ${DOCDIR} >/dev/null 2>&1 pushd ${DOCDIR} > /dev/null 2>&1
cat ${ROOT}/docs/Doxyfile | cat ${ROOT}/docs/Doxyfile | \
sed \ sed \
-e "s/\${LINT}/YES/" \ -e "s/\${LINT}/YES/" \
-e "s/\${WARN_AS_ERROR}/NO/" \ -e "s/\${WARN_AS_ERROR}/NO/" \
-e "s!\${SOURCE}!${ROOT}!" \ -e "s!\${SOURCE}!${ROOT}!" \
-e "s/\${USE_DOT}/NO/" \ -e "s/\${USE_DOT}/NO/" \
-e "s/\${EXCLUDES}/impl/" | -e "s/\${EXCLUDES}/impl/" \
${DOXYGEN} - 2>${TMPFILE} 1>/dev/null | ${DOXYGEN} - 2> ${TMPFILE} 1> /dev/null
# We don't want to check for default values and typedefs as well as for member variables # We don't want to check for default values and typedefs as well as for member variables
OUT=$(cat ${TMPFILE} | OUT=$(cat ${TMPFILE} \
grep -v "=default" | | grep -v "=default" \
grep -v "\(variable\)" | | grep -v "\(variable\)" \
grep -v "\(typedef\)") | grep -v "\(typedef\)")
rm -rf ${TMPFILE} >/dev/null 2>&1 rm -rf ${TMPFILE} > /dev/null 2>&1
popd >/dev/null 2>&1 popd > /dev/null 2>&1
if [[ ! -z "$OUT" ]]; then if [[ ! -z "$OUT" ]]; then
cat <<EOF cat <<EOF


@@ -23,10 +23,10 @@ fix_includes() {
file_path_fixed="${file_path}.tmp.fixed" file_path_fixed="${file_path}.tmp.fixed"
# Make all includes to be <...> style # Make all includes to be <...> style
sed -E 's|#include "(.*)"|#include <\1>|g' "$file_path" >"$file_path_all_global" sed -E 's|#include "(.*)"|#include <\1>|g' "$file_path" > "$file_path_all_global"
# Make local includes to be "..." style # Make local includes to be "..." style
sed -E "s|#include <(($main_src_dirs)/.*)>|#include \"\1\"|g" "$file_path_all_global" >"$file_path_fixed" sed -E "s|#include <(($main_src_dirs)/.*)>|#include \"\1\"|g" "$file_path_all_global" > "$file_path_fixed"
rm "$file_path_all_global" rm "$file_path_all_global"
# Check if the temporary file is different from the original file # Check if the temporary file is different from the original file
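To illustrate the two rewrite rules (hypothetical file contents; `rpc` is assumed to be listed in `$main_src_dirs`):

```cpp
// Original includes:
#include "rpc/Errors.hpp"  // local header, quote style
#include "boost/json.hpp"  // third-party header, wrongly quoted

// After the first rule, every include becomes angle-bracket style:
#include <rpc/Errors.hpp>
#include <boost/json.hpp>

// After the second rule, paths under the main source dirs return to quote style:
#include "rpc/Errors.hpp"
#include <boost/json.hpp>
```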


@@ -4,6 +4,7 @@ import argparse
import re import re
from pathlib import Path from pathlib import Path
PATTERN = r'R"JSON\((.*?)\)JSON"' PATTERN = r'R"JSON\((.*?)\)JSON"'
@@ -39,22 +40,16 @@ def fix_colon_spacing(cpp_content: str) -> str:
raw_json = match.group(1) raw_json = match.group(1)
raw_json = re.sub(r'":\n\s*(\[|\{)', r'": \1', raw_json) raw_json = re.sub(r'":\n\s*(\[|\{)', r'": \1', raw_json)
return f'R"JSON({raw_json})JSON"' return f'R"JSON({raw_json})JSON"'
return re.sub(PATTERN, replace_json, cpp_content, flags=re.DOTALL) return re.sub(PATTERN, replace_json, cpp_content, flags=re.DOTALL)
def fix_indentation(cpp_content: str) -> str: def fix_indentation(cpp_content: str) -> str:
if "JSON(" not in cpp_content:
return cpp_content
lines = cpp_content.splitlines() lines = cpp_content.splitlines()
ends_with_newline = cpp_content.endswith("\n")
def find_indentation(line: str) -> int: def find_indentation(line: str) -> int:
return len(line) - len(line.lstrip()) return len(line) - len(line.lstrip())
for line_num, (line, next_line) in enumerate(zip(lines[:-1], lines[1:])): for (line_num, (line, next_line)) in enumerate(zip(lines[:-1], lines[1:])):
if "JSON(" in line and ")JSON" not in line: if "JSON(" in line and ")JSON" not in line:
indent = find_indentation(line) indent = find_indentation(line)
next_indent = find_indentation(next_line) next_indent = find_indentation(next_line)
@@ -69,17 +64,9 @@ def fix_indentation(cpp_content: str) -> str:
if ")JSON" in lines[i]: if ")JSON" in lines[i]:
lines[i] = " " * indent + lines[i].lstrip() lines[i] = " " * indent + lines[i].lstrip()
break break
lines[i] = ( lines[i] = lines[i][by_how_much:] if by_how_much > 0 else " " * (-by_how_much) + lines[i]
lines[i][by_how_much:]
if by_how_much > 0
else " " * (-by_how_much) + lines[i]
)
result = "\n".join(lines) return "\n".join(lines) + "\n"
if ends_with_newline:
result += "\n"
return result
def process_file(file_path: Path, dry_run: bool) -> bool: def process_file(file_path: Path, dry_run: bool) -> bool:


@@ -4,7 +4,7 @@
# #
set -e -o pipefail set -e -o pipefail
if ! command -v gofmt &>/dev/null; then if ! command -v gofmt &> /dev/null ; then
echo "gofmt not installed or available in the PATH" >&2 echo "gofmt not installed or available in the PATH" >&2
exit 1 exit 1
fi fi


@@ -1,4 +1,5 @@
#!/bin/bash #!/bin/sh
# git for-each-ref refs/tags # see which tags are annotated and which are lightweight. Annotated tags are "tag" objects. # git for-each-ref refs/tags # see which tags are annotated and which are lightweight. Annotated tags are "tag" objects.
# # Set these so your commits and tags are always signed # # Set these so your commits and tags are always signed
@@ -6,7 +7,7 @@
# git config tag.gpgsign true # git config tag.gpgsign true
verify_commit_signed() { verify_commit_signed() {
if git verify-commit HEAD &>/dev/null; then if git verify-commit HEAD &> /dev/null; then
: :
# echo "HEAD commit seems signed..." # echo "HEAD commit seems signed..."
else else
@@ -16,7 +17,7 @@ verify_commit_signed() {
} }
verify_tag() { verify_tag() {
if git describe --exact-match --tags HEAD &>/dev/null; then if git describe --exact-match --tags HEAD &> /dev/null; then
: # You might be ok to push : # You might be ok to push
# echo "Tag is annotated." # echo "Tag is annotated."
return 0 return 0
@@ -27,7 +28,7 @@ verify_tag() {
} }
verify_tag_signed() { verify_tag_signed() {
if git verify-tag "$version" &>/dev/null; then if git verify-tag "$version" &> /dev/null ; then
: # ok, I guess we'll let you push : # ok, I guess we'll let you push
# echo "Tag appears signed" # echo "Tag appears signed"
return 0 return 0
@@ -39,11 +40,11 @@ verify_tag_signed() {
} }
# Check some things if we're pushing a branch called "release/" # Check some things if we're pushing a branch called "release/"
if echo "$PRE_COMMIT_REMOTE_BRANCH" | grep ^refs\/heads\/release\/ &>/dev/null; then if echo "$PRE_COMMIT_REMOTE_BRANCH" | grep ^refs\/heads\/release\/ &> /dev/null ; then
version=$(git tag --points-at HEAD) version=$(git tag --points-at HEAD)
echo "Looks like you're trying to push a $version release..." echo "Looks like you're trying to push a $version release..."
echo "Making sure you've signed and tagged it." echo "Making sure you've signed and tagged it."
if verify_commit_signed && verify_tag && verify_tag_signed; then if verify_commit_signed && verify_tag && verify_tag_signed ; then
: # Ok, I guess you can push : # Ok, I guess you can push
else else
exit 1 exit 1
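For illustration, a release push that satisfies all three checks could be prepared like this (the tag and branch names are hypothetical):

```bash
git config commit.gpgsign true   # sign commits by default
git config tag.gpgsign true      # sign tags by default

git tag -s 2.6.0 -m "clio 2.6.0" # signed, annotated tag on HEAD
git push origin release/2.6.0    # a release/ branch push triggers the checks
```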


@@ -2,6 +2,7 @@ add_subdirectory(util)
add_subdirectory(data) add_subdirectory(data)
add_subdirectory(cluster) add_subdirectory(cluster)
add_subdirectory(etl) add_subdirectory(etl)
add_subdirectory(etlng)
add_subdirectory(feed) add_subdirectory(feed)
add_subdirectory(rpc) add_subdirectory(rpc)
add_subdirectory(web) add_subdirectory(web)


@@ -1,20 +0,0 @@
# Clio API server
## Introduction
Clio is an XRP Ledger API server optimized for RPC calls over WebSocket or JSON-RPC.
It stores validated historical ledger and transaction data in a more space-efficient format, and uses up to 4 times
less space than [rippled](https://github.com/XRPLF/rippled).
Clio can be configured to store data in [Apache Cassandra](https://cassandra.apache.org/_/index.html) or
[ScyllaDB](https://www.scylladb.com/), enabling scalable read throughput. Multiple Clio nodes can share
access to the same dataset, which allows for a highly available cluster of Clio nodes without the need for redundant
data storage or computation.
## Develop
As you prepare to develop code for Clio, please be sure you are aware of our current
[Contribution guidelines](https://github.com/XRPLF/clio/blob/develop/CONTRIBUTING.md).
Read about @ref "rpc" carefully to learn more about writing your own handlers for Clio.


@@ -5,9 +5,10 @@ target_link_libraries(
clio_app clio_app
PUBLIC clio_cluster PUBLIC clio_cluster
clio_etl clio_etl
clio_etlng
clio_feed clio_feed
clio_migration
clio_rpc
clio_web clio_web
clio_rpc
clio_migration
PRIVATE Boost::program_options PRIVATE Boost::program_options
) )


@@ -77,10 +77,7 @@ CliArgs::parse(int argc, char const* argv[])
} }
if (parsed.contains("version")) { if (parsed.contains("version")) {
std::cout << util::build::getClioFullVersionString() << '\n' std::cout << util::build::getClioFullVersionString() << '\n';
<< "Git commit hash: " << util::build::getGitCommitHash() << '\n'
<< "Git build branch: " << util::build::getGitBuildBranch() << '\n'
<< "Build date: " << util::build::getBuildDate() << '\n';
return Action{Action::Exit{EXIT_SUCCESS}}; return Action{Action::Exit{EXIT_SUCCESS}};
} }


@@ -25,10 +25,11 @@
#include "data/AmendmentCenter.hpp" #include "data/AmendmentCenter.hpp"
#include "data/BackendFactory.hpp" #include "data/BackendFactory.hpp"
#include "data/LedgerCache.hpp" #include "data/LedgerCache.hpp"
#include "data/LedgerCacheSaver.hpp"
#include "etl/ETLService.hpp" #include "etl/ETLService.hpp"
#include "etl/LoadBalancer.hpp" #include "etl/LoadBalancer.hpp"
#include "etl/NetworkValidatedLedgers.hpp" #include "etl/NetworkValidatedLedgers.hpp"
#include "etlng/LoadBalancer.hpp"
#include "etlng/LoadBalancerInterface.hpp"
#include "feed/SubscriptionManager.hpp" #include "feed/SubscriptionManager.hpp"
#include "migration/MigrationInspectorFactory.hpp" #include "migration/MigrationInspectorFactory.hpp"
#include "rpc/Counters.hpp" #include "rpc/Counters.hpp"
@@ -56,7 +57,6 @@
#include <cstdlib> #include <cstdlib>
#include <memory> #include <memory>
#include <optional> #include <optional>
#include <string>
#include <thread> #include <thread>
#include <utility> #include <utility>
#include <vector> #include <vector>
@@ -91,7 +91,6 @@ ClioApplication::ClioApplication(util::config::ClioConfigDefinition const& confi
{ {
LOG(util::LogService::info()) << "Clio version: " << util::build::getClioFullVersionString(); LOG(util::LogService::info()) << "Clio version: " << util::build::getClioFullVersionString();
signalsHandler_.subscribeToStop([this]() { appStopper_.stop(); }); signalsHandler_.subscribeToStop([this]() { appStopper_.stop(); });
appStopper_.setOnComplete([this]() { signalsHandler_.notifyGracefulShutdownComplete(); });
} }
int int
@@ -100,23 +99,20 @@ ClioApplication::run(bool const useNgWebServer)
auto const threads = config_.get<uint16_t>("io_threads"); auto const threads = config_.get<uint16_t>("io_threads");
LOG(util::LogService::info()) << "Number of io threads = " << threads; LOG(util::LogService::info()) << "Number of io threads = " << threads;
// Similarly we need a context to run ETL on
// In the future we can remove the raw ioc and use ctx instead
// This context should be above ioc because its reference is getting into tasks inside ioc
util::async::CoroExecutionContext ctx{threads};
// IO context to handle all incoming requests, as well as other things. // IO context to handle all incoming requests, as well as other things.
// This is not the only io context in the application. // This is not the only io context in the application.
boost::asio::io_context ioc{threads}; boost::asio::io_context ioc{threads};
// Similarly we need a context to run ETLng on
// In the future we can remove the raw ioc and use ctx instead
util::async::CoroExecutionContext ctx{threads};
// Rate limiter, to prevent abuse // Rate limiter, to prevent abuse
auto whitelistHandler = web::dosguard::WhitelistHandler{config_}; auto whitelistHandler = web::dosguard::WhitelistHandler{config_};
auto const dosguardWeights = web::dosguard::Weights::make(config_); auto const dosguardWeights = web::dosguard::Weights::make(config_);
auto dosGuard = web::dosguard::DOSGuard{config_, whitelistHandler, dosguardWeights}; auto dosGuard = web::dosguard::DOSGuard{config_, whitelistHandler, dosguardWeights};
auto sweepHandler = web::dosguard::IntervalSweepHandler{config_, ioc, dosGuard}; auto sweepHandler = web::dosguard::IntervalSweepHandler{config_, ioc, dosGuard};
auto cache = data::LedgerCache{}; auto cache = data::LedgerCache{};
auto cacheSaver = data::LedgerCacheSaver{config_, cache};
// Interface to the database // Interface to the database
auto backend = data::makeBackend(config_, cache); auto backend = data::makeBackend(config_, cache);
@@ -146,12 +142,20 @@ ClioApplication::run(bool const useNgWebServer)
// ETL uses the balancer to extract data. // ETL uses the balancer to extract data.
// The server uses the balancer to forward RPCs to a rippled node. // The server uses the balancer to forward RPCs to a rippled node.
// The balancer itself publishes to streams (transactions_proposed and accounts_proposed) // The balancer itself publishes to streams (transactions_proposed and accounts_proposed)
auto balancer = etl::LoadBalancer::makeLoadBalancer( auto balancer = [&] -> std::shared_ptr<etlng::LoadBalancerInterface> {
config_, ioc, backend, subscriptions, std::make_unique<util::MTRandomGenerator>(), ledgers if (config_.get<bool>("__ng_etl")) {
); return etlng::LoadBalancer::makeLoadBalancer(
config_, ioc, backend, subscriptions, std::make_unique<util::MTRandomGenerator>(), ledgers
);
}
return etl::LoadBalancer::makeLoadBalancer(
config_, ioc, backend, subscriptions, std::make_unique<util::MTRandomGenerator>(), ledgers
);
}();
// ETL is responsible for writing and publishing to streams. In read-only mode, ETL only publishes // ETL is responsible for writing and publishing to streams. In read-only mode, ETL only publishes
auto etl = etl::ETLService::makeETLService(config_, ctx, backend, subscriptions, balancer, ledgers); auto etl = etl::ETLService::makeETLService(config_, ioc, ctx, backend, subscriptions, balancer, ledgers);
auto workQueue = rpc::WorkQueue::makeWorkQueue(config_); auto workQueue = rpc::WorkQueue::makeWorkQueue(config_);
auto counters = rpc::Counters::makeCounters(workQueue); auto counters = rpc::Counters::makeCounters(workQueue);
@@ -183,7 +187,7 @@ ClioApplication::run(bool const useNgWebServer)
return EXIT_FAILURE; return EXIT_FAILURE;
} }
httpServer->onGet("/metrics", MetricsHandler{adminVerifier, workQueue}); httpServer->onGet("/metrics", MetricsHandler{adminVerifier});
httpServer->onGet("/health", HealthCheckHandler{}); httpServer->onGet("/health", HealthCheckHandler{});
httpServer->onGet("/cache_state", CacheStateHandler{cache}); httpServer->onGet("/cache_state", CacheStateHandler{cache});
auto requestHandler = RequestHandler{adminVerifier, handler}; auto requestHandler = RequestHandler{adminVerifier, handler};
@@ -197,7 +201,7 @@ ClioApplication::run(bool const useNgWebServer)
} }
appStopper_.setOnStop( appStopper_.setOnStop(
Stopper::makeOnStopCallback(httpServer.value(), *balancer, *etl, *subscriptions, *backend, cacheSaver, ioc) Stopper::makeOnStopCallback(httpServer.value(), *balancer, *etl, *subscriptions, *backend, ioc)
); );
// Blocks until stopped. // Blocks until stopped.
@@ -212,9 +216,6 @@ ClioApplication::run(bool const useNgWebServer)
auto handler = std::make_shared<web::RPCServerHandler<RPCEngineType>>(config_, backend, rpcEngine, etl, dosGuard); auto handler = std::make_shared<web::RPCServerHandler<RPCEngineType>>(config_, backend, rpcEngine, etl, dosGuard);
auto const httpServer = web::makeHttpServer(config_, ioc, dosGuard, handler, cache); auto const httpServer = web::makeHttpServer(config_, ioc, dosGuard, handler, cache);
appStopper_.setOnStop(
Stopper::makeOnStopCallback(*httpServer, *balancer, *etl, *subscriptions, *backend, cacheSaver, ioc)
);
// Blocks until stopped. // Blocks until stopped.
// When stopped, shared_ptrs fall out of scope // When stopped, shared_ptrs fall out of scope


@@ -38,18 +38,7 @@ Stopper::~Stopper()
void void
Stopper::setOnStop(std::function<void(boost::asio::yield_context)> cb) Stopper::setOnStop(std::function<void(boost::asio::yield_context)> cb)
{ {
util::spawn(ctx_, [this, cb = std::move(cb)](auto yield) { util::spawn(ctx_, std::move(cb));
cb(yield);
if (onCompleteCallback_)
onCompleteCallback_();
});
}
void
Stopper::setOnComplete(std::function<void()> cb)
{
onCompleteCallback_ = std::move(cb);
} }
void void


@@ -20,13 +20,12 @@
#pragma once #pragma once
#include "data/BackendInterface.hpp" #include "data/BackendInterface.hpp"
#include "data/LedgerCacheSaver.hpp" #include "etlng/ETLServiceInterface.hpp"
#include "etl/ETLServiceInterface.hpp" #include "etlng/LoadBalancerInterface.hpp"
#include "etl/LoadBalancerInterface.hpp"
#include "feed/SubscriptionManagerInterface.hpp" #include "feed/SubscriptionManagerInterface.hpp"
#include "util/CoroutineGroup.hpp" #include "util/CoroutineGroup.hpp"
#include "util/log/Logger.hpp" #include "util/log/Logger.hpp"
#include "web/interface/Concepts.hpp" #include "web/ng/Server.hpp"
#include <boost/asio/executor_work_guard.hpp> #include <boost/asio/executor_work_guard.hpp>
#include <boost/asio/io_context.hpp> #include <boost/asio/io_context.hpp>
@@ -43,7 +42,6 @@ namespace app {
class Stopper { class Stopper {
boost::asio::io_context ctx_; boost::asio::io_context ctx_;
std::thread worker_; std::thread worker_;
std::function<void()> onCompleteCallback_;
public: public:
/** /**
@@ -59,14 +57,6 @@ public:
void void
setOnStop(std::function<void(boost::asio::yield_context)> cb); setOnStop(std::function<void(boost::asio::yield_context)> cb);
/**
* @brief Set the callback to be called when graceful shutdown completes.
*
* @param cb The callback to be called when shutdown completes.
*/
void
setOnComplete(std::function<void()> cb);
/** /**
* @brief Stop the application and run the shutdown tasks. * @brief Stop the application and run the shutdown tasks.
*/ */
@@ -81,25 +71,21 @@ public:
* @param etl The ETL service to stop. * @param etl The ETL service to stop.
* @param subscriptions The subscription manager to stop. * @param subscriptions The subscription manager to stop.
* @param backend The backend to stop. * @param backend The backend to stop.
* @param cacheSaver The ledger cache saver
* @param ioc The io_context to stop. * @param ioc The io_context to stop.
* @return The callback to be called on application stop. * @return The callback to be called on application stop.
*/ */
template <web::SomeServer ServerType, data::SomeLedgerCacheSaver LedgerCacheSaverType> template <web::ng::SomeServer ServerType>
static std::function<void(boost::asio::yield_context)> static std::function<void(boost::asio::yield_context)>
makeOnStopCallback( makeOnStopCallback(
ServerType& server, ServerType& server,
etl::LoadBalancerInterface& balancer, etlng::LoadBalancerInterface& balancer,
etl::ETLServiceInterface& etl, etlng::ETLServiceInterface& etl,
feed::SubscriptionManagerInterface& subscriptions, feed::SubscriptionManagerInterface& subscriptions,
data::BackendInterface& backend, data::BackendInterface& backend,
LedgerCacheSaverType& cacheSaver,
boost::asio::io_context& ioc boost::asio::io_context& ioc
) )
{ {
return [&](boost::asio::yield_context yield) { return [&](boost::asio::yield_context yield) {
cacheSaver.save();
util::CoroutineGroup coroutineGroup{yield}; util::CoroutineGroup coroutineGroup{yield};
coroutineGroup.spawn(yield, [&server](auto innerYield) { coroutineGroup.spawn(yield, [&server](auto innerYield) {
server.stop(innerYield); server.stop(innerYield);
@@ -120,8 +106,6 @@ public:
backend.waitForWritesToFinish(); backend.waitForWritesToFinish();
LOG(util::LogService::info()) << "Backend writes finished"; LOG(util::LogService::info()) << "Backend writes finished";
cacheSaver.waitToFinish();
ioc.stop(); ioc.stop();
LOG(util::LogService::info()) << "io_context stopped"; LOG(util::LogService::info()) << "io_context stopped";


@@ -19,10 +19,7 @@
#include "app/WebHandlers.hpp" #include "app/WebHandlers.hpp"
#include "rpc/Errors.hpp"
#include "rpc/WorkQueue.hpp"
#include "util/Assert.hpp" #include "util/Assert.hpp"
#include "util/CoroutineGroup.hpp"
#include "util/prometheus/Http.hpp" #include "util/prometheus/Http.hpp"
#include "web/AdminVerificationStrategy.hpp" #include "web/AdminVerificationStrategy.hpp"
#include "web/SubscriptionContextInterface.hpp" #include "web/SubscriptionContextInterface.hpp"
@@ -34,7 +31,6 @@
#include <boost/asio/spawn.hpp> #include <boost/asio/spawn.hpp>
#include <boost/beast/http/status.hpp> #include <boost/beast/http/status.hpp>
#include <functional>
#include <memory> #include <memory>
#include <optional> #include <optional>
#include <string> #include <string>
@@ -80,8 +76,8 @@ DisconnectHook::operator()(web::ng::Connection const& connection)
dosguard_.get().decrement(connection.ip()); dosguard_.get().decrement(connection.ip());
} }
MetricsHandler::MetricsHandler(std::shared_ptr<web::AdminVerificationStrategy> adminVerifier, rpc::WorkQueue& workQueue) MetricsHandler::MetricsHandler(std::shared_ptr<web::AdminVerificationStrategy> adminVerifier)
: adminVerifier_{std::move(adminVerifier)}, workQueue_{std::ref(workQueue)} : adminVerifier_{std::move(adminVerifier)}
{ {
} }
@@ -90,45 +86,19 @@ MetricsHandler::operator()(
web::ng::Request const& request, web::ng::Request const& request,
web::ng::ConnectionMetadata& connectionMetadata, web::ng::ConnectionMetadata& connectionMetadata,
web::SubscriptionContextPtr, web::SubscriptionContextPtr,
boost::asio::yield_context yield boost::asio::yield_context
) )
{ {
std::optional<web::ng::Response> response; auto const maybeHttpRequest = request.asHttpRequest();
util::CoroutineGroup coroutineGroup{yield, 1}; ASSERT(maybeHttpRequest.has_value(), "Got not a http request in Get");
auto const onTaskComplete = coroutineGroup.registerForeign(yield); auto const& httpRequest = maybeHttpRequest->get();
ASSERT(onTaskComplete.has_value(), "Coroutine group can't be full");
bool const postSuccessful = workQueue_.get().postCoro( // FIXME(#1702): Using web server thread to handle prometheus request. Better to post on work queue.
[this, &request, &response, &onTaskComplete = onTaskComplete.value(), &connectionMetadata]( auto maybeResponse = util::prometheus::handlePrometheusRequest(
boost::asio::yield_context httpRequest, adminVerifier_->isAdmin(httpRequest, connectionMetadata.ip())
) mutable {
auto const maybeHttpRequest = request.asHttpRequest();
ASSERT(maybeHttpRequest.has_value(), "Got not a http request in Get");
auto const& httpRequest = maybeHttpRequest->get();
auto maybeResponse = util::prometheus::handlePrometheusRequest(
httpRequest, adminVerifier_->isAdmin(httpRequest, connectionMetadata.ip())
);
ASSERT(maybeResponse.has_value(), "Got unexpected request for Prometheus");
response = web::ng::Response{std::move(maybeResponse).value(), request};
// notify the coroutine group that the foreign task is done
onTaskComplete();
},
/* isWhiteListed= */ true,
rpc::WorkQueue::Priority::High
); );
ASSERT(maybeResponse.has_value(), "Got unexpected request for Prometheus");
if (!postSuccessful) { return web::ng::Response{std::move(maybeResponse).value(), request};
return web::ng::Response{
boost::beast::http::status::too_many_requests, rpc::makeError(rpc::RippledError::rpcTOO_BUSY), request
};
}
// Put the coroutine to sleep until the foreign task is done
coroutineGroup.asyncWait(yield);
ASSERT(response.has_value(), "Woke up coroutine without setting response");
return std::move(response).value();
} }
web::ng::Response web::ng::Response


@@ -21,7 +21,6 @@
#include "data/LedgerCacheInterface.hpp" #include "data/LedgerCacheInterface.hpp"
#include "rpc/Errors.hpp" #include "rpc/Errors.hpp"
#include "rpc/WorkQueue.hpp"
#include "util/log/Logger.hpp" #include "util/log/Logger.hpp"
#include "web/AdminVerificationStrategy.hpp" #include "web/AdminVerificationStrategy.hpp"
#include "web/SubscriptionContextInterface.hpp" #include "web/SubscriptionContextInterface.hpp"
@@ -120,23 +119,20 @@ public:
*/ */
class MetricsHandler { class MetricsHandler {
std::shared_ptr<web::AdminVerificationStrategy> adminVerifier_; std::shared_ptr<web::AdminVerificationStrategy> adminVerifier_;
std::reference_wrapper<rpc::WorkQueue> workQueue_;
public: public:
/** /**
* @brief Construct a new MetricsHandler object * @brief Construct a new MetricsHandler object
* *
* @param adminVerifier The AdminVerificationStrategy to use for verifying the connection for admin access. * @param adminVerifier The AdminVerificationStrategy to use for verifying the connection for admin access.
* @param workQueue The WorkQueue to use for handling the request.
*/ */
MetricsHandler(std::shared_ptr<web::AdminVerificationStrategy> adminVerifier, rpc::WorkQueue& workQueue); MetricsHandler(std::shared_ptr<web::AdminVerificationStrategy> adminVerifier);
/** /**
* @brief The call of the function object. * @brief The call of the function object.
* *
* @param request The request to handle. * @param request The request to handle.
* @param connectionMetadata The connection metadata. * @param connectionMetadata The connection metadata.
* @param yield The yield context.
* @return The response to the request. * @return The response to the request.
*/ */
web::ng::Response web::ng::Response
@@ -144,7 +140,7 @@ public:
web::ng::Request const& request, web::ng::Request const& request,
web::ng::ConnectionMetadata& connectionMetadata, web::ng::ConnectionMetadata& connectionMetadata,
web::SubscriptionContextPtr, web::SubscriptionContextPtr,
boost::asio::yield_context yield boost::asio::yield_context
); );
}; };


@@ -146,12 +146,9 @@ AmendmentCenter::isEnabled(AmendmentKey const& key, uint32_t seq) const
bool bool
AmendmentCenter::isEnabled(boost::asio::yield_context yield, AmendmentKey const& key, uint32_t seq) const AmendmentCenter::isEnabled(boost::asio::yield_context yield, AmendmentKey const& key, uint32_t seq) const
{ {
try { if (auto const listAmendments = fetchAmendmentsList(yield, seq); listAmendments)
if (auto const listAmendments = fetchAmendmentsList(yield, seq); listAmendments) return lookupAmendment(all_, *listAmendments, key);
return lookupAmendment(all_, *listAmendments, key);
} catch (std::runtime_error const&) {
return false; // Some old ledger does not contain Amendments ledger object so do best we can for now
}
return false; return false;
} }
@@ -160,19 +157,13 @@ AmendmentCenter::isEnabled(boost::asio::yield_context yield, std::vector<Amendme
{ {
namespace rg = std::ranges; namespace rg = std::ranges;
try { if (auto const listAmendments = fetchAmendmentsList(yield, seq); listAmendments) {
if (auto const listAmendments = fetchAmendmentsList(yield, seq); listAmendments) { std::vector<bool> out;
std::vector<bool> out; rg::transform(keys, std::back_inserter(out), [this, &listAmendments](auto const& key) {
rg::transform(keys, std::back_inserter(out), [this, &listAmendments](auto const& key) { return lookupAmendment(all_, *listAmendments, key);
return lookupAmendment(all_, *listAmendments, key); });
});
return out; return out;
}
} catch (std::runtime_error const&) {
return std::vector<bool>(
keys.size(), false
); // Some old ledger does not contain Amendments ledger object so do best we can for now
} }
return std::vector<bool>(keys.size(), false); return std::vector<bool>(keys.size(), false);


@@ -147,12 +147,6 @@ struct Amendments {
REGISTER(fixAMMClawbackRounding); REGISTER(fixAMMClawbackRounding);
REGISTER(fixMPTDeliveredAmount); REGISTER(fixMPTDeliveredAmount);
REGISTER(fixPriceOracleOrder); REGISTER(fixPriceOracleOrder);
REGISTER(DynamicMPT);
REGISTER(fixDelegateV1_1);
REGISTER(fixDirectoryLimit);
REGISTER(fixIncludeKeyletFields);
REGISTER(fixTokenEscrowV1);
REGISTER(LendingProtocol);
// Obsolete but supported by libxrpl // Obsolete but supported by libxrpl
REGISTER(CryptoConditionsSuite); REGISTER(CryptoConditionsSuite);


@@ -21,7 +21,6 @@
#include "data/BackendInterface.hpp" #include "data/BackendInterface.hpp"
#include "data/CassandraBackend.hpp" #include "data/CassandraBackend.hpp"
#include "data/KeyspaceBackend.hpp"
#include "data/LedgerCacheInterface.hpp" #include "data/LedgerCacheInterface.hpp"
#include "data/cassandra/SettingsProvider.hpp" #include "data/cassandra/SettingsProvider.hpp"
#include "util/config/ConfigDefinition.hpp" #include "util/config/ConfigDefinition.hpp"
@@ -46,7 +45,6 @@ namespace data {
inline std::shared_ptr<BackendInterface> inline std::shared_ptr<BackendInterface>
makeBackend(util::config::ClioConfigDefinition const& config, data::LedgerCacheInterface& cache) makeBackend(util::config::ClioConfigDefinition const& config, data::LedgerCacheInterface& cache)
{ {
using namespace cassandra::impl;
static util::Logger const log{"Backend"}; // NOLINT(readability-identifier-naming) static util::Logger const log{"Backend"}; // NOLINT(readability-identifier-naming)
LOG(log.info()) << "Constructing BackendInterface"; LOG(log.info()) << "Constructing BackendInterface";
@@ -57,15 +55,9 @@ makeBackend(util::config::ClioConfigDefinition const& config, data::LedgerCacheI
if (boost::iequals(type, "cassandra")) { if (boost::iequals(type, "cassandra")) {
auto const cfg = config.getObject("database." + type); auto const cfg = config.getObject("database." + type);
if (providerFromString(cfg.getValueView("provider").asString()) == Provider::Keyspace) { backend = std::make_shared<data::cassandra::CassandraBackend>(
backend = std::make_shared<data::cassandra::KeyspaceBackend>( data::cassandra::SettingsProvider{cfg}, cache, readOnly
data::cassandra::SettingsProvider{cfg}, cache, readOnly );
);
} else {
backend = std::make_shared<data::cassandra::CassandraBackend>(
data::cassandra::SettingsProvider{cfg}, cache, readOnly
);
}
} }
if (!backend) if (!backend)


@@ -270,7 +270,7 @@ BackendInterface::updateRange(uint32_t newMax)
{ {
std::scoped_lock const lck(rngMtx_); std::scoped_lock const lck(rngMtx_);
if (range_.has_value() and newMax < range_->maxSequence) { if (range_.has_value() && newMax < range_->maxSequence) {
ASSERT( ASSERT(
false, false,
"Range shouldn't exist yet or newMax should be at least range->maxSequence. newMax = {}, " "Range shouldn't exist yet or newMax should be at least range->maxSequence. newMax = {}, "
@@ -280,14 +280,11 @@ BackendInterface::updateRange(uint32_t newMax)
); );
} }
updateRangeImpl(newMax); if (!range_.has_value()) {
} range_ = {.minSequence = newMax, .maxSequence = newMax};
} else {
void range_->maxSequence = newMax;
BackendInterface::forceUpdateRange(uint32_t newMax) }
{
std::scoped_lock const lck(rngMtx_);
updateRangeImpl(newMax);
} }
void void
@@ -413,14 +410,4 @@ BackendInterface::fetchFees(std::uint32_t const seq, boost::asio::yield_context
return fees; return fees;
} }
void
BackendInterface::updateRangeImpl(uint32_t newMax)
{
if (!range_.has_value()) {
range_ = {.minSequence = newMax, .maxSequence = newMax};
} else {
range_->maxSequence = newMax;
}
}
} // namespace data } // namespace data


@@ -249,15 +249,6 @@ public:
void void
updateRange(uint32_t newMax); updateRange(uint32_t newMax);
/**
* @brief Updates the range of sequences that are stored in the DB without any checks
* @note In the most cases you should use updateRange() instead
*
* @param newMax The new maximum sequence available
*/
void
forceUpdateRange(uint32_t newMax);
/** /**
* @brief Sets the range of sequences that are stored in the DB. * @brief Sets the range of sequences that are stored in the DB.
* *
@@ -304,7 +295,7 @@ public:
* @param account The account to fetch transactions for * @param account The account to fetch transactions for
* @param limit The maximum number of transactions per result page * @param limit The maximum number of transactions per result page
* @param forward Whether to fetch the page forwards or backwards from the given cursor * @param forward Whether to fetch the page forwards or backwards from the given cursor
* @param txnCursor The cursor to resume fetching from * @param cursor The cursor to resume fetching from
* @param yield The coroutine context * @param yield The coroutine context
* @return Results and a cursor to resume from * @return Results and a cursor to resume from
*/ */
@@ -313,7 +304,7 @@ public:
ripple::AccountID const& account, ripple::AccountID const& account,
std::uint32_t limit, std::uint32_t limit,
bool forward, bool forward,
std::optional<TransactionsCursor> const& txnCursor, std::optional<TransactionsCursor> const& cursor,
boost::asio::yield_context yield boost::asio::yield_context yield
) const = 0; ) const = 0;
@@ -785,9 +776,6 @@ private:
*/ */
virtual bool virtual bool
doFinishWrites() = 0; doFinishWrites() = 0;
void
updateRangeImpl(uint32_t newMax);
}; };
} // namespace data } // namespace data


@@ -5,7 +5,6 @@ target_sources(
BackendCounters.cpp BackendCounters.cpp
BackendInterface.cpp BackendInterface.cpp
LedgerCache.cpp LedgerCache.cpp
LedgerCacheSaver.cpp
LedgerHeaderCache.cpp LedgerHeaderCache.cpp
cassandra/impl/Future.cpp cassandra/impl/Future.cpp
cassandra/impl/Cluster.cpp cassandra/impl/Cluster.cpp
@@ -15,9 +14,6 @@ target_sources(
cassandra/impl/SslContext.cpp cassandra/impl/SslContext.cpp
cassandra/Handle.cpp cassandra/Handle.cpp
cassandra/SettingsProvider.cpp cassandra/SettingsProvider.cpp
impl/InputFile.cpp
impl/LedgerCacheFile.cpp
impl/OutputFile.cpp
) )
target_link_libraries(clio_data PUBLIC cassandra-cpp-driver::cassandra-cpp-driver clio_util) target_link_libraries(clio_data PUBLIC cassandra-cpp-driver::cassandra-cpp-driver clio_util)

File diff suppressed because it is too large


@@ -1,309 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include "data/LedgerHeaderCache.hpp"
#include "data/Types.hpp"
#include "data/cassandra/CassandraBackendFamily.hpp"
#include "data/cassandra/Concepts.hpp"
#include "data/cassandra/KeyspaceSchema.hpp"
#include "data/cassandra/SettingsProvider.hpp"
#include "data/cassandra/Types.hpp"
#include "data/cassandra/impl/ExecutionStrategy.hpp"
#include "util/Assert.hpp"
#include "util/log/Logger.hpp"
#include <boost/asio/spawn.hpp>
#include <boost/json/object.hpp>
#include <boost/uuid/string_generator.hpp>
#include <boost/uuid/uuid.hpp>
#include <cassandra.h>
#include <fmt/format.h>
#include <xrpl/basics/Blob.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/basics/strHex.h>
#include <xrpl/protocol/AccountID.h>
#include <xrpl/protocol/Indexes.h>
#include <xrpl/protocol/LedgerHeader.h>
#include <xrpl/protocol/nft.h>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <optional>
#include <stdexcept>
#include <utility>
#include <vector>
namespace data::cassandra {
/**
* @brief Implements @ref CassandraBackendFamily for Keyspace
*
* @tparam SettingsProviderType The settings provider type
* @tparam ExecutionStrategyType The execution strategy type
* @tparam FetchLedgerCacheType The ledger header cache type
*/
template <
SomeSettingsProvider SettingsProviderType,
SomeExecutionStrategy ExecutionStrategyType,
typename FetchLedgerCacheType = FetchLedgerCache>
class BasicKeyspaceBackend : public CassandraBackendFamily<
SettingsProviderType,
ExecutionStrategyType,
KeyspaceSchema<SettingsProviderType>,
FetchLedgerCacheType> {
using DefaultCassandraFamily = CassandraBackendFamily<
SettingsProviderType,
ExecutionStrategyType,
KeyspaceSchema<SettingsProviderType>,
FetchLedgerCacheType>;
using DefaultCassandraFamily::executor_;
using DefaultCassandraFamily::ledgerSequence_;
using DefaultCassandraFamily::log_;
using DefaultCassandraFamily::range_;
using DefaultCassandraFamily::schema_;
public:
/**
* @brief Inherit the constructors of the base class.
*/
using DefaultCassandraFamily::DefaultCassandraFamily;
/**
* @brief Move constructor is deleted because handle_ is shared by reference with executor
*/
BasicKeyspaceBackend(BasicKeyspaceBackend&&) = delete;
bool
doFinishWrites() override
{
this->waitForWritesToFinish();
// !range_.has_value() means the table 'ledger_range' is not populated;
// This would be the first write to the table.
// In this case, insert both min_sequence/max_sequence range into the table.
if (not range_.has_value()) {
executor_.writeSync(schema_->insertLedgerRange, /* isLatestLedger =*/false, ledgerSequence_);
executor_.writeSync(schema_->insertLedgerRange, /* isLatestLedger =*/true, ledgerSequence_);
}
if (not this->executeSyncUpdate(schema_->updateLedgerRange.bind(ledgerSequence_, true, ledgerSequence_ - 1))) {
log_.warn() << "Update failed for ledger " << ledgerSequence_;
return false;
}
log_.info() << "Committed ledger " << ledgerSequence_;
return true;
}
NFTsAndCursor
fetchNFTsByIssuer(
ripple::AccountID const& issuer,
std::optional<std::uint32_t> const& taxon,
std::uint32_t const ledgerSequence,
std::uint32_t const limit,
std::optional<ripple::uint256> const& cursorIn,
boost::asio::yield_context yield
) const override
{
std::vector<ripple::uint256> nftIDs;
if (taxon.has_value()) {
// Keyspace and ScyllaDB use the same logic for taxon-filtered queries
nftIDs = fetchNFTIDsByTaxon(issuer, *taxon, limit, cursorIn, yield);
} else {
// Amazon Keyspaces Workflow for non-taxon queries
auto const startTaxon = cursorIn.has_value() ? ripple::nft::toUInt32(ripple::nft::getTaxon(*cursorIn)) : 0;
auto const startTokenID = cursorIn.value_or(ripple::uint256(0));
Statement const firstQuery = schema_->selectNFTIDsByIssuerTaxon.bind(issuer);
firstQuery.bindAt(1, startTaxon);
firstQuery.bindAt(2, startTokenID);
firstQuery.bindAt(3, Limit{limit});
auto const firstRes = executor_.read(yield, firstQuery);
if (firstRes.has_value()) {
for (auto const [nftID] : extract<ripple::uint256>(*firstRes))
nftIDs.push_back(nftID);
}
if (nftIDs.size() < limit) {
auto const remainingLimit = limit - nftIDs.size();
Statement const secondQuery = schema_->selectNFTsAfterTaxonKeyspaces.bind(issuer);
secondQuery.bindAt(1, startTaxon);
secondQuery.bindAt(2, Limit{remainingLimit});
auto const secondRes = executor_.read(yield, secondQuery);
if (secondRes.has_value()) {
for (auto const [nftID] : extract<ripple::uint256>(*secondRes))
nftIDs.push_back(nftID);
}
}
}
return populateNFTsAndCreateCursor(nftIDs, ledgerSequence, limit, yield);
}
/**
* @brief (Unsupported in Keyspaces) Fetches account root object indexes by page.
* @note Loading the cache by enumerating all accounts is currently unsupported by the AWS Keyspaces backend.
* This function's logic relies on "PER PARTITION LIMIT 1", which Keyspaces does not support, and there is
* no efficient alternative. This is acceptable as the cache is primarily loaded via diffs. Calling this
* function will throw an exception.
*
* @param number The total number of accounts to fetch.
* @param pageSize The maximum number of accounts per page.
* @param seq The accounts need to exist at this ledger sequence.
* @param yield The coroutine context.
* @return A vector of ripple::uint256 representing the account root hashes.
*/
std::vector<ripple::uint256>
fetchAccountRoots(
[[maybe_unused]] std::uint32_t number,
[[maybe_unused]] std::uint32_t pageSize,
[[maybe_unused]] std::uint32_t seq,
[[maybe_unused]] boost::asio::yield_context yield
) const override
{
ASSERT(false, "Fetching account roots is not supported by the Keyspaces backend.");
std::unreachable();
}
private:
std::vector<ripple::uint256>
fetchNFTIDsByTaxon(
ripple::AccountID const& issuer,
std::uint32_t const taxon,
std::uint32_t const limit,
std::optional<ripple::uint256> const& cursorIn,
boost::asio::yield_context yield
) const
{
std::vector<ripple::uint256> nftIDs;
Statement const statement = schema_->selectNFTIDsByIssuerTaxon.bind(issuer);
statement.bindAt(1, taxon);
statement.bindAt(2, cursorIn.value_or(ripple::uint256(0)));
statement.bindAt(3, Limit{limit});
auto const res = executor_.read(yield, statement);
if (res.has_value() && res->hasRows()) {
for (auto const [nftID] : extract<ripple::uint256>(*res))
nftIDs.push_back(nftID);
}
return nftIDs;
}
std::vector<ripple::uint256>
fetchNFTIDsWithoutTaxon(
ripple::AccountID const& issuer,
std::uint32_t const limit,
std::optional<ripple::uint256> const& cursorIn,
boost::asio::yield_context yield
) const
{
std::vector<ripple::uint256> nftIDs;
auto const startTaxon = cursorIn.has_value() ? ripple::nft::toUInt32(ripple::nft::getTaxon(*cursorIn)) : 0;
auto const startTokenID = cursorIn.value_or(ripple::uint256(0));
Statement firstQuery = schema_->selectNFTIDsByIssuerTaxon.bind(issuer);
firstQuery.bindAt(1, startTaxon);
firstQuery.bindAt(2, startTokenID);
firstQuery.bindAt(3, Limit{limit});
auto const firstRes = executor_.read(yield, firstQuery);
if (firstRes.has_value()) {
for (auto const [nftID] : extract<ripple::uint256>(*firstRes))
nftIDs.push_back(nftID);
}
if (nftIDs.size() < limit) {
auto const remainingLimit = limit - nftIDs.size();
Statement secondQuery = schema_->selectNFTsAfterTaxonKeyspaces.bind(issuer);
secondQuery.bindAt(1, startTaxon);
secondQuery.bindAt(2, Limit{remainingLimit});
auto const secondRes = executor_.read(yield, secondQuery);
if (secondRes.has_value()) {
for (auto const [nftID] : extract<ripple::uint256>(*secondRes))
nftIDs.push_back(nftID);
}
}
return nftIDs;
}
/**
* @brief Takes a list of NFT IDs, fetches their full data, and assembles the final result with a cursor.
*/
NFTsAndCursor
populateNFTsAndCreateCursor(
std::vector<ripple::uint256> const& nftIDs,
std::uint32_t const ledgerSequence,
std::uint32_t const limit,
boost::asio::yield_context yield
) const
{
if (nftIDs.empty()) {
LOG(log_.debug()) << "No rows returned";
return {};
}
NFTsAndCursor ret;
if (nftIDs.size() == limit)
ret.cursor = nftIDs.back();
// Prepare and execute queries to fetch NFT info and URIs in parallel.
std::vector<Statement> selectNFTStatements;
selectNFTStatements.reserve(nftIDs.size());
std::transform(
std::cbegin(nftIDs), std::cend(nftIDs), std::back_inserter(selectNFTStatements), [&](auto const& nftID) {
return schema_->selectNFT.bind(nftID, ledgerSequence);
}
);
std::vector<Statement> selectNFTURIStatements;
selectNFTURIStatements.reserve(nftIDs.size());
std::transform(
std::cbegin(nftIDs), std::cend(nftIDs), std::back_inserter(selectNFTURIStatements), [&](auto const& nftID) {
return schema_->selectNFTURI.bind(nftID, ledgerSequence);
}
);
auto const nftInfos = executor_.readEach(yield, selectNFTStatements);
auto const nftUris = executor_.readEach(yield, selectNFTURIStatements);
// Combine the results into final NFT objects.
for (auto i = 0u; i < nftIDs.size(); ++i) {
if (auto const maybeRow = nftInfos[i].template get<uint32_t, ripple::AccountID, bool>();
maybeRow.has_value()) {
auto [seq, owner, isBurned] = *maybeRow;
NFT nft(nftIDs[i], seq, owner, isBurned);
if (auto const maybeUri = nftUris[i].template get<ripple::Blob>(); maybeUri.has_value())
nft.uri = *maybeUri;
ret.nfts.push_back(nft);
}
}
return ret;
}
};
using KeyspaceBackend = BasicKeyspaceBackend<SettingsProvider, impl::DefaultExecutionStrategy<>>;
} // namespace data::cassandra

View File

@@ -20,22 +20,16 @@
#include "data/LedgerCache.hpp" #include "data/LedgerCache.hpp"
#include "data/Types.hpp" #include "data/Types.hpp"
#include "data/impl/LedgerCacheFile.hpp" #include "etlng/Models.hpp"
#include "etl/Models.hpp"
#include "util/Assert.hpp" #include "util/Assert.hpp"
#include <xrpl/basics/base_uint.h> #include <xrpl/basics/base_uint.h>
#include <cstddef> #include <cstddef>
#include <cstdint> #include <cstdint>
#include <cstdlib>
#include <cstring>
#include <map>
#include <mutex> #include <mutex>
#include <optional> #include <optional>
#include <shared_mutex> #include <shared_mutex>
#include <string>
#include <utility>
#include <vector> #include <vector>
namespace data { namespace data {
@@ -95,7 +89,7 @@ LedgerCache::update(std::vector<LedgerObject> const& objs, uint32_t seq, bool is
} }
void void
LedgerCache::update(std::vector<etl::model::Object> const& objs, uint32_t seq) LedgerCache::update(std::vector<etlng::model::Object> const& objs, uint32_t seq)
{ {
if (disabled_) if (disabled_)
return; return;
@@ -257,34 +251,4 @@ LedgerCache::getSuccessorHitRate() const
return static_cast<float>(successorHitCounter_.get().value()) / successorReqCounter_.get().value(); return static_cast<float>(successorHitCounter_.get().value()) / successorReqCounter_.get().value();
} }
std::expected<void, std::string>
LedgerCache::saveToFile(std::string const& path) const
{
if (not isFull()) {
return std::unexpected{"Ledger cache is not full"};
}
impl::LedgerCacheFile file{path};
std::shared_lock const lock{mtx_};
impl::LedgerCacheFile::DataView const data{.latestSeq = latestSeq_, .map = map_, .deleted = deleted_};
return file.write(data);
}
std::expected<void, std::string>
LedgerCache::loadFromFile(std::string const& path, uint32_t minLatestSequence)
{
impl::LedgerCacheFile file{path};
auto data = file.read(minLatestSequence);
if (not data.has_value()) {
return std::unexpected(std::move(data).error());
}
auto [latestSeq, map, deleted] = std::move(data).value();
std::unique_lock const lock{mtx_};
latestSeq_ = latestSeq;
map_ = std::move(map);
deleted_ = std::move(deleted);
full_ = true;
return {};
}
} // namespace data } // namespace data

View File

@@ -21,7 +21,7 @@
#include "data/LedgerCacheInterface.hpp" #include "data/LedgerCacheInterface.hpp"
#include "data/Types.hpp" #include "data/Types.hpp"
#include "etl/Models.hpp" #include "etlng/Models.hpp"
#include "util/prometheus/Bool.hpp" #include "util/prometheus/Bool.hpp"
#include "util/prometheus/Counter.hpp" #include "util/prometheus/Counter.hpp"
#include "util/prometheus/Label.hpp" #include "util/prometheus/Label.hpp"
@@ -37,7 +37,6 @@
#include <map> #include <map>
#include <optional> #include <optional>
#include <shared_mutex> #include <shared_mutex>
#include <string>
#include <unordered_set> #include <unordered_set>
#include <vector> #include <vector>
@@ -47,16 +46,11 @@ namespace data {
* @brief Cache for an entire ledger. * @brief Cache for an entire ledger.
*/ */
class LedgerCache : public LedgerCacheInterface { class LedgerCache : public LedgerCacheInterface {
public:
/** @brief An entry of the cache */
struct CacheEntry { struct CacheEntry {
uint32_t seq = 0; uint32_t seq = 0;
Blob blob; Blob blob;
}; };
using CacheMap = std::map<ripple::uint256, CacheEntry>;
private:
// counters for fetchLedgerObject(s) hit rate // counters for fetchLedgerObject(s) hit rate
std::reference_wrapper<util::prometheus::CounterInt> objectReqCounter_{PrometheusService::counterInt( std::reference_wrapper<util::prometheus::CounterInt> objectReqCounter_{PrometheusService::counterInt(
"ledger_cache_counter_total_number", "ledger_cache_counter_total_number",
@@ -79,8 +73,8 @@ private:
util::prometheus::Labels({{"type", "cache_hit"}, {"fetch", "successor_key"}}) util::prometheus::Labels({{"type", "cache_hit"}, {"fetch", "successor_key"}})
)}; )};
CacheMap map_; std::map<ripple::uint256, CacheEntry> map_;
CacheMap deleted_; std::map<ripple::uint256, CacheEntry> deleted_;
mutable std::shared_mutex mtx_; mutable std::shared_mutex mtx_;
std::condition_variable_any cv_; std::condition_variable_any cv_;
@@ -104,7 +98,7 @@ public:
update(std::vector<LedgerObject> const& objs, uint32_t seq, bool isBackground) override; update(std::vector<LedgerObject> const& objs, uint32_t seq, bool isBackground) override;
void void
update(std::vector<etl::model::Object> const& objs, uint32_t seq) override; update(std::vector<etlng::model::Object> const& objs, uint32_t seq) override;
std::optional<Blob> std::optional<Blob>
get(ripple::uint256 const& key, uint32_t seq) const override; get(ripple::uint256 const& key, uint32_t seq) const override;
@@ -144,12 +138,6 @@ public:
void void
waitUntilCacheContainsSeq(uint32_t seq) override; waitUntilCacheContainsSeq(uint32_t seq) override;
std::expected<void, std::string>
saveToFile(std::string const& path) const override;
std::expected<void, std::string>
loadFromFile(std::string const& path, uint32_t minLatestSequence) override;
}; };
} // namespace data } // namespace data

View File

@@ -20,16 +20,14 @@
#pragma once #pragma once
#include "data/Types.hpp" #include "data/Types.hpp"
#include "etl/Models.hpp" #include "etlng/Models.hpp"
#include <xrpl/basics/base_uint.h> #include <xrpl/basics/base_uint.h>
#include <xrpl/basics/hardened_hash.h> #include <xrpl/basics/hardened_hash.h>
#include <cstddef> #include <cstddef>
#include <cstdint> #include <cstdint>
#include <expected>
#include <optional> #include <optional>
#include <string>
#include <vector> #include <vector>
namespace data { namespace data {
@@ -65,7 +63,7 @@ public:
* @param seq The sequence to update cache for * @param seq The sequence to update cache for
*/ */
virtual void virtual void
update(std::vector<etl::model::Object> const& objs, uint32_t seq) = 0; update(std::vector<etlng::model::Object> const& objs, uint32_t seq) = 0;
/** /**
* @brief Fetch a cached object by its key and sequence number. * @brief Fetch a cached object by its key and sequence number.
@@ -170,27 +168,6 @@ public:
*/ */
virtual void virtual void
waitUntilCacheContainsSeq(uint32_t seq) = 0; waitUntilCacheContainsSeq(uint32_t seq) = 0;
/**
* @brief Save the cache to file
* @note This operation takes about 7 seconds and it keeps a shared lock of mtx_
*
* @param path The file path to save the cache to
* @return An error as a string if any
*/
[[nodiscard]] virtual std::expected<void, std::string>
saveToFile(std::string const& path) const = 0;
/**
* @brief Load the cache from file
* @note This operation takes about 7 seconds and it keeps mtx_ exclusively locked
*
* @param path The file path to load data from
* @param minLatestSequence The minimum allowed value of the latestLedgerSequence in cache file
* @return An error as a string if any
*/
[[nodiscard]] virtual std::expected<void, std::string>
loadFromFile(std::string const& path, uint32_t minLatestSequence) = 0;
}; };
} // namespace data } // namespace data

View File

@@ -1,75 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include "data/LedgerCacheSaver.hpp"
#include "data/LedgerCacheInterface.hpp"
#include "util/Assert.hpp"
#include "util/Profiler.hpp"
#include "util/log/Logger.hpp"
#include <string>
#include <thread>
namespace data {
LedgerCacheSaver::LedgerCacheSaver(util::config::ClioConfigDefinition const& config, LedgerCacheInterface const& cache)
: cacheFilePath_(config.maybeValue<std::string>("cache.file.path"))
, cache_(cache)
, isAsync_(config.get<bool>("cache.file.async_save"))
{
}
LedgerCacheSaver::~LedgerCacheSaver()
{
waitToFinish();
}
void
LedgerCacheSaver::save()
{
ASSERT(not savingThread_.has_value(), "Multiple save() calls are not allowed");
savingThread_ = std::thread([this]() {
if (not cacheFilePath_.has_value()) {
return;
}
LOG(util::LogService::info()) << "Saving ledger cache to " << *cacheFilePath_;
if (auto const [success, durationMs] = util::timed([&]() { return cache_.get().saveToFile(*cacheFilePath_); });
success.has_value()) {
LOG(util::LogService::info()) << "Successfully saved ledger cache in " << durationMs << " ms";
} else {
LOG(util::LogService::error()) << "Error saving LedgerCache to file: " << success.error();
}
});
if (not isAsync_) {
waitToFinish();
}
}
void
LedgerCacheSaver::waitToFinish()
{
if (savingThread_.has_value() and savingThread_->joinable()) {
savingThread_->join();
}
savingThread_.reset();
}
} // namespace data

View File

@@ -1,94 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include "data/LedgerCacheInterface.hpp"
#include "util/config/ConfigDefinition.hpp"
#include <concepts>
#include <functional>
#include <optional>
#include <string>
#include <thread>
namespace data {
/**
* @brief A concept for a class that can save ledger cache asynchronously.
*
* This concept defines the interface requirements for any type that manages
* asynchronous saving of ledger cache to persistent storage.
*/
template <typename T>
concept SomeLedgerCacheSaver = requires(T a) {
{ a.save() } -> std::same_as<void>;
{ a.waitToFinish() } -> std::same_as<void>;
};
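// A sketch (hypothetical function, for illustration only) of how the concept above
// can constrain a template, so any conforming saver implementation may be used:
//
//   template <SomeLedgerCacheSaver SaverType>
//   void
//   finishAndPersist(SaverType& saver)
//   {
//       saver.save();
//       saver.waitToFinish();
//   }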
/**
* @brief Manages asynchronous saving of ledger cache to a file.
*
* This class provides functionality to save the ledger cache to a file in a separate thread,
* allowing the main application to continue without blocking. The file path is configured
* through the application's configuration system.
*/
class LedgerCacheSaver {
std::optional<std::string> cacheFilePath_;
std::reference_wrapper<LedgerCacheInterface const> cache_;
std::optional<std::thread> savingThread_;
bool isAsync_;
public:
/**
* @brief Constructs a LedgerCacheSaver instance.
*
* @param config The configuration object containing the cache file path setting
* @param cache Reference to the ledger cache interface to be saved
*/
LedgerCacheSaver(util::config::ClioConfigDefinition const& config, LedgerCacheInterface const& cache);
/**
* @brief Destructor that ensures the saving thread is properly joined.
*
* Waits for any ongoing save operation to complete before destruction.
*/
~LedgerCacheSaver();
/**
* @brief Initiates an asynchronous save operation of the ledger cache.
*
* Spawns a new thread that saves the ledger cache to the configured file path.
* If no file path is configured, the operation is skipped. Logs the progress
* and result of the save operation.
*/
void
save();
/**
* @brief Waits for the saving thread to complete.
*
* Blocks until the saving operation finishes if a thread is currently active.
* Safe to call multiple times or when no save operation is in progress.
*/
void
waitToFinish();
};
} // namespace data
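// Usage sketch (assumes a loaded ClioConfigDefinition and a LedgerCache instance):
//
//   data::LedgerCacheSaver saver{config, cache};
//   saver.save();          // spawns the saving thread; blocks here when
//                          // "cache.file.async_save" is false
//   saver.waitToFinish();  // optional, as the destructor joins the thread anyway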

View File

@@ -1,10 +1,8 @@
# Backend # Backend
@page "backend" Backend
The backend of Clio is responsible for handling the proper reading and writing of past ledger data from and to a given database. Currently, Cassandra and ScyllaDB are the only supported databases that are production-ready. The backend of Clio is responsible for handling the proper reading and writing of past ledger data from and to a given database. Currently, Cassandra and ScyllaDB are the only supported databases that are production-ready.
To support additional database types, you can create new classes that implement the virtual methods in [BackendInterface.hpp](https://github.com/XRPLF/clio/blob/develop/src/data/BackendInterface.hpp). Then, leveraging the Factory Object Design Pattern, modify [BackendFactory.hpp](https://github.com/XRPLF/clio/blob/develop/src/data/BackendFactory.hpp) with logic that returns the new database interface if the relevant `type` is provided in Clio's configuration file. To support additional database types, you can create new classes that implement the virtual methods in [BackendInterface.h](https://github.com/XRPLF/clio/blob/develop/src/data/BackendInterface.hpp). Then, leveraging the Factory Object Design Pattern, modify [BackendFactory.h](https://github.com/XRPLF/clio/blob/develop/src/data/BackendFactory.hpp) with logic that returns the new database interface if the relevant `type` is provided in Clio's configuration file.
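As a rough sketch of that pattern (ExampleBackend and the factory branch below are hypothetical; BackendInterface and the `type` configuration key are real):
class ExampleBackend : public data::BackendInterface {
public:
    // Override every virtual method declared in BackendInterface.hpp, e.g.:
    std::optional<ripple::LedgerHeader>
    fetchLedgerBySequence(std::uint32_t sequence, boost::asio::yield_context yield) const override;
    // ...
};
// Hypothetical branch added to the factory in BackendFactory.hpp:
if (config.get<std::string>("database.type") == "example")
    return std::make_shared<ExampleBackend>(/* settings */);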
## Data Model ## Data Model

View File

@@ -247,9 +247,6 @@ struct MPTHoldersAndCursor {
struct LedgerRange { struct LedgerRange {
std::uint32_t minSequence = 0; std::uint32_t minSequence = 0;
std::uint32_t maxSequence = 0; std::uint32_t maxSequence = 0;
bool
operator==(LedgerRange const&) const = default;
}; };
/** /**

View File

@@ -1,975 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include "data/BackendInterface.hpp"
#include "data/DBHelpers.hpp"
#include "data/LedgerCacheInterface.hpp"
#include "data/LedgerHeaderCache.hpp"
#include "data/Types.hpp"
#include "data/cassandra/Concepts.hpp"
#include "data/cassandra/Handle.hpp"
#include "data/cassandra/Types.hpp"
#include "data/cassandra/impl/ExecutionStrategy.hpp"
#include "util/Assert.hpp"
#include "util/LedgerUtils.hpp"
#include "util/Profiler.hpp"
#include "util/log/Logger.hpp"
#include <boost/asio/spawn.hpp>
#include <boost/json/object.hpp>
#include <boost/uuid/string_generator.hpp>
#include <boost/uuid/uuid.hpp>
#include <cassandra.h>
#include <fmt/format.h>
#include <xrpl/basics/Blob.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/basics/strHex.h>
#include <xrpl/protocol/AccountID.h>
#include <xrpl/protocol/Indexes.h>
#include <xrpl/protocol/LedgerHeader.h>
#include <xrpl/protocol/nft.h>
#include <algorithm>
#include <atomic>
#include <chrono>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <limits>
#include <optional>
#include <stdexcept>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
class CacheBackendCassandraTest;
namespace data::cassandra {
/**
* @brief Implements @ref BackendInterface for Cassandra/ScyllaDB/Keyspaces.
*
* Note: This is a safer and more correct rewrite of the original implementation of the backend.
*
* @tparam SettingsProviderType The settings provider type
* @tparam ExecutionStrategyType The execution strategy type
* @tparam SchemaType The Schema type
* @tparam FetchLedgerCacheType The ledger header cache type
*/
template <
SomeSettingsProvider SettingsProviderType,
SomeExecutionStrategy ExecutionStrategyType,
typename SchemaType,
typename FetchLedgerCacheType = FetchLedgerCache>
class CassandraBackendFamily : public BackendInterface {
protected:
util::Logger log_{"Backend"};
SettingsProviderType settingsProvider_;
SchemaType schema_;
std::atomic_uint32_t ledgerSequence_ = 0u;
friend class ::CacheBackendCassandraTest;
Handle handle_;
// have to be mutable because BackendInterface constness :(
mutable ExecutionStrategyType executor_;
// TODO: move to interface level
mutable FetchLedgerCacheType ledgerCache_{};
public:
/**
* @brief Create a new cassandra/scylla backend instance.
*
* @param settingsProvider The settings provider
* @param cache The ledger cache
* @param readOnly Whether the database should be in readonly mode
*/
CassandraBackendFamily(SettingsProviderType settingsProvider, data::LedgerCacheInterface& cache, bool readOnly)
: BackendInterface(cache)
, settingsProvider_{std::move(settingsProvider)}
, schema_{settingsProvider_}
, handle_{settingsProvider_.getSettings()}
, executor_{settingsProvider_.getSettings(), handle_}
{
if (auto const res = handle_.connect(); not res.has_value())
throw std::runtime_error("Could not connect to database: " + res.error());
if (not readOnly) {
if (auto const res = handle_.execute(schema_.createKeyspace); not res.has_value()) {
// On DataStax, creation of keyspaces can be configured to only be done through the admin
// interface. This does not mean that the keyspace does not already exist, though.
if (res.error().code() != CASS_ERROR_SERVER_UNAUTHORIZED)
throw std::runtime_error("Could not create keyspace: " + res.error());
}
if (auto const res = handle_.executeEach(schema_.createSchema); not res.has_value())
throw std::runtime_error("Could not create schema: " + res.error());
}
try {
schema_.prepareStatements(handle_);
} catch (std::runtime_error const& ex) {
auto const error = fmt::format(
"Failed to prepare the statements: {}; readOnly: {}. ReadOnly should be turned off or another Clio "
"node with write access to DB should be started first.",
ex.what(),
readOnly
);
LOG(log_.error()) << error;
throw std::runtime_error(error);
}
LOG(log_.info()) << "Created (revamped) CassandraBackend";
}
/**
* @brief Move constructor is deleted because handle_ is shared by reference with executor
*/
CassandraBackendFamily(CassandraBackendFamily&&) = delete;
TransactionsAndCursor
fetchAccountTransactions(
ripple::AccountID const& account,
std::uint32_t const limit,
bool forward,
std::optional<TransactionsCursor> const& txnCursor,
boost::asio::yield_context yield
) const override
{
auto rng = fetchLedgerRange();
if (!rng)
return {.txns = {}, .cursor = {}};
Statement const statement = [this, forward, &account]() {
if (forward)
return schema_->selectAccountTxForward.bind(account);
return schema_->selectAccountTx.bind(account);
}();
auto cursor = txnCursor;
if (cursor) {
statement.bindAt(1, cursor->asTuple());
LOG(log_.debug()) << "account = " << ripple::strHex(account) << " tuple = " << cursor->ledgerSequence
<< cursor->transactionIndex;
} else {
auto const seq = forward ? rng->minSequence : rng->maxSequence;
auto const placeHolder = forward ? 0u : std::numeric_limits<std::uint32_t>::max();
statement.bindAt(1, std::make_tuple(placeHolder, placeHolder));
LOG(log_.debug()) << "account = " << ripple::strHex(account) << " idx = " << seq
<< " tuple = " << placeHolder;
}
// FIXME: Limit is a hack to support uint32_t properly for the time
// being. Should be removed later and schema updated to use proper
// types.
statement.bindAt(2, Limit{limit});
auto const res = executor_.read(yield, statement);
auto const& results = res.value();
if (not results.hasRows()) {
LOG(log_.debug()) << "No rows returned";
return {};
}
std::vector<ripple::uint256> hashes = {};
auto numRows = results.numRows();
LOG(log_.info()) << "num_rows = " << numRows;
for (auto [hash, data] : extract<ripple::uint256, std::tuple<uint32_t, uint32_t>>(results)) {
hashes.push_back(hash);
if (--numRows == 0) {
LOG(log_.debug()) << "Setting cursor";
cursor = data;
}
}
auto const txns = fetchTransactions(hashes, yield);
LOG(log_.debug()) << "Txns = " << txns.size();
if (txns.size() == limit) {
LOG(log_.debug()) << "Returning cursor";
return {txns, cursor};
}
return {txns, {}};
}
void
waitForWritesToFinish() override
{
executor_.sync();
}
void
writeLedger(ripple::LedgerHeader const& ledgerHeader, std::string&& blob) override
{
executor_.write(schema_->insertLedgerHeader, ledgerHeader.seq, std::move(blob));
executor_.write(schema_->insertLedgerHash, ledgerHeader.hash, ledgerHeader.seq);
ledgerSequence_ = ledgerHeader.seq;
}
std::optional<std::uint32_t>
fetchLatestLedgerSequence(boost::asio::yield_context yield) const override
{
if (auto const res = executor_.read(yield, schema_->selectLatestLedger); res.has_value()) {
if (auto const& rows = *res; rows) {
if (auto const maybeRow = rows.template get<uint32_t>(); maybeRow.has_value())
return maybeRow;
LOG(log_.error()) << "Could not fetch latest ledger - no rows";
return std::nullopt;
}
LOG(log_.error()) << "Could not fetch latest ledger - no result";
} else {
LOG(log_.error()) << "Could not fetch latest ledger: " << res.error();
}
return std::nullopt;
}
std::optional<ripple::LedgerHeader>
fetchLedgerBySequence(std::uint32_t const sequence, boost::asio::yield_context yield) const override
{
if (auto const lock = ledgerCache_.get(); lock.has_value() && lock->seq == sequence)
return lock->ledger;
auto const res = executor_.read(yield, schema_->selectLedgerBySeq, sequence);
if (res) {
if (auto const& result = res.value(); result) {
if (auto const maybeValue = result.template get<std::vector<unsigned char>>(); maybeValue) {
auto const header = util::deserializeHeader(ripple::makeSlice(*maybeValue));
ledgerCache_.put(FetchLedgerCache::CacheEntry{header, sequence});
return header;
}
LOG(log_.error()) << "Could not fetch ledger by sequence - no rows";
return std::nullopt;
}
LOG(log_.error()) << "Could not fetch ledger by sequence - no result";
} else {
LOG(log_.error()) << "Could not fetch ledger by sequence: " << res.error();
}
return std::nullopt;
}
std::optional<ripple::LedgerHeader>
fetchLedgerByHash(ripple::uint256 const& hash, boost::asio::yield_context yield) const override
{
if (auto const res = executor_.read(yield, schema_->selectLedgerByHash, hash); res) {
if (auto const& result = res.value(); result) {
if (auto const maybeValue = result.template get<uint32_t>(); maybeValue)
return fetchLedgerBySequence(*maybeValue, yield);
LOG(log_.error()) << "Could not fetch ledger by hash - no rows";
return std::nullopt;
}
LOG(log_.error()) << "Could not fetch ledger by hash - no result";
} else {
LOG(log_.error()) << "Could not fetch ledger by hash: " << res.error();
}
return std::nullopt;
}
std::optional<LedgerRange>
hardFetchLedgerRange(boost::asio::yield_context yield) const override
{
auto const res = executor_.read(yield, schema_->selectLedgerRange);
if (res) {
auto const& results = res.value();
if (not results.hasRows()) {
LOG(log_.debug()) << "Could not fetch ledger range - no rows";
return std::nullopt;
}
// TODO: this is probably a good place to use user type in
// cassandra instead of having two rows with bool flag. or maybe at
// least use tuple<int, int>?
LedgerRange range;
std::size_t idx = 0;
for (auto [seq] : extract<uint32_t>(results)) {
if (idx == 0) {
range.maxSequence = range.minSequence = seq;
} else if (idx == 1) {
range.maxSequence = seq;
}
++idx;
}
if (range.minSequence > range.maxSequence)
std::swap(range.minSequence, range.maxSequence);
LOG(log_.debug()) << "After hardFetchLedgerRange range is " << range.minSequence << ":"
<< range.maxSequence;
return range;
}
LOG(log_.error()) << "Could not fetch ledger range: " << res.error();
return std::nullopt;
}
std::vector<TransactionAndMetadata>
fetchAllTransactionsInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
{
auto hashes = fetchAllTransactionHashesInLedger(ledgerSequence, yield);
return fetchTransactions(hashes, yield);
}
std::vector<ripple::uint256>
fetchAllTransactionHashesInLedger(
std::uint32_t const ledgerSequence,
boost::asio::yield_context yield
) const override
{
auto start = std::chrono::system_clock::now();
auto const res = executor_.read(yield, schema_->selectAllTransactionHashesInLedger, ledgerSequence);
if (not res) {
LOG(log_.error()) << "Could not fetch all transaction hashes: " << res.error();
return {};
}
auto const& result = res.value();
if (not result.hasRows()) {
LOG(log_.warn()) << "Could not fetch all transaction hashes - no rows; ledger = "
<< std::to_string(ledgerSequence);
return {};
}
std::vector<ripple::uint256> hashes;
for (auto [hash] : extract<ripple::uint256>(result))
hashes.push_back(std::move(hash));
auto end = std::chrono::system_clock::now();
LOG(log_.debug()) << "Fetched " << hashes.size() << " transaction hashes from database in "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
<< " milliseconds";
return hashes;
}
std::optional<NFT>
fetchNFT(
ripple::uint256 const& tokenID,
std::uint32_t const ledgerSequence,
boost::asio::yield_context yield
) const override
{
auto const res = executor_.read(yield, schema_->selectNFT, tokenID, ledgerSequence);
if (not res)
return std::nullopt;
if (auto const maybeRow = res->template get<uint32_t, ripple::AccountID, bool>(); maybeRow) {
auto [seq, owner, isBurned] = *maybeRow;
auto result = std::make_optional<NFT>(tokenID, seq, owner, isBurned);
// now fetch URI. Usually we will have the URI even for burned NFTs,
// but if the first ledger on this clio included NFTokenBurn
// transactions we will not have the URIs for any of those tokens.
// In any other case not having the URI indicates something went
// wrong with our data.
//
// TODO - in the future would be great for any handlers that use
// this could inject a warning in this case (the case of not having
// a URI because it was burned in the first ledger) to indicate that
// even though we are returning a blank URI, the NFT might have had
// one.
auto uriRes = executor_.read(yield, schema_->selectNFTURI, tokenID, ledgerSequence);
if (uriRes) {
if (auto const maybeUri = uriRes->template get<ripple::Blob>(); maybeUri)
result->uri = *maybeUri;
}
return result;
}
LOG(log_.error()) << "Could not fetch NFT - no rows";
return std::nullopt;
}
TransactionsAndCursor
fetchNFTTransactions(
ripple::uint256 const& tokenID,
std::uint32_t const limit,
bool const forward,
std::optional<TransactionsCursor> const& cursorIn,
boost::asio::yield_context yield
) const override
{
auto rng = fetchLedgerRange();
if (!rng)
return {.txns = {}, .cursor = {}};
Statement const statement = [this, forward, &tokenID]() {
if (forward)
return schema_->selectNFTTxForward.bind(tokenID);
return schema_->selectNFTTx.bind(tokenID);
}();
auto cursor = cursorIn;
if (cursor) {
statement.bindAt(1, cursor->asTuple());
LOG(log_.debug()) << "token_id = " << ripple::strHex(tokenID) << " tuple = " << cursor->ledgerSequence
<< cursor->transactionIndex;
} else {
auto const seq = forward ? rng->minSequence : rng->maxSequence;
auto const placeHolder = forward ? 0 : std::numeric_limits<std::uint32_t>::max();
statement.bindAt(1, std::make_tuple(placeHolder, placeHolder));
LOG(log_.debug()) << "token_id = " << ripple::strHex(tokenID) << " idx = " << seq
<< " tuple = " << placeHolder;
}
statement.bindAt(2, Limit{limit});
auto const res = executor_.read(yield, statement);
auto const& results = res.value();
if (not results.hasRows()) {
LOG(log_.debug()) << "No rows returned";
return {};
}
std::vector<ripple::uint256> hashes = {};
auto numRows = results.numRows();
LOG(log_.info()) << "num_rows = " << numRows;
for (auto [hash, data] : extract<ripple::uint256, std::tuple<uint32_t, uint32_t>>(results)) {
hashes.push_back(hash);
if (--numRows == 0) {
LOG(log_.debug()) << "Setting cursor";
cursor = data;
// forward queries by ledger/tx sequence `>=`
// so we have to advance the index by one
if (forward)
++cursor->transactionIndex;
}
}
auto const txns = fetchTransactions(hashes, yield);
LOG(log_.debug()) << "NFT Txns = " << txns.size();
if (txns.size() == limit) {
LOG(log_.debug()) << "Returning cursor";
return {txns, cursor};
}
return {txns, {}};
}
MPTHoldersAndCursor
fetchMPTHolders(
ripple::uint192 const& mptID,
std::uint32_t const limit,
std::optional<ripple::AccountID> const& cursorIn,
std::uint32_t const ledgerSequence,
boost::asio::yield_context yield
) const override
{
auto const holderEntries = executor_.read(
yield, schema_->selectMPTHolders, mptID, cursorIn.value_or(ripple::AccountID(0)), Limit{limit}
);
auto const& holderResults = holderEntries.value();
if (not holderResults.hasRows()) {
LOG(log_.debug()) << "No rows returned";
return {};
}
std::vector<ripple::uint256> mptKeys;
std::optional<ripple::AccountID> cursor;
for (auto const [holder] : extract<ripple::AccountID>(holderResults)) {
mptKeys.push_back(ripple::keylet::mptoken(mptID, holder).key);
cursor = holder;
}
auto mptObjects = doFetchLedgerObjects(mptKeys, ledgerSequence, yield);
auto it = std::remove_if(mptObjects.begin(), mptObjects.end(), [](Blob const& mpt) { return mpt.empty(); });
mptObjects.erase(it, mptObjects.end());
ASSERT(mptKeys.size() <= limit, "Number of keys can't exceed the limit");
if (mptKeys.size() == limit)
return {mptObjects, cursor};
return {mptObjects, {}};
}
std::optional<Blob>
doFetchLedgerObject(
ripple::uint256 const& key,
std::uint32_t const sequence,
boost::asio::yield_context yield
) const override
{
LOG(log_.debug()) << "Fetching ledger object for seq " << sequence << ", key = " << ripple::to_string(key);
if (auto const res = executor_.read(yield, schema_->selectObject, key, sequence); res) {
if (auto const result = res->template get<Blob>(); result) {
if (result->size())
return result;
} else {
LOG(log_.debug()) << "Could not fetch ledger object - no rows";
}
} else {
LOG(log_.error()) << "Could not fetch ledger object: " << res.error();
}
return std::nullopt;
}
std::optional<std::uint32_t>
doFetchLedgerObjectSeq(
ripple::uint256 const& key,
std::uint32_t const sequence,
boost::asio::yield_context yield
) const override
{
LOG(log_.debug()) << "Fetching ledger object for seq " << sequence << ", key = " << ripple::to_string(key);
if (auto const res = executor_.read(yield, schema_->selectObject, key, sequence); res) {
if (auto const result = res->template get<Blob, std::uint32_t>(); result) {
auto [_, seq] = result.value();
return seq;
}
LOG(log_.debug()) << "Could not fetch ledger object sequence - no rows";
} else {
LOG(log_.error()) << "Could not fetch ledger object sequence: " << res.error();
}
return std::nullopt;
}
std::optional<TransactionAndMetadata>
fetchTransaction(ripple::uint256 const& hash, boost::asio::yield_context yield) const override
{
if (auto const res = executor_.read(yield, schema_->selectTransaction, hash); res) {
if (auto const maybeValue = res->template get<Blob, Blob, uint32_t, uint32_t>(); maybeValue) {
auto [transaction, meta, seq, date] = *maybeValue;
return std::make_optional<TransactionAndMetadata>(transaction, meta, seq, date);
}
LOG(log_.debug()) << "Could not fetch transaction - no rows";
} else {
LOG(log_.error()) << "Could not fetch transaction: " << res.error();
}
return std::nullopt;
}
std::optional<ripple::uint256>
doFetchSuccessorKey(
ripple::uint256 key,
std::uint32_t const ledgerSequence,
boost::asio::yield_context yield
) const override
{
if (auto const res = executor_.read(yield, schema_->selectSuccessor, key, ledgerSequence); res) {
if (auto const result = res->template get<ripple::uint256>(); result) {
if (*result == kLAST_KEY)
return std::nullopt;
return result;
}
LOG(log_.debug()) << "Could not fetch successor - no rows";
} else {
LOG(log_.error()) << "Could not fetch successor: " << res.error();
}
return std::nullopt;
}
std::vector<TransactionAndMetadata>
fetchTransactions(std::vector<ripple::uint256> const& hashes, boost::asio::yield_context yield) const override
{
if (hashes.empty())
return {};
auto const numHashes = hashes.size();
std::vector<TransactionAndMetadata> results;
results.reserve(numHashes);
std::vector<Statement> statements;
statements.reserve(numHashes);
auto const timeDiff = util::timed([this, yield, &results, &hashes, &statements]() {
// TODO: seems like a job for "hash IN (list of hashes)" instead?
std::transform(
std::cbegin(hashes), std::cend(hashes), std::back_inserter(statements), [this](auto const& hash) {
return schema_->selectTransaction.bind(hash);
}
);
auto const entries = executor_.readEach(yield, statements);
std::transform(
std::cbegin(entries),
std::cend(entries),
std::back_inserter(results),
[](auto const& res) -> TransactionAndMetadata {
if (auto const maybeRow = res.template get<Blob, Blob, uint32_t, uint32_t>(); maybeRow)
return *maybeRow;
return {};
}
);
});
ASSERT(numHashes == results.size(), "Number of hashes and results must match");
LOG(log_.debug()) << "Fetched " << numHashes << " transactions from database in " << timeDiff
<< " milliseconds";
return results;
}
std::vector<Blob>
doFetchLedgerObjects(
std::vector<ripple::uint256> const& keys,
std::uint32_t const sequence,
boost::asio::yield_context yield
) const override
{
if (keys.empty())
return {};
auto const numKeys = keys.size();
LOG(log_.trace()) << "Fetching " << numKeys << " objects";
std::vector<Blob> results;
results.reserve(numKeys);
std::vector<Statement> statements;
statements.reserve(numKeys);
// TODO: seems like a job for "key IN (list of keys)" instead?
std::transform(
std::cbegin(keys), std::cend(keys), std::back_inserter(statements), [this, &sequence](auto const& key) {
return schema_->selectObject.bind(key, sequence);
}
);
auto const entries = executor_.readEach(yield, statements);
std::transform(
std::cbegin(entries), std::cend(entries), std::back_inserter(results), [](auto const& res) -> Blob {
if (auto const maybeValue = res.template get<Blob>(); maybeValue)
return *maybeValue;
return {};
}
);
LOG(log_.trace()) << "Fetched " << numKeys << " objects";
return results;
}
std::vector<LedgerObject>
fetchLedgerDiff(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
{
auto const [keys, timeDiff] = util::timed([this, &ledgerSequence, yield]() -> std::vector<ripple::uint256> {
auto const res = executor_.read(yield, schema_->selectDiff, ledgerSequence);
if (not res) {
LOG(log_.error()) << "Could not fetch ledger diff: " << res.error() << "; ledger = " << ledgerSequence;
return {};
}
auto const& results = res.value();
if (not results) {
LOG(log_.error()) << "Could not fetch ledger diff - no rows; ledger = " << ledgerSequence;
return {};
}
std::vector<ripple::uint256> resultKeys;
for (auto [key] : extract<ripple::uint256>(results))
resultKeys.push_back(key);
return resultKeys;
});
// one of the above errors must have happened
if (keys.empty())
return {};
LOG(log_.debug()) << "Fetched " << keys.size() << " diff hashes from database in " << timeDiff
<< " milliseconds";
auto const objs = fetchLedgerObjects(keys, ledgerSequence, yield);
std::vector<LedgerObject> results;
results.reserve(keys.size());
std::transform(
std::cbegin(keys),
std::cend(keys),
std::cbegin(objs),
std::back_inserter(results),
[](auto const& key, auto const& obj) { return LedgerObject{key, obj}; }
);
return results;
}
std::optional<std::string>
fetchMigratorStatus(std::string const& migratorName, boost::asio::yield_context yield) const override
{
auto const res = executor_.read(yield, schema_->selectMigratorStatus, Text(migratorName));
if (not res) {
LOG(log_.error()) << "Could not fetch migrator status: " << res.error();
return {};
}
auto const& results = res.value();
if (not results) {
return {};
}
for (auto [statusString] : extract<std::string>(results))
return statusString;
return {};
}
std::expected<std::vector<std::pair<boost::uuids::uuid, std::string>>, std::string>
fetchClioNodesData(boost::asio::yield_context yield) const override
{
auto const readResult = executor_.read(yield, schema_->selectClioNodesData);
if (not readResult)
return std::unexpected{readResult.error().message()};
std::vector<std::pair<boost::uuids::uuid, std::string>> result;
for (auto [uuid, message] : extract<boost::uuids::uuid, std::string>(*readResult)) {
result.emplace_back(uuid, std::move(message));
}
return result;
}
void
doWriteLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob) override
{
LOG(log_.trace()) << " Writing ledger object " << key.size() << ":" << seq << " [" << blob.size() << " bytes]";
if (range_)
executor_.write(schema_->insertDiff, seq, key);
executor_.write(schema_->insertObject, std::move(key), seq, std::move(blob));
}
void
writeSuccessor(std::string&& key, std::uint32_t const seq, std::string&& successor) override
{
LOG(log_.trace()) << "Writing successor. key = " << key.size() << " bytes. "
<< " seq = " << std::to_string(seq) << " successor = " << successor.size() << " bytes.";
ASSERT(!key.empty(), "Key must not be empty");
ASSERT(!successor.empty(), "Successor must not be empty");
executor_.write(schema_->insertSuccessor, std::move(key), seq, std::move(successor));
}
void
writeAccountTransactions(std::vector<AccountTransactionsData> data) override
{
std::vector<Statement> statements;
statements.reserve(data.size() * 10); // assume 10 transactions avg
for (auto& record : data) {
std::ranges::transform(record.accounts, std::back_inserter(statements), [this, &record](auto&& account) {
return schema_->insertAccountTx.bind(
std::forward<decltype(account)>(account),
std::make_tuple(record.ledgerSequence, record.transactionIndex),
record.txHash
);
});
}
executor_.write(std::move(statements));
}
void
writeAccountTransaction(AccountTransactionsData record) override
{
std::vector<Statement> statements;
statements.reserve(record.accounts.size());
std::ranges::transform(record.accounts, std::back_inserter(statements), [this, &record](auto&& account) {
return schema_->insertAccountTx.bind(
std::forward<decltype(account)>(account),
std::make_tuple(record.ledgerSequence, record.transactionIndex),
record.txHash
);
});
executor_.write(std::move(statements));
}
void
writeNFTTransactions(std::vector<NFTTransactionsData> const& data) override
{
std::vector<Statement> statements;
statements.reserve(data.size());
std::ranges::transform(data, std::back_inserter(statements), [this](auto const& record) {
return schema_->insertNFTTx.bind(
record.tokenID, std::make_tuple(record.ledgerSequence, record.transactionIndex), record.txHash
);
});
executor_.write(std::move(statements));
}
void
writeTransaction(
std::string&& hash,
std::uint32_t const seq,
std::uint32_t const date,
std::string&& transaction,
std::string&& metadata
) override
{
LOG(log_.trace()) << "Writing txn to database";
executor_.write(schema_->insertLedgerTransaction, seq, hash);
executor_.write(
schema_->insertTransaction, std::move(hash), seq, date, std::move(transaction), std::move(metadata)
);
}
void
writeNFTs(std::vector<NFTsData> const& data) override
{
std::vector<Statement> statements;
statements.reserve(data.size() * 3);
for (NFTsData const& record : data) {
if (!record.onlyUriChanged) {
statements.push_back(
schema_->insertNFT.bind(record.tokenID, record.ledgerSequence, record.owner, record.isBurned)
);
// If `uri` is set (and it can be set to an empty uri), we know this
// is a net-new NFT. That is, this NFT has not been seen before by
// us _OR_ it is in the extreme edge case of a re-minted NFT ID with
// the same NFT ID as an already-burned token. In this case, we need
// to record the URI and link to the issuer_nf_tokens table.
if (record.uri) {
statements.push_back(schema_->insertIssuerNFT.bind(
ripple::nft::getIssuer(record.tokenID),
static_cast<uint32_t>(ripple::nft::getTaxon(record.tokenID)),
record.tokenID
));
statements.push_back(
schema_->insertNFTURI.bind(record.tokenID, record.ledgerSequence, record.uri.value())
);
}
} else {
// only uri changed, we update the uri table only
statements.push_back(
schema_->insertNFTURI.bind(record.tokenID, record.ledgerSequence, record.uri.value())
);
}
}
executor_.writeEach(std::move(statements));
}
void
writeMPTHolders(std::vector<MPTHolderData> const& data) override
{
std::vector<Statement> statements;
statements.reserve(data.size());
for (auto [mptId, holder] : data)
statements.push_back(schema_->insertMPTHolder.bind(mptId, holder));
executor_.write(std::move(statements));
}
void
startWrites() const override
{
// Note: this was a no-op in the original implementation too;
// it was probably used in PG to start a transaction or similar.
}
void
writeMigratorStatus(std::string const& migratorName, std::string const& status) override
{
executor_.writeSync(
schema_->insertMigratorStatus, data::cassandra::Text{migratorName}, data::cassandra::Text(status)
);
}
void
writeNodeMessage(boost::uuids::uuid const& uuid, std::string message) override
{
executor_.writeSync(schema_->updateClioNodeMessage, data::cassandra::Text{std::move(message)}, uuid);
}
bool
isTooBusy() const override
{
return executor_.isTooBusy();
}
boost::json::object
stats() const override
{
return executor_.stats();
}
protected:
/**
* @brief Executes statements and tries to write to DB
*
* @param statement statement to execute
* @return true if successful, false if it fails
*/
bool
executeSyncUpdate(Statement statement)
{
auto const res = executor_.writeSync(statement);
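// For conditional (LWT) writes the first column of the returned row is the
// [applied] boolean, which is what gets extracted below.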
auto maybeSuccess = res->template get<bool>();
if (not maybeSuccess) {
LOG(log_.error()) << "executeSyncUpdate - error getting result - no row";
return false;
}
if (not maybeSuccess.value()) {
LOG(log_.warn()) << "Update failed. Checking if DB state is what we expect";
// error may indicate that another writer wrote something.
// in this case let's just compare the current state of things
// against what we were trying to write in the first place and
// use that as the source of truth for the result.
auto rng = hardFetchLedgerRangeNoThrow();
return rng && rng->maxSequence == ledgerSequence_;
}
return true;
}
};
} // namespace data::cassandra

View File

@@ -1,178 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include "data/cassandra/Concepts.hpp"
#include "data/cassandra/Handle.hpp"
#include "data/cassandra/Schema.hpp"
#include "data/cassandra/SettingsProvider.hpp"
#include "data/cassandra/Types.hpp"
#include "util/log/Logger.hpp"
#include <boost/json/string.hpp>
#include <fmt/compile.h>
#include <functional>
#include <memory>
namespace data::cassandra {
/**
* @brief Manages the DB schema and provides access to prepared statements.
*/
template <SomeSettingsProvider SettingsProviderType>
class CassandraSchema : public Schema<SettingsProviderType> {
using Schema<SettingsProviderType>::Schema;
public:
/**
* @brief Prepared statements specific to the Cassandra/ScyllaDB schema.
*/
struct CassandraStatements : public Schema<SettingsProviderType>::Statements {
using Schema<SettingsProviderType>::Statements::Statements;
//
// Update (and "delete") queries
//
PreparedStatement updateLedgerRange = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
UPDATE {}
SET sequence = ?
WHERE is_latest = ?
IF sequence IN (?, null)
)",
qualifiedTableName(settingsProvider_.get(), "ledger_range")
)
);
}();
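// Note: the `IF sequence IN (?, null)` condition makes this a compare-and-set:
// the update only applies when the stored sequence equals the expected previous
// value bound here, or when it has never been set (first write).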
//
// Select queries
//
PreparedStatement selectNFTIDsByIssuer = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
SELECT token_id
FROM {}
WHERE issuer = ?
AND (taxon, token_id) > ?
ORDER BY taxon ASC, token_id ASC
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")
)
);
}();
PreparedStatement selectAccountFromBeginning = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
SELECT account
FROM {}
WHERE token(account) > 0
PER PARTITION LIMIT 1
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "account_tx")
)
);
}();
PreparedStatement selectAccountFromToken = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
SELECT account
FROM {}
WHERE token(account) > token(?)
PER PARTITION LIMIT 1
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "account_tx")
)
);
}();
PreparedStatement selectLedgerPageKeys = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
SELECT key
FROM {}
WHERE TOKEN(key) >= ?
AND sequence <= ?
PER PARTITION LIMIT 1
LIMIT ?
ALLOW FILTERING
)",
qualifiedTableName(settingsProvider_.get(), "objects")
)
);
}();
PreparedStatement selectLedgerPage = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
SELECT object, key
FROM {}
WHERE TOKEN(key) >= ?
AND sequence <= ?
PER PARTITION LIMIT 1
LIMIT ?
ALLOW FILTERING
)",
qualifiedTableName(settingsProvider_.get(), "objects")
)
);
}();
};
void
prepareStatements(Handle const& handle) override
{
LOG(log_.info()) << "Preparing cassandra statements";
statements_ = std::make_unique<CassandraStatements>(settingsProvider_, handle);
LOG(log_.info()) << "Finished preparing statements";
}
/**
* @brief Provides access to statements.
*
* @return The statements
*/
std::unique_ptr<CassandraStatements> const&
operator->() const
{
return statements_;
}
private:
std::unique_ptr<CassandraStatements> statements_{nullptr};
};
} // namespace data::cassandra

View File

@@ -1,140 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include "data/cassandra/Concepts.hpp"
#include "data/cassandra/Handle.hpp"
#include "data/cassandra/Schema.hpp"
#include "data/cassandra/SettingsProvider.hpp"
#include "data/cassandra/Types.hpp"
#include "util/log/Logger.hpp"
#include <boost/json/string.hpp>
#include <fmt/compile.h>
#include <functional>
#include <memory>
namespace data::cassandra {
/**
* @brief Manages the DB schema and provides access to prepared statements.
*/
template <SomeSettingsProvider SettingsProviderType>
class KeyspaceSchema : public Schema<SettingsProviderType> {
public:
using Schema<SettingsProviderType>::Schema;
/**
* @brief Prepared statements specific to the Amazon Keyspaces schema.
*/
struct KeyspaceStatements : public Schema<SettingsProviderType>::Statements {
using Schema<SettingsProviderType>::Statements::Statements;
//
// Insert queries
//
PreparedStatement insertLedgerRange = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
INSERT INTO {} (is_latest, sequence) VALUES (?, ?) IF NOT EXISTS
)",
qualifiedTableName(settingsProvider_.get(), "ledger_range")
)
);
}();
//
// Update (and "delete") queries
//
PreparedStatement updateLedgerRange = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
UPDATE {}
SET sequence = ?
WHERE is_latest = ?
IF sequence = ?
)",
qualifiedTableName(settingsProvider_.get(), "ledger_range")
)
);
}();
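// Unlike the shared Cassandra schema, which folds the unset case into
// `IF sequence IN (?, null)`, the first-ever write is handled here by the
// separate insertLedgerRange statement above (`IF NOT EXISTS`), presumably
// because Amazon Keyspaces does not accept the IN-with-null condition.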
PreparedStatement selectLedgerRange = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
SELECT sequence
FROM {}
WHERE is_latest in (True, False)
)",
qualifiedTableName(settingsProvider_.get(), "ledger_range")
)
);
}();
//
// Select queries
//
PreparedStatement selectNFTsAfterTaxonKeyspaces = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
SELECT token_id
FROM {}
WHERE issuer = ?
AND taxon > ?
ORDER BY taxon ASC, token_id ASC
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")
)
);
}();
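// Note: this statement compares on `taxon` alone, whereas the shared schema's
// selectNFTIDsByIssuer uses the tuple relation `(taxon, token_id) > ?`. The
// two-query workflow in the Keyspaces backend stitches both halves together,
// presumably because Keyspaces lacks multi-column tuple comparisons.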
};
void
prepareStatements(Handle const& handle) override
{
LOG(log_.info()) << "Preparing aws keyspace statements";
statements_ = std::make_unique<KeyspaceStatements>(settingsProvider_, handle);
LOG(log_.info()) << "Finished preparing statements";
}
/**
* @brief Provides access to statements.
*
* @return The statements
*/
std::unique_ptr<KeyspaceStatements> const&
operator->() const
{
return statements_;
}
private:
std::unique_ptr<KeyspaceStatements> statements_{nullptr};
};
} // namespace data::cassandra

View File

@@ -24,10 +24,10 @@
#include "data/cassandra/Types.hpp" #include "data/cassandra/Types.hpp"
#include "util/log/Logger.hpp" #include "util/log/Logger.hpp"
#include <boost/json/string.hpp>
#include <fmt/compile.h> #include <fmt/compile.h>
#include <functional> #include <functional>
#include <memory>
#include <string> #include <string>
#include <string_view> #include <string_view>
#include <vector> #include <vector>
@@ -53,15 +53,12 @@ template <SomeSettingsProvider SettingsProviderType>
*/ */
template <SomeSettingsProvider SettingsProviderType> template <SomeSettingsProvider SettingsProviderType>
class Schema { class Schema {
protected:
util::Logger log_{"Backend"}; util::Logger log_{"Backend"};
std::reference_wrapper<SettingsProviderType const> settingsProvider_; std::reference_wrapper<SettingsProviderType const> settingsProvider_;
public: public:
virtual ~Schema() = default;
/** /**
* @brief Shared Schema's between all Schema classes (Cassandra and Keyspace) * @brief Construct a new Schema object
* *
* @param settingsProvider The settings provider * @param settingsProvider The settings provider
*/ */
@@ -337,7 +334,6 @@ public:
* @brief Prepared statements holder. * @brief Prepared statements holder.
*/ */
class Statements { class Statements {
protected:
std::reference_wrapper<SettingsProviderType const> settingsProvider_; std::reference_wrapper<SettingsProviderType const> settingsProvider_;
std::reference_wrapper<Handle const> handle_; std::reference_wrapper<Handle const> handle_;
@@ -530,6 +526,20 @@ public:
// Update (and "delete") queries // Update (and "delete") queries
// //
PreparedStatement updateLedgerRange = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
UPDATE {}
SET sequence = ?
WHERE is_latest = ?
IF sequence IN (?, null)
)",
qualifiedTableName(settingsProvider_.get(), "ledger_range")
)
);
}();
PreparedStatement deleteLedgerRange = [this]() { PreparedStatement deleteLedgerRange = [this]() {
return handle_.get().prepare( return handle_.get().prepare(
fmt::format( fmt::format(
@@ -644,6 +654,40 @@ public:
); );
}(); }();
PreparedStatement selectLedgerPageKeys = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
SELECT key
FROM {}
WHERE TOKEN(key) >= ?
AND sequence <= ?
PER PARTITION LIMIT 1
LIMIT ?
ALLOW FILTERING
)",
qualifiedTableName(settingsProvider_.get(), "objects")
)
);
}();
PreparedStatement selectLedgerPage = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
SELECT object, key
FROM {}
WHERE TOKEN(key) >= ?
AND sequence <= ?
PER PARTITION LIMIT 1
LIMIT ?
ALLOW FILTERING
)",
qualifiedTableName(settingsProvider_.get(), "objects")
)
);
}();
PreparedStatement getToken = [this]() { PreparedStatement getToken = [this]() {
return handle_.get().prepare( return handle_.get().prepare(
fmt::format( fmt::format(
@@ -673,6 +717,36 @@ public:
); );
}(); }();
PreparedStatement selectAccountFromBeginning = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
SELECT account
FROM {}
WHERE token(account) > 0
PER PARTITION LIMIT 1
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "account_tx")
)
);
}();
PreparedStatement selectAccountFromToken = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
SELECT account
FROM {}
WHERE token(account) > token(?)
PER PARTITION LIMIT 1
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "account_tx")
)
);
}();
PreparedStatement selectAccountTxForward = [this]() { PreparedStatement selectAccountTxForward = [this]() {
return handle_.get().prepare( return handle_.get().prepare(
fmt::format( fmt::format(
@@ -753,6 +827,22 @@ public:
); );
}(); }();
PreparedStatement selectNFTIDsByIssuer = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
SELECT token_id
FROM {}
WHERE issuer = ?
AND (taxon, token_id) > ?
ORDER BY taxon ASC, token_id ASC
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")
)
);
}();
PreparedStatement selectNFTIDsByIssuerTaxon = [this]() { PreparedStatement selectNFTIDsByIssuerTaxon = [this]() {
return handle_.get().prepare( return handle_.get().prepare(
fmt::format( fmt::format(
@@ -870,8 +960,27 @@ public:
* *
* @param handle The handle to the DB * @param handle The handle to the DB
*/ */
virtual void void
prepareStatements(Handle const& handle) = 0; prepareStatements(Handle const& handle)
{
LOG(log_.info()) << "Preparing cassandra statements";
statements_ = std::make_unique<Statements>(settingsProvider_, handle);
LOG(log_.info()) << "Finished preparing statements";
}
/**
* @brief Provides access to statements.
*
* @return The statements
*/
std::unique_ptr<Statements> const&
operator->() const
{
return statements_;
}
private:
std::unique_ptr<Statements> statements_{nullptr};
}; };
} // namespace data::cassandra } // namespace data::cassandra

Some files were not shown because too many files have changed in this diff.