mirror of
https://github.com/XRPLF/clio.git
synced 2025-11-04 11:55:51 +00:00
Compare commits
65 Commits
revert-181
...
release/2.
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
8d875702eb | ||
|
|
67e451ec23 | ||
|
|
92789d5a91 | ||
|
|
5e7ff66ba6 | ||
|
|
1b1a5e4068 | ||
|
|
73477fb9d4 | ||
|
|
8ac1ff7699 | ||
|
|
1bba437085 | ||
|
|
41fc67748a | ||
|
|
26842374de | ||
|
|
a46d700390 | ||
|
|
a34d565ea4 | ||
|
|
c57fe1e6e4 | ||
|
|
8a08c5e6ce | ||
|
|
5d2694d36c | ||
|
|
98ff72be66 | ||
|
|
915a8beb40 | ||
|
|
f7db030ad7 | ||
|
|
86e2cd1cc4 | ||
|
|
f0613c945f | ||
|
|
d11e7bc60e | ||
|
|
b909b8879d | ||
|
|
918a92eeee | ||
|
|
c9e8330e0a | ||
|
|
f577139f70 | ||
|
|
491cd58f93 | ||
|
|
25296f8ffa | ||
|
|
4b178805de | ||
|
|
fcebd715ba | ||
|
|
531e1dad6d | ||
|
|
3c008b6bb4 | ||
|
|
624f7ff6d5 | ||
|
|
e503dffc9a | ||
|
|
cd1aa8fb70 | ||
|
|
b5fe22da18 | ||
|
|
cd6289b79a | ||
|
|
f5e6c9576e | ||
|
|
427ba47716 | ||
|
|
7b043025e8 | ||
|
|
67c989081d | ||
|
|
2fd16cd582 | ||
|
|
89af8fe500 | ||
|
|
1753c95910 | ||
|
|
e7702e9c11 | ||
|
|
e549657766 | ||
|
|
7c2742036b | ||
|
|
73f375f20d | ||
|
|
3e200d8b9d | ||
|
|
81fe617816 | ||
|
|
75354fbecd | ||
|
|
540e938223 | ||
|
|
6ef6ca9e65 | ||
|
|
35b9a066e3 | ||
|
|
957028699b | ||
|
|
12e6fcc97e | ||
|
|
f9d9879513 | ||
|
|
278f7b1b58 | ||
|
|
fbedeff697 | ||
|
|
f64d8ecb77 | ||
|
|
3e38ea9b48 | ||
|
|
7834b63b55 | ||
|
|
2cf849dd12 | ||
|
|
c47b96bc68 | ||
|
|
9659d98140 | ||
|
|
1460d590f1 |
6
.github/actions/build_clio/action.yml
vendored
6
.github/actions/build_clio/action.yml
vendored
@@ -4,12 +4,18 @@ inputs:
|
||||
target:
|
||||
description: Build target name
|
||||
default: all
|
||||
substract_threads:
|
||||
description: An option for the action get_number_of_threads. See get_number_of_threads
|
||||
required: true
|
||||
default: '0'
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- name: Get number of threads
|
||||
uses: ./.github/actions/get_number_of_threads
|
||||
id: number_of_threads
|
||||
with:
|
||||
substract_threads: ${{ inputs.substract_threads }}
|
||||
|
||||
- name: Build Clio
|
||||
shell: bash
|
||||
|
||||
20
.github/actions/generate/action.yml
vendored
20
.github/actions/generate/action.yml
vendored
@@ -12,6 +12,10 @@ inputs:
|
||||
description: Build type for third-party libraries and clio. Could be 'Release', 'Debug'
|
||||
required: true
|
||||
default: 'Release'
|
||||
build_integration_tests:
|
||||
description: Whether to build integration tests
|
||||
required: true
|
||||
default: 'true'
|
||||
code_coverage:
|
||||
description: Whether conan's coverage option should be on or not
|
||||
required: true
|
||||
@@ -20,6 +24,10 @@ inputs:
|
||||
description: Whether Clio is to be statically linked
|
||||
required: true
|
||||
default: 'false'
|
||||
sanitizer:
|
||||
description: Sanitizer to use
|
||||
required: true
|
||||
default: 'false' # false, tsan, asan or ubsan
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
@@ -33,14 +41,20 @@ runs:
|
||||
BUILD_OPTION: "${{ inputs.conan_cache_hit == 'true' && 'missing' || '' }}"
|
||||
CODE_COVERAGE: "${{ inputs.code_coverage == 'true' && 'True' || 'False' }}"
|
||||
STATIC_OPTION: "${{ inputs.static == 'true' && 'True' || 'False' }}"
|
||||
INTEGRATION_TESTS_OPTION: "${{ inputs.build_integration_tests == 'true' && 'True' || 'False' }}"
|
||||
run: |
|
||||
cd build
|
||||
conan install .. -of . -b $BUILD_OPTION -s build_type=${{ inputs.build_type }} -o clio:static="${STATIC_OPTION}" -o clio:tests=True -o clio:integration_tests=True -o clio:lint=False -o clio:coverage="${CODE_COVERAGE}" --profile ${{ inputs.conan_profile }}
|
||||
|
||||
conan install .. -of . -b $BUILD_OPTION -s build_type=${{ inputs.build_type }} -o clio:static="${STATIC_OPTION}" -o clio:tests=True -o clio:integration_tests="${INTEGRATION_TESTS_OPTION}" -o clio:lint=False -o clio:coverage="${CODE_COVERAGE}" --profile ${{ inputs.conan_profile }}
|
||||
|
||||
- name: Run cmake
|
||||
shell: bash
|
||||
env:
|
||||
BUILD_TYPE: "${{ inputs.build_type }}"
|
||||
SANITIZER_OPTION: |
|
||||
${{ inputs.sanitizer == 'tsan' && '-Dsan=thread' ||
|
||||
inputs.sanitizer == 'ubsan' && '-Dsan=undefined' ||
|
||||
inputs.sanitizer == 'asan' && '-Dsan=address' ||
|
||||
'' }}
|
||||
run: |
|
||||
cd build
|
||||
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=${{ inputs.build_type }} ${{ inputs.extra_cmake_args }} .. -G Ninja
|
||||
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE="${BUILD_TYPE}" ${SANITIZER_OPTION} .. -G Ninja
|
||||
|
||||
14
.github/actions/get_number_of_threads/action.yml
vendored
14
.github/actions/get_number_of_threads/action.yml
vendored
@@ -1,5 +1,10 @@
|
||||
name: Get number of threads
|
||||
description: Determines number of threads to use on macOS and Linux
|
||||
inputs:
|
||||
substract_threads:
|
||||
description: How many threads to substract from the calculated number
|
||||
required: true
|
||||
default: '0'
|
||||
outputs:
|
||||
threads_number:
|
||||
description: Number of threads to use
|
||||
@@ -19,8 +24,11 @@ runs:
|
||||
shell: bash
|
||||
run: echo "num=$(($(nproc) - 2))" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Export output variable
|
||||
shell: bash
|
||||
- name: Shift and export number of threads
|
||||
id: number_of_threads_export
|
||||
shell: bash
|
||||
run: |
|
||||
echo "num=${{ steps.mac_threads.outputs.num || steps.linux_threads.outputs.num }}" >> $GITHUB_OUTPUT
|
||||
num_of_threads=${{ steps.mac_threads.outputs.num || steps.linux_threads.outputs.num }}
|
||||
shift_by=${{ inputs.substract_threads }}
|
||||
shifted=$((num_of_threads - shift_by))
|
||||
echo "num=$(( shifted > 1 ? shifted : 1 ))" >> $GITHUB_OUTPUT
|
||||
|
||||
45
.github/scripts/execute-tests-under-sanitizer
vendored
Executable file
45
.github/scripts/execute-tests-under-sanitizer
vendored
Executable file
@@ -0,0 +1,45 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -o pipefail
|
||||
|
||||
# Note: This script is intended to be run from the root of the repository.
|
||||
#
|
||||
# This script runs each unit-test separately and generates reports from the currently active sanitizer.
|
||||
# Output is saved in ./.sanitizer-report in the root of the repository
|
||||
|
||||
if [[ -z "$1" ]]; then
|
||||
cat <<EOF
|
||||
|
||||
ERROR
|
||||
-----------------------------------------------------------------------------
|
||||
Path to clio_tests should be passed as first argument to the script.
|
||||
-----------------------------------------------------------------------------
|
||||
|
||||
EOF
|
||||
exit 1
|
||||
fi
|
||||
|
||||
TEST_BINARY=$1
|
||||
|
||||
if [[ ! -f "$TEST_BINARY" ]]; then
|
||||
echo "Test binary not found: $TEST_BINARY"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
TESTS=$($TEST_BINARY --gtest_list_tests | awk '/^ / {print suite $1} !/^ / {suite=$1}')
|
||||
|
||||
OUTPUT_DIR="./.sanitizer-report"
|
||||
mkdir -p "$OUTPUT_DIR"
|
||||
|
||||
for TEST in $TESTS; do
|
||||
OUTPUT_FILE="$OUTPUT_DIR/${TEST//\//_}"
|
||||
export TSAN_OPTIONS="log_path=\"$OUTPUT_FILE\" die_after_fork=0"
|
||||
export ASAN_OPTIONS="log_path=\"$OUTPUT_FILE\""
|
||||
export UBSAN_OPTIONS="log_path=\"$OUTPUT_FILE\""
|
||||
export MallocNanoZone='0' # for MacOSX
|
||||
$TEST_BINARY --gtest_filter="$TEST" > /dev/null 2>&1
|
||||
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "'$TEST' failed a sanitizer check."
|
||||
fi
|
||||
done
|
||||
182
.github/workflows/build.yml
vendored
182
.github/workflows/build.yml
vendored
@@ -9,7 +9,7 @@ on:
|
||||
jobs:
|
||||
check_format:
|
||||
name: Check format
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
steps:
|
||||
@@ -26,7 +26,7 @@ jobs:
|
||||
|
||||
check_docs:
|
||||
name: Check documentation
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
steps:
|
||||
@@ -47,133 +47,44 @@ jobs:
|
||||
matrix:
|
||||
include:
|
||||
- os: heavy
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
build_type: Release
|
||||
conan_profile: gcc
|
||||
build_type: Release
|
||||
container: '{ "image": "rippleci/clio_ci:latest" }'
|
||||
code_coverage: false
|
||||
static: true
|
||||
- os: heavy
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
build_type: Debug
|
||||
conan_profile: gcc
|
||||
build_type: Debug
|
||||
container: '{ "image": "rippleci/clio_ci:latest" }'
|
||||
code_coverage: true
|
||||
static: true
|
||||
- os: heavy
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
build_type: Release
|
||||
conan_profile: clang
|
||||
build_type: Release
|
||||
container: '{ "image": "rippleci/clio_ci:latest" }'
|
||||
code_coverage: false
|
||||
static: true
|
||||
- os: heavy
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
build_type: Debug
|
||||
conan_profile: clang
|
||||
build_type: Debug
|
||||
container: '{ "image": "rippleci/clio_ci:latest" }'
|
||||
code_coverage: false
|
||||
static: true
|
||||
- os: macos15
|
||||
build_type: Release
|
||||
code_coverage: false
|
||||
static: false
|
||||
runs-on: [self-hosted, "${{ matrix.os }}"]
|
||||
container: ${{ matrix.container }}
|
||||
|
||||
steps:
|
||||
- name: Clean workdir
|
||||
if: ${{ runner.os == 'macOS' }}
|
||||
uses: kuznetsss/workspace-cleanup@1.0
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Prepare runner
|
||||
uses: ./.github/actions/prepare_runner
|
||||
with:
|
||||
disable_ccache: false
|
||||
|
||||
- name: Setup conan
|
||||
uses: ./.github/actions/setup_conan
|
||||
id: conan
|
||||
with:
|
||||
conan_profile: ${{ matrix.conan_profile }}
|
||||
|
||||
- name: Restore cache
|
||||
uses: ./.github/actions/restore_cache
|
||||
id: restore_cache
|
||||
with:
|
||||
conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
|
||||
conan_profile: ${{ steps.conan.outputs.conan_profile }}
|
||||
ccache_dir: ${{ env.CCACHE_DIR }}
|
||||
build_type: ${{ matrix.build_type }}
|
||||
code_coverage: ${{ matrix.code_coverage }}
|
||||
|
||||
- name: Run conan and cmake
|
||||
uses: ./.github/actions/generate
|
||||
with:
|
||||
conan_profile: ${{ steps.conan.outputs.conan_profile }}
|
||||
conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
|
||||
build_type: ${{ matrix.build_type }}
|
||||
code_coverage: ${{ matrix.code_coverage }}
|
||||
static: ${{ matrix.static }}
|
||||
|
||||
- name: Build Clio
|
||||
uses: ./.github/actions/build_clio
|
||||
|
||||
- name: Show ccache's statistics
|
||||
shell: bash
|
||||
id: ccache_stats
|
||||
run: |
|
||||
ccache -s > /tmp/ccache.stats
|
||||
miss_rate=$(cat /tmp/ccache.stats | grep 'Misses' | head -n1 | sed 's/.*(\(.*\)%).*/\1/')
|
||||
echo "miss_rate=${miss_rate}" >> $GITHUB_OUTPUT
|
||||
cat /tmp/ccache.stats
|
||||
|
||||
- name: Strip tests
|
||||
if: ${{ !matrix.code_coverage }}
|
||||
run: strip build/clio_tests && strip build/clio_integration_tests
|
||||
|
||||
- name: Upload clio_server
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: clio_server_${{ runner.os }}_${{ matrix.build_type }}_${{ steps.conan.outputs.conan_profile }}
|
||||
path: build/clio_server
|
||||
|
||||
- name: Upload clio_tests
|
||||
if: ${{ !matrix.code_coverage }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: clio_tests_${{ runner.os }}_${{ matrix.build_type }}_${{ steps.conan.outputs.conan_profile }}
|
||||
path: build/clio_*tests
|
||||
|
||||
- name: Save cache
|
||||
uses: ./.github/actions/save_cache
|
||||
with:
|
||||
conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
|
||||
conan_hash: ${{ steps.restore_cache.outputs.conan_hash }}
|
||||
conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
|
||||
ccache_dir: ${{ env.CCACHE_DIR }}
|
||||
ccache_cache_hit: ${{ steps.restore_cache.outputs.ccache_cache_hit }}
|
||||
ccache_cache_miss_rate: ${{ steps.ccache_stats.outputs.miss_rate }}
|
||||
build_type: ${{ matrix.build_type }}
|
||||
code_coverage: ${{ matrix.code_coverage }}
|
||||
conan_profile: ${{ steps.conan.outputs.conan_profile }}
|
||||
|
||||
# TODO: This is not a part of build process but it is the easiest way to do it here.
|
||||
# It will be refactored in https://github.com/XRPLF/clio/issues/1075
|
||||
- name: Run code coverage
|
||||
if: ${{ matrix.code_coverage }}
|
||||
uses: ./.github/actions/code_coverage
|
||||
|
||||
upload_coverage_report:
|
||||
name: Codecov
|
||||
needs: build
|
||||
uses: ./.github/workflows/upload_coverage_report.yml
|
||||
secrets:
|
||||
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
|
||||
uses: ./.github/workflows/build_impl.yml
|
||||
with:
|
||||
runs_on: ${{ matrix.os }}
|
||||
container: ${{ matrix.container }}
|
||||
conan_profile: ${{ matrix.conan_profile }}
|
||||
build_type: ${{ matrix.build_type }}
|
||||
code_coverage: ${{ matrix.code_coverage }}
|
||||
static: ${{ matrix.static }}
|
||||
unit_tests: true
|
||||
integration_tests: true
|
||||
clio_server: true
|
||||
|
||||
test:
|
||||
name: Run Tests
|
||||
@@ -183,24 +94,24 @@ jobs:
|
||||
matrix:
|
||||
include:
|
||||
- os: heavy
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
conan_profile: gcc
|
||||
build_type: Release
|
||||
- os: heavy
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
- os: heavy
|
||||
conan_profile: clang
|
||||
build_type: Release
|
||||
- os: heavy
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
- os: heavy
|
||||
conan_profile: clang
|
||||
build_type: Debug
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
- os: macos15
|
||||
conan_profile: apple_clang_16
|
||||
build_type: Release
|
||||
runs-on: [self-hosted, "${{ matrix.os }}"]
|
||||
runs-on: ${{ matrix.os }}
|
||||
container: ${{ matrix.container }}
|
||||
|
||||
steps:
|
||||
@@ -216,3 +127,44 @@ jobs:
|
||||
run: |
|
||||
chmod +x ./clio_tests
|
||||
./clio_tests
|
||||
|
||||
check_config:
|
||||
name: Check Config Description
|
||||
needs: build
|
||||
runs-on: heavy
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: clio_server_Linux_Release_gcc
|
||||
- name: Compare Config Description
|
||||
shell: bash
|
||||
run: |
|
||||
repoConfigFile=docs/config-description.md
|
||||
if ! [ -f ${repoConfigFile} ]; then
|
||||
echo "Config Description markdown file is missing in docs folder"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
chmod +x ./clio_server
|
||||
configDescriptionFile=config_description_new.md
|
||||
./clio_server -d ${configDescriptionFile}
|
||||
|
||||
configDescriptionHash=$(sha256sum ${configDescriptionFile} | cut -d' ' -f1)
|
||||
repoConfigHash=$(sha256sum ${repoConfigFile} | cut -d' ' -f1)
|
||||
|
||||
if [ ${configDescriptionHash} != ${repoConfigHash} ]; then
|
||||
echo "Markdown file is not up to date"
|
||||
diff -u "${repoConfigFile}" "${configDescriptionFile}"
|
||||
rm -f ${configDescriptionFile}
|
||||
exit 1
|
||||
fi
|
||||
rm -f ${configDescriptionFile}
|
||||
exit 0
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -40,7 +40,7 @@ on:
|
||||
jobs:
|
||||
build_and_publish_image:
|
||||
name: Build and publish image
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
|
||||
192
.github/workflows/build_impl.yml
vendored
Normal file
192
.github/workflows/build_impl.yml
vendored
Normal file
@@ -0,0 +1,192 @@
|
||||
name: Reusable build
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
runs_on:
|
||||
description: Runner to run the job on
|
||||
required: true
|
||||
type: string
|
||||
default: heavy
|
||||
|
||||
container:
|
||||
description: "The container object as a JSON string (leave empty to run natively)"
|
||||
required: true
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
conan_profile:
|
||||
description: Conan profile to use
|
||||
required: true
|
||||
type: string
|
||||
|
||||
build_type:
|
||||
description: Build type
|
||||
required: true
|
||||
type: string
|
||||
|
||||
disable_cache:
|
||||
description: Whether ccache and conan cache should be disabled
|
||||
required: false
|
||||
type: boolean
|
||||
default: false
|
||||
|
||||
code_coverage:
|
||||
description: Whether to enable code coverage
|
||||
required: true
|
||||
type: boolean
|
||||
default: false
|
||||
|
||||
static:
|
||||
description: Whether to build static binaries
|
||||
required: true
|
||||
type: boolean
|
||||
default: true
|
||||
|
||||
unit_tests:
|
||||
description: Whether to run unit tests
|
||||
required: true
|
||||
type: boolean
|
||||
default: false
|
||||
|
||||
integration_tests:
|
||||
description: Whether to run integration tests
|
||||
required: true
|
||||
type: boolean
|
||||
default: false
|
||||
|
||||
clio_server:
|
||||
description: Whether to build clio_server
|
||||
required: true
|
||||
type: boolean
|
||||
default: true
|
||||
|
||||
target:
|
||||
description: Build target name
|
||||
required: false
|
||||
type: string
|
||||
default: all
|
||||
|
||||
sanitizer:
|
||||
description: Sanitizer to use
|
||||
required: false
|
||||
type: string
|
||||
default: 'false'
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Build ${{ inputs.container != '' && 'in container' || 'natively' }}
|
||||
runs-on: ${{ inputs.runs_on }}
|
||||
container: ${{ inputs.container != '' && fromJson(inputs.container) || null }}
|
||||
|
||||
steps:
|
||||
- name: Clean workdir
|
||||
if: ${{ runner.os == 'macOS' }}
|
||||
uses: kuznetsss/workspace-cleanup@1.0
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Prepare runner
|
||||
uses: ./.github/actions/prepare_runner
|
||||
with:
|
||||
disable_ccache: ${{ inputs.disable_cache }}
|
||||
|
||||
- name: Setup conan
|
||||
uses: ./.github/actions/setup_conan
|
||||
id: conan
|
||||
with:
|
||||
conan_profile: ${{ inputs.conan_profile }}
|
||||
|
||||
- name: Restore cache
|
||||
if: ${{ !inputs.disable_cache }}
|
||||
uses: ./.github/actions/restore_cache
|
||||
id: restore_cache
|
||||
with:
|
||||
conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
|
||||
conan_profile: ${{ steps.conan.outputs.conan_profile }}
|
||||
ccache_dir: ${{ env.CCACHE_DIR }}
|
||||
build_type: ${{ inputs.build_type }}
|
||||
code_coverage: ${{ inputs.code_coverage }}
|
||||
|
||||
- name: Run conan and cmake
|
||||
uses: ./.github/actions/generate
|
||||
with:
|
||||
conan_profile: ${{ steps.conan.outputs.conan_profile }}
|
||||
conan_cache_hit: ${{ !inputs.disable_cache && steps.restore_cache.outputs.conan_cache_hit }}
|
||||
build_type: ${{ inputs.build_type }}
|
||||
code_coverage: ${{ inputs.code_coverage }}
|
||||
static: ${{ inputs.static }}
|
||||
sanitizer: ${{ inputs.sanitizer }}
|
||||
|
||||
- name: Build Clio
|
||||
uses: ./.github/actions/build_clio
|
||||
with:
|
||||
target: ${{ inputs.target }}
|
||||
|
||||
- name: Show ccache's statistics
|
||||
if: ${{ !inputs.disable_cache }}
|
||||
shell: bash
|
||||
id: ccache_stats
|
||||
run: |
|
||||
ccache -s > /tmp/ccache.stats
|
||||
miss_rate=$(cat /tmp/ccache.stats | grep 'Misses' | head -n1 | sed 's/.*(\(.*\)%).*/\1/')
|
||||
echo "miss_rate=${miss_rate}" >> $GITHUB_OUTPUT
|
||||
cat /tmp/ccache.stats
|
||||
|
||||
- name: Strip unit_tests
|
||||
if: ${{ inputs.unit_tests && !inputs.code_coverage && inputs.sanitizer == 'false' }}
|
||||
run: strip build/clio_tests
|
||||
|
||||
- name: Strip integration_tests
|
||||
if: ${{ inputs.integration_tests && !inputs.code_coverage }}
|
||||
run: strip build/clio_integration_tests
|
||||
|
||||
- name: Upload clio_server
|
||||
if: ${{ inputs.clio_server }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: clio_server_${{ runner.os }}_${{ inputs.build_type }}_${{ steps.conan.outputs.conan_profile }}
|
||||
path: build/clio_server
|
||||
|
||||
- name: Upload clio_tests
|
||||
if: ${{ inputs.unit_tests && !inputs.code_coverage }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: clio_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ steps.conan.outputs.conan_profile }}
|
||||
path: build/clio_tests
|
||||
|
||||
- name: Upload clio_integration_tests
|
||||
if: ${{ inputs.integration_tests && !inputs.code_coverage }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: clio_integration_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ steps.conan.outputs.conan_profile }}
|
||||
path: build/clio_integration_tests
|
||||
|
||||
- name: Save cache
|
||||
if: ${{ !inputs.disable_cache && github.ref == 'refs/heads/develop' }}
|
||||
uses: ./.github/actions/save_cache
|
||||
with:
|
||||
conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
|
||||
conan_hash: ${{ steps.restore_cache.outputs.conan_hash }}
|
||||
conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
|
||||
ccache_dir: ${{ env.CCACHE_DIR }}
|
||||
ccache_cache_hit: ${{ steps.restore_cache.outputs.ccache_cache_hit }}
|
||||
ccache_cache_miss_rate: ${{ steps.ccache_stats.outputs.miss_rate }}
|
||||
build_type: ${{ inputs.build_type }}
|
||||
code_coverage: ${{ inputs.code_coverage }}
|
||||
conan_profile: ${{ steps.conan.outputs.conan_profile }}
|
||||
|
||||
# TODO: This is not a part of build process but it is the easiest way to do it here.
|
||||
# It will be refactored in https://github.com/XRPLF/clio/issues/1075
|
||||
- name: Run code coverage
|
||||
if: ${{ inputs.code_coverage }}
|
||||
uses: ./.github/actions/code_coverage
|
||||
|
||||
upload_coverage_report:
|
||||
if: ${{ inputs.code_coverage }}
|
||||
name: Codecov
|
||||
needs: build
|
||||
uses: ./.github/workflows/upload_coverage_report.yml
|
||||
secrets:
|
||||
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
|
||||
2
.github/workflows/check_libxrpl.yml
vendored
2
.github/workflows/check_libxrpl.yml
vendored
@@ -71,7 +71,7 @@ jobs:
|
||||
name: Create an issue on failure
|
||||
needs: [build, run_tests]
|
||||
if: ${{ always() && contains(needs.*.result, 'failure') }}
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
issues: write
|
||||
|
||||
2
.github/workflows/check_pr_title.yml
vendored
2
.github/workflows/check_pr_title.yml
vendored
@@ -6,7 +6,7 @@ on:
|
||||
|
||||
jobs:
|
||||
check_title:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-latest
|
||||
# permissions:
|
||||
# pull-requests: write
|
||||
steps:
|
||||
|
||||
4
.github/workflows/clang-tidy.yml
vendored
4
.github/workflows/clang-tidy.yml
vendored
@@ -1,7 +1,7 @@
|
||||
name: Clang-tidy check
|
||||
on:
|
||||
schedule:
|
||||
- cron: "0 6 * * 1-5"
|
||||
- cron: "0 9 * * 1-5"
|
||||
workflow_dispatch:
|
||||
pull_request:
|
||||
branches: [develop]
|
||||
@@ -12,7 +12,7 @@ on:
|
||||
|
||||
jobs:
|
||||
clang_tidy:
|
||||
runs-on: [self-hosted, Linux]
|
||||
runs-on: heavy
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
permissions:
|
||||
|
||||
@@ -6,7 +6,7 @@ on:
|
||||
|
||||
jobs:
|
||||
restart_clang_tidy:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
permissions:
|
||||
actions: write
|
||||
|
||||
2
.github/workflows/docs.yml
vendored
2
.github/workflows/docs.yml
vendored
@@ -18,7 +18,7 @@ jobs:
|
||||
environment:
|
||||
name: github-pages
|
||||
url: ${{ steps.deployment.outputs.page_url }}
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-latest
|
||||
continue-on-error: true
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
|
||||
90
.github/workflows/nightly.yml
vendored
90
.github/workflows/nightly.yml
vendored
@@ -1,7 +1,7 @@
|
||||
name: Nightly release
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 5 * * 1-5'
|
||||
- cron: '0 8 * * 1-5'
|
||||
workflow_dispatch:
|
||||
pull_request:
|
||||
paths:
|
||||
@@ -21,68 +21,23 @@ jobs:
|
||||
- os: heavy
|
||||
build_type: Release
|
||||
static: true
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
container: '{ "image": "rippleci/clio_ci:latest" }'
|
||||
- os: heavy
|
||||
build_type: Debug
|
||||
static: true
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
runs-on: [self-hosted, "${{ matrix.os }}"]
|
||||
container: ${{ matrix.container }}
|
||||
|
||||
steps:
|
||||
- name: Clean workdir
|
||||
if: ${{ runner.os == 'macOS' }}
|
||||
uses: kuznetsss/workspace-cleanup@1.0
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Prepare runner
|
||||
uses: ./.github/actions/prepare_runner
|
||||
with:
|
||||
disable_ccache: true
|
||||
|
||||
- name: Setup conan
|
||||
uses: ./.github/actions/setup_conan
|
||||
id: conan
|
||||
with:
|
||||
conan_profile: gcc
|
||||
|
||||
- name: Run conan and cmake
|
||||
uses: ./.github/actions/generate
|
||||
with:
|
||||
conan_profile: ${{ steps.conan.outputs.conan_profile }}
|
||||
conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
|
||||
build_type: ${{ matrix.build_type }}
|
||||
static: ${{ matrix.static }}
|
||||
|
||||
- name: Build Clio
|
||||
uses: ./.github/actions/build_clio
|
||||
|
||||
- name: Strip tests
|
||||
run: strip build/clio_tests && strip build/clio_integration_tests
|
||||
|
||||
- name: Upload clio_tests
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: clio_tests_${{ runner.os }}_${{ matrix.build_type }}
|
||||
path: build/clio_*tests
|
||||
|
||||
- name: Compress clio_server
|
||||
shell: bash
|
||||
run: |
|
||||
cd build
|
||||
tar czf ./clio_server_${{ runner.os }}_${{ matrix.build_type }}.tar.gz ./clio_server
|
||||
|
||||
- name: Upload clio_server
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: clio_server_${{ runner.os }}_${{ matrix.build_type }}
|
||||
path: build/clio_server_${{ runner.os }}_${{ matrix.build_type }}.tar.gz
|
||||
|
||||
container: '{ "image": "rippleci/clio_ci:latest" }'
|
||||
uses: ./.github/workflows/build_impl.yml
|
||||
with:
|
||||
runs_on: ${{ matrix.os }}
|
||||
container: ${{ matrix.container }}
|
||||
conan_profile: gcc
|
||||
build_type: ${{ matrix.build_type }}
|
||||
code_coverage: false
|
||||
static: ${{ matrix.static }}
|
||||
unit_tests: true
|
||||
integration_tests: true
|
||||
clio_server: true
|
||||
disable_cache: true
|
||||
|
||||
run_tests:
|
||||
needs: build
|
||||
@@ -91,14 +46,17 @@ jobs:
|
||||
matrix:
|
||||
include:
|
||||
- os: macos15
|
||||
conan_profile: apple_clang_16
|
||||
build_type: Release
|
||||
integration_tests: false
|
||||
- os: heavy
|
||||
conan_profile: gcc
|
||||
build_type: Release
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
integration_tests: true
|
||||
- os: heavy
|
||||
conan_profile: gcc
|
||||
build_type: Debug
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
@@ -122,13 +80,17 @@ jobs:
|
||||
|
||||
- uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: clio_tests_${{ runner.os }}_${{ matrix.build_type }}
|
||||
name: clio_tests_${{ runner.os }}_${{ matrix.build_type }}_${{ matrix.conan_profile }}
|
||||
|
||||
- name: Run clio_tests
|
||||
run: |
|
||||
chmod +x ./clio_tests
|
||||
./clio_tests
|
||||
|
||||
- uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: clio_integration_tests_${{ runner.os }}_${{ matrix.build_type }}_${{ matrix.conan_profile }}
|
||||
|
||||
# To be enabled back once docker in mac runner arrives
|
||||
# https://github.com/XRPLF/clio/issues/1400
|
||||
- name: Run clio_integration_tests
|
||||
@@ -140,7 +102,7 @@ jobs:
|
||||
nightly_release:
|
||||
if: ${{ github.event_name != 'pull_request' }}
|
||||
needs: run_tests
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
GH_REPO: ${{ github.repository }}
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
@@ -189,14 +151,14 @@ jobs:
|
||||
tags: |
|
||||
type=raw,value=nightly
|
||||
type=raw,value=${{ github.sha }}
|
||||
artifact_name: clio_server_Linux_Release
|
||||
artifact_name: clio_server_Linux_Release_gcc
|
||||
strip_binary: true
|
||||
publish_image: ${{ github.event_name != 'pull_request' }}
|
||||
|
||||
create_issue_on_failure:
|
||||
needs: [build, run_tests, nightly_release, build_and_publish_docker_image]
|
||||
if: ${{ always() && contains(needs.*.result, 'failure') && github.event_name != 'pull_request' }}
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
issues: write
|
||||
|
||||
106
.github/workflows/sanitizers.yml
vendored
Normal file
106
.github/workflows/sanitizers.yml
vendored
Normal file
@@ -0,0 +1,106 @@
|
||||
name: Run tests with sanitizers
|
||||
on:
|
||||
schedule:
|
||||
- cron: "0 4 * * 1-5"
|
||||
workflow_dispatch:
|
||||
pull_request:
|
||||
paths:
|
||||
- '.github/workflows/sanitizers.yml'
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Build clio tests
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- sanitizer: tsan
|
||||
compiler: gcc
|
||||
- sanitizer: asan
|
||||
compiler: gcc
|
||||
# - sanitizer: ubsan # todo: enable when heavy runners are available
|
||||
# compiler: gcc
|
||||
uses: ./.github/workflows/build_impl.yml
|
||||
with:
|
||||
runs_on: ubuntu-latest # todo: change to heavy
|
||||
container: '{ "image": "rippleci/clio_ci:latest" }'
|
||||
disable_cache: true
|
||||
conan_profile: ${{ matrix.compiler }}.${{ matrix.sanitizer }}
|
||||
build_type: Release
|
||||
code_coverage: false
|
||||
static: false
|
||||
unit_tests: true
|
||||
integration_tests: false
|
||||
clio_server: false
|
||||
target: clio_tests
|
||||
sanitizer: ${{ matrix.sanitizer }}
|
||||
|
||||
# consider combining this with the previous matrix instead
|
||||
run_tests:
|
||||
needs: build
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- sanitizer: tsan
|
||||
compiler: gcc
|
||||
- sanitizer: asan
|
||||
compiler: gcc
|
||||
# - sanitizer: ubsan # todo: enable when heavy runners are available
|
||||
# compiler: gcc
|
||||
runs-on: ubuntu-latest # todo: change to heavy
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
permissions:
|
||||
contents: write
|
||||
issues: write
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: clio_tests_${{ runner.os }}_Release_${{ matrix.compiler }}.${{ matrix.sanitizer }}
|
||||
|
||||
- name: Run clio_tests [${{ matrix.compiler }} / ${{ matrix.sanitizer }}]
|
||||
shell: bash
|
||||
run: |
|
||||
chmod +x ./clio_tests
|
||||
./.github/scripts/execute-tests-under-sanitizer ./clio_tests
|
||||
|
||||
- name: Check for sanitizer report
|
||||
shell: bash
|
||||
id: check_report
|
||||
run: |
|
||||
if ls .sanitizer-report/* 1> /dev/null 2>&1; then
|
||||
echo "found_report=true" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "found_report=false" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
- name: Upload report
|
||||
if: ${{ steps.check_report.outputs.found_report == 'true' }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ matrix.compiler }}_${{ matrix.sanitizer }}_report
|
||||
path: .sanitizer-report/*
|
||||
include-hidden-files: true
|
||||
|
||||
#
|
||||
# todo: enable when we have fixed all currently existing issues from sanitizers
|
||||
#
|
||||
# - name: Create an issue
|
||||
# if: ${{ steps.check_report.outputs.found_report == 'true' }}
|
||||
# uses: ./.github/actions/create_issue
|
||||
# env:
|
||||
# GH_TOKEN: ${{ github.token }}
|
||||
# with:
|
||||
# labels: 'bug'
|
||||
# title: '[${{ matrix.sanitizer }}/${{ matrix.compiler }}] reported issues'
|
||||
# body: >
|
||||
# Clio tests failed one or more sanitizer checks when built with ${{ matrix.compiler }}`.
|
||||
|
||||
# Workflow: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}/
|
||||
# Reports are available as artifacts.
|
||||
2
.github/workflows/upload_coverage_report.yml
vendored
2
.github/workflows/upload_coverage_report.yml
vendored
@@ -9,7 +9,7 @@ on:
|
||||
jobs:
|
||||
upload_report:
|
||||
name: Upload report
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -6,6 +6,7 @@
|
||||
.vscode
|
||||
.python-version
|
||||
.DS_Store
|
||||
.sanitizer-report
|
||||
CMakeUserPresets.json
|
||||
config.json
|
||||
src/util/build/Build.cpp
|
||||
|
||||
@@ -16,6 +16,8 @@ option(coverage "Build test coverage report" FALSE)
|
||||
option(packaging "Create distribution packages" FALSE)
|
||||
option(lint "Run clang-tidy checks during compilation" FALSE)
|
||||
option(static "Statically linked Clio" FALSE)
|
||||
option(snapshot "Build snapshot tool" FALSE)
|
||||
|
||||
# ========================================================================== #
|
||||
set(san "" CACHE STRING "Add sanitizer instrumentation")
|
||||
set(CMAKE_EXPORT_COMPILE_COMMANDS TRUE)
|
||||
@@ -65,15 +67,21 @@ endif ()
|
||||
|
||||
# Enable selected sanitizer if enabled via `san`
|
||||
if (san)
|
||||
set(SUPPORTED_SANITIZERS "address" "thread" "memory" "undefined")
|
||||
list(FIND SUPPORTED_SANITIZERS "${san}" INDEX)
|
||||
if (INDEX EQUAL -1)
|
||||
message(FATAL_ERROR "Error: Unsupported sanitizer '${san}'. Supported values are: ${SUPPORTED_SANITIZERS}.")
|
||||
endif ()
|
||||
|
||||
target_compile_options(
|
||||
clio PUBLIC # Sanitizers recommend minimum of -O1 for reasonable performance
|
||||
$<$<CONFIG:Debug>:-O1> ${SAN_FLAG} -fno-omit-frame-pointer
|
||||
clio_options INTERFACE # Sanitizers recommend minimum of -O1 for reasonable performance
|
||||
$<$<CONFIG:Debug>:-O1> ${SAN_FLAG} -fno-omit-frame-pointer
|
||||
)
|
||||
target_compile_definitions(
|
||||
clio PUBLIC $<$<STREQUAL:${san},address>:SANITIZER=ASAN> $<$<STREQUAL:${san},thread>:SANITIZER=TSAN>
|
||||
$<$<STREQUAL:${san},memory>:SANITIZER=MSAN> $<$<STREQUAL:${san},undefined>:SANITIZER=UBSAN>
|
||||
clio_options INTERFACE $<$<STREQUAL:${san},address>:SANITIZER=ASAN> $<$<STREQUAL:${san},thread>:SANITIZER=TSAN>
|
||||
$<$<STREQUAL:${san},memory>:SANITIZER=MSAN> $<$<STREQUAL:${san},undefined>:SANITIZER=UBSAN>
|
||||
)
|
||||
target_link_libraries(clio INTERFACE ${SAN_FLAG} ${SAN_LIB})
|
||||
target_link_libraries(clio_options INTERFACE ${SAN_FLAG} ${SAN_LIB})
|
||||
endif ()
|
||||
|
||||
# Generate `docs` target for doxygen documentation if enabled Note: use `make docs` to generate the documentation
|
||||
@@ -85,3 +93,7 @@ include(install/install)
|
||||
if (packaging)
|
||||
include(cmake/packaging.cmake) # This file exists only in build runner
|
||||
endif ()
|
||||
|
||||
if (snapshot)
|
||||
add_subdirectory(tools/snapshot)
|
||||
endif ()
|
||||
|
||||
@@ -28,7 +28,6 @@ Below are some useful docs to learn more about Clio.
|
||||
**For Developers**:
|
||||
|
||||
- [How to build Clio](./docs/build-clio.md)
|
||||
- [Metrics and static analysis](./docs/metrics-and-static-analysis.md)
|
||||
- [Coverage report](./docs/coverage-report.md)
|
||||
|
||||
**For Operators**:
|
||||
|
||||
@@ -39,6 +39,34 @@ if (is_appleclang)
|
||||
list(APPEND COMPILER_FLAGS -Wreorder-init-list)
|
||||
endif ()
|
||||
|
||||
if (san)
|
||||
# When building with sanitizers some compilers will actually produce extra warnings/errors. We don't want this yet, at
|
||||
# least not until we have fixed all runtime issues reported by the sanitizers. Once that is done we can start removing
|
||||
# some of these and trying to fix it in our codebase. We can never remove all of below because most of them are
|
||||
# reported from deep inside libraries like boost or libxrpl.
|
||||
#
|
||||
# TODO: Address in https://github.com/XRPLF/clio/issues/1885
|
||||
list(
|
||||
APPEND
|
||||
COMPILER_FLAGS
|
||||
-Wno-error=tsan # Disables treating TSAN warnings as errors
|
||||
-Wno-tsan # Disables TSAN warnings (thread-safety analysis)
|
||||
-Wno-uninitialized # Disables warnings about uninitialized variables (AddressSanitizer, UndefinedBehaviorSanitizer,
|
||||
# etc.)
|
||||
-Wno-stringop-overflow # Disables warnings about potential string operation overflows (AddressSanitizer)
|
||||
-Wno-unsafe-buffer-usage # Disables warnings about unsafe memory operations (AddressSanitizer)
|
||||
-Wno-frame-larger-than # Disables warnings about stack frame size being too large (AddressSanitizer)
|
||||
-Wno-unused-function # Disables warnings about unused functions (LeakSanitizer, memory-related issues)
|
||||
-Wno-unused-but-set-variable # Disables warnings about unused variables (MemorySanitizer)
|
||||
-Wno-thread-safety-analysis # Disables warnings related to thread safety usage (ThreadSanitizer)
|
||||
-Wno-thread-safety # Disables warnings related to thread safety usage (ThreadSanitizer)
|
||||
-Wno-sign-compare # Disables warnings about signed/unsigned comparison (UndefinedBehaviorSanitizer)
|
||||
-Wno-nonnull # Disables warnings related to null pointer dereferencing (UndefinedBehaviorSanitizer)
|
||||
-Wno-address # Disables warnings about address-related issues (UndefinedBehaviorSanitizer)
|
||||
-Wno-array-bounds # Disables array bounds checks (UndefinedBehaviorSanitizer)
|
||||
)
|
||||
endif ()
|
||||
|
||||
# See https://github.com/cpp-best-practices/cppbestpractices/blob/master/02-Use_the_Tools_Available.md#gcc--clang for
|
||||
# the flags description
|
||||
|
||||
|
||||
@@ -1,3 +1,11 @@
|
||||
target_compile_definitions(clio_options INTERFACE BOOST_STACKTRACE_LINK)
|
||||
target_compile_definitions(clio_options INTERFACE BOOST_STACKTRACE_USE_BACKTRACE)
|
||||
find_package(libbacktrace REQUIRED CONFIG)
|
||||
if ("${san}" STREQUAL "")
|
||||
target_compile_definitions(clio_options INTERFACE BOOST_STACKTRACE_LINK)
|
||||
target_compile_definitions(clio_options INTERFACE BOOST_STACKTRACE_USE_BACKTRACE)
|
||||
find_package(libbacktrace REQUIRED CONFIG)
|
||||
else ()
|
||||
# Some sanitizers (TSAN and ASAN for sure) can't be used with libbacktrace because they have their own backtracing
|
||||
# capabilities and there are conflicts. In any case, this makes sure Clio code knows that backtrace is not available.
|
||||
# See relevant conan profiles for sanitizers where we disable stacktrace in Boost explicitly.
|
||||
target_compile_definitions(clio_options INTERFACE CLIO_WITHOUT_STACKTRACE)
|
||||
message(STATUS "Sanitizer enabled, disabling stacktrace")
|
||||
endif ()
|
||||
|
||||
@@ -19,16 +19,17 @@ class Clio(ConanFile):
|
||||
'packaging': [True, False], # create distribution packages
|
||||
'coverage': [True, False], # build for test coverage report; create custom target `clio_tests-ccov`
|
||||
'lint': [True, False], # run clang-tidy checks during compilation
|
||||
'snapshot': [True, False], # build export/import snapshot tool
|
||||
}
|
||||
|
||||
requires = [
|
||||
'boost/1.82.0',
|
||||
'boost/1.83.0',
|
||||
'cassandra-cpp-driver/2.17.0',
|
||||
'fmt/10.1.1',
|
||||
'protobuf/3.21.9',
|
||||
'grpc/1.50.1',
|
||||
'openssl/1.1.1u',
|
||||
'xrpl/2.4.0-b1',
|
||||
'openssl/1.1.1v',
|
||||
'xrpl/2.4.0',
|
||||
'zlib/1.3.1',
|
||||
'libbacktrace/cci.20210118'
|
||||
]
|
||||
@@ -44,6 +45,7 @@ class Clio(ConanFile):
|
||||
'coverage': False,
|
||||
'lint': False,
|
||||
'docs': False,
|
||||
'snapshot': False,
|
||||
|
||||
'xrpl/*:tests': False,
|
||||
'xrpl/*:rocksdb': False,
|
||||
@@ -92,6 +94,7 @@ class Clio(ConanFile):
|
||||
tc.variables['docs'] = self.options.docs
|
||||
tc.variables['packaging'] = self.options.packaging
|
||||
tc.variables['benchmark'] = self.options.benchmark
|
||||
tc.variables['snapshot'] = self.options.snapshot
|
||||
tc.generate()
|
||||
|
||||
def build(self):
|
||||
|
||||
@@ -4,12 +4,13 @@ This image contains an environment to build [Clio](https://github.com/XRPLF/clio
|
||||
It is used in [Clio Github Actions](https://github.com/XRPLF/clio/actions) but can also be used to compile Clio locally.
|
||||
|
||||
The image is based on Ubuntu 20.04 and contains:
|
||||
- clang 16
|
||||
- clang 16.0.6
|
||||
- gcc 12.3
|
||||
- doxygen 1.10
|
||||
- doxygen 1.12
|
||||
- gh 2.40
|
||||
- ccache 4.8.3
|
||||
- conan
|
||||
- ccache 4.10.2
|
||||
- conan 1.62
|
||||
- and some other useful tools
|
||||
|
||||
Conan is set up to build Clio without any additional steps. There are two preset conan profiles: `clang` and `gcc` to use corresponding compiler.
|
||||
Conan is set up to build Clio without any additional steps. There are two preset conan profiles: `clang` and `gcc` to use corresponding compiler. By default conan is setup to use `gcc`.
|
||||
Sanitizer builds for `ASAN`, `TSAN` and `UBSAN` are enabled via conan profiles for each of the supported compilers. These can be selected using the following pattern (all lowercase): `[compiler].[sanitizer]` (e.g. `--profile gcc.tsan`).
|
||||
|
||||
9
docker/ci/conan/clang.asan
Normal file
9
docker/ci/conan/clang.asan
Normal file
@@ -0,0 +1,9 @@
|
||||
include(clang)
|
||||
|
||||
[options]
|
||||
boost:extra_b2_flags="cxxflags=\"-fsanitize=address\" linkflags=\"-fsanitize=address\""
|
||||
boost:without_stacktrace=True
|
||||
[env]
|
||||
CFLAGS="-fsanitize=address"
|
||||
CXXFLAGS="-fsanitize=address"
|
||||
LDFLAGS="-fsanitize=address"
|
||||
9
docker/ci/conan/clang.tsan
Normal file
9
docker/ci/conan/clang.tsan
Normal file
@@ -0,0 +1,9 @@
|
||||
include(clang)
|
||||
|
||||
[options]
|
||||
boost:extra_b2_flags="cxxflags=\"-fsanitize=thread\" linkflags=\"-fsanitize=thread\""
|
||||
boost:without_stacktrace=True
|
||||
[env]
|
||||
CFLAGS="-fsanitize=thread"
|
||||
CXXFLAGS="-fsanitize=thread"
|
||||
LDFLAGS="-fsanitize=thread"
|
||||
9
docker/ci/conan/clang.ubsan
Normal file
9
docker/ci/conan/clang.ubsan
Normal file
@@ -0,0 +1,9 @@
|
||||
include(clang)
|
||||
|
||||
[options]
|
||||
boost:extra_b2_flags="cxxflags=\"-fsanitize=undefined\" linkflags=\"-fsanitize=undefined\""
|
||||
boost:without_stacktrace=True
|
||||
[env]
|
||||
CFLAGS="-fsanitize=undefined"
|
||||
CXXFLAGS="-fsanitize=undefined"
|
||||
LDFLAGS="-fsanitize=undefined"
|
||||
9
docker/ci/conan/gcc.asan
Normal file
9
docker/ci/conan/gcc.asan
Normal file
@@ -0,0 +1,9 @@
|
||||
include(gcc)
|
||||
|
||||
[options]
|
||||
boost:extra_b2_flags="cxxflags=\"-fsanitize=address\" linkflags=\"-fsanitize=address\""
|
||||
boost:without_stacktrace=True
|
||||
[env]
|
||||
CFLAGS="-fsanitize=address"
|
||||
CXXFLAGS="-fsanitize=address"
|
||||
LDFLAGS="-fsanitize=address"
|
||||
9
docker/ci/conan/gcc.tsan
Normal file
9
docker/ci/conan/gcc.tsan
Normal file
@@ -0,0 +1,9 @@
|
||||
include(gcc)
|
||||
|
||||
[options]
|
||||
boost:extra_b2_flags="cxxflags=\"-fsanitize=thread\" linkflags=\"-fsanitize=thread\""
|
||||
boost:without_stacktrace=True
|
||||
[env]
|
||||
CFLAGS="-fsanitize=thread"
|
||||
CXXFLAGS="-fsanitize=thread"
|
||||
LDFLAGS="-fsanitize=thread"
|
||||
9
docker/ci/conan/gcc.ubsan
Normal file
9
docker/ci/conan/gcc.ubsan
Normal file
@@ -0,0 +1,9 @@
|
||||
include(gcc)
|
||||
|
||||
[options]
|
||||
boost:extra_b2_flags="cxxflags=\"-fsanitize=undefined\" linkflags=\"-fsanitize=undefined\""
|
||||
boost:without_stacktrace=True
|
||||
[env]
|
||||
CFLAGS="-fsanitize=undefined"
|
||||
CXXFLAGS="-fsanitize=undefined"
|
||||
LDFLAGS="-fsanitize=undefined"
|
||||
@@ -98,3 +98,10 @@ RUN conan profile new clang --detect \
|
||||
&& conan profile update "conf.tools.build:compiler_executables={\"c\": \"/usr/bin/clang-16\", \"cpp\": \"/usr/bin/clang++-16\"}" clang
|
||||
|
||||
RUN echo "include(gcc)" >> .conan/profiles/default
|
||||
|
||||
COPY conan/gcc.asan /root/.conan/profiles
|
||||
COPY conan/gcc.tsan /root/.conan/profiles
|
||||
COPY conan/gcc.ubsan /root/.conan/profiles
|
||||
COPY conan/clang.asan /root/.conan/profiles
|
||||
COPY conan/clang.tsan /root/.conan/profiles
|
||||
COPY conan/clang.ubsan /root/.conan/profiles
|
||||
|
||||
@@ -181,3 +181,20 @@ Sometimes, during development, you need to build against a custom version of `li
|
||||
4. Build Clio as you would have before.
|
||||
|
||||
See [Building Clio](#building-clio) for details.
|
||||
|
||||
## Using `clang-tidy` for static analysis
|
||||
|
||||
The minimum [clang-tidy](https://clang.llvm.org/extra/clang-tidy/) version required is 19.0.
|
||||
|
||||
Clang-tidy can be run by Cmake when building the project. To achieve this, you just need to provide the option `-o lint=True` for the `conan install` command:
|
||||
|
||||
```sh
|
||||
conan install .. --output-folder . --build missing --settings build_type=Release -o tests=True -o lint=True
|
||||
```
|
||||
|
||||
By default Cmake will try to find `clang-tidy` automatically in your system.
|
||||
To force Cmake to use your desired binary, set the `CLIO_CLANG_TIDY_BIN` environment variable to the path of the `clang-tidy` binary. For example:
|
||||
|
||||
```sh
|
||||
export CLIO_CLANG_TIDY_BIN=/opt/homebrew/opt/llvm@19/bin/clang-tidy
|
||||
```
|
||||
|
||||
452
docs/config-description.md
Normal file
452
docs/config-description.md
Normal file
@@ -0,0 +1,452 @@
|
||||
# Clio Config Description
|
||||
This file lists all Clio Configuration definitions in detail.
|
||||
|
||||
## Configuration Details
|
||||
|
||||
### Key: database.type
|
||||
- **Required**: True
|
||||
- **Type**: string
|
||||
- **Default value**: cassandra
|
||||
- **Constraints**: The value must be one of the following: `cassandra`
|
||||
- **Description**: Type of database to use. We currently support Cassandra and ScyllaDB. We default to Cassandra.
|
||||
### Key: database.cassandra.contact_points
|
||||
- **Required**: True
|
||||
- **Type**: string
|
||||
- **Default value**: localhost
|
||||
- **Constraints**: None
|
||||
- **Description**: A list of IP addresses or hostnames of the initial nodes (Cassandra/Scylladb cluster nodes) that the client will connect to when establishing a connection with the database. If you're running locally, it should be 'localhost' or 127.0.0.1
|
||||
### Key: database.cassandra.secure_connect_bundle
|
||||
- **Required**: False
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: None
|
||||
- **Description**: Configuration file that contains the necessary security credentials and connection details for securely connecting to a Cassandra database cluster.
|
||||
### Key: database.cassandra.port
|
||||
- **Required**: False
|
||||
- **Type**: int
|
||||
- **Default value**: None
|
||||
- **Constraints**: The minimum value is `1`. The maximum value is `65535`
|
||||
- **Description**: Port number to connect to the database.
|
||||
### Key: database.cassandra.keyspace
|
||||
- **Required**: True
|
||||
- **Type**: string
|
||||
- **Default value**: clio
|
||||
- **Constraints**: None
|
||||
- **Description**: Keyspace to use for the database.
|
||||
### Key: database.cassandra.replication_factor
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 3
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
|
||||
- **Description**: Number of replicated nodes for Scylladb. Visit this link for more details : https://university.scylladb.com/courses/scylla-essentials-overview/lessons/high-availability/topic/fault-tolerance-replication-factor/
|
||||
### Key: database.cassandra.table_prefix
|
||||
- **Required**: False
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: None
|
||||
- **Description**: Prefix for Database table names.
|
||||
### Key: database.cassandra.max_write_requests_outstanding
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 10000
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: Maximum number of outstanding write requests. Write requests are api calls that write to database
|
||||
### Key: database.cassandra.max_read_requests_outstanding
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 100000
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: Maximum number of outstanding read requests, which reads from database
|
||||
### Key: database.cassandra.threads
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: The number of available CPU cores.
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: Number of threads that will be used for database operations.
|
||||
### Key: database.cassandra.core_connections_per_host
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 1
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
|
||||
- **Description**: Number of core connections per host for Cassandra.
|
||||
### Key: database.cassandra.queue_size_io
|
||||
- **Required**: False
|
||||
- **Type**: int
|
||||
- **Default value**: None
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
|
||||
- **Description**: Queue size for I/O operations in Cassandra.
|
||||
### Key: database.cassandra.write_batch_size
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 20
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
|
||||
- **Description**: Batch size for write operations in Cassandra.
|
||||
### Key: database.cassandra.connect_timeout
|
||||
- **Required**: False
|
||||
- **Type**: int
|
||||
- **Default value**: None
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: The maximum amount of time in seconds the system will wait for a connection to be successfully established with the database.
|
||||
### Key: database.cassandra.request_timeout
|
||||
- **Required**: False
|
||||
- **Type**: int
|
||||
- **Default value**: None
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: The maximum amount of time in seconds the system will wait for a request to be fetched from database.
|
||||
### Key: database.cassandra.username
|
||||
- **Required**: False
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: None
|
||||
- **Description**: The username used for authenticating with the database.
|
||||
### Key: database.cassandra.password
|
||||
- **Required**: False
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: None
|
||||
- **Description**: The password used for authenticating with the database.
|
||||
### Key: database.cassandra.certfile
|
||||
- **Required**: False
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: None
|
||||
- **Description**: The path to the SSL/TLS certificate file used to establish a secure connection between the client and the Cassandra database.
|
||||
### Key: allow_no_etl
|
||||
- **Required**: True
|
||||
- **Type**: boolean
|
||||
- **Default value**: True
|
||||
- **Constraints**: None
|
||||
- **Description**: If True, no ETL nodes will run with Clio.
|
||||
### Key: etl_sources.[].ip
|
||||
- **Required**: False
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: The value must be a valid IP address
|
||||
- **Description**: IP address of the ETL source.
|
||||
### Key: etl_sources.[].ws_port
|
||||
- **Required**: False
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: The minimum value is `1`. The maximum value is `65535`
|
||||
- **Description**: WebSocket port of the ETL source.
|
||||
### Key: etl_sources.[].grpc_port
|
||||
- **Required**: False
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: The minimum value is `1`. The maximum value is `65535`
|
||||
- **Description**: gRPC port of the ETL source.
|
||||
### Key: forwarding.cache_timeout
|
||||
- **Required**: True
|
||||
- **Type**: double
|
||||
- **Default value**: 0
|
||||
- **Constraints**: The value must be a positive double number
|
||||
- **Description**: Timeout duration for the forwarding cache used in Rippled communication.
|
||||
### Key: forwarding.request_timeout
|
||||
- **Required**: True
|
||||
- **Type**: double
|
||||
- **Default value**: 10
|
||||
- **Constraints**: The value must be a positive double number
|
||||
- **Description**: Timeout duration for the forwarding request used in Rippled communication.
|
||||
### Key: rpc.cache_timeout
|
||||
- **Required**: True
|
||||
- **Type**: double
|
||||
- **Default value**: 0
|
||||
- **Constraints**: The value must be a positive double number
|
||||
- **Description**: Timeout duration for RPC requests.
|
||||
### Key: num_markers
|
||||
- **Required**: False
|
||||
- **Type**: int
|
||||
- **Default value**: None
|
||||
- **Constraints**: The minimum value is `1`. The maximum value is `256`
|
||||
- **Description**: The number of markers is the number of coroutines to download the initial ledger
|
||||
### Key: dos_guard.whitelist.[]
|
||||
- **Required**: False
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: None
|
||||
- **Description**: List of IP addresses to whitelist for DOS protection.
|
||||
### Key: dos_guard.max_fetches
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 1000000
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: Maximum number of fetch operations allowed by DOS guard.
|
||||
### Key: dos_guard.max_connections
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 20
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: Maximum number of concurrent connections allowed by DOS guard.
|
||||
### Key: dos_guard.max_requests
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 20
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: Maximum number of requests allowed by DOS guard.
|
||||
### Key: dos_guard.sweep_interval
|
||||
- **Required**: True
|
||||
- **Type**: double
|
||||
- **Default value**: 1
|
||||
- **Constraints**: The value must be a positive double number
|
||||
- **Description**: Interval in seconds for DOS guard to sweep/clear its state.
|
||||
### Key: workers
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: The number of available CPU cores.
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: Number of threads to process RPC requests.
|
||||
### Key: server.ip
|
||||
- **Required**: True
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: The value must be a valid IP address
|
||||
- **Description**: IP address of the Clio HTTP server.
|
||||
### Key: server.port
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: None
|
||||
- **Constraints**: The minimum value is `1`. The maximum value is `65535`
|
||||
- **Description**: Port number of the Clio HTTP server.
|
||||
### Key: server.max_queue_size
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 0
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: Maximum size of the server's request queue. Value of 0 is no limit.
|
||||
### Key: server.local_admin
|
||||
- **Required**: False
|
||||
- **Type**: boolean
|
||||
- **Default value**: None
|
||||
- **Constraints**: None
|
||||
- **Description**: Indicates if the server should run with admin privileges. Only one of local_admin or admin_password can be set.
|
||||
### Key: server.admin_password
|
||||
- **Required**: False
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: None
|
||||
- **Description**: Password for Clio admin-only APIs. Only one of local_admin or admin_password can be set.
|
||||
### Key: server.processing_policy
|
||||
- **Required**: True
|
||||
- **Type**: string
|
||||
- **Default value**: parallel
|
||||
- **Constraints**: The value must be one of the following: `parallel, sequent`
|
||||
- **Description**: Could be "sequent" or "parallel". For the sequent policy, requests from a single client
|
||||
connection are processed one by one, with the next request read only after the previous one is processed. For the parallel policy, Clio will accept
|
||||
all requests and process them in parallel, sending a reply for each request as soon as it is ready.
|
||||
### Key: server.parallel_requests_limit
|
||||
- **Required**: False
|
||||
- **Type**: int
|
||||
- **Default value**: None
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
|
||||
- **Description**: Optional parameter, used only if processing_policy is `parallel`. It limits the number of requests for a single client connection that are processed in parallel. If not specified, the limit is infinite.
|
||||
### Key: server.ws_max_sending_queue_size
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 1500
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: Maximum size of the websocket sending queue.
|
||||
### Key: prometheus.enabled
|
||||
- **Required**: True
|
||||
- **Type**: boolean
|
||||
- **Default value**: False
|
||||
- **Constraints**: None
|
||||
- **Description**: Enable or disable Prometheus metrics.
|
||||
### Key: prometheus.compress_reply
|
||||
- **Required**: True
|
||||
- **Type**: boolean
|
||||
- **Default value**: False
|
||||
- **Constraints**: None
|
||||
- **Description**: Enable or disable compression of Prometheus responses.
|
||||
### Key: io_threads
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 2
|
||||
- **Constraints**: The minimum value is `1`. The maximum value is `65535`
|
||||
- **Description**: Number of I/O threads. Value cannot be less than 1
|
||||
### Key: subscription_workers
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 1
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: The number of worker threads or processes that are responsible for managing and processing subscription-based tasks from rippled
|
||||
### Key: graceful_period
|
||||
- **Required**: True
|
||||
- **Type**: double
|
||||
- **Default value**: 10
|
||||
- **Constraints**: The value must be a positive double number
|
||||
- **Description**: Number of milliseconds server will wait to shutdown gracefully.
|
||||
### Key: cache.num_diffs
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 32
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
|
||||
- **Description**: Number of diffs to cache. For more info, consult readme.md in etc
|
||||
### Key: cache.num_markers
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 48
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
|
||||
- **Description**: Number of markers to cache.
|
||||
### Key: cache.num_cursors_from_diff
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 0
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
|
||||
- **Description**: Num of cursors that are different.
|
||||
### Key: cache.num_cursors_from_account
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 0
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
|
||||
- **Description**: Number of cursors from an account.
|
||||
### Key: cache.page_fetch_size
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 512
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
|
||||
- **Description**: Page fetch size for cache operations.
|
||||
### Key: cache.load
|
||||
- **Required**: True
|
||||
- **Type**: string
|
||||
- **Default value**: async
|
||||
- **Constraints**: The value must be one of the following: `sync, async, none`
|
||||
- **Description**: Cache loading strategy ('sync' or 'async').
|
||||
### Key: log_channels.[].channel
|
||||
- **Required**: False
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: The value must be one of the following: `General, WebServer, Backend, RPC, ETL, Subscriptions, Performance, Migration`
|
||||
- **Description**: Name of the log channel.'RPC', 'ETL', and 'Performance'
|
||||
### Key: log_channels.[].log_level
|
||||
- **Required**: False
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: The value must be one of the following: `trace, debug, info, warning, error, fatal, count`
|
||||
- **Description**: Log level for the specific log channel.`warning`, `error`, `fatal`
|
||||
### Key: log_level
|
||||
- **Required**: True
|
||||
- **Type**: string
|
||||
- **Default value**: info
|
||||
- **Constraints**: The value must be one of the following: `trace, debug, info, warning, error, fatal, count`
|
||||
- **Description**: General logging level of Clio. This level will be applied to all log channels that do not have an explicitly defined logging level.
|
||||
### Key: log_format
|
||||
- **Required**: True
|
||||
- **Type**: string
|
||||
- **Default value**: %TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% %Message%
|
||||
- **Constraints**: None
|
||||
- **Description**: Format string for log messages.
|
||||
### Key: log_to_console
|
||||
- **Required**: True
|
||||
- **Type**: boolean
|
||||
- **Default value**: True
|
||||
- **Constraints**: None
|
||||
- **Description**: Enable or disable logging to console.
|
||||
### Key: log_directory
|
||||
- **Required**: False
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: None
|
||||
- **Description**: Directory path for log files.
|
||||
### Key: log_rotation_size
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 2048
|
||||
- **Constraints**: The minimum value is `1`. The maximum value is `4294967295`
|
||||
- **Description**: Log rotation size in megabytes. When the log file reaches this particular size, a new log file starts.
|
||||
### Key: log_directory_max_size
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 51200
|
||||
- **Constraints**: The minimum value is `1`. The maximum value is `4294967295`
|
||||
- **Description**: Maximum size of the log directory in megabytes.
|
||||
### Key: log_rotation_hour_interval
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 12
|
||||
- **Constraints**: The minimum value is `1`. The maximum value is `4294967295`
|
||||
- **Description**: Interval in hours for log rotation. If the current log file reaches this value in logging, a new log file starts.
|
||||
### Key: log_tag_style
|
||||
- **Required**: True
|
||||
- **Type**: string
|
||||
- **Default value**: none
|
||||
- **Constraints**: The value must be one of the following: `int, uint, null, none, uuid`
|
||||
- **Description**: Style for log tags.
|
||||
### Key: extractor_threads
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 1
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: Number of extractor threads.
|
||||
### Key: read_only
|
||||
- **Required**: True
|
||||
- **Type**: boolean
|
||||
- **Default value**: True
|
||||
- **Constraints**: None
|
||||
- **Description**: Indicates if the server should have read-only privileges.
|
||||
### Key: txn_threshold
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 0
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
|
||||
- **Description**: Transaction threshold value.
|
||||
### Key: start_sequence
|
||||
- **Required**: False
|
||||
- **Type**: int
|
||||
- **Default value**: None
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: Starting ledger index.
|
||||
### Key: finish_sequence
|
||||
- **Required**: False
|
||||
- **Type**: int
|
||||
- **Default value**: None
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: Ending ledger index.
|
||||
### Key: ssl_cert_file
|
||||
- **Required**: False
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: None
|
||||
- **Description**: Path to the SSL certificate file.
|
||||
### Key: ssl_key_file
|
||||
- **Required**: False
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: None
|
||||
- **Description**: Path to the SSL key file.
|
||||
### Key: api_version.default
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 1
|
||||
- **Constraints**: The minimum value is `1`. The maximum value is `3`
|
||||
- **Description**: Default API version Clio will run on.
|
||||
### Key: api_version.min
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 1
|
||||
- **Constraints**: The minimum value is `1`. The maximum value is `3`
|
||||
- **Description**: Minimum API version.
|
||||
### Key: api_version.max
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 3
|
||||
- **Constraints**: The minimum value is `1`. The maximum value is `3`
|
||||
- **Description**: Maximum API version.
|
||||
### Key: migration.full_scan_threads
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 2
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: The number of threads used to scan the table.
|
||||
### Key: migration.full_scan_jobs
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 4
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: The number of coroutines used to scan the table.
|
||||
### Key: migration.cursors_per_job
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 100
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: The number of cursors each coroutine will scan.
|
||||
|
||||
@@ -1,5 +1,9 @@
|
||||
# Example of clio monitoring infrastructure
|
||||
|
||||
> [!WARNING]
|
||||
> This is only an example of Grafana dashboard for Clio. It was created for demonstration purposes only and may contain errors.
|
||||
> Clio team would not recommend to relate on data from this dashboard or use it for monitoring your Clio instances.
|
||||
|
||||
This directory contains an example of docker based infrastructure to collect and visualise metrics from clio.
|
||||
|
||||
The structure of the directory:
|
||||
|
||||
@@ -20,7 +20,6 @@
|
||||
"graphTooltip": 0,
|
||||
"id": 1,
|
||||
"links": [],
|
||||
"liveNow": false,
|
||||
"panels": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -79,6 +78,7 @@
|
||||
"graphMode": "area",
|
||||
"justifyMode": "auto",
|
||||
"orientation": "auto",
|
||||
"percentChangeColorMode": "standard",
|
||||
"reduceOptions": {
|
||||
"calcs": [
|
||||
"lastNotNull"
|
||||
@@ -90,7 +90,7 @@
|
||||
"textMode": "auto",
|
||||
"wideLayout": true
|
||||
},
|
||||
"pluginVersion": "10.4.0",
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -159,6 +159,7 @@
|
||||
"graphMode": "area",
|
||||
"justifyMode": "auto",
|
||||
"orientation": "auto",
|
||||
"percentChangeColorMode": "standard",
|
||||
"reduceOptions": {
|
||||
"calcs": [
|
||||
"lastNotNull"
|
||||
@@ -170,7 +171,7 @@
|
||||
"textMode": "auto",
|
||||
"wideLayout": true
|
||||
},
|
||||
"pluginVersion": "10.4.0",
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -243,6 +244,7 @@
|
||||
"graphMode": "area",
|
||||
"justifyMode": "auto",
|
||||
"orientation": "auto",
|
||||
"percentChangeColorMode": "standard",
|
||||
"reduceOptions": {
|
||||
"calcs": [
|
||||
"lastNotNull"
|
||||
@@ -254,7 +256,7 @@
|
||||
"textMode": "auto",
|
||||
"wideLayout": true
|
||||
},
|
||||
"pluginVersion": "10.4.0",
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -327,6 +329,7 @@
|
||||
"graphMode": "area",
|
||||
"justifyMode": "auto",
|
||||
"orientation": "auto",
|
||||
"percentChangeColorMode": "standard",
|
||||
"reduceOptions": {
|
||||
"calcs": [
|
||||
"lastNotNull"
|
||||
@@ -338,7 +341,7 @@
|
||||
"textMode": "auto",
|
||||
"wideLayout": true
|
||||
},
|
||||
"pluginVersion": "10.4.0",
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -373,6 +376,7 @@
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"barWidthFactor": 0.6,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 0,
|
||||
"gradientMode": "none",
|
||||
@@ -435,6 +439,7 @@
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -491,6 +496,7 @@
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"barWidthFactor": 0.6,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 0,
|
||||
"gradientMode": "none",
|
||||
@@ -552,6 +558,7 @@
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -586,6 +593,7 @@
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"barWidthFactor": 0.6,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 0,
|
||||
"gradientMode": "none",
|
||||
@@ -647,6 +655,7 @@
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -681,6 +690,7 @@
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"barWidthFactor": 0.6,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 0,
|
||||
"gradientMode": "none",
|
||||
@@ -742,6 +752,7 @@
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -776,6 +787,7 @@
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"barWidthFactor": 0.6,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 0,
|
||||
"gradientMode": "none",
|
||||
@@ -837,6 +849,7 @@
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -872,6 +885,7 @@
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"barWidthFactor": 0.6,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 0,
|
||||
"gradientMode": "none",
|
||||
@@ -934,6 +948,7 @@
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -941,7 +956,7 @@
|
||||
"uid": "PBFA97CFB590B2093"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "rpc_method_duration_us{job=\"clio\"}",
|
||||
"expr": "sum by (method) (increase(rpc_method_duration_us[$__interval]))\n / \n sum by (method,) (increase(rpc_method_total_number{status=\"finished\"}[$__interval]))",
|
||||
"instant": false,
|
||||
"legendFormat": "{{method}}",
|
||||
"range": true,
|
||||
@@ -968,6 +983,7 @@
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"barWidthFactor": 0.6,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 0,
|
||||
"gradientMode": "none",
|
||||
@@ -1029,6 +1045,7 @@
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -1063,6 +1080,7 @@
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"barWidthFactor": 0.6,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 0,
|
||||
"gradientMode": "none",
|
||||
@@ -1124,6 +1142,7 @@
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -1158,6 +1177,7 @@
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"barWidthFactor": 0.6,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 10,
|
||||
"gradientMode": "none",
|
||||
@@ -1223,7 +1243,7 @@
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "10.2.0",
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -1296,6 +1316,7 @@
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"barWidthFactor": 0.6,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 0,
|
||||
"gradientMode": "none",
|
||||
@@ -1357,6 +1378,7 @@
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -1404,6 +1426,7 @@
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"barWidthFactor": 0.6,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 0,
|
||||
"gradientMode": "none",
|
||||
@@ -1465,6 +1488,7 @@
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -1510,6 +1534,7 @@
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"barWidthFactor": 0.6,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 0,
|
||||
"gradientMode": "none",
|
||||
@@ -1572,6 +1597,7 @@
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -1590,8 +1616,9 @@
|
||||
"type": "timeseries"
|
||||
}
|
||||
],
|
||||
"preload": false,
|
||||
"refresh": "5s",
|
||||
"schemaVersion": 39,
|
||||
"schemaVersion": 40,
|
||||
"tags": [],
|
||||
"templating": {
|
||||
"list": []
|
||||
|
||||
@@ -1,30 +0,0 @@
|
||||
# Metrics and static analysis
|
||||
|
||||
## Prometheus metrics collection
|
||||
|
||||
Clio natively supports [Prometheus](https://prometheus.io/) metrics collection. It accepts Prometheus requests on the port configured in the `server` section of the config.
|
||||
|
||||
Prometheus metrics are enabled by default, and replies to `/metrics` are compressed. To disable compression, and have human readable metrics, add `"prometheus": { "enabled": true, "compress_reply": false }` to Clio's config.
|
||||
|
||||
To completely disable Prometheus metrics add `"prometheus": { "enabled": false }` to Clio's config.
|
||||
|
||||
It is important to know that Clio responds to Prometheus request only if they are admin requests. If you are using the admin password feature, the same password should be provided in the Authorization header of Prometheus requests.
|
||||
|
||||
You can find an example docker-compose file, with Prometheus and Grafana configs, in [examples/infrastructure](../docs/examples/infrastructure/).
|
||||
|
||||
## Using `clang-tidy` for static analysis
|
||||
|
||||
The minimum [clang-tidy](https://clang.llvm.org/extra/clang-tidy/) version required is 19.0.
|
||||
|
||||
Clang-tidy can be run by Cmake when building the project. To achieve this, you just need to provide the option `-o lint=True` for the `conan install` command:
|
||||
|
||||
```sh
|
||||
conan install .. --output-folder . --build missing --settings build_type=Release -o tests=True -o lint=True
|
||||
```
|
||||
|
||||
By default Cmake will try to find `clang-tidy` automatically in your system.
|
||||
To force Cmake to use your desired binary, set the `CLIO_CLANG_TIDY_BIN` environment variable to the path of the `clang-tidy` binary. For example:
|
||||
|
||||
```sh
|
||||
export CLIO_CLANG_TIDY_BIN=/opt/homebrew/opt/llvm@19/bin/clang-tidy
|
||||
```
|
||||
@@ -80,3 +80,15 @@ Clio will fallback to hardcoded defaults when these values are not specified in
|
||||
|
||||
> [!TIP]
|
||||
> See the [example-config.json](../docs/examples/config/example-config.json) for more details.
|
||||
|
||||
## Prometheus metrics collection
|
||||
|
||||
Clio natively supports [Prometheus](https://prometheus.io/) metrics collection. It accepts Prometheus requests on the port configured in the `server` section of the config.
|
||||
|
||||
Prometheus metrics are enabled by default, and replies to `/metrics` are compressed. To disable compression, and have human readable metrics, add `"prometheus": { "enabled": true, "compress_reply": false }` to Clio's config.
|
||||
|
||||
To completely disable Prometheus metrics add `"prometheus": { "enabled": false }` to Clio's config.
|
||||
|
||||
It is important to know that Clio responds to Prometheus request only if they are admin requests. If you are using the admin password feature, the same password should be provided in the Authorization header of Prometheus requests.
|
||||
|
||||
You can find an example docker-compose file, with Prometheus and Grafana configs, in [examples/infrastructure](../docs/examples/infrastructure/).
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
add_library(clio_app)
|
||||
target_sources(clio_app PRIVATE CliArgs.cpp ClioApplication.cpp WebHandlers.cpp)
|
||||
target_sources(clio_app PRIVATE CliArgs.cpp ClioApplication.cpp Stopper.cpp WebHandlers.cpp)
|
||||
|
||||
target_link_libraries(clio_app PUBLIC clio_etl clio_etlng clio_feed clio_web clio_rpc clio_migration)
|
||||
|
||||
@@ -21,6 +21,7 @@
|
||||
|
||||
#include "migration/MigrationApplication.hpp"
|
||||
#include "util/build/Build.hpp"
|
||||
#include "util/newconfig/ConfigDescription.hpp"
|
||||
|
||||
#include <boost/program_options/options_description.hpp>
|
||||
#include <boost/program_options/parsers.hpp>
|
||||
@@ -29,6 +30,7 @@
|
||||
#include <boost/program_options/variables_map.hpp>
|
||||
|
||||
#include <cstdlib>
|
||||
#include <filesystem>
|
||||
#include <iostream>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
@@ -42,12 +44,13 @@ CliArgs::parse(int argc, char const* argv[])
|
||||
// clang-format off
|
||||
po::options_description description("Options");
|
||||
description.add_options()
|
||||
("help,h", "print help message and exit")
|
||||
("version,v", "print version and exit")
|
||||
("conf,c", po::value<std::string>()->default_value(kDEFAULT_CONFIG_PATH), "configuration file")
|
||||
("help,h", "Print help message and exit")
|
||||
("version,v", "Print version and exit")
|
||||
("conf,c", po::value<std::string>()->default_value(kDEFAULT_CONFIG_PATH), "Configuration file")
|
||||
("ng-web-server,w", "Use ng-web-server")
|
||||
("migrate", po::value<std::string>(), "start migration helper")
|
||||
("migrate", po::value<std::string>(), "Start migration helper")
|
||||
("verify", "Checks the validity of config values")
|
||||
("config-description,d", po::value<std::string>(), "Generate config description markdown file")
|
||||
;
|
||||
// clang-format on
|
||||
po::positional_options_description positional;
|
||||
@@ -67,6 +70,17 @@ CliArgs::parse(int argc, char const* argv[])
|
||||
return Action{Action::Exit{EXIT_SUCCESS}};
|
||||
}
|
||||
|
||||
if (parsed.count("config-description") != 0u) {
|
||||
std::filesystem::path const filePath = parsed["config-description"].as<std::string>();
|
||||
|
||||
auto const res = util::config::ClioConfigDescription::generateConfigDescriptionToFile(filePath);
|
||||
if (res.has_value())
|
||||
return Action{Action::Exit{EXIT_SUCCESS}};
|
||||
|
||||
std::cerr << res.error().error << std::endl;
|
||||
return Action{Action::Exit{EXIT_FAILURE}};
|
||||
}
|
||||
|
||||
auto configPath = parsed["conf"].as<std::string>();
|
||||
|
||||
if (parsed.count("migrate") != 0u) {
|
||||
|
||||
@@ -19,13 +19,16 @@
|
||||
|
||||
#include "app/ClioApplication.hpp"
|
||||
|
||||
#include "app/Stopper.hpp"
|
||||
#include "app/WebHandlers.hpp"
|
||||
#include "data/AmendmentCenter.hpp"
|
||||
#include "data/BackendFactory.hpp"
|
||||
#include "data/LedgerCache.hpp"
|
||||
#include "etl/ETLService.hpp"
|
||||
#include "etl/LoadBalancer.hpp"
|
||||
#include "etl/NetworkValidatedLedgers.hpp"
|
||||
#include "feed/SubscriptionManager.hpp"
|
||||
#include "migration/MigrationInspectorFactory.hpp"
|
||||
#include "rpc/Counters.hpp"
|
||||
#include "rpc/RPCEngine.hpp"
|
||||
#include "rpc/WorkQueue.hpp"
|
||||
@@ -83,6 +86,7 @@ ClioApplication::ClioApplication(util::config::ClioConfigDefinition const& confi
|
||||
{
|
||||
LOG(util::LogService::info()) << "Clio version: " << util::build::getClioFullVersionString();
|
||||
PrometheusService::init(config);
|
||||
signalsHandler_.subscribeToStop([this]() { appStopper_.stop(); });
|
||||
}
|
||||
|
||||
int
|
||||
@@ -99,12 +103,25 @@ ClioApplication::run(bool const useNgWebServer)
|
||||
auto whitelistHandler = web::dosguard::WhitelistHandler{config_};
|
||||
auto dosGuard = web::dosguard::DOSGuard{config_, whitelistHandler};
|
||||
auto sweepHandler = web::dosguard::IntervalSweepHandler{config_, ioc, dosGuard};
|
||||
auto cache = data::LedgerCache{};
|
||||
|
||||
// Interface to the database
|
||||
auto backend = data::makeBackend(config_);
|
||||
auto backend = data::makeBackend(config_, cache);
|
||||
|
||||
auto const amendmentCenter = std::make_shared<data::AmendmentCenter const>(backend);
|
||||
|
||||
{
|
||||
auto const migrationInspector = migration::makeMigrationInspector(config_, backend);
|
||||
// Check if any migration is blocking Clio server starting.
|
||||
if (migrationInspector->isBlockingClio() and backend->hardFetchLedgerRangeNoThrow()) {
|
||||
LOG(util::LogService::error())
|
||||
<< "Existing Migration is blocking Clio, Please complete the database migration first.";
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
}
|
||||
|
||||
// Manages clients subscribed to streams
|
||||
auto subscriptions = feed::SubscriptionManager::makeSubscriptionManager(config_, backend);
|
||||
auto subscriptions = feed::SubscriptionManager::makeSubscriptionManager(config_, backend, amendmentCenter);
|
||||
|
||||
// Tracks which ledgers have been validated by the network
|
||||
auto ledgers = etl::NetworkValidatedLedgers::makeValidatedLedgers();
|
||||
@@ -120,7 +137,7 @@ ClioApplication::run(bool const useNgWebServer)
|
||||
|
||||
auto workQueue = rpc::WorkQueue::makeWorkQueue(config_);
|
||||
auto counters = rpc::Counters::makeCounters(workQueue);
|
||||
auto const amendmentCenter = std::make_shared<data::AmendmentCenter const>(backend);
|
||||
|
||||
auto const handlerProvider = std::make_shared<rpc::impl::ProductionHandlerProvider const>(
|
||||
config_, backend, subscriptions, balancer, etl, amendmentCenter, counters
|
||||
);
|
||||
@@ -158,6 +175,10 @@ ClioApplication::run(bool const useNgWebServer)
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
|
||||
appStopper_.setOnStop(
|
||||
Stopper::makeOnStopCallback(httpServer.value(), *balancer, *etl, *subscriptions, *backend, ioc)
|
||||
);
|
||||
|
||||
// Blocks until stopped.
|
||||
// When stopped, shared_ptrs fall out of scope
|
||||
// Calls destructors on all resources, and destructs in order
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "app/Stopper.hpp"
|
||||
#include "util/SignalsHandler.hpp"
|
||||
#include "util/newconfig/ConfigDefinition.hpp"
|
||||
|
||||
@@ -30,6 +31,7 @@ namespace app {
|
||||
class ClioApplication {
|
||||
util::config::ClioConfigDefinition const& config_;
|
||||
util::SignalsHandler signalsHandler_;
|
||||
Stopper appStopper_;
|
||||
|
||||
public:
|
||||
/**
|
||||
|
||||
52
src/app/Stopper.cpp
Normal file
52
src/app/Stopper.cpp
Normal file
@@ -0,0 +1,52 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2025, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include "app/Stopper.hpp"
|
||||
|
||||
#include <boost/asio/spawn.hpp>
|
||||
|
||||
#include <functional>
|
||||
#include <thread>
|
||||
#include <utility>
|
||||
|
||||
namespace app {
|
||||
|
||||
Stopper::~Stopper()
|
||||
{
|
||||
if (worker_.joinable())
|
||||
worker_.join();
|
||||
}
|
||||
|
||||
void
|
||||
Stopper::setOnStop(std::function<void(boost::asio::yield_context)> cb)
|
||||
{
|
||||
boost::asio::spawn(ctx_, std::move(cb));
|
||||
}
|
||||
|
||||
void
|
||||
Stopper::stop()
|
||||
{
|
||||
// Do nothing if worker_ is already running
|
||||
if (worker_.joinable())
|
||||
return;
|
||||
|
||||
worker_ = std::thread{[this]() { ctx_.run(); }};
|
||||
}
|
||||
|
||||
} // namespace app
|
||||
118
src/app/Stopper.hpp
Normal file
118
src/app/Stopper.hpp
Normal file
@@ -0,0 +1,118 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2024, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "etl/ETLService.hpp"
|
||||
#include "etl/LoadBalancer.hpp"
|
||||
#include "feed/SubscriptionManagerInterface.hpp"
|
||||
#include "util/CoroutineGroup.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
#include "web/ng/Server.hpp"
|
||||
|
||||
#include <boost/asio/executor_work_guard.hpp>
|
||||
#include <boost/asio/io_context.hpp>
|
||||
#include <boost/asio/spawn.hpp>
|
||||
|
||||
#include <functional>
|
||||
#include <thread>
|
||||
|
||||
namespace app {
|
||||
|
||||
/**
|
||||
* @brief Application stopper class. On stop it will create a new thread to run all the shutdown tasks.
|
||||
*/
|
||||
class Stopper {
|
||||
boost::asio::io_context ctx_;
|
||||
std::thread worker_;
|
||||
|
||||
public:
|
||||
/**
|
||||
* @brief Destroy the Stopper object
|
||||
*/
|
||||
~Stopper();
|
||||
|
||||
/**
|
||||
* @brief Set the callback to be called when the application is stopped.
|
||||
*
|
||||
* @param cb The callback to be called on application stop.
|
||||
*/
|
||||
void
|
||||
setOnStop(std::function<void(boost::asio::yield_context)> cb);
|
||||
|
||||
/**
|
||||
* @brief Stop the application and run the shutdown tasks.
|
||||
*/
|
||||
void
|
||||
stop();
|
||||
|
||||
/**
|
||||
* @brief Create a callback to be called on application stop.
|
||||
*
|
||||
* @param server The server to stop.
|
||||
* @param balancer The load balancer to stop.
|
||||
* @param etl The ETL service to stop.
|
||||
* @param subscriptions The subscription manager to stop.
|
||||
* @param backend The backend to stop.
|
||||
* @param ioc The io_context to stop.
|
||||
* @return The callback to be called on application stop.
|
||||
*/
|
||||
template <
|
||||
web::ng::SomeServer ServerType,
|
||||
etl::SomeLoadBalancer LoadBalancerType,
|
||||
etl::SomeETLService ETLServiceType>
|
||||
static std::function<void(boost::asio::yield_context)>
|
||||
makeOnStopCallback(
|
||||
ServerType& server,
|
||||
LoadBalancerType& balancer,
|
||||
ETLServiceType& etl,
|
||||
feed::SubscriptionManagerInterface& subscriptions,
|
||||
data::BackendInterface& backend,
|
||||
boost::asio::io_context& ioc
|
||||
)
|
||||
{
|
||||
return [&](boost::asio::yield_context yield) {
|
||||
util::CoroutineGroup coroutineGroup{yield};
|
||||
coroutineGroup.spawn(yield, [&server](auto innerYield) {
|
||||
server.stop(innerYield);
|
||||
LOG(util::LogService::info()) << "Server stopped";
|
||||
});
|
||||
coroutineGroup.spawn(yield, [&balancer](auto innerYield) {
|
||||
balancer.stop(innerYield);
|
||||
LOG(util::LogService::info()) << "LoadBalancer stopped";
|
||||
});
|
||||
coroutineGroup.asyncWait(yield);
|
||||
|
||||
etl.stop();
|
||||
LOG(util::LogService::info()) << "ETL stopped";
|
||||
|
||||
subscriptions.stop();
|
||||
LOG(util::LogService::info()) << "SubscriptionManager stopped";
|
||||
|
||||
backend.waitForWritesToFinish();
|
||||
LOG(util::LogService::info()) << "Backend writes finished";
|
||||
|
||||
ioc.stop();
|
||||
LOG(util::LogService::info()) << "io_context stopped";
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace app
|
||||
@@ -35,21 +35,24 @@ namespace app {
|
||||
* @return true if config values are all correct, false otherwise
|
||||
*/
|
||||
inline bool
|
||||
verifyConfig(std::string_view configPath)
|
||||
parseConfig(std::string_view configPath)
|
||||
{
|
||||
using namespace util::config;
|
||||
|
||||
auto const json = ConfigFileJson::makeConfigFileJson(configPath);
|
||||
if (!json.has_value()) {
|
||||
std::cerr << json.error().error << std::endl;
|
||||
std::cerr << "Error parsing json from config: " << configPath << "\n" << json.error().error << std::endl;
|
||||
return false;
|
||||
}
|
||||
auto const errors = gClioConfig.parse(json.value());
|
||||
if (errors.has_value()) {
|
||||
for (auto const& err : errors.value())
|
||||
for (auto const& err : errors.value()) {
|
||||
std::cerr << "Issues found in provided config '" << configPath << "':\n";
|
||||
std::cerr << err.error << std::endl;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
} // namespace app
|
||||
|
||||
@@ -132,6 +132,11 @@ struct Amendments {
|
||||
REGISTER(fixAMMv1_2);
|
||||
REGISTER(AMMClawback);
|
||||
REGISTER(Credentials);
|
||||
REGISTER(DynamicNFT);
|
||||
REGISTER(PermissionedDomains);
|
||||
REGISTER(fixInvalidTxFlags);
|
||||
REGISTER(fixFrozenLPTokenTransfer);
|
||||
REGISTER(DeepFreeze);
|
||||
|
||||
// Obsolete but supported by libxrpl
|
||||
REGISTER(CryptoConditionsSuite);
|
||||
|
||||
@@ -21,6 +21,7 @@
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/CassandraBackend.hpp"
|
||||
#include "data/LedgerCacheInterface.hpp"
|
||||
#include "data/cassandra/SettingsProvider.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
#include "util/newconfig/ConfigDefinition.hpp"
|
||||
@@ -38,10 +39,11 @@ namespace data {
|
||||
* @brief A factory function that creates the backend based on a config.
|
||||
*
|
||||
* @param config The clio config to use
|
||||
* @param cache The ledger cache to use
|
||||
* @return A shared_ptr<BackendInterface> with the selected implementation
|
||||
*/
|
||||
inline std::shared_ptr<BackendInterface>
|
||||
makeBackend(util::config::ClioConfigDefinition const& config)
|
||||
makeBackend(util::config::ClioConfigDefinition const& config, data::LedgerCacheInterface& cache)
|
||||
{
|
||||
static util::Logger const log{"Backend"}; // NOLINT(readability-identifier-naming)
|
||||
LOG(log.info()) << "Constructing BackendInterface";
|
||||
@@ -53,7 +55,9 @@ makeBackend(util::config::ClioConfigDefinition const& config)
|
||||
|
||||
if (boost::iequals(type, "cassandra")) {
|
||||
auto const cfg = config.getObject("database." + type);
|
||||
backend = std::make_shared<data::cassandra::CassandraBackend>(data::cassandra::SettingsProvider{cfg}, readOnly);
|
||||
backend = std::make_shared<data::cassandra::CassandraBackend>(
|
||||
data::cassandra::SettingsProvider{cfg}, cache, readOnly
|
||||
);
|
||||
}
|
||||
|
||||
if (!backend)
|
||||
|
||||
@@ -87,7 +87,7 @@ BackendInterface::fetchLedgerObject(
|
||||
boost::asio::yield_context yield
|
||||
) const
|
||||
{
|
||||
auto obj = cache_.get(key, sequence);
|
||||
auto obj = cache_.get().get(key, sequence);
|
||||
if (obj) {
|
||||
LOG(gLog.trace()) << "Cache hit - " << ripple::strHex(key);
|
||||
return obj;
|
||||
@@ -126,7 +126,7 @@ BackendInterface::fetchLedgerObjects(
|
||||
results.resize(keys.size());
|
||||
std::vector<ripple::uint256> misses;
|
||||
for (size_t i = 0; i < keys.size(); ++i) {
|
||||
auto obj = cache_.get(keys[i], sequence);
|
||||
auto obj = cache_.get().get(keys[i], sequence);
|
||||
if (obj) {
|
||||
results[i] = *obj;
|
||||
} else {
|
||||
@@ -156,7 +156,7 @@ BackendInterface::fetchSuccessorKey(
|
||||
boost::asio::yield_context yield
|
||||
) const
|
||||
{
|
||||
auto succ = cache_.getSuccessor(key, ledgerSequence);
|
||||
auto succ = cache_.get().getSuccessor(key, ledgerSequence);
|
||||
if (succ) {
|
||||
LOG(gLog.trace()) << "Cache hit - " << ripple::strHex(key);
|
||||
} else {
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
#pragma once
|
||||
|
||||
#include "data/DBHelpers.hpp"
|
||||
#include "data/LedgerCache.hpp"
|
||||
#include "data/LedgerCacheInterface.hpp"
|
||||
#include "data/Types.hpp"
|
||||
#include "etl/CorruptionDetector.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
@@ -40,6 +40,7 @@
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
#include <exception>
|
||||
#include <functional>
|
||||
#include <optional>
|
||||
#include <shared_mutex>
|
||||
#include <string>
|
||||
@@ -139,18 +140,27 @@ class BackendInterface {
|
||||
protected:
|
||||
mutable std::shared_mutex rngMtx_;
|
||||
std::optional<LedgerRange> range_;
|
||||
LedgerCache cache_;
|
||||
std::optional<etl::CorruptionDetector<LedgerCache>> corruptionDetector_;
|
||||
std::reference_wrapper<LedgerCacheInterface> cache_;
|
||||
std::optional<etl::CorruptionDetector> corruptionDetector_;
|
||||
|
||||
public:
|
||||
BackendInterface() = default;
|
||||
/**
|
||||
* @brief Construct a new backend interface instance.
|
||||
*
|
||||
* @param cache The ledger cache to use
|
||||
*/
|
||||
BackendInterface(LedgerCacheInterface& cache) : cache_{cache}
|
||||
{
|
||||
}
|
||||
virtual ~BackendInterface() = default;
|
||||
|
||||
// TODO: Remove this hack. Cache should not be exposed thru BackendInterface
|
||||
// TODO: Remove this hack once old ETL is removed.
|
||||
// Cache should not be exposed thru BackendInterface
|
||||
|
||||
/**
|
||||
* @return Immutable cache
|
||||
*/
|
||||
LedgerCache const&
|
||||
LedgerCacheInterface const&
|
||||
cache() const
|
||||
{
|
||||
return cache_;
|
||||
@@ -159,7 +169,7 @@ public:
|
||||
/**
|
||||
* @return Mutable cache
|
||||
*/
|
||||
LedgerCache&
|
||||
LedgerCacheInterface&
|
||||
cache()
|
||||
{
|
||||
return cache_;
|
||||
@@ -171,7 +181,7 @@ public:
|
||||
* @param detector The corruption detector to set
|
||||
*/
|
||||
void
|
||||
setCorruptionDetector(etl::CorruptionDetector<LedgerCache> detector)
|
||||
setCorruptionDetector(etl::CorruptionDetector detector)
|
||||
{
|
||||
corruptionDetector_ = std::move(detector);
|
||||
}
|
||||
@@ -683,6 +693,12 @@ public:
|
||||
bool
|
||||
finishWrites(std::uint32_t ledgerSequence);
|
||||
|
||||
/**
|
||||
* @brief Wait for all pending writes to finish.
|
||||
*/
|
||||
virtual void
|
||||
waitForWritesToFinish() = 0;
|
||||
|
||||
/**
|
||||
* @brief Mark the migration status of a migrator as Migrated in the database
|
||||
*
|
||||
|
||||
@@ -21,6 +21,7 @@
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/DBHelpers.hpp"
|
||||
#include "data/LedgerCacheInterface.hpp"
|
||||
#include "data/Types.hpp"
|
||||
#include "data/cassandra/Concepts.hpp"
|
||||
#include "data/cassandra/Handle.hpp"
|
||||
@@ -36,6 +37,7 @@
|
||||
#include <boost/asio/spawn.hpp>
|
||||
#include <boost/json/object.hpp>
|
||||
#include <cassandra.h>
|
||||
#include <fmt/core.h>
|
||||
#include <xrpl/basics/Blob.h>
|
||||
#include <xrpl/basics/base_uint.h>
|
||||
#include <xrpl/basics/strHex.h>
|
||||
@@ -87,10 +89,12 @@ public:
|
||||
* @brief Create a new cassandra/scylla backend instance.
|
||||
*
|
||||
* @param settingsProvider The settings provider to use
|
||||
* @param cache The ledger cache to use
|
||||
* @param readOnly Whether the database should be in readonly mode
|
||||
*/
|
||||
BasicCassandraBackend(SettingsProviderType settingsProvider, bool readOnly)
|
||||
: settingsProvider_{std::move(settingsProvider)}
|
||||
BasicCassandraBackend(SettingsProviderType settingsProvider, data::LedgerCacheInterface& cache, bool readOnly)
|
||||
: BackendInterface(cache)
|
||||
, settingsProvider_{std::move(settingsProvider)}
|
||||
, schema_{settingsProvider_}
|
||||
, handle_{settingsProvider_.getSettings()}
|
||||
, executor_{settingsProvider_.getSettings(), handle_}
|
||||
@@ -113,13 +117,24 @@ public:
|
||||
try {
|
||||
schema_.prepareStatements(handle_);
|
||||
} catch (std::runtime_error const& ex) {
|
||||
LOG(log_.error()) << "Failed to prepare the statements: " << ex.what() << "; readOnly: " << readOnly;
|
||||
throw;
|
||||
auto const error = fmt::format(
|
||||
"Failed to prepare the statements: {}; readOnly: {}. ReadOnly should be turned off or another Clio "
|
||||
"node with write access to DB should be started first.",
|
||||
ex.what(),
|
||||
readOnly
|
||||
);
|
||||
LOG(log_.error()) << error;
|
||||
throw std::runtime_error(error);
|
||||
}
|
||||
|
||||
LOG(log_.info()) << "Created (revamped) CassandraBackend";
|
||||
}
|
||||
|
||||
/*
|
||||
* @brief Move constructor is deleted because handle_ is shared by reference with executor
|
||||
*/
|
||||
BasicCassandraBackend(BasicCassandraBackend&&) = delete;
|
||||
|
||||
TransactionsAndCursor
|
||||
fetchAccountTransactions(
|
||||
ripple::AccountID const& account,
|
||||
@@ -188,11 +203,16 @@ public:
|
||||
return {txns, {}};
|
||||
}
|
||||
|
||||
void
|
||||
waitForWritesToFinish() override
|
||||
{
|
||||
executor_.sync();
|
||||
}
|
||||
|
||||
bool
|
||||
doFinishWrites() override
|
||||
{
|
||||
// wait for other threads to finish their writes
|
||||
executor_.sync();
|
||||
waitForWritesToFinish();
|
||||
|
||||
if (!range_) {
|
||||
executor_.writeSync(schema_->updateLedgerRange, ledgerSequence_, false, ledgerSequence_);
|
||||
@@ -619,7 +639,6 @@ public:
|
||||
return seq;
|
||||
}
|
||||
LOG(log_.debug()) << "Could not fetch ledger object sequence - no rows";
|
||||
|
||||
} else {
|
||||
LOG(log_.error()) << "Could not fetch ledger object sequence: " << res.error();
|
||||
}
|
||||
@@ -943,28 +962,35 @@ public:
|
||||
statements.reserve(data.size() * 3);
|
||||
|
||||
for (NFTsData const& record : data) {
|
||||
statements.push_back(
|
||||
schema_->insertNFT.bind(record.tokenID, record.ledgerSequence, record.owner, record.isBurned)
|
||||
);
|
||||
if (!record.onlyUriChanged) {
|
||||
statements.push_back(
|
||||
schema_->insertNFT.bind(record.tokenID, record.ledgerSequence, record.owner, record.isBurned)
|
||||
);
|
||||
|
||||
// If `uri` is set (and it can be set to an empty uri), we know this
|
||||
// is a net-new NFT. That is, this NFT has not been seen before by
|
||||
// us _OR_ it is in the extreme edge case of a re-minted NFT ID with
|
||||
// the same NFT ID as an already-burned token. In this case, we need
|
||||
// to record the URI and link to the issuer_nf_tokens table.
|
||||
if (record.uri) {
|
||||
statements.push_back(schema_->insertIssuerNFT.bind(
|
||||
ripple::nft::getIssuer(record.tokenID),
|
||||
static_cast<uint32_t>(ripple::nft::getTaxon(record.tokenID)),
|
||||
record.tokenID
|
||||
));
|
||||
// If `uri` is set (and it can be set to an empty uri), we know this
|
||||
// is a net-new NFT. That is, this NFT has not been seen before by
|
||||
// us _OR_ it is in the extreme edge case of a re-minted NFT ID with
|
||||
// the same NFT ID as an already-burned token. In this case, we need
|
||||
// to record the URI and link to the issuer_nf_tokens table.
|
||||
if (record.uri) {
|
||||
statements.push_back(schema_->insertIssuerNFT.bind(
|
||||
ripple::nft::getIssuer(record.tokenID),
|
||||
static_cast<uint32_t>(ripple::nft::getTaxon(record.tokenID)),
|
||||
record.tokenID
|
||||
));
|
||||
statements.push_back(
|
||||
schema_->insertNFTURI.bind(record.tokenID, record.ledgerSequence, record.uri.value())
|
||||
);
|
||||
}
|
||||
} else {
|
||||
// only uri changed, we update the uri table only
|
||||
statements.push_back(
|
||||
schema_->insertNFTURI.bind(record.tokenID, record.ledgerSequence, record.uri.value())
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
executor_.write(std::move(statements));
|
||||
executor_.writeEach(std::move(statements));
|
||||
}
|
||||
|
||||
void
|
||||
|
||||
@@ -107,6 +107,7 @@ struct NFTsData {
|
||||
ripple::AccountID owner;
|
||||
std::optional<ripple::Blob> uri;
|
||||
bool isBurned = false;
|
||||
bool onlyUriChanged = false; // Whether only the URI was changed
|
||||
|
||||
/**
|
||||
* @brief Construct a new NFTsData object
|
||||
@@ -170,6 +171,23 @@ struct NFTsData {
|
||||
: tokenID(tokenID), ledgerSequence(ledgerSequence), owner(owner), uri(uri)
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Construct a new NFTsData object with only the URI changed
|
||||
*
|
||||
* @param tokenID The token ID
|
||||
* @param meta The transaction metadata
|
||||
* @param uri The new URI
|
||||
*
|
||||
*/
|
||||
NFTsData(ripple::uint256 const& tokenID, ripple::TxMeta const& meta, ripple::Blob const& uri)
|
||||
: tokenID(tokenID)
|
||||
, ledgerSequence(meta.getLgrSeq())
|
||||
, transactionIndex(meta.getIndex())
|
||||
, uri(uri)
|
||||
, onlyUriChanged(true)
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
|
||||
@@ -19,7 +19,9 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/LedgerCacheInterface.hpp"
|
||||
#include "data/Types.hpp"
|
||||
#include "util/prometheus/Bool.hpp"
|
||||
#include "util/prometheus/Counter.hpp"
|
||||
#include "util/prometheus/Label.hpp"
|
||||
#include "util/prometheus/Prometheus.hpp"
|
||||
@@ -43,7 +45,7 @@ namespace data {
|
||||
/**
|
||||
* @brief Cache for an entire ledger.
|
||||
*/
|
||||
class LedgerCache {
|
||||
class LedgerCache : public LedgerCacheInterface {
|
||||
struct CacheEntry {
|
||||
uint32_t seq = 0;
|
||||
Blob blob;
|
||||
@@ -76,116 +78,59 @@ class LedgerCache {
|
||||
mutable std::shared_mutex mtx_;
|
||||
std::condition_variable_any cv_;
|
||||
uint32_t latestSeq_ = 0;
|
||||
std::atomic_bool full_ = false;
|
||||
std::atomic_bool disabled_ = false;
|
||||
util::prometheus::Bool full_{PrometheusService::boolMetric(
|
||||
"ledger_cache_full",
|
||||
util::prometheus::Labels{},
|
||||
"Whether ledger cache full or not"
|
||||
)};
|
||||
util::prometheus::Bool disabled_{PrometheusService::boolMetric(
|
||||
"ledger_cache_disabled",
|
||||
util::prometheus::Labels{},
|
||||
"Whether ledger cache is disabled or not"
|
||||
)};
|
||||
|
||||
// temporary set to prevent background thread from writing already deleted data. not used when cache is full
|
||||
std::unordered_set<ripple::uint256, ripple::hardened_hash<>> deletes_;
|
||||
|
||||
public:
|
||||
/**
|
||||
* @brief Update the cache with new ledger objects.
|
||||
*
|
||||
* @param objs The ledger objects to update cache with
|
||||
* @param seq The sequence to update cache for
|
||||
* @param isBackground Should be set to true when writing old data from a background thread
|
||||
*/
|
||||
void
|
||||
update(std::vector<LedgerObject> const& objs, uint32_t seq, bool isBackground = false);
|
||||
update(std::vector<LedgerObject> const& objs, uint32_t seq, bool isBackground = false) override;
|
||||
|
||||
/**
|
||||
* @brief Fetch a cached object by its key and sequence number.
|
||||
*
|
||||
* @param key The key to fetch for
|
||||
* @param seq The sequence to fetch for
|
||||
* @return If found in cache, will return the cached Blob; otherwise nullopt is returned
|
||||
*/
|
||||
std::optional<Blob>
|
||||
get(ripple::uint256 const& key, uint32_t seq) const;
|
||||
get(ripple::uint256 const& key, uint32_t seq) const override;
|
||||
|
||||
/**
|
||||
* @brief Gets a cached successor.
|
||||
*
|
||||
* Note: This function always returns std::nullopt when @ref isFull() returns false.
|
||||
*
|
||||
* @param key The key to fetch for
|
||||
* @param seq The sequence to fetch for
|
||||
* @return If found in cache, will return the cached successor; otherwise nullopt is returned
|
||||
*/
|
||||
std::optional<LedgerObject>
|
||||
getSuccessor(ripple::uint256 const& key, uint32_t seq) const;
|
||||
getSuccessor(ripple::uint256 const& key, uint32_t seq) const override;
|
||||
|
||||
/**
|
||||
* @brief Gets a cached predcessor.
|
||||
*
|
||||
* Note: This function always returns std::nullopt when @ref isFull() returns false.
|
||||
*
|
||||
* @param key The key to fetch for
|
||||
* @param seq The sequence to fetch for
|
||||
* @return If found in cache, will return the cached predcessor; otherwise nullopt is returned
|
||||
*/
|
||||
std::optional<LedgerObject>
|
||||
getPredecessor(ripple::uint256 const& key, uint32_t seq) const;
|
||||
getPredecessor(ripple::uint256 const& key, uint32_t seq) const override;
|
||||
|
||||
/**
|
||||
* @brief Disables the cache.
|
||||
*/
|
||||
void
|
||||
setDisabled();
|
||||
setDisabled() override;
|
||||
|
||||
/**
|
||||
* @return true if the cache is disabled; false otherwise
|
||||
*/
|
||||
bool
|
||||
isDisabled() const;
|
||||
isDisabled() const override;
|
||||
|
||||
/**
|
||||
* @brief Sets the full flag to true.
|
||||
*
|
||||
* This is used when cache loaded in its entirety at startup of the application. This can be either loaded from DB,
|
||||
* populated together with initial ledger download (on first run) or downloaded from a peer node (specified in
|
||||
* config).
|
||||
*/
|
||||
void
|
||||
setFull();
|
||||
setFull() override;
|
||||
|
||||
/**
|
||||
* @return The latest ledger sequence for which cache is available.
|
||||
*/
|
||||
uint32_t
|
||||
latestLedgerSequence() const;
|
||||
latestLedgerSequence() const override;
|
||||
|
||||
/**
|
||||
* @return true if the cache has all data for the most recent ledger; false otherwise
|
||||
*/
|
||||
bool
|
||||
isFull() const;
|
||||
isFull() const override;
|
||||
|
||||
/**
|
||||
* @return The total size of the cache.
|
||||
*/
|
||||
size_t
|
||||
size() const;
|
||||
size() const override;
|
||||
|
||||
/**
|
||||
* @return A number representing the success rate of hitting an object in the cache versus missing it.
|
||||
*/
|
||||
float
|
||||
getObjectHitRate() const;
|
||||
getObjectHitRate() const override;
|
||||
|
||||
/**
|
||||
* @return A number representing the success rate of hitting a successor in the cache versus missing it.
|
||||
*/
|
||||
float
|
||||
getSuccessorHitRate() const;
|
||||
getSuccessorHitRate() const override;
|
||||
|
||||
/**
|
||||
* @brief Waits until the cache contains a specific sequence.
|
||||
*
|
||||
* @param seq The sequence to wait for
|
||||
*/
|
||||
void
|
||||
waitUntilCacheContainsSeq(uint32_t seq);
|
||||
waitUntilCacheContainsSeq(uint32_t seq) override;
|
||||
};
|
||||
|
||||
} // namespace data
|
||||
|
||||
153
src/data/LedgerCacheInterface.hpp
Normal file
153
src/data/LedgerCacheInterface.hpp
Normal file
@@ -0,0 +1,153 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2025, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/Types.hpp"
|
||||
|
||||
#include <xrpl/basics/base_uint.h>
|
||||
#include <xrpl/basics/hardened_hash.h>
|
||||
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
#include <optional>
|
||||
#include <vector>
|
||||
|
||||
namespace data {
|
||||
|
||||
/**
|
||||
* @brief Cache for an entire ledger.
|
||||
*/
|
||||
class LedgerCacheInterface {
|
||||
public:
|
||||
virtual ~LedgerCacheInterface() = default;
|
||||
LedgerCacheInterface() = default;
|
||||
LedgerCacheInterface(LedgerCacheInterface&&) = delete;
|
||||
LedgerCacheInterface(LedgerCacheInterface const&) = delete;
|
||||
LedgerCacheInterface&
|
||||
operator=(LedgerCacheInterface&&) = delete;
|
||||
LedgerCacheInterface&
|
||||
operator=(LedgerCacheInterface const&) = delete;
|
||||
|
||||
/**
|
||||
* @brief Update the cache with new ledger objects.
|
||||
*
|
||||
* @param objs The ledger objects to update cache with
|
||||
* @param seq The sequence to update cache for
|
||||
* @param isBackground Should be set to true when writing old data from a background thread
|
||||
*/
|
||||
virtual void
|
||||
update(std::vector<LedgerObject> const& objs, uint32_t seq, bool isBackground = false) = 0;
|
||||
|
||||
/**
|
||||
* @brief Fetch a cached object by its key and sequence number.
|
||||
*
|
||||
* @param key The key to fetch for
|
||||
* @param seq The sequence to fetch for
|
||||
* @return If found in cache, will return the cached Blob; otherwise nullopt is returned
|
||||
*/
|
||||
virtual std::optional<Blob>
|
||||
get(ripple::uint256 const& key, uint32_t seq) const = 0;
|
||||
|
||||
/**
|
||||
* @brief Gets a cached successor.
|
||||
*
|
||||
* Note: This function always returns std::nullopt when @ref isFull() returns false.
|
||||
*
|
||||
* @param key The key to fetch for
|
||||
* @param seq The sequence to fetch for
|
||||
* @return If found in cache, will return the cached successor; otherwise nullopt is returned
|
||||
*/
|
||||
virtual std::optional<LedgerObject>
|
||||
getSuccessor(ripple::uint256 const& key, uint32_t seq) const = 0;
|
||||
|
||||
/**
|
||||
* @brief Gets a cached predcessor.
|
||||
*
|
||||
* Note: This function always returns std::nullopt when @ref isFull() returns false.
|
||||
*
|
||||
* @param key The key to fetch for
|
||||
* @param seq The sequence to fetch for
|
||||
* @return If found in cache, will return the cached predcessor; otherwise nullopt is returned
|
||||
*/
|
||||
virtual std::optional<LedgerObject>
|
||||
getPredecessor(ripple::uint256 const& key, uint32_t seq) const = 0;
|
||||
|
||||
/**
|
||||
* @brief Disables the cache.
|
||||
*/
|
||||
virtual void
|
||||
setDisabled() = 0;
|
||||
|
||||
/**
|
||||
* @return true if the cache is disabled; false otherwise
|
||||
*/
|
||||
virtual bool
|
||||
isDisabled() const = 0;
|
||||
|
||||
/**
|
||||
* @brief Sets the full flag to true.
|
||||
*
|
||||
* This is used when cache loaded in its entirety at startup of the application. This can be either loaded from DB,
|
||||
* populated together with initial ledger download (on first run) or downloaded from a peer node (specified in
|
||||
* config).
|
||||
*/
|
||||
virtual void
|
||||
setFull() = 0;
|
||||
|
||||
/**
|
||||
* @return The latest ledger sequence for which cache is available.
|
||||
*/
|
||||
virtual uint32_t
|
||||
latestLedgerSequence() const = 0;
|
||||
|
||||
/**
|
||||
* @return true if the cache has all data for the most recent ledger; false otherwise
|
||||
*/
|
||||
virtual bool
|
||||
isFull() const = 0;
|
||||
|
||||
/**
|
||||
* @return The total size of the cache.
|
||||
*/
|
||||
virtual size_t
|
||||
size() const = 0;
|
||||
|
||||
/**
|
||||
* @return A number representing the success rate of hitting an object in the cache versus missing it.
|
||||
*/
|
||||
virtual float
|
||||
getObjectHitRate() const = 0;
|
||||
|
||||
/**
|
||||
* @return A number representing the success rate of hitting a successor in the cache versus missing it.
|
||||
*/
|
||||
virtual float
|
||||
getSuccessorHitRate() const = 0;
|
||||
|
||||
/**
|
||||
* @brief Waits until the cache contains a specific sequence.
|
||||
*
|
||||
* @param seq The sequence to wait for
|
||||
*/
|
||||
virtual void
|
||||
waitUntilCacheContainsSeq(uint32_t seq) = 0;
|
||||
};
|
||||
|
||||
} // namespace data
|
||||
@@ -23,6 +23,7 @@
|
||||
#include "data/cassandra/Handle.hpp"
|
||||
#include "data/cassandra/Types.hpp"
|
||||
#include "data/cassandra/impl/RetryPolicy.hpp"
|
||||
#include "util/Mutex.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
|
||||
#include <boost/asio.hpp>
|
||||
@@ -64,8 +65,8 @@ class AsyncExecutor : public std::enable_shared_from_this<AsyncExecutor<Statemen
|
||||
RetryCallbackType onRetry_;
|
||||
|
||||
// does not exist during initial construction, hence optional
|
||||
std::optional<FutureWithCallbackType> future_;
|
||||
std::mutex mtx_;
|
||||
using OptionalFuture = std::optional<FutureWithCallbackType>;
|
||||
util::Mutex<OptionalFuture> future_;
|
||||
|
||||
public:
|
||||
/**
|
||||
@@ -127,8 +128,8 @@ private:
|
||||
self = nullptr; // explicitly decrement refcount
|
||||
};
|
||||
|
||||
std::scoped_lock const lck{mtx_};
|
||||
future_.emplace(handle.asyncExecute(data_, std::move(handler)));
|
||||
auto future = future_.template lock<std::scoped_lock>();
|
||||
future->emplace(handle.asyncExecute(data_, std::move(handler)));
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@@ -35,6 +35,7 @@
|
||||
#include <boost/asio/spawn.hpp>
|
||||
#include <boost/json/object.hpp>
|
||||
|
||||
#include <algorithm>
|
||||
#include <atomic>
|
||||
#include <chrono>
|
||||
#include <condition_variable>
|
||||
@@ -192,10 +193,24 @@ public:
|
||||
template <typename... Args>
|
||||
void
|
||||
write(PreparedStatementType const& preparedStatement, Args&&... args)
|
||||
{
|
||||
auto statement = preparedStatement.bind(std::forward<Args>(args)...);
|
||||
write(std::move(statement));
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Non-blocking query execution used for writing data.
|
||||
*
|
||||
* Retries forever with retry policy specified by @ref AsyncExecutor
|
||||
*
|
||||
* @param statement Statement to execute
|
||||
* @throw DatabaseTimeout on timeout
|
||||
*/
|
||||
void
|
||||
write(StatementType&& statement)
|
||||
{
|
||||
auto const startTime = std::chrono::steady_clock::now();
|
||||
|
||||
auto statement = preparedStatement.bind(std::forward<Args>(args)...);
|
||||
incrementOutstandingRequestCount();
|
||||
|
||||
counters_->registerWriteStarted();
|
||||
@@ -251,6 +266,21 @@ public:
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Non-blocking query execution used for writing data. Constrast with write, this method does not execute
|
||||
* the statements in a batch.
|
||||
*
|
||||
* Retries forever with retry policy specified by @ref AsyncExecutor.
|
||||
*
|
||||
* @param statements Vector of statements to execute
|
||||
* @throw DatabaseTimeout on timeout
|
||||
*/
|
||||
void
|
||||
writeEach(std::vector<StatementType>&& statements)
|
||||
{
|
||||
std::ranges::for_each(std::move(statements), [this](auto& statement) { this->write(std::move(statement)); });
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Coroutine-based query execution used for reading data.
|
||||
*
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
#pragma once
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/LedgerCacheInterface.hpp"
|
||||
#include "etl/CacheLoaderSettings.hpp"
|
||||
#include "etl/impl/CacheLoader.hpp"
|
||||
#include "etl/impl/CursorFromAccountProvider.hpp"
|
||||
@@ -44,13 +45,13 @@ namespace etl {
|
||||
* @tparam CursorProviderType The type of the cursor provider to use
|
||||
* @tparam ExecutionContextType The type of the execution context to use
|
||||
*/
|
||||
template <typename CacheType, typename ExecutionContextType = util::async::CoroExecutionContext>
|
||||
template <typename ExecutionContextType = util::async::CoroExecutionContext>
|
||||
class CacheLoader {
|
||||
using CacheLoaderType = impl::CacheLoaderImpl<CacheType>;
|
||||
using CacheLoaderType = impl::CacheLoaderImpl<data::LedgerCacheInterface>;
|
||||
|
||||
util::Logger log_{"ETL"};
|
||||
std::shared_ptr<BackendInterface> backend_;
|
||||
std::reference_wrapper<CacheType> cache_;
|
||||
std::reference_wrapper<data::LedgerCacheInterface> cache_;
|
||||
|
||||
CacheLoaderSettings settings_;
|
||||
ExecutionContextType ctx_;
|
||||
@@ -67,7 +68,7 @@ public:
|
||||
CacheLoader(
|
||||
util::config::ClioConfigDefinition const& config,
|
||||
std::shared_ptr<BackendInterface> const& backend,
|
||||
CacheType& cache
|
||||
data::LedgerCacheInterface& cache
|
||||
)
|
||||
: backend_{backend}, cache_{cache}, settings_{makeCacheLoaderSettings(config)}, ctx_{settings_.numThreads}
|
||||
{
|
||||
@@ -130,7 +131,8 @@ public:
|
||||
void
|
||||
stop() noexcept
|
||||
{
|
||||
loader_->stop();
|
||||
if (loader_ != nullptr)
|
||||
loader_->stop();
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -139,7 +141,8 @@ public:
|
||||
void
|
||||
wait() noexcept
|
||||
{
|
||||
loader_->wait();
|
||||
if (loader_ != nullptr)
|
||||
loader_->wait();
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/LedgerCacheInterface.hpp"
|
||||
#include "etl/SystemState.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
|
||||
@@ -31,10 +32,9 @@ namespace etl {
|
||||
*
|
||||
* @tparam CacheType The type of the cache to disable on corruption
|
||||
*/
|
||||
template <typename CacheType>
|
||||
class CorruptionDetector {
|
||||
std::reference_wrapper<SystemState> state_;
|
||||
std::reference_wrapper<CacheType> cache_;
|
||||
std::reference_wrapper<data::LedgerCacheInterface> cache_;
|
||||
|
||||
util::Logger log_{"ETL"};
|
||||
|
||||
@@ -45,7 +45,8 @@ public:
|
||||
* @param state The system state
|
||||
* @param cache The cache to disable on corruption
|
||||
*/
|
||||
CorruptionDetector(SystemState& state, CacheType& cache) : state_{std::ref(state)}, cache_{std::ref(cache)}
|
||||
CorruptionDetector(SystemState& state, data::LedgerCacheInterface& cache)
|
||||
: state_{std::ref(state)}, cache_{std::ref(cache)}
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
@@ -20,7 +20,6 @@
|
||||
#include "etl/ETLService.hpp"
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/LedgerCache.hpp"
|
||||
#include "etl/CorruptionDetector.hpp"
|
||||
#include "etl/NetworkValidatedLedgersInterface.hpp"
|
||||
#include "feed/SubscriptionManagerInterface.hpp"
|
||||
@@ -285,6 +284,6 @@ ETLService::ETLService(
|
||||
txnThreshold_ = config.get<std::size_t>("txn_threshold");
|
||||
|
||||
// This should probably be done in the backend factory but we don't have state available until here
|
||||
backend_->setCorruptionDetector(CorruptionDetector<data::LedgerCache>{state_, backend->cache()});
|
||||
backend_->setCorruptionDetector(CorruptionDetector{state_, backend->cache()});
|
||||
}
|
||||
} // namespace etl
|
||||
|
||||
@@ -20,7 +20,6 @@
|
||||
#pragma once
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/LedgerCache.hpp"
|
||||
#include "etl/CacheLoader.hpp"
|
||||
#include "etl/ETLState.hpp"
|
||||
#include "etl/LoadBalancer.hpp"
|
||||
@@ -42,6 +41,7 @@
|
||||
#include <org/xrpl/rpc/v1/get_ledger.pb.h>
|
||||
#include <xrpl/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>
|
||||
|
||||
#include <concepts>
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
#include <memory>
|
||||
@@ -58,6 +58,16 @@ struct NFTsData;
|
||||
*/
|
||||
namespace etl {
|
||||
|
||||
/**
|
||||
* @brief A tag class to help identify ETLService in templated code.
|
||||
*/
|
||||
struct ETLServiceTag {
|
||||
virtual ~ETLServiceTag() = default;
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
concept SomeETLService = std::derived_from<T, ETLServiceTag>;
|
||||
|
||||
/**
|
||||
* @brief This class is responsible for continuously extracting data from a p2p node, and writing that data to the
|
||||
* databases.
|
||||
@@ -71,16 +81,15 @@ namespace etl {
|
||||
* the others will fall back to monitoring/publishing. In this sense, this class dynamically transitions from monitoring
|
||||
* to writing and from writing to monitoring, based on the activity of other processes running on different machines.
|
||||
*/
|
||||
class ETLService {
|
||||
class ETLService : public ETLServiceTag {
|
||||
// TODO: make these template parameters in ETLService
|
||||
using LoadBalancerType = LoadBalancer;
|
||||
using DataPipeType = etl::impl::ExtractionDataPipe<org::xrpl::rpc::v1::GetLedgerResponse>;
|
||||
using CacheType = data::LedgerCache;
|
||||
using CacheLoaderType = etl::CacheLoader<CacheType>;
|
||||
using CacheLoaderType = etl::CacheLoader<>;
|
||||
using LedgerFetcherType = etl::impl::LedgerFetcher<LoadBalancerType>;
|
||||
using ExtractorType = etl::impl::Extractor<DataPipeType, LedgerFetcherType>;
|
||||
using LedgerLoaderType = etl::impl::LedgerLoader<LoadBalancerType, LedgerFetcherType>;
|
||||
using LedgerPublisherType = etl::impl::LedgerPublisher<CacheType>;
|
||||
using LedgerPublisherType = etl::impl::LedgerPublisher;
|
||||
using AmendmentBlockHandlerType = etl::impl::AmendmentBlockHandler;
|
||||
using TransformerType =
|
||||
etl::impl::Transformer<DataPipeType, LedgerLoaderType, LedgerPublisherType, AmendmentBlockHandlerType>;
|
||||
@@ -127,6 +136,11 @@ public:
|
||||
std::shared_ptr<NetworkValidatedLedgersInterface> ledgers
|
||||
);
|
||||
|
||||
/**
|
||||
* @brief Move constructor is deleted because ETL service shares its fields by reference
|
||||
*/
|
||||
ETLService(ETLService&&) = delete;
|
||||
|
||||
/**
|
||||
* @brief A factory function to spawn new ETLService instances.
|
||||
*
|
||||
@@ -159,10 +173,20 @@ public:
|
||||
/**
|
||||
* @brief Stops components and joins worker thread.
|
||||
*/
|
||||
~ETLService()
|
||||
~ETLService() override
|
||||
{
|
||||
LOG(log_.info()) << "onStop called";
|
||||
LOG(log_.debug()) << "Stopping Reporting ETL";
|
||||
if (not state_.isStopping)
|
||||
stop();
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Stop the ETL service.
|
||||
* @note This method blocks until the ETL service has stopped.
|
||||
*/
|
||||
void
|
||||
stop()
|
||||
{
|
||||
LOG(log_.info()) << "Stop called";
|
||||
|
||||
state_.isStopping = true;
|
||||
cacheLoader_.stop();
|
||||
|
||||
@@ -31,15 +31,12 @@
|
||||
|
||||
namespace etl {
|
||||
|
||||
std::optional<ETLState>
|
||||
tag_invoke(boost::json::value_to_tag<std::optional<ETLState>>, boost::json::value const& jv)
|
||||
ETLState
|
||||
tag_invoke(boost::json::value_to_tag<ETLState>, boost::json::value const& jv)
|
||||
{
|
||||
ETLState state;
|
||||
auto const& jsonObject = jv.as_object();
|
||||
|
||||
if (jsonObject.contains(JS(error)))
|
||||
return std::nullopt;
|
||||
|
||||
if (jsonObject.contains(JS(result)) && jsonObject.at(JS(result)).as_object().contains(JS(info))) {
|
||||
auto const rippledInfo = jsonObject.at(JS(result)).as_object().at(JS(info)).as_object();
|
||||
if (rippledInfo.contains(JS(network_id)))
|
||||
|
||||
@@ -20,12 +20,14 @@
|
||||
#pragma once
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "rpc/JS.hpp"
|
||||
|
||||
#include <boost/json.hpp>
|
||||
#include <boost/json/conversion.hpp>
|
||||
#include <boost/json/object.hpp>
|
||||
#include <boost/json/value.hpp>
|
||||
#include <boost/json/value_to.hpp>
|
||||
#include <xrpl/protocol/jss.h>
|
||||
|
||||
#include <cstdint>
|
||||
#include <optional>
|
||||
@@ -54,8 +56,9 @@ struct ETLState {
|
||||
return std::nullopt;
|
||||
});
|
||||
|
||||
if (serverInfoRippled)
|
||||
return boost::json::value_to<std::optional<ETLState>>(boost::json::value(*serverInfoRippled));
|
||||
if (serverInfoRippled && not serverInfoRippled->contains(JS(error))) {
|
||||
return boost::json::value_to<ETLState>(boost::json::value(*serverInfoRippled));
|
||||
}
|
||||
|
||||
return std::nullopt;
|
||||
}
|
||||
@@ -67,7 +70,7 @@ struct ETLState {
|
||||
* @param jv The json value to convert
|
||||
* @return The ETLState
|
||||
*/
|
||||
std::optional<ETLState>
|
||||
tag_invoke(boost::json::value_to_tag<std::optional<ETLState>>, boost::json::value const& jv);
|
||||
ETLState
|
||||
tag_invoke(boost::json::value_to_tag<ETLState>, boost::json::value const& jv);
|
||||
|
||||
} // namespace etl
|
||||
|
||||
@@ -26,6 +26,7 @@
|
||||
#include "feed/SubscriptionManagerInterface.hpp"
|
||||
#include "rpc/Errors.hpp"
|
||||
#include "util/Assert.hpp"
|
||||
#include "util/CoroutineGroup.hpp"
|
||||
#include "util/Random.hpp"
|
||||
#include "util/ResponseExpirationCache.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
@@ -336,6 +337,16 @@ LoadBalancer::getETLState() noexcept
|
||||
return etlState_;
|
||||
}
|
||||
|
||||
void
|
||||
LoadBalancer::stop(boost::asio::yield_context yield)
|
||||
{
|
||||
util::CoroutineGroup group{yield};
|
||||
std::ranges::for_each(sources_, [&group, yield](auto& source) {
|
||||
group.spawn(yield, [&source](boost::asio::yield_context innerYield) { source->stop(innerYield); });
|
||||
});
|
||||
group.asyncWait(yield);
|
||||
}
|
||||
|
||||
void
|
||||
LoadBalancer::chooseForwardingSource()
|
||||
{
|
||||
|
||||
@@ -41,6 +41,7 @@
|
||||
#include <xrpl/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>
|
||||
|
||||
#include <chrono>
|
||||
#include <concepts>
|
||||
#include <cstdint>
|
||||
#include <expected>
|
||||
#include <memory>
|
||||
@@ -51,6 +52,16 @@
|
||||
|
||||
namespace etl {
|
||||
|
||||
/**
|
||||
* @brief A tag class to help identify LoadBalancer in templated code.
|
||||
*/
|
||||
struct LoadBalancerTag {
|
||||
virtual ~LoadBalancerTag() = default;
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
concept SomeLoadBalancer = std::derived_from<T, LoadBalancerTag>;
|
||||
|
||||
/**
|
||||
* @brief This class is used to manage connections to transaction processing processes.
|
||||
*
|
||||
@@ -58,7 +69,7 @@ namespace etl {
|
||||
* which ledgers have been validated by the network, and the range of ledgers each etl source has). This class also
|
||||
* allows requests for ledger data to be load balanced across all possible ETL sources.
|
||||
*/
|
||||
class LoadBalancer {
|
||||
class LoadBalancer : public LoadBalancerTag {
|
||||
public:
|
||||
using RawLedgerObjectType = org::xrpl::rpc::v1::RawLedgerObject;
|
||||
using GetLedgerResponseType = org::xrpl::rpc::v1::GetLedgerResponse;
|
||||
@@ -132,7 +143,7 @@ public:
|
||||
SourceFactory sourceFactory = makeSource
|
||||
);
|
||||
|
||||
~LoadBalancer();
|
||||
~LoadBalancer() override;
|
||||
|
||||
/**
|
||||
* @brief Load the initial ledger, writing data to the queue.
|
||||
@@ -203,6 +214,15 @@ public:
|
||||
std::optional<ETLState>
|
||||
getETLState() noexcept;
|
||||
|
||||
/**
|
||||
* @brief Stop the load balancer. This will stop all subscription sources.
|
||||
* @note This function will asynchronously wait for all sources to stop.
|
||||
*
|
||||
* @param yield The coroutine context
|
||||
*/
|
||||
void
|
||||
stop(boost::asio::yield_context yield);
|
||||
|
||||
private:
|
||||
/**
|
||||
* @brief Execute a function on a randomly selected source.
|
||||
|
||||
@@ -47,6 +47,17 @@
|
||||
|
||||
namespace etl {
|
||||
|
||||
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
|
||||
getNftokenModifyData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
|
||||
{
|
||||
auto const tokenID = sttx.getFieldH256(ripple::sfNFTokenID);
|
||||
// note: sfURI is optional, if it is absent, we will update the uri as empty string
|
||||
return {
|
||||
{NFTTransactionsData(sttx.getFieldH256(ripple::sfNFTokenID), txMeta, sttx.getTransactionID())},
|
||||
NFTsData(tokenID, txMeta, sttx.getFieldVL(ripple::sfURI))
|
||||
};
|
||||
}
|
||||
|
||||
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
|
||||
getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
|
||||
{
|
||||
@@ -166,7 +177,7 @@ getNFTokenBurnData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
|
||||
node.peekAtField(ripple::sfPreviousFields).downcast<ripple::STObject>();
|
||||
if (previousFields.isFieldPresent(ripple::sfNFTokens))
|
||||
prevNFTs = previousFields.getFieldArray(ripple::sfNFTokens);
|
||||
} else if (!prevNFTs && node.getFName() == ripple::sfDeletedNode) {
|
||||
} else if (node.getFName() == ripple::sfDeletedNode) {
|
||||
prevNFTs =
|
||||
node.peekAtField(ripple::sfFinalFields).downcast<ripple::STObject>().getFieldArray(ripple::sfNFTokens);
|
||||
}
|
||||
@@ -336,6 +347,9 @@ getNFTDataFromTx(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
|
||||
case ripple::TxType::ttNFTOKEN_CREATE_OFFER:
|
||||
return getNFTokenCreateOfferData(txMeta, sttx);
|
||||
|
||||
case ripple::TxType::ttNFTOKEN_MODIFY:
|
||||
return getNftokenModifyData(txMeta, sttx);
|
||||
|
||||
default:
|
||||
return {{}, {}};
|
||||
}
|
||||
|
||||
@@ -33,6 +33,16 @@
|
||||
|
||||
namespace etl {
|
||||
|
||||
/**
|
||||
* @brief Get the NFT URI change data from a NFToken Modify transaction
|
||||
*
|
||||
* @param txMeta Transaction metadata
|
||||
* @param sttx The transaction
|
||||
* @return NFT URI change data as a pair of transactions and optional NFTsData
|
||||
*/
|
||||
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
|
||||
getNftokenModifyData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx);
|
||||
|
||||
/**
|
||||
* @brief Get the NFT Token mint data from a transaction
|
||||
*
|
||||
|
||||
@@ -19,6 +19,8 @@
|
||||
|
||||
#include "etl/NetworkValidatedLedgers.hpp"
|
||||
|
||||
#include <boost/signals2/connection.hpp>
|
||||
|
||||
#include <chrono>
|
||||
#include <cstdint>
|
||||
#include <memory>
|
||||
@@ -35,25 +37,27 @@ NetworkValidatedLedgers::makeValidatedLedgers()
|
||||
void
|
||||
NetworkValidatedLedgers::push(uint32_t idx)
|
||||
{
|
||||
std::lock_guard const lck(m_);
|
||||
if (!max_ || idx > *max_)
|
||||
max_ = idx;
|
||||
std::lock_guard const lck(mtx_);
|
||||
if (!latest_ || idx > *latest_)
|
||||
latest_ = idx;
|
||||
|
||||
notificationChannel_(idx);
|
||||
cv_.notify_all();
|
||||
}
|
||||
|
||||
std::optional<uint32_t>
|
||||
NetworkValidatedLedgers::getMostRecent()
|
||||
{
|
||||
std::unique_lock lck(m_);
|
||||
cv_.wait(lck, [this]() { return max_; });
|
||||
return max_;
|
||||
std::unique_lock lck(mtx_);
|
||||
cv_.wait(lck, [this]() { return latest_; });
|
||||
return latest_;
|
||||
}
|
||||
|
||||
bool
|
||||
NetworkValidatedLedgers::waitUntilValidatedByNetwork(uint32_t sequence, std::optional<uint32_t> maxWaitMs)
|
||||
{
|
||||
std::unique_lock lck(m_);
|
||||
auto pred = [sequence, this]() -> bool { return (max_ && sequence <= *max_); };
|
||||
std::unique_lock lck(mtx_);
|
||||
auto pred = [sequence, this]() -> bool { return (latest_ && sequence <= *latest_); };
|
||||
if (maxWaitMs) {
|
||||
cv_.wait_for(lck, std::chrono::milliseconds(*maxWaitMs));
|
||||
} else {
|
||||
@@ -62,4 +66,10 @@ NetworkValidatedLedgers::waitUntilValidatedByNetwork(uint32_t sequence, std::opt
|
||||
return pred();
|
||||
}
|
||||
|
||||
boost::signals2::scoped_connection
|
||||
NetworkValidatedLedgers::subscribe(SignalType::slot_type const& subscriber)
|
||||
{
|
||||
return notificationChannel_.connect(subscriber);
|
||||
}
|
||||
|
||||
} // namespace etl
|
||||
|
||||
@@ -21,6 +21,10 @@
|
||||
|
||||
#include "etl/NetworkValidatedLedgersInterface.hpp"
|
||||
|
||||
#include <boost/signals2/connection.hpp>
|
||||
#include <boost/signals2/signal.hpp>
|
||||
#include <boost/signals2/variadic_signal.hpp>
|
||||
|
||||
#include <condition_variable>
|
||||
#include <cstdint>
|
||||
#include <memory>
|
||||
@@ -38,12 +42,13 @@ namespace etl {
|
||||
* remains stopped for the rest of its lifetime.
|
||||
*/
|
||||
class NetworkValidatedLedgers : public NetworkValidatedLedgersInterface {
|
||||
// max sequence validated by network
|
||||
std::optional<uint32_t> max_;
|
||||
std::optional<uint32_t> latest_; // currently known latest sequence validated by network
|
||||
|
||||
mutable std::mutex m_;
|
||||
mutable std::mutex mtx_;
|
||||
std::condition_variable cv_;
|
||||
|
||||
SignalType notificationChannel_;
|
||||
|
||||
public:
|
||||
/**
|
||||
* @brief A factory function for NetworkValidatedLedgers
|
||||
@@ -81,6 +86,9 @@ public:
|
||||
*/
|
||||
bool
|
||||
waitUntilValidatedByNetwork(uint32_t sequence, std::optional<uint32_t> maxWaitMs = {}) final;
|
||||
|
||||
boost::signals2::scoped_connection
|
||||
subscribe(SignalType::slot_type const& subscriber) override;
|
||||
};
|
||||
|
||||
} // namespace etl
|
||||
|
||||
@@ -20,6 +20,10 @@
|
||||
/** @file */
|
||||
#pragma once
|
||||
|
||||
#include <boost/signals2/connection.hpp>
|
||||
#include <boost/signals2/signal.hpp>
|
||||
#include <boost/signals2/variadic_signal.hpp>
|
||||
|
||||
#include <cstdint>
|
||||
#include <optional>
|
||||
namespace etl {
|
||||
@@ -29,6 +33,8 @@ namespace etl {
|
||||
*/
|
||||
class NetworkValidatedLedgersInterface {
|
||||
public:
|
||||
using SignalType = boost::signals2::signal<void(uint32_t)>;
|
||||
|
||||
virtual ~NetworkValidatedLedgersInterface() = default;
|
||||
|
||||
/**
|
||||
@@ -46,7 +52,7 @@ public:
|
||||
*
|
||||
* @return Sequence of most recently validated ledger. empty optional if the datastructure has been stopped
|
||||
*/
|
||||
virtual std::optional<uint32_t>
|
||||
[[nodiscard]] virtual std::optional<uint32_t>
|
||||
getMostRecent() = 0;
|
||||
|
||||
/**
|
||||
@@ -59,6 +65,15 @@ public:
|
||||
*/
|
||||
virtual bool
|
||||
waitUntilValidatedByNetwork(uint32_t sequence, std::optional<uint32_t> maxWaitMs = {}) = 0;
|
||||
|
||||
/**
|
||||
* @brief Allows clients to get notified when a new validated ledger becomes known to Clio
|
||||
*
|
||||
* @param subscriber The slot to connect
|
||||
* @return A connection object that automatically disconnects the subscription once destroyed
|
||||
*/
|
||||
[[nodiscard]] virtual boost::signals2::scoped_connection
|
||||
subscribe(SignalType::slot_type const& subscriber) = 0;
|
||||
};
|
||||
|
||||
} // namespace etl
|
||||
|
||||
@@ -65,6 +65,15 @@ public:
|
||||
virtual void
|
||||
run() = 0;
|
||||
|
||||
/**
|
||||
* @brief Stop Source.
|
||||
* @note This method will asynchronously wait for source to be stopped.
|
||||
*
|
||||
* @param yield The coroutine context.
|
||||
*/
|
||||
virtual void
|
||||
stop(boost::asio::yield_context yield) = 0;
|
||||
|
||||
/**
|
||||
* @brief Check if source is connected
|
||||
*
|
||||
|
||||
@@ -57,6 +57,7 @@ struct FormattedTransactionsData {
|
||||
std::vector<NFTTransactionsData> nfTokenTxData;
|
||||
std::vector<NFTsData> nfTokensData;
|
||||
std::vector<MPTHolderData> mptHoldersData;
|
||||
std::vector<NFTsData> nfTokenURIChanges;
|
||||
};
|
||||
|
||||
namespace etl::impl {
|
||||
@@ -111,6 +112,7 @@ public:
|
||||
{
|
||||
FormattedTransactionsData result;
|
||||
|
||||
std::vector<NFTsData> nfTokenURIChanges;
|
||||
for (auto& txn : *(data.mutable_transactions_list()->mutable_transactions())) {
|
||||
std::string* raw = txn.mutable_transaction_blob();
|
||||
|
||||
@@ -123,8 +125,15 @@ public:
|
||||
|
||||
auto const [nftTxs, maybeNFT] = getNFTDataFromTx(txMeta, sttx);
|
||||
result.nfTokenTxData.insert(result.nfTokenTxData.end(), nftTxs.begin(), nftTxs.end());
|
||||
if (maybeNFT)
|
||||
result.nfTokensData.push_back(*maybeNFT);
|
||||
|
||||
// We need to unique the URI changes separately, in case the URI changes are discarded
|
||||
if (maybeNFT) {
|
||||
if (maybeNFT->onlyUriChanged) {
|
||||
nfTokenURIChanges.push_back(*maybeNFT);
|
||||
} else {
|
||||
result.nfTokensData.push_back(*maybeNFT);
|
||||
}
|
||||
}
|
||||
|
||||
auto const maybeMPTHolder = getMPTHolderFromTx(txMeta, sttx);
|
||||
if (maybeMPTHolder)
|
||||
@@ -143,6 +152,10 @@ public:
|
||||
}
|
||||
|
||||
result.nfTokensData = getUniqueNFTsDatas(result.nfTokensData);
|
||||
nfTokenURIChanges = getUniqueNFTsDatas(nfTokenURIChanges);
|
||||
|
||||
// Put uri change at the end to ensure the uri not overwritten
|
||||
result.nfTokensData.insert(result.nfTokensData.end(), nfTokenURIChanges.begin(), nfTokenURIChanges.end());
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
@@ -21,6 +21,7 @@
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/DBHelpers.hpp"
|
||||
#include "data/LedgerCacheInterface.hpp"
|
||||
#include "data/Types.hpp"
|
||||
#include "etl/SystemState.hpp"
|
||||
#include "feed/SubscriptionManagerInterface.hpp"
|
||||
@@ -38,6 +39,7 @@
|
||||
#include <xrpl/protocol/STObject.h>
|
||||
#include <xrpl/protocol/Serializer.h>
|
||||
|
||||
#include <algorithm>
|
||||
#include <chrono>
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
@@ -64,14 +66,13 @@ namespace etl::impl {
|
||||
* includes reading all of the transactions from the database) is done from the application wide asio io_service, and a
|
||||
* strand is used to ensure ledgers are published in order.
|
||||
*/
|
||||
template <typename CacheType>
|
||||
class LedgerPublisher {
|
||||
util::Logger log_{"ETL"};
|
||||
|
||||
boost::asio::strand<boost::asio::io_context::executor_type> publishStrand_;
|
||||
|
||||
std::shared_ptr<BackendInterface> backend_;
|
||||
std::reference_wrapper<CacheType> cache_;
|
||||
std::reference_wrapper<data::LedgerCacheInterface> cache_;
|
||||
std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions_;
|
||||
std::reference_wrapper<SystemState const> state_; // shared state for ETL
|
||||
|
||||
@@ -94,7 +95,7 @@ public:
|
||||
LedgerPublisher(
|
||||
boost::asio::io_context& ioc,
|
||||
std::shared_ptr<BackendInterface> backend,
|
||||
CacheType& cache,
|
||||
data::LedgerCacheInterface& cache,
|
||||
std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions,
|
||||
SystemState const& state
|
||||
)
|
||||
@@ -205,7 +206,7 @@ public:
|
||||
subscriptions_->pubLedger(lgrInfo, *fees, range, transactions.size());
|
||||
|
||||
// order with transaction index
|
||||
std::sort(transactions.begin(), transactions.end(), [](auto const& t1, auto const& t2) {
|
||||
std::ranges::sort(transactions, [](auto const& t1, auto const& t2) {
|
||||
ripple::SerialIter iter1{t1.metadata.data(), t1.metadata.size()};
|
||||
ripple::STObject const object1(iter1, ripple::sfMetadata);
|
||||
ripple::SerialIter iter2{t2.metadata.data(), t2.metadata.size()};
|
||||
|
||||
@@ -102,6 +102,12 @@ public:
|
||||
subscriptionSource_->run();
|
||||
}
|
||||
|
||||
void
|
||||
stop(boost::asio::yield_context yield) final
|
||||
{
|
||||
subscriptionSource_->stop(yield);
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Check if source is connected
|
||||
*
|
||||
|
||||
@@ -35,7 +35,6 @@
|
||||
#include <boost/asio/io_context.hpp>
|
||||
#include <boost/asio/spawn.hpp>
|
||||
#include <boost/asio/strand.hpp>
|
||||
#include <boost/asio/use_future.hpp>
|
||||
#include <boost/beast/http/field.hpp>
|
||||
#include <boost/json/object.hpp>
|
||||
#include <boost/json/parse.hpp>
|
||||
@@ -49,7 +48,6 @@
|
||||
#include <cstdint>
|
||||
#include <exception>
|
||||
#include <expected>
|
||||
#include <future>
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <stdexcept>
|
||||
@@ -92,15 +90,6 @@ SubscriptionSource::SubscriptionSource(
|
||||
.setConnectionTimeout(wsTimeout_);
|
||||
}
|
||||
|
||||
SubscriptionSource::~SubscriptionSource()
|
||||
{
|
||||
stop();
|
||||
retry_.cancel();
|
||||
|
||||
if (runFuture_.valid())
|
||||
runFuture_.wait();
|
||||
}
|
||||
|
||||
void
|
||||
SubscriptionSource::run()
|
||||
{
|
||||
@@ -157,59 +146,53 @@ SubscriptionSource::validatedRange() const
|
||||
}
|
||||
|
||||
void
|
||||
SubscriptionSource::stop()
|
||||
SubscriptionSource::stop(boost::asio::yield_context yield)
|
||||
{
|
||||
stop_ = true;
|
||||
stopHelper_.asyncWaitForStop(yield);
|
||||
}
|
||||
|
||||
void
|
||||
SubscriptionSource::subscribe()
|
||||
{
|
||||
runFuture_ = boost::asio::spawn(
|
||||
strand_,
|
||||
[this, _ = boost::asio::make_work_guard(strand_)](boost::asio::yield_context yield) {
|
||||
auto connection = wsConnectionBuilder_.connect(yield);
|
||||
if (not connection) {
|
||||
handleError(connection.error(), yield);
|
||||
return;
|
||||
}
|
||||
|
||||
boost::asio::spawn(strand_, [this, _ = boost::asio::make_work_guard(strand_)](boost::asio::yield_context yield) {
|
||||
if (auto connection = wsConnectionBuilder_.connect(yield); connection) {
|
||||
wsConnection_ = std::move(connection).value();
|
||||
} else {
|
||||
handleError(connection.error(), yield);
|
||||
return;
|
||||
}
|
||||
|
||||
auto const& subscribeCommand = getSubscribeCommandJson();
|
||||
auto const writeErrorOpt = wsConnection_->write(subscribeCommand, yield, wsTimeout_);
|
||||
if (writeErrorOpt) {
|
||||
handleError(writeErrorOpt.value(), yield);
|
||||
auto const& subscribeCommand = getSubscribeCommandJson();
|
||||
|
||||
if (auto const writeErrorOpt = wsConnection_->write(subscribeCommand, yield, wsTimeout_); writeErrorOpt) {
|
||||
handleError(writeErrorOpt.value(), yield);
|
||||
return;
|
||||
}
|
||||
|
||||
isConnected_ = true;
|
||||
LOG(log_.info()) << "Connected";
|
||||
onConnect_();
|
||||
|
||||
retry_.reset();
|
||||
|
||||
while (!stop_) {
|
||||
auto const message = wsConnection_->read(yield, wsTimeout_);
|
||||
if (not message) {
|
||||
handleError(message.error(), yield);
|
||||
return;
|
||||
}
|
||||
|
||||
isConnected_ = true;
|
||||
LOG(log_.info()) << "Connected";
|
||||
onConnect_();
|
||||
|
||||
retry_.reset();
|
||||
|
||||
while (!stop_) {
|
||||
auto const message = wsConnection_->read(yield, wsTimeout_);
|
||||
if (not message) {
|
||||
handleError(message.error(), yield);
|
||||
return;
|
||||
}
|
||||
|
||||
auto const handleErrorOpt = handleMessage(message.value());
|
||||
if (handleErrorOpt) {
|
||||
handleError(handleErrorOpt.value(), yield);
|
||||
return;
|
||||
}
|
||||
if (auto const handleErrorOpt = handleMessage(message.value()); handleErrorOpt) {
|
||||
handleError(handleErrorOpt.value(), yield);
|
||||
return;
|
||||
}
|
||||
// Close the connection
|
||||
handleError(
|
||||
util::requests::RequestError{"Subscription source stopped", boost::asio::error::operation_aborted},
|
||||
yield
|
||||
);
|
||||
},
|
||||
boost::asio::use_future
|
||||
);
|
||||
}
|
||||
// Close the connection
|
||||
handleError(
|
||||
util::requests::RequestError{"Subscription source stopped", boost::asio::error::operation_aborted}, yield
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
std::optional<util::requests::RequestError>
|
||||
@@ -299,6 +282,8 @@ SubscriptionSource::handleError(util::requests::RequestError const& error, boost
|
||||
logError(error);
|
||||
if (not stop_) {
|
||||
retry_.retry([this] { subscribe(); });
|
||||
} else {
|
||||
stopHelper_.readyToStop();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -24,6 +24,7 @@
|
||||
#include "feed/SubscriptionManagerInterface.hpp"
|
||||
#include "util/Mutex.hpp"
|
||||
#include "util/Retry.hpp"
|
||||
#include "util/StopHelper.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
#include "util/prometheus/Gauge.hpp"
|
||||
#include "util/requests/Types.hpp"
|
||||
@@ -39,7 +40,6 @@
|
||||
#include <chrono>
|
||||
#include <cstdint>
|
||||
#include <functional>
|
||||
#include <future>
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <string>
|
||||
@@ -50,6 +50,7 @@ namespace etl::impl {
|
||||
|
||||
/**
|
||||
* @brief This class is used to subscribe to a source of ledger data and forward it to the subscription manager.
|
||||
* @note This class is safe to delete only if io_context is stopped.
|
||||
*/
|
||||
class SubscriptionSource {
|
||||
public:
|
||||
@@ -89,7 +90,7 @@ private:
|
||||
|
||||
std::reference_wrapper<util::prometheus::GaugeInt> lastMessageTimeSecondsSinceEpoch_;
|
||||
|
||||
std::future<void> runFuture_;
|
||||
util::StopHelper stopHelper_;
|
||||
|
||||
static constexpr std::chrono::seconds kWS_TIMEOUT{30};
|
||||
static constexpr std::chrono::seconds kRETRY_MAX_DELAY{30};
|
||||
@@ -124,13 +125,6 @@ public:
|
||||
std::chrono::steady_clock::duration const retryDelay = SubscriptionSource::kRETRY_DELAY
|
||||
);
|
||||
|
||||
/**
|
||||
* @brief Destroy the Subscription Source object
|
||||
*
|
||||
* @note This will block to wait for all the async operations to complete. io_context must be still running
|
||||
*/
|
||||
~SubscriptionSource();
|
||||
|
||||
/**
|
||||
* @brief Run the source
|
||||
*/
|
||||
@@ -192,7 +186,7 @@ public:
|
||||
* @brief Stop the source. The source will complete already scheduled operations but will not schedule new ones
|
||||
*/
|
||||
void
|
||||
stop();
|
||||
stop(boost::asio::yield_context yield);
|
||||
|
||||
private:
|
||||
void
|
||||
|
||||
@@ -1,8 +1,14 @@
|
||||
add_library(clio_etlng)
|
||||
|
||||
target_sources(
|
||||
clio_etlng PRIVATE impl/AmendmentBlockHandler.cpp impl/AsyncGrpcCall.cpp impl/Extraction.cpp impl/GrpcSource.cpp
|
||||
impl/Loading.cpp
|
||||
clio_etlng
|
||||
PRIVATE impl/AmendmentBlockHandler.cpp
|
||||
impl/AsyncGrpcCall.cpp
|
||||
impl/Extraction.cpp
|
||||
impl/GrpcSource.cpp
|
||||
impl/Loading.cpp
|
||||
impl/Monitor.cpp
|
||||
impl/TaskManager.cpp
|
||||
)
|
||||
|
||||
target_link_libraries(clio_etlng PUBLIC clio_data)
|
||||
|
||||
49
src/etlng/LedgerPublisherInterface.hpp
Normal file
49
src/etlng/LedgerPublisherInterface.hpp
Normal file
@@ -0,0 +1,49 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2025, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <chrono>
|
||||
#include <cstdint>
|
||||
#include <optional>
|
||||
|
||||
namespace etlng {
|
||||
|
||||
/**
|
||||
* @brief The interface of a scheduler for the extraction proccess
|
||||
*/
|
||||
struct LedgerPublisherInterface {
|
||||
virtual ~LedgerPublisherInterface() = default;
|
||||
|
||||
/**
|
||||
* @brief Publish the ledger by its sequence number
|
||||
*
|
||||
* @param seq The sequence number of the ledger
|
||||
* @param maxAttempts The maximum number of attempts to publish the ledger; no limit if nullopt
|
||||
* @param attemptsDelay The delay between attempts
|
||||
*/
|
||||
virtual void
|
||||
publish(
|
||||
uint32_t seq,
|
||||
std::optional<uint32_t> maxAttempts,
|
||||
std::chrono::steady_clock::duration attemptsDelay = std::chrono::seconds{1}
|
||||
) = 0;
|
||||
};
|
||||
|
||||
} // namespace etlng
|
||||
@@ -174,4 +174,20 @@ struct LedgerData {
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Represents a task for the extractors
|
||||
*/
|
||||
struct Task {
|
||||
/**
|
||||
* @brief Represents the priority of the task
|
||||
*/
|
||||
enum class Priority : uint8_t {
|
||||
Lower = 0u,
|
||||
Higher = 1u,
|
||||
};
|
||||
|
||||
Priority priority;
|
||||
uint32_t seq;
|
||||
};
|
||||
|
||||
} // namespace etlng::model
|
||||
|
||||
67
src/etlng/MonitorInterface.hpp
Normal file
67
src/etlng/MonitorInterface.hpp
Normal file
@@ -0,0 +1,67 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2025, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <boost/signals2/connection.hpp>
|
||||
#include <boost/signals2/signal.hpp>
|
||||
#include <boost/signals2/variadic_signal.hpp>
|
||||
|
||||
#include <chrono>
|
||||
#include <cstdint>
|
||||
|
||||
namespace etlng {
|
||||
|
||||
/**
|
||||
* @brief An interface for the monitor service
|
||||
* An implementation of this service is responsible for periodically checking various datasources to detect newly
|
||||
* ingested ledgers.
|
||||
*/
|
||||
class MonitorInterface {
|
||||
public:
|
||||
static constexpr auto kDEFAULT_REPEAT_INTERVAL = std::chrono::seconds{1};
|
||||
using SignalType = boost::signals2::signal<void(uint32_t)>;
|
||||
|
||||
virtual ~MonitorInterface() = default;
|
||||
|
||||
/**
|
||||
* @brief Allows clients to get notified when a new ledger becomes available in Clio's database
|
||||
*
|
||||
* @param subscriber The slot to connect
|
||||
* @return A connection object that automatically disconnects the subscription once destroyed
|
||||
*/
|
||||
[[nodiscard]] virtual boost::signals2::scoped_connection
|
||||
subscribe(SignalType::slot_type const& subscriber) = 0;
|
||||
|
||||
/**
|
||||
* @brief Run the monitor service
|
||||
*
|
||||
* @param repeatInterval The interval between attempts to check the database for new ledgers
|
||||
*/
|
||||
virtual void
|
||||
run(std::chrono::steady_clock::duration repeatInterval = kDEFAULT_REPEAT_INTERVAL) = 0;
|
||||
|
||||
/**
|
||||
* @brief Stops the monitor service
|
||||
*/
|
||||
virtual void
|
||||
stop() = 0;
|
||||
};
|
||||
|
||||
} // namespace etlng
|
||||
42
src/etlng/SchedulerInterface.hpp
Normal file
42
src/etlng/SchedulerInterface.hpp
Normal file
@@ -0,0 +1,42 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2025, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "etlng/Models.hpp"
|
||||
|
||||
#include <optional>
|
||||
|
||||
namespace etlng {
|
||||
|
||||
/**
|
||||
* @brief The interface of a scheduler for the extraction proccess
|
||||
*/
|
||||
struct SchedulerInterface {
|
||||
virtual ~SchedulerInterface() = default;
|
||||
|
||||
/**
|
||||
* @brief Attempt to obtain the next task
|
||||
* @return A task if one exists; std::nullopt otherwise
|
||||
*/
|
||||
[[nodiscard]] virtual std::optional<model::Task>
|
||||
next() = 0;
|
||||
};
|
||||
|
||||
} // namespace etlng
|
||||
99
src/etlng/impl/Monitor.cpp
Normal file
99
src/etlng/impl/Monitor.cpp
Normal file
@@ -0,0 +1,99 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2025, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include "etlng/impl/Monitor.hpp"
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "etl/NetworkValidatedLedgersInterface.hpp"
|
||||
#include "util/Assert.hpp"
|
||||
#include "util/async/AnyExecutionContext.hpp"
|
||||
#include "util/async/AnyOperation.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
|
||||
#include <boost/signals2/connection.hpp>
|
||||
|
||||
#include <chrono>
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <utility>
|
||||
|
||||
namespace etlng::impl {
|
||||
Monitor::Monitor(
|
||||
util::async::AnyExecutionContext ctx,
|
||||
std::shared_ptr<BackendInterface> backend,
|
||||
std::shared_ptr<etl::NetworkValidatedLedgersInterface> validatedLedgers,
|
||||
uint32_t startSequence
|
||||
)
|
||||
: strand_(ctx.makeStrand())
|
||||
, backend_(std::move(backend))
|
||||
, validatedLedgers_(std::move(validatedLedgers))
|
||||
, nextSequence_(startSequence)
|
||||
{
|
||||
}
|
||||
|
||||
Monitor::~Monitor()
|
||||
{
|
||||
stop();
|
||||
}
|
||||
|
||||
void
|
||||
Monitor::run(std::chrono::steady_clock::duration repeatInterval)
|
||||
{
|
||||
ASSERT(not repeatedTask_.has_value(), "Monitor attempted to run more than once");
|
||||
LOG(log_.debug()) << "Starting monitor";
|
||||
|
||||
repeatedTask_ = strand_.executeRepeatedly(repeatInterval, std::bind_front(&Monitor::doWork, this));
|
||||
subscription_ = validatedLedgers_->subscribe(std::bind_front(&Monitor::onNextSequence, this));
|
||||
}
|
||||
|
||||
void
|
||||
Monitor::stop()
|
||||
{
|
||||
if (repeatedTask_.has_value())
|
||||
repeatedTask_->abort();
|
||||
|
||||
repeatedTask_ = std::nullopt;
|
||||
}
|
||||
|
||||
boost::signals2::scoped_connection
|
||||
Monitor::subscribe(SignalType::slot_type const& subscriber)
|
||||
{
|
||||
return notificationChannel_.connect(subscriber);
|
||||
}
|
||||
|
||||
void
|
||||
Monitor::onNextSequence(uint32_t seq)
|
||||
{
|
||||
LOG(log_.debug()) << "rippled published sequence " << seq;
|
||||
repeatedTask_->invoke(); // force-invoke immediately
|
||||
}
|
||||
|
||||
void
|
||||
Monitor::doWork()
|
||||
{
|
||||
if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); rng) {
|
||||
while (rng->maxSequence >= nextSequence_)
|
||||
notificationChannel_(nextSequence_++);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace etlng::impl
|
||||
80
src/etlng/impl/Monitor.hpp
Normal file
80
src/etlng/impl/Monitor.hpp
Normal file
@@ -0,0 +1,80 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2025, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "etl/NetworkValidatedLedgersInterface.hpp"
|
||||
#include "etlng/MonitorInterface.hpp"
|
||||
#include "util/async/AnyExecutionContext.hpp"
|
||||
#include "util/async/AnyOperation.hpp"
|
||||
#include "util/async/AnyStrand.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
|
||||
#include <boost/signals2/connection.hpp>
|
||||
#include <xrpl/protocol/TxFormats.h>
|
||||
|
||||
#include <chrono>
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
|
||||
namespace etlng::impl {
|
||||
|
||||
class Monitor : public MonitorInterface {
|
||||
util::async::AnyStrand strand_;
|
||||
std::shared_ptr<BackendInterface> backend_;
|
||||
std::shared_ptr<etl::NetworkValidatedLedgersInterface> validatedLedgers_;
|
||||
|
||||
uint32_t nextSequence_;
|
||||
std::optional<util::async::AnyOperation<void>> repeatedTask_;
|
||||
std::optional<boost::signals2::scoped_connection> subscription_; // network validated ledgers subscription
|
||||
|
||||
SignalType notificationChannel_;
|
||||
|
||||
util::Logger log_{"ETL"};
|
||||
|
||||
public:
|
||||
Monitor(
|
||||
util::async::AnyExecutionContext ctx,
|
||||
std::shared_ptr<BackendInterface> backend,
|
||||
std::shared_ptr<etl::NetworkValidatedLedgersInterface> validatedLedgers,
|
||||
uint32_t startSequence
|
||||
);
|
||||
~Monitor() override;
|
||||
|
||||
void
|
||||
run(std::chrono::steady_clock::duration repeatInterval) override;
|
||||
|
||||
void
|
||||
stop() override;
|
||||
|
||||
boost::signals2::scoped_connection
|
||||
subscribe(SignalType::slot_type const& subscriber) override;
|
||||
|
||||
private:
|
||||
void
|
||||
onNextSequence(uint32_t seq);
|
||||
|
||||
void
|
||||
doWork();
|
||||
};
|
||||
|
||||
} // namespace etlng::impl
|
||||
151
src/etlng/impl/Scheduling.hpp
Normal file
151
src/etlng/impl/Scheduling.hpp
Normal file
@@ -0,0 +1,151 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2025, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "etl/NetworkValidatedLedgersInterface.hpp"
|
||||
#include "etlng/Models.hpp"
|
||||
#include "etlng/SchedulerInterface.hpp"
|
||||
|
||||
#include <sys/types.h>
|
||||
|
||||
#include <atomic>
|
||||
#include <cstdint>
|
||||
#include <functional>
|
||||
#include <limits>
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <tuple>
|
||||
#include <type_traits>
|
||||
#include <utility>
|
||||
|
||||
namespace etlng::impl {
|
||||
|
||||
template <typename T>
|
||||
concept SomeScheduler = std::is_base_of_v<SchedulerInterface, std::decay_t<T>>;
|
||||
|
||||
/**
 * @brief Scheduler that hands out ledger sequences in increasing order (forward fill).
 *
 * Sequences start at startSeq and are produced while they remain below maxSeq (when set)
 * and while the network has validated a ledger at least as recent as the next sequence.
 * next() is safe to call from multiple extractor threads concurrently: the next sequence
 * is claimed via an atomic compare-exchange so each sequence is handed out exactly once.
 */
class ForwardScheduler : public SchedulerInterface {
    std::reference_wrapper<etl::NetworkValidatedLedgersInterface> ledgers_;  // source of "most recent validated" seq

    uint32_t startSeq_;                // first sequence this scheduler was created with
    std::optional<uint32_t> maxSeq_;   // exclusive upper bound; nullopt means unbounded
    std::atomic_uint32_t seq_;         // next sequence to hand out (shared across threads)

public:
    // Copy constructor is user-provided because std::atomic is not copyable;
    // the atomic's current value is snapshotted via load().
    ForwardScheduler(ForwardScheduler const& other)
        : ledgers_(other.ledgers_), startSeq_(other.startSeq_), maxSeq_(other.maxSeq_), seq_(other.seq_.load())
    {
    }

    /**
     * @brief Construct a forward scheduler.
     *
     * @param ledgers Source of network-validated ledger information
     * @param startSeq First sequence to schedule
     * @param maxSeq Optional exclusive upper bound; scheduling stops once reached
     */
    ForwardScheduler(
        std::reference_wrapper<etl::NetworkValidatedLedgersInterface> ledgers,
        uint32_t startSeq,
        std::optional<uint32_t> maxSeq = std::nullopt
    )
        : ledgers_(ledgers), startSeq_(startSeq), maxSeq_(maxSeq), seq_(startSeq)
    {
    }

    /**
     * @brief Claim the next sequence, if any is available.
     *
     * @return A Higher-priority task for the claimed sequence, or std::nullopt when
     *         either the upper bound is reached or the network has not yet validated
     *         a ledger at or past the next sequence.
     */
    [[nodiscard]] std::optional<model::Task>
    next() override
    {
        static constexpr auto kMAX = std::numeric_limits<uint32_t>::max();
        uint32_t currentSeq = seq_;

        // NOTE(review): the "most recent validated" check is performed once with the
        // initial snapshot of seq_; after a CAS failure currentSeq is refreshed but this
        // guard is not re-evaluated — presumably acceptable because getMostRecent() only
        // grows, but worth confirming.
        if (ledgers_.get().getMostRecent() >= currentSeq) {
            while (currentSeq < maxSeq_.value_or(kMAX)) {
                // compare_exchange_weak updates currentSeq on failure, so the loop
                // retries with the value another thread installed.
                if (seq_.compare_exchange_weak(currentSeq, currentSeq + 1u, std::memory_order_acq_rel)) {
                    return {{.priority = model::Task::Priority::Higher, .seq = currentSeq}};
                }
            }
        }

        return std::nullopt;
    }
};
|
||||
|
||||
/**
 * @brief Scheduler that hands out ledger sequences in decreasing order (backfill).
 *
 * Sequences start at startSeq and count down while they remain above minSeq.
 * next() is safe to call from multiple extractor threads concurrently: the next
 * sequence is claimed via an atomic compare-exchange so each sequence is handed
 * out exactly once.
 */
class BackfillScheduler : public SchedulerInterface {
    uint32_t startSeq_;     // first (highest) sequence this scheduler was created with
    uint32_t minSeq_ = 0u;  // exclusive lower bound; 0 means backfill all the way down

    std::atomic_uint32_t seq_;  // next sequence to hand out (shared across threads)

public:
    // Copy constructor is user-provided because std::atomic is not copyable;
    // the atomic's current value is snapshotted via load().
    BackfillScheduler(BackfillScheduler const& other)
        : startSeq_(other.startSeq_), minSeq_(other.minSeq_), seq_(other.seq_.load())
    {
    }

    /**
     * @brief Construct a backfill scheduler.
     *
     * @param startSeq First (highest) sequence to schedule
     * @param minSeq Optional exclusive lower bound; defaults to 0 (unbounded backfill)
     */
    BackfillScheduler(uint32_t startSeq, std::optional<uint32_t> minSeq = std::nullopt)
        : startSeq_(startSeq), minSeq_(minSeq.value_or(0)), seq_(startSeq)
    {
    }

    /**
     * @brief Claim the next (lower) sequence, if any remains above the lower bound.
     *
     * @return A Lower-priority task for the claimed sequence, or std::nullopt once
     *         the lower bound is reached.
     */
    [[nodiscard]] std::optional<model::Task>
    next() override
    {
        uint32_t currentSeq = seq_;
        while (currentSeq > minSeq_) {
            // compare_exchange_weak updates currentSeq on failure, so the loop
            // retries with the value another thread installed.
            if (seq_.compare_exchange_weak(currentSeq, currentSeq - 1u, std::memory_order_acq_rel)) {
                return {{.priority = model::Task::Priority::Lower, .seq = currentSeq}};
            }
        }

        return std::nullopt;
    }
};
|
||||
|
||||
template <SomeScheduler... Schedulers>
|
||||
class SchedulerChain : public SchedulerInterface {
|
||||
std::tuple<Schedulers...> schedulers_;
|
||||
|
||||
public:
|
||||
template <SomeScheduler... Ts>
|
||||
requires(std::is_same_v<Ts, Schedulers> and ...)
|
||||
SchedulerChain(Ts&&... schedulers) : schedulers_(std::forward<Ts>(schedulers)...)
|
||||
{
|
||||
}
|
||||
|
||||
[[nodiscard]] std::optional<model::Task>
|
||||
next() override
|
||||
{
|
||||
std::optional<model::Task> task;
|
||||
auto const expand = [&](auto& s) {
|
||||
if (task.has_value())
|
||||
return false;
|
||||
|
||||
task = s.next();
|
||||
return task.has_value();
|
||||
};
|
||||
|
||||
std::apply([&expand](auto&&... xs) { (... || expand(xs)); }, schedulers_);
|
||||
|
||||
return task;
|
||||
}
|
||||
};
|
||||
|
||||
static auto
|
||||
makeScheduler(SomeScheduler auto&&... schedulers)
|
||||
{
|
||||
return std::make_unique<SchedulerChain<std::decay_t<decltype(schedulers)>...>>(
|
||||
std::forward<decltype(schedulers)>(schedulers)...
|
||||
);
|
||||
}
|
||||
|
||||
} // namespace etlng::impl
|
||||
141
src/etlng/impl/TaskManager.cpp
Normal file
141
src/etlng/impl/TaskManager.cpp
Normal file
@@ -0,0 +1,141 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2025, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include "etlng/impl/TaskManager.hpp"
|
||||
|
||||
#include "etlng/ExtractorInterface.hpp"
|
||||
#include "etlng/LoaderInterface.hpp"
|
||||
#include "etlng/Models.hpp"
|
||||
#include "etlng/SchedulerInterface.hpp"
|
||||
#include "util/async/AnyExecutionContext.hpp"
|
||||
#include "util/async/AnyOperation.hpp"
|
||||
#include "util/async/AnyStrand.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
|
||||
#include <chrono>
|
||||
#include <cstddef>
|
||||
#include <functional>
|
||||
#include <ranges>
|
||||
#include <thread>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
namespace etlng::impl {
|
||||
|
||||
/**
 * @brief Construct a TaskManager.
 *
 * Takes ownership of the execution context; the scheduler, extractor and loader
 * are held by reference and must outlive this TaskManager.
 *
 * @param ctx Execution context used to spawn extractor/loader operations
 * @param scheduler Scheduler that decides which ledger sequence to fetch next
 * @param extractor Extractor used to fetch ledger data
 * @param loader Loader that persists extracted data
 */
TaskManager::TaskManager(
    util::async::AnyExecutionContext&& ctx,
    std::reference_wrapper<SchedulerInterface> scheduler,
    std::reference_wrapper<ExtractorInterface> extractor,
    std::reference_wrapper<LoaderInterface> loader
)
    : ctx_(std::move(ctx)), schedulers_(scheduler), extractor_(extractor), loader_(loader)
{
}
|
||||
|
||||
// Destructor aborts all in-flight extractor/loader operations and blocks
// (via stop() -> wait()) until they have finished.
TaskManager::~TaskManager()
{
    stop();
}
|
||||
|
||||
void
|
||||
TaskManager::run(Settings settings)
|
||||
{
|
||||
static constexpr auto kQUEUE_SIZE_LIMIT = 2048uz;
|
||||
|
||||
auto schedulingStrand = ctx_.makeStrand();
|
||||
PriorityQueue queue(ctx_.makeStrand(), kQUEUE_SIZE_LIMIT);
|
||||
|
||||
LOG(log_.debug()) << "Starting task manager...\n";
|
||||
|
||||
extractors_.reserve(settings.numExtractors);
|
||||
for ([[maybe_unused]] auto _ : std::views::iota(0uz, settings.numExtractors))
|
||||
extractors_.push_back(spawnExtractor(schedulingStrand, queue));
|
||||
|
||||
loaders_.reserve(settings.numLoaders);
|
||||
for ([[maybe_unused]] auto _ : std::views::iota(0uz, settings.numLoaders))
|
||||
loaders_.push_back(spawnLoader(queue));
|
||||
|
||||
wait();
|
||||
LOG(log_.debug()) << "All finished in task manager..\n";
|
||||
}
|
||||
|
||||
/**
 * @brief Spawn one extractor operation on the given strand.
 *
 * The extractor repeatedly asks the scheduler for the next sequence, extracts
 * that ledger's data and pushes it into the shared queue. It exits when a stop
 * is requested or when extraction fails (taken here to mean shutdown/handover).
 *
 * @param strand Strand shared by all extractors (serializes scheduler access)
 * @param queue Bounded queue the extracted batches are pushed into
 * @return Handle to the spawned asynchronous operation
 */
util::async::AnyOperation<void>
TaskManager::spawnExtractor(util::async::AnyStrand& strand, PriorityQueue& queue)
{
    // TODO: these values may be extracted to config later and/or need to be fine-tuned on a realistic system
    static constexpr auto kDELAY_BETWEEN_ATTEMPTS = std::chrono::milliseconds{100u};
    static constexpr auto kDELAY_BETWEEN_ENQUEUE_ATTEMPTS = std::chrono::milliseconds{1u};

    return strand.execute([this, &queue](auto stopRequested) {
        while (not stopRequested) {
            if (auto task = schedulers_.get().next(); task.has_value()) {
                if (auto maybeBatch = extractor_.get().extractLedgerWithDiff(task->seq); maybeBatch.has_value()) {
                    LOG(log_.debug()) << "Adding data after extracting diff";
                    // enqueue presumably returns false when the queue is full — retry
                    // with a short sleep until it fits or a stop is requested.
                    while (not queue.enqueue(*maybeBatch)) {
                        // TODO (https://github.com/XRPLF/clio/issues/1852)
                        std::this_thread::sleep_for(kDELAY_BETWEEN_ENQUEUE_ATTEMPTS);

                        if (stopRequested)
                            break;  // batch is dropped on stop; outer loop exits next check
                    }
                } else {
                    // Extraction failure is treated as a terminal condition for this extractor.
                    // TODO: how do we signal to the loaders that it's time to shutdown? some special task?
                    break; // TODO: handle server shutdown or other node took over ETL
                }
            } else {
                // Scheduler has nothing to hand out right now; back off before retrying.
                // TODO (https://github.com/XRPLF/clio/issues/1852)
                std::this_thread::sleep_for(kDELAY_BETWEEN_ATTEMPTS);
            }
        }
    });
}
|
||||
|
||||
/**
 * @brief Spawn one loader operation on the execution context.
 *
 * The loader drains the shared queue and hands each batch to the LoaderInterface
 * until a stop is requested. NOTE(review): when dequeue returns nullopt this loop
 * retries immediately without backoff — presumably dequeue blocks or yields
 * internally; verify to rule out a busy-wait.
 *
 * @param queue Queue to drain extracted ledger data from
 * @return Handle to the spawned asynchronous operation
 */
util::async::AnyOperation<void>
TaskManager::spawnLoader(PriorityQueue& queue)
{
    return ctx_.execute([this, &queue](auto stopRequested) {
        while (not stopRequested) {
            // TODO (https://github.com/XRPLF/clio/issues/66): does not tell the loader whether it's out of order or not
            if (auto data = queue.dequeue(); data.has_value())
                loader_.get().load(*data);
        }
    });
}
|
||||
|
||||
void
|
||||
TaskManager::wait()
|
||||
{
|
||||
for (auto& extractor : extractors_)
|
||||
extractor.wait();
|
||||
for (auto& loader : loaders_)
|
||||
loader.wait();
|
||||
}
|
||||
|
||||
void
|
||||
TaskManager::stop()
|
||||
{
|
||||
for (auto& extractor : extractors_)
|
||||
extractor.abort();
|
||||
for (auto& loader : loaders_)
|
||||
loader.abort();
|
||||
|
||||
wait();
|
||||
}
|
||||
|
||||
} // namespace etlng::impl
|
||||
94
src/etlng/impl/TaskManager.hpp
Normal file
94
src/etlng/impl/TaskManager.hpp
Normal file
@@ -0,0 +1,94 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2025, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "etlng/ExtractorInterface.hpp"
|
||||
#include "etlng/LoaderInterface.hpp"
|
||||
#include "etlng/Models.hpp"
|
||||
#include "etlng/SchedulerInterface.hpp"
|
||||
#include "util/StrandedPriorityQueue.hpp"
|
||||
#include "util/async/AnyExecutionContext.hpp"
|
||||
#include "util/async/AnyOperation.hpp"
|
||||
#include "util/async/AnyStrand.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
|
||||
#include <xrpl/protocol/TxFormats.h>
|
||||
|
||||
#include <cstddef>
|
||||
#include <functional>
|
||||
#include <vector>
|
||||
|
||||
namespace etlng::impl {
|
||||
|
||||
/**
 * @brief Drives the ETL pipeline: schedules sequences, extracts ledger data and
 *        feeds it to the loader through a bounded priority queue.
 *
 * run() spawns the configured number of extractor and loader operations on the
 * owned execution context and blocks until they finish; stop() aborts them all.
 */
class TaskManager {
    util::async::AnyExecutionContext ctx_;                 // owns the threads the operations run on
    std::reference_wrapper<SchedulerInterface> schedulers_;  // decides which sequence to fetch next
    std::reference_wrapper<ExtractorInterface> extractor_;   // fetches ledger data
    std::reference_wrapper<LoaderInterface> loader_;         // persists extracted data

    std::vector<util::async::AnyOperation<void>> extractors_;  // handles of spawned extractor ops
    std::vector<util::async::AnyOperation<void>> loaders_;     // handles of spawned loader ops

    util::Logger log_{"ETL"};

    // Orders the queue so that the LOWEST sequence is dequeued first
    // (std::priority_queue pops the "greatest" element under the comparator).
    struct ReverseOrderComparator {
        [[nodiscard]] bool
        operator()(model::LedgerData const& lhs, model::LedgerData const& rhs) const noexcept
        {
            return lhs.seq > rhs.seq;
        }
    };

public:
    /**
     * @brief Tunable parallelism settings for run().
     */
    struct Settings {
        size_t numExtractors; /**< number of extraction tasks */
        size_t numLoaders;    /**< number of loading tasks */
    };

    // reverse order loading is needed (i.e. start with oldest seq in forward fill buffer)
    using PriorityQueue = util::StrandedPriorityQueue<model::LedgerData, ReverseOrderComparator>;

    /**
     * @brief Construct a TaskManager.
     *
     * @param ctx Execution context used to spawn operations (ownership taken)
     * @param scheduler Scheduler deciding what to extract next; must outlive this object
     * @param extractor Extractor used to fetch ledger data; must outlive this object
     * @param loader Loader that persists the data; must outlive this object
     */
    TaskManager(
        util::async::AnyExecutionContext&& ctx,
        std::reference_wrapper<SchedulerInterface> scheduler,
        std::reference_wrapper<ExtractorInterface> extractor,
        std::reference_wrapper<LoaderInterface> loader
    );

    // Aborts all operations and blocks until they finish (calls stop()).
    ~TaskManager();

    /**
     * @brief Spawn extractors/loaders per settings and block until all finish.
     */
    void
    run(Settings settings);

    /**
     * @brief Abort all spawned operations and block until they have finished.
     */
    void
    stop();

private:
    // Block until every spawned operation has completed.
    void
    wait();

    // Spawn one extractor on the shared scheduling strand, feeding the queue.
    [[nodiscard]] util::async::AnyOperation<void>
    spawnExtractor(util::async::AnyStrand& strand, PriorityQueue& queue);

    // Spawn one loader draining the queue into the LoaderInterface.
    [[nodiscard]] util::async::AnyOperation<void>
    spawnLoader(PriorityQueue& queue);
};
|
||||
|
||||
} // namespace etlng::impl
|
||||
@@ -50,7 +50,7 @@ void
|
||||
SubscriptionManager::pubBookChanges(
|
||||
ripple::LedgerHeader const& lgrInfo,
|
||||
std::vector<data::TransactionAndMetadata> const& transactions
|
||||
) const
|
||||
)
|
||||
{
|
||||
bookChangesFeed_.pub(lgrInfo, transactions);
|
||||
}
|
||||
@@ -111,7 +111,7 @@ SubscriptionManager::pubLedger(
|
||||
ripple::Fees const& fees,
|
||||
std::string const& ledgerRange,
|
||||
std::uint32_t const txnCount
|
||||
) const
|
||||
)
|
||||
{
|
||||
ledgerFeed_.pub(lgrInfo, fees, ledgerRange, txnCount);
|
||||
}
|
||||
@@ -129,7 +129,7 @@ SubscriptionManager::unsubManifest(SubscriberSharedPtr const& subscriber)
|
||||
}
|
||||
|
||||
void
|
||||
SubscriptionManager::forwardManifest(boost::json::object const& manifestJson) const
|
||||
SubscriptionManager::forwardManifest(boost::json::object const& manifestJson)
|
||||
{
|
||||
manifestFeed_.pub(manifestJson);
|
||||
}
|
||||
@@ -147,7 +147,7 @@ SubscriptionManager::unsubValidation(SubscriberSharedPtr const& subscriber)
|
||||
}
|
||||
|
||||
void
|
||||
SubscriptionManager::forwardValidation(boost::json::object const& validationJson) const
|
||||
SubscriptionManager::forwardValidation(boost::json::object const& validationJson)
|
||||
{
|
||||
validationsFeed_.pub(validationJson);
|
||||
}
|
||||
@@ -191,7 +191,7 @@ SubscriptionManager::unsubBook(ripple::Book const& book, SubscriberSharedPtr con
|
||||
void
|
||||
SubscriptionManager::pubTransaction(data::TransactionAndMetadata const& txMeta, ripple::LedgerHeader const& lgrInfo)
|
||||
{
|
||||
transactionFeed_.pub(txMeta, lgrInfo, backend_);
|
||||
transactionFeed_.pub(txMeta, lgrInfo, backend_, amendmentCenter_);
|
||||
}
|
||||
|
||||
boost::json::object
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/AmendmentCenterInterface.hpp"
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/Types.hpp"
|
||||
#include "feed/SubscriptionManagerInterface.hpp"
|
||||
@@ -60,6 +61,7 @@ namespace feed {
|
||||
*/
|
||||
class SubscriptionManager : public SubscriptionManagerInterface {
|
||||
std::shared_ptr<data::BackendInterface const> backend_;
|
||||
std::shared_ptr<data::AmendmentCenterInterface const> amendmentCenter_;
|
||||
util::async::AnyExecutionContext ctx_;
|
||||
impl::ForwardFeed manifestFeed_;
|
||||
impl::ForwardFeed validationsFeed_;
|
||||
@@ -74,12 +76,14 @@ public:
|
||||
*
|
||||
* @param config The configuration to use
|
||||
* @param backend The backend to use
|
||||
* @param amendmentCenter The amendmentCenter to use
|
||||
* @return A shared pointer to a new instance of SubscriptionManager
|
||||
*/
|
||||
static std::shared_ptr<SubscriptionManager>
|
||||
makeSubscriptionManager(
|
||||
util::config::ClioConfigDefinition const& config,
|
||||
std::shared_ptr<data::BackendInterface const> const& backend
|
||||
std::shared_ptr<data::BackendInterface const> const& backend,
|
||||
std::shared_ptr<data::AmendmentCenterInterface const> const& amendmentCenter
|
||||
)
|
||||
{
|
||||
auto const workersNum = config.get<uint64_t>("subscription_workers");
|
||||
@@ -87,7 +91,9 @@ public:
|
||||
util::Logger const logger{"Subscriptions"};
|
||||
LOG(logger.info()) << "Starting subscription manager with " << workersNum << " workers";
|
||||
|
||||
return std::make_shared<feed::SubscriptionManager>(util::async::PoolExecutionContext(workersNum), backend);
|
||||
return std::make_shared<feed::SubscriptionManager>(
|
||||
util::async::PoolExecutionContext(workersNum), backend, amendmentCenter
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -95,12 +101,15 @@ public:
|
||||
*
|
||||
* @param executor The executor to use to publish the feeds
|
||||
* @param backend The backend to use
|
||||
* @param amendmentCenter The amendmentCenter to use
|
||||
*/
|
||||
SubscriptionManager(
|
||||
util::async::AnyExecutionContext&& executor,
|
||||
std::shared_ptr<data::BackendInterface const> const& backend
|
||||
std::shared_ptr<data::BackendInterface const> const& backend,
|
||||
std::shared_ptr<data::AmendmentCenterInterface const> const& amendmentCenter
|
||||
)
|
||||
: backend_(backend)
|
||||
, amendmentCenter_(amendmentCenter)
|
||||
, ctx_(std::move(executor))
|
||||
, manifestFeed_(ctx_, "manifest")
|
||||
, validationsFeed_(ctx_, "validations")
|
||||
@@ -115,6 +124,15 @@ public:
|
||||
* @brief Destructor of the SubscriptionManager object. It will block until all running jobs finished.
|
||||
*/
|
||||
~SubscriptionManager() override
|
||||
{
|
||||
stop();
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Stop the SubscriptionManager and wait for all jobs to finish.
|
||||
*/
|
||||
void
|
||||
stop() override
|
||||
{
|
||||
ctx_.stop();
|
||||
ctx_.join();
|
||||
@@ -141,7 +159,7 @@ public:
|
||||
*/
|
||||
void
|
||||
pubBookChanges(ripple::LedgerHeader const& lgrInfo, std::vector<data::TransactionAndMetadata> const& transactions)
|
||||
const final;
|
||||
final;
|
||||
|
||||
/**
|
||||
* @brief Subscribe to the proposed transactions feed.
|
||||
@@ -209,7 +227,7 @@ public:
|
||||
ripple::Fees const& fees,
|
||||
std::string const& ledgerRange,
|
||||
std::uint32_t txnCount
|
||||
) const final;
|
||||
) final;
|
||||
|
||||
/**
|
||||
* @brief Subscribe to the manifest feed.
|
||||
@@ -230,7 +248,7 @@ public:
|
||||
* @param manifestJson The manifest json to forward.
|
||||
*/
|
||||
void
|
||||
forwardManifest(boost::json::object const& manifestJson) const final;
|
||||
forwardManifest(boost::json::object const& manifestJson) final;
|
||||
|
||||
/**
|
||||
* @brief Subscribe to the validation feed.
|
||||
@@ -251,7 +269,7 @@ public:
|
||||
* @param validationJson The validation feed json to forward.
|
||||
*/
|
||||
void
|
||||
forwardValidation(boost::json::object const& validationJson) const final;
|
||||
forwardValidation(boost::json::object const& validationJson) final;
|
||||
|
||||
/**
|
||||
* @brief Subscribe to the transactions feed.
|
||||
|
||||
@@ -45,6 +45,12 @@ class SubscriptionManagerInterface {
|
||||
public:
|
||||
virtual ~SubscriptionManagerInterface() = default;
|
||||
|
||||
/**
|
||||
* @brief Stop the SubscriptionManager and wait for all jobs to finish.
|
||||
*/
|
||||
virtual void
|
||||
stop() = 0;
|
||||
|
||||
/**
|
||||
* @brief Subscribe to the book changes feed.
|
||||
* @param subscriber
|
||||
@@ -65,8 +71,10 @@ public:
|
||||
* @param transactions The transactions in the current ledger.
|
||||
*/
|
||||
virtual void
|
||||
pubBookChanges(ripple::LedgerHeader const& lgrInfo, std::vector<data::TransactionAndMetadata> const& transactions)
|
||||
const = 0;
|
||||
pubBookChanges(
|
||||
ripple::LedgerHeader const& lgrInfo,
|
||||
std::vector<data::TransactionAndMetadata> const& transactions
|
||||
) = 0;
|
||||
|
||||
/**
|
||||
* @brief Subscribe to the proposed transactions feed.
|
||||
@@ -135,7 +143,7 @@ public:
|
||||
ripple::Fees const& fees,
|
||||
std::string const& ledgerRange,
|
||||
std::uint32_t txnCount
|
||||
) const = 0;
|
||||
) = 0;
|
||||
|
||||
/**
|
||||
* @brief Subscribe to the manifest feed.
|
||||
@@ -156,7 +164,7 @@ public:
|
||||
* @param manifestJson The manifest json to forward.
|
||||
*/
|
||||
virtual void
|
||||
forwardManifest(boost::json::object const& manifestJson) const = 0;
|
||||
forwardManifest(boost::json::object const& manifestJson) = 0;
|
||||
|
||||
/**
|
||||
* @brief Subscribe to the validation feed.
|
||||
@@ -177,7 +185,7 @@ public:
|
||||
* @param validationJson The validation feed json to forward.
|
||||
*/
|
||||
virtual void
|
||||
forwardValidation(boost::json::object const& validationJson) const = 0;
|
||||
forwardValidation(boost::json::object const& validationJson) = 0;
|
||||
|
||||
/**
|
||||
* @brief Subscribe to the transactions feed.
|
||||
|
||||
@@ -48,7 +48,7 @@ struct BookChangesFeed : public SingleFeedBase {
|
||||
* @param transactions The transactions that were included in the ledger.
|
||||
*/
|
||||
void
|
||||
pub(ripple::LedgerHeader const& lgrInfo, std::vector<data::TransactionAndMetadata> const& transactions) const
|
||||
pub(ripple::LedgerHeader const& lgrInfo, std::vector<data::TransactionAndMetadata> const& transactions)
|
||||
{
|
||||
SingleFeedBase::pub(boost::json::serialize(rpc::computeBookChanges(lgrInfo, transactions)));
|
||||
}
|
||||
|
||||
@@ -36,7 +36,7 @@ struct ForwardFeed : public SingleFeedBase {
|
||||
* @brief Publishes the json object.
|
||||
*/
|
||||
void
|
||||
pub(boost::json::object const& json) const
|
||||
pub(boost::json::object const& json)
|
||||
{
|
||||
SingleFeedBase::pub(boost::json::serialize(json));
|
||||
}
|
||||
|
||||
@@ -94,7 +94,7 @@ LedgerFeed::pub(
|
||||
ripple::Fees const& fees,
|
||||
std::string const& ledgerRange,
|
||||
std::uint32_t const txnCount
|
||||
) const
|
||||
)
|
||||
{
|
||||
SingleFeedBase::pub(boost::json::serialize(makeLedgerPubMessage(lgrInfo, fees, ledgerRange, txnCount)));
|
||||
}
|
||||
|
||||
@@ -76,7 +76,7 @@ public:
|
||||
pub(ripple::LedgerHeader const& lgrInfo,
|
||||
ripple::Fees const& fees,
|
||||
std::string const& ledgerRange,
|
||||
std::uint32_t txnCount) const;
|
||||
std::uint32_t txnCount);
|
||||
|
||||
private:
|
||||
static boost::json::object
|
||||
|
||||
@@ -62,7 +62,7 @@ SingleFeedBase::unsub(SubscriberSharedPtr const& subscriber)
|
||||
}
|
||||
|
||||
void
|
||||
SingleFeedBase::pub(std::string msg) const
|
||||
SingleFeedBase::pub(std::string msg)
|
||||
{
|
||||
[[maybe_unused]] auto task = strand_.execute([this, msg = std::move(msg)]() {
|
||||
auto const msgPtr = std::make_shared<std::string>(msg);
|
||||
|
||||
@@ -73,7 +73,7 @@ public:
|
||||
* @param msg The message.
|
||||
*/
|
||||
void
|
||||
pub(std::string msg) const;
|
||||
pub(std::string msg);
|
||||
|
||||
/**
|
||||
* @brief Get the count of subscribers.
|
||||
|
||||
@@ -19,6 +19,8 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "util/Mutex.hpp"
|
||||
|
||||
#include <boost/signals2.hpp>
|
||||
#include <boost/signals2/connection.hpp>
|
||||
#include <boost/signals2/variadic_signal.hpp>
|
||||
@@ -45,8 +47,8 @@ class TrackableSignal {
|
||||
|
||||
// map of connection and signal connection, key is the pointer of the connection object
|
||||
// allow disconnect to be called in the destructor of the connection
|
||||
std::unordered_map<ConnectionPtr, boost::signals2::connection> connections_;
|
||||
mutable std::mutex mutex_;
|
||||
using ConnectionsMap = std::unordered_map<ConnectionPtr, boost::signals2::connection>;
|
||||
util::Mutex<ConnectionsMap> connections_;
|
||||
|
||||
using SignalType = boost::signals2::signal<void(Args...)>;
|
||||
SignalType signal_;
|
||||
@@ -64,8 +66,8 @@ public:
|
||||
bool
|
||||
connectTrackableSlot(ConnectionSharedPtr const& trackable, std::function<void(Args...)> slot)
|
||||
{
|
||||
std::scoped_lock const lk(mutex_);
|
||||
if (connections_.contains(trackable.get())) {
|
||||
auto connections = connections_.template lock<std::scoped_lock>();
|
||||
if (connections->contains(trackable.get())) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -73,7 +75,7 @@ public:
|
||||
// the trackable's destructor. However, the trackable can not be destroied when the slot is being called
|
||||
// either. track_foreign will hold a weak_ptr to the connection, which makes sure the connection is valid when
|
||||
// the slot is called.
|
||||
connections_.emplace(
|
||||
connections->emplace(
|
||||
trackable.get(), signal_.connect(typename SignalType::slot_type(slot).track_foreign(trackable))
|
||||
);
|
||||
return true;
|
||||
@@ -89,10 +91,9 @@ public:
|
||||
bool
|
||||
disconnect(ConnectionPtr trackablePtr)
|
||||
{
|
||||
std::scoped_lock const lk(mutex_);
|
||||
if (connections_.contains(trackablePtr)) {
|
||||
connections_[trackablePtr].disconnect();
|
||||
connections_.erase(trackablePtr);
|
||||
if (auto connections = connections_.template lock<std::scoped_lock>(); connections->contains(trackablePtr)) {
|
||||
connections->operator[](trackablePtr).disconnect();
|
||||
connections->erase(trackablePtr);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
@@ -115,8 +116,7 @@ public:
|
||||
std::size_t
|
||||
count() const
|
||||
{
|
||||
std::scoped_lock const lk(mutex_);
|
||||
return connections_.size();
|
||||
return connections_.template lock<std::scoped_lock>()->size();
|
||||
}
|
||||
};
|
||||
} // namespace feed::impl
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
#pragma once
|
||||
|
||||
#include "feed/impl/TrackableSignal.hpp"
|
||||
#include "util/Mutex.hpp"
|
||||
|
||||
#include <boost/signals2.hpp>
|
||||
|
||||
@@ -49,8 +50,8 @@ class TrackableSignalMap {
|
||||
using ConnectionPtr = Session*;
|
||||
using ConnectionSharedPtr = std::shared_ptr<Session>;
|
||||
|
||||
mutable std::mutex mutex_;
|
||||
std::unordered_map<Key, TrackableSignal<Session, Args...>> signalsMap_;
|
||||
using SignalsMap = std::unordered_map<Key, TrackableSignal<Session, Args...>>;
|
||||
util::Mutex<SignalsMap> signalsMap_;
|
||||
|
||||
public:
|
||||
/**
|
||||
@@ -66,8 +67,8 @@ public:
|
||||
bool
|
||||
connectTrackableSlot(ConnectionSharedPtr const& trackable, Key const& key, std::function<void(Args...)> slot)
|
||||
{
|
||||
std::scoped_lock const lk(mutex_);
|
||||
return signalsMap_[key].connectTrackableSlot(trackable, slot);
|
||||
auto map = signalsMap_.template lock<std::scoped_lock>();
|
||||
return map->operator[](key).connectTrackableSlot(trackable, slot);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -80,14 +81,14 @@ public:
|
||||
bool
|
||||
disconnect(ConnectionPtr trackablePtr, Key const& key)
|
||||
{
|
||||
std::scoped_lock const lk(mutex_);
|
||||
if (!signalsMap_.contains(key))
|
||||
auto map = signalsMap_.template lock<std::scoped_lock>();
|
||||
if (!map->contains(key))
|
||||
return false;
|
||||
|
||||
auto const disconnected = signalsMap_[key].disconnect(trackablePtr);
|
||||
auto const disconnected = map->operator[](key).disconnect(trackablePtr);
|
||||
// clean the map if there is no connection left.
|
||||
if (disconnected && signalsMap_[key].count() == 0)
|
||||
signalsMap_.erase(key);
|
||||
if (disconnected && map->operator[](key).count() == 0)
|
||||
map->erase(key);
|
||||
|
||||
return disconnected;
|
||||
}
|
||||
@@ -101,9 +102,9 @@ public:
|
||||
void
|
||||
emit(Key const& key, Args const&... args)
|
||||
{
|
||||
std::scoped_lock const lk(mutex_);
|
||||
if (signalsMap_.contains(key))
|
||||
signalsMap_[key].emit(args...);
|
||||
auto map = signalsMap_.template lock<std::scoped_lock>();
|
||||
if (map->contains(key))
|
||||
map->operator[](key).emit(args...);
|
||||
}
|
||||
};
|
||||
} // namespace feed::impl
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
|
||||
#include "feed/impl/TransactionFeed.hpp"
|
||||
|
||||
#include "data/AmendmentCenterInterface.hpp"
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/Types.hpp"
|
||||
#include "feed/Types.hpp"
|
||||
@@ -174,7 +175,8 @@ void
|
||||
TransactionFeed::pub(
|
||||
data::TransactionAndMetadata const& txMeta,
|
||||
ripple::LedgerHeader const& lgrInfo,
|
||||
std::shared_ptr<data::BackendInterface const> const& backend
|
||||
std::shared_ptr<data::BackendInterface const> const& backend,
|
||||
std::shared_ptr<data::AmendmentCenterInterface const> const& amendmentCenter
|
||||
)
|
||||
{
|
||||
auto [tx, meta] = rpc::deserializeTxPlusMeta(txMeta, lgrInfo.seq);
|
||||
@@ -187,7 +189,7 @@ TransactionFeed::pub(
|
||||
if (account != amount.issue().account) {
|
||||
auto fetchFundsSynchronous = [&]() {
|
||||
data::synchronous([&](boost::asio::yield_context yield) {
|
||||
ownerFunds = rpc::accountFunds(*backend, lgrInfo.seq, amount, account, yield);
|
||||
ownerFunds = rpc::accountFunds(*backend, *amendmentCenter, lgrInfo.seq, amount, account, yield);
|
||||
});
|
||||
};
|
||||
data::retryOnTimeout(fetchFundsSynchronous);
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/AmendmentCenterInterface.hpp"
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/Types.hpp"
|
||||
#include "feed/Types.hpp"
|
||||
@@ -94,6 +95,11 @@ public:
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Move constructor is deleted because TransactionSlot takes TransactionFeed by reference
|
||||
*/
|
||||
TransactionFeed(TransactionFeed&&) = delete;
|
||||
|
||||
/**
|
||||
* @brief Subscribe to the transaction feed.
|
||||
* @param subscriber
|
||||
@@ -180,7 +186,8 @@ public:
|
||||
void
|
||||
pub(data::TransactionAndMetadata const& txMeta,
|
||||
ripple::LedgerHeader const& lgrInfo,
|
||||
std::shared_ptr<data::BackendInterface const> const& backend);
|
||||
std::shared_ptr<data::BackendInterface const> const& backend,
|
||||
std::shared_ptr<data::AmendmentCenterInterface const> const& amendmentCenter);
|
||||
|
||||
/**
|
||||
* @brief Get the number of subscribers of the transaction feed.
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022-2024, the clio developers.
|
||||
Copyright (c) 2024, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
@@ -41,27 +41,32 @@ try {
|
||||
return action.apply(
|
||||
[](app::CliArgs::Action::Exit const& exit) { return exit.exitCode; },
|
||||
[](app::CliArgs::Action::VerifyConfig const& verify) {
|
||||
if (app::verifyConfig(verify.configPath)) {
|
||||
std::cout << "Config is correct" << "\n";
|
||||
if (app::parseConfig(verify.configPath)) {
|
||||
std::cout << "Config " << verify.configPath << " is correct"
|
||||
<< "\n";
|
||||
return EXIT_SUCCESS;
|
||||
}
|
||||
return EXIT_FAILURE;
|
||||
},
|
||||
[](app::CliArgs::Action::Run const& run) {
|
||||
auto const res = app::verifyConfig(run.configPath);
|
||||
if (res != EXIT_SUCCESS)
|
||||
if (not app::parseConfig(run.configPath))
|
||||
return EXIT_FAILURE;
|
||||
|
||||
util::LogService::init(gClioConfig);
|
||||
if (auto const initSuccess = util::LogService::init(gClioConfig); not initSuccess) {
|
||||
std::cerr << initSuccess.error() << std::endl;
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
app::ClioApplication clio{gClioConfig};
|
||||
return clio.run(run.useNgWebServer);
|
||||
},
|
||||
[](app::CliArgs::Action::Migrate const& migrate) {
|
||||
auto const res = app::verifyConfig(migrate.configPath);
|
||||
if (res != EXIT_SUCCESS)
|
||||
if (not app::parseConfig(migrate.configPath))
|
||||
return EXIT_FAILURE;
|
||||
|
||||
util::LogService::init(gClioConfig);
|
||||
if (auto const initSuccess = util::LogService::init(gClioConfig); not initSuccess) {
|
||||
std::cerr << initSuccess.error() << std::endl;
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
app::MigratorApplication migrator{gClioConfig, migrate.subCmd};
|
||||
return migrator.run();
|
||||
}
|
||||
|
||||
@@ -5,4 +5,4 @@ target_sources(
|
||||
cassandra/impl/ObjectsAdapter.cpp cassandra/impl/TransactionsAdapter.cpp
|
||||
)
|
||||
|
||||
target_link_libraries(clio_migration PRIVATE clio_util clio_etl)
|
||||
target_link_libraries(clio_migration PRIVATE clio_util clio_data)
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022-2024, the clio developers.
|
||||
Copyright (c) 2024, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
@@ -41,7 +41,7 @@ MigratorApplication::MigratorApplication(util::config::ClioConfigDefinition cons
|
||||
{
|
||||
PrometheusService::init(config);
|
||||
|
||||
auto expectedMigrationManager = migration::impl::makeMigrationManager(config);
|
||||
auto expectedMigrationManager = migration::impl::makeMigrationManager(config, cache_);
|
||||
|
||||
if (not expectedMigrationManager) {
|
||||
throw std::runtime_error("Failed to create migration manager: " + expectedMigrationManager.error());
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2022-2024, the clio developers.
|
||||
Copyright (c) 2024, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
@@ -19,6 +19,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/LedgerCache.hpp"
|
||||
#include "migration/MigrationManagerInterface.hpp"
|
||||
#include "util/newconfig/ConfigDefinition.hpp"
|
||||
|
||||
@@ -76,6 +77,7 @@ class MigratorApplication {
|
||||
std::string option_;
|
||||
std::shared_ptr<migration::MigrationManagerInterface> migrationManager_;
|
||||
MigrateSubCmd cmd_;
|
||||
data::LedgerCache cache_;
|
||||
|
||||
public:
|
||||
/**
|
||||
|
||||
65
src/migration/MigrationInspectorFactory.hpp
Normal file
65
src/migration/MigrationInspectorFactory.hpp
Normal file
@@ -0,0 +1,65 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2025, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "migration/MigrationInspectorInterface.hpp"
|
||||
#include "migration/MigratiorStatus.hpp"
|
||||
#include "migration/cassandra/CassandraMigrationManager.hpp"
|
||||
#include "util/Assert.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
#include "util/newconfig/ConfigDefinition.hpp"
|
||||
|
||||
#include <boost/algorithm/string.hpp>
|
||||
#include <boost/algorithm/string/predicate.hpp>
|
||||
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
|
||||
namespace migration {
|
||||
|
||||
/**
|
||||
* @brief A factory function that creates migration inspector instance and initializes the migration table if needed.
|
||||
*
|
||||
* @param config The config.
|
||||
* @param backend The backend instance. It should be initialized before calling this function.
|
||||
* @return A shared_ptr<MigrationInspectorInterface> instance
|
||||
*/
|
||||
inline std::shared_ptr<MigrationInspectorInterface>
|
||||
makeMigrationInspector(
|
||||
util::config::ClioConfigDefinition const& config,
|
||||
std::shared_ptr<BackendInterface> const& backend
|
||||
)
|
||||
{
|
||||
ASSERT(backend != nullptr, "Backend is not initialized");
|
||||
|
||||
auto inspector = std::make_shared<migration::cassandra::CassandraMigrationInspector>(backend);
|
||||
|
||||
// Database is empty, we need to initialize the migration table if it is a writeable backend
|
||||
if (not config.get<bool>("read_only") and not backend->hardFetchLedgerRangeNoThrow()) {
|
||||
migration::MigratorStatus const migrated(migration::MigratorStatus::Migrated);
|
||||
for (auto const& name : inspector->allMigratorsNames()) {
|
||||
backend->writeMigratorStatus(name, migrated.toString());
|
||||
}
|
||||
}
|
||||
return inspector;
|
||||
}
|
||||
|
||||
} // namespace migration
|
||||
79
src/migration/MigrationInspectorInterface.hpp
Normal file
79
src/migration/MigrationInspectorInterface.hpp
Normal file
@@ -0,0 +1,79 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2025, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "migration/MigratiorStatus.hpp"
|
||||
|
||||
#include <string>
|
||||
#include <tuple>
|
||||
#include <vector>
|
||||
|
||||
namespace migration {
|
||||
|
||||
/**
|
||||
* @brief The interface for the migration inspector.The Clio server application will use this interface to inspect
|
||||
* the migration status.
|
||||
*/
|
||||
struct MigrationInspectorInterface {
|
||||
virtual ~MigrationInspectorInterface() = default;
|
||||
|
||||
/**
|
||||
* @brief Get the status of all the migrators
|
||||
* @return A vector of tuple, the first element is the migrator's name, the second element is the status of the
|
||||
*/
|
||||
virtual std::vector<std::tuple<std::string, MigratorStatus>>
|
||||
allMigratorsStatusPairs() const = 0;
|
||||
|
||||
/**
|
||||
* @brief Get all registered migrators' names
|
||||
*
|
||||
* @return A vector of migrators' names
|
||||
*/
|
||||
virtual std::vector<std::string>
|
||||
allMigratorsNames() const = 0;
|
||||
|
||||
/**
|
||||
* @brief Get the status of a migrator by its name
|
||||
*
|
||||
* @param name The migrator's name
|
||||
* @return The status of the migrator
|
||||
*/
|
||||
virtual MigratorStatus
|
||||
getMigratorStatusByName(std::string const& name) const = 0;
|
||||
|
||||
/**
|
||||
* @brief Get the description of a migrator by its name
|
||||
*
|
||||
* @param name The migrator's name
|
||||
* @return The description of the migrator
|
||||
*/
|
||||
virtual std::string
|
||||
getMigratorDescriptionByName(std::string const& name) const = 0;
|
||||
|
||||
/**
|
||||
* @brief Return if Clio server is blocked
|
||||
*
|
||||
* @return True if Clio server is blocked by migration, false otherwise
|
||||
*/
|
||||
virtual bool
|
||||
isBlockingClio() const = 0;
|
||||
};
|
||||
|
||||
} // namespace migration
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user