mirror of
https://github.com/XRPLF/clio.git
synced 2025-11-19 03:05:51 +00:00
Compare commits
41 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e61ee30180 | ||
|
|
d3df6d10e4 | ||
|
|
60df3a1914 | ||
|
|
f454076fb6 | ||
|
|
66b3f40268 | ||
|
|
b31b7633c9 | ||
|
|
a36aa3618f | ||
|
|
7943f47939 | ||
|
|
67e451ec23 | ||
|
|
92789d5a91 | ||
|
|
73477fb9d4 | ||
|
|
8ac1ff7699 | ||
|
|
26842374de | ||
|
|
a46d700390 | ||
|
|
a34d565ea4 | ||
|
|
c57fe1e6e4 | ||
|
|
8a08c5e6ce | ||
|
|
5d2694d36c | ||
|
|
98ff72be66 | ||
|
|
915a8beb40 | ||
|
|
f7db030ad7 | ||
|
|
86e2cd1cc4 | ||
|
|
f0613c945f | ||
|
|
d11e7bc60e | ||
|
|
b909b8879d | ||
|
|
918a92eeee | ||
|
|
c9e8330e0a | ||
|
|
f577139f70 | ||
|
|
491cd58f93 | ||
|
|
25296f8ffa | ||
|
|
4b178805de | ||
|
|
fcebd715ba | ||
|
|
531e1dad6d | ||
|
|
3c008b6bb4 | ||
|
|
624f7ff6d5 | ||
|
|
e503dffc9a | ||
|
|
cd1aa8fb70 | ||
|
|
b5fe22da18 | ||
|
|
cd6289b79a | ||
|
|
f5e6c9576e | ||
|
|
427ba47716 |
6
.github/actions/build_clio/action.yml
vendored
6
.github/actions/build_clio/action.yml
vendored
@@ -4,12 +4,18 @@ inputs:
|
||||
target:
|
||||
description: Build target name
|
||||
default: all
|
||||
substract_threads:
|
||||
description: An option for the action get_number_of_threads. See get_number_of_threads
|
||||
required: true
|
||||
default: '0'
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- name: Get number of threads
|
||||
uses: ./.github/actions/get_number_of_threads
|
||||
id: number_of_threads
|
||||
with:
|
||||
substract_threads: ${{ inputs.substract_threads }}
|
||||
|
||||
- name: Build Clio
|
||||
shell: bash
|
||||
|
||||
18
.github/actions/generate/action.yml
vendored
18
.github/actions/generate/action.yml
vendored
@@ -12,6 +12,10 @@ inputs:
|
||||
description: Build type for third-party libraries and clio. Could be 'Release', 'Debug'
|
||||
required: true
|
||||
default: 'Release'
|
||||
build_integration_tests:
|
||||
description: Whether to build integration tests
|
||||
required: true
|
||||
default: 'true'
|
||||
code_coverage:
|
||||
description: Whether conan's coverage option should be on or not
|
||||
required: true
|
||||
@@ -20,6 +24,10 @@ inputs:
|
||||
description: Whether Clio is to be statically linked
|
||||
required: true
|
||||
default: 'false'
|
||||
sanitizer:
|
||||
description: Sanitizer to use
|
||||
required: true
|
||||
default: 'false' # false, tsan, asan or ubsan
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
@@ -33,14 +41,20 @@ runs:
|
||||
BUILD_OPTION: "${{ inputs.conan_cache_hit == 'true' && 'missing' || '' }}"
|
||||
CODE_COVERAGE: "${{ inputs.code_coverage == 'true' && 'True' || 'False' }}"
|
||||
STATIC_OPTION: "${{ inputs.static == 'true' && 'True' || 'False' }}"
|
||||
INTEGRATION_TESTS_OPTION: "${{ inputs.build_integration_tests == 'true' && 'True' || 'False' }}"
|
||||
run: |
|
||||
cd build
|
||||
conan install .. -of . -b $BUILD_OPTION -s build_type=${{ inputs.build_type }} -o clio:static="${STATIC_OPTION}" -o clio:tests=True -o clio:integration_tests=True -o clio:lint=False -o clio:coverage="${CODE_COVERAGE}" --profile ${{ inputs.conan_profile }}
|
||||
conan install .. -of . -b $BUILD_OPTION -s build_type=${{ inputs.build_type }} -o clio:static="${STATIC_OPTION}" -o clio:tests=True -o clio:integration_tests="${INTEGRATION_TESTS_OPTION}" -o clio:lint=False -o clio:coverage="${CODE_COVERAGE}" --profile ${{ inputs.conan_profile }}
|
||||
|
||||
- name: Run cmake
|
||||
shell: bash
|
||||
env:
|
||||
BUILD_TYPE: "${{ inputs.build_type }}"
|
||||
SANITIZER_OPTION: |
|
||||
${{ inputs.sanitizer == 'tsan' && '-Dsan=thread' ||
|
||||
inputs.sanitizer == 'ubsan' && '-Dsan=undefined' ||
|
||||
inputs.sanitizer == 'asan' && '-Dsan=address' ||
|
||||
'' }}
|
||||
run: |
|
||||
cd build
|
||||
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=${{ inputs.build_type }} ${{ inputs.extra_cmake_args }} .. -G Ninja
|
||||
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE="${BUILD_TYPE}" ${SANITIZER_OPTION} .. -G Ninja
|
||||
|
||||
14
.github/actions/get_number_of_threads/action.yml
vendored
14
.github/actions/get_number_of_threads/action.yml
vendored
@@ -1,5 +1,10 @@
|
||||
name: Get number of threads
|
||||
description: Determines number of threads to use on macOS and Linux
|
||||
inputs:
|
||||
substract_threads:
|
||||
description: How many threads to substract from the calculated number
|
||||
required: true
|
||||
default: '0'
|
||||
outputs:
|
||||
threads_number:
|
||||
description: Number of threads to use
|
||||
@@ -19,8 +24,11 @@ runs:
|
||||
shell: bash
|
||||
run: echo "num=$(($(nproc) - 2))" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Export output variable
|
||||
shell: bash
|
||||
- name: Shift and export number of threads
|
||||
id: number_of_threads_export
|
||||
shell: bash
|
||||
run: |
|
||||
echo "num=${{ steps.mac_threads.outputs.num || steps.linux_threads.outputs.num }}" >> $GITHUB_OUTPUT
|
||||
num_of_threads=${{ steps.mac_threads.outputs.num || steps.linux_threads.outputs.num }}
|
||||
shift_by=${{ inputs.substract_threads }}
|
||||
shifted=$((num_of_threads - shift_by))
|
||||
echo "num=$(( shifted > 1 ? shifted : 1 ))" >> $GITHUB_OUTPUT
|
||||
|
||||
28
.github/actions/prepare_runner/action.yml
vendored
28
.github/actions/prepare_runner/action.yml
vendored
@@ -11,9 +11,35 @@ runs:
|
||||
if: ${{ runner.os == 'macOS' }}
|
||||
shell: bash
|
||||
run: |
|
||||
brew install llvm@14 pkg-config ninja bison cmake ccache jq gh conan@1 ca-certificates
|
||||
brew install llvm@14 pkg-config ninja bison ccache jq gh conan@1 ca-certificates
|
||||
echo "/opt/homebrew/opt/conan@1/bin" >> $GITHUB_PATH
|
||||
|
||||
- name: Install CMake 3.31.6 on mac
|
||||
if: ${{ runner.os == 'macOS' }}
|
||||
shell: bash
|
||||
run: |
|
||||
# Uninstall any existing cmake
|
||||
brew uninstall cmake --ignore-dependencies || true
|
||||
|
||||
# Download specific cmake formula
|
||||
FORMULA_URL="https://raw.githubusercontent.com/Homebrew/homebrew-core/b4e46db74e74a8c1650b38b1da222284ce1ec5ce/Formula/c/cmake.rb"
|
||||
FORMULA_EXPECTED_SHA256="c7ec95d86f0657638835441871e77541165e0a2581b53b3dd657cf13ad4228d4"
|
||||
|
||||
mkdir -p /tmp/homebrew-formula
|
||||
curl -s -L $FORMULA_URL -o /tmp/homebrew-formula/cmake.rb
|
||||
|
||||
# Verify the downloaded formula
|
||||
ACTUAL_SHA256=$(shasum -a 256 /tmp/homebrew-formula/cmake.rb | cut -d ' ' -f 1)
|
||||
if [ "$ACTUAL_SHA256" != "$FORMULA_EXPECTED_SHA256" ]; then
|
||||
echo "Error: Formula checksum mismatch"
|
||||
echo "Expected: $FORMULA_EXPECTED_SHA256"
|
||||
echo "Actual: $ACTUAL_SHA256"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Install cmake from the specific formula with force flag
|
||||
brew install --force /tmp/homebrew-formula/cmake.rb
|
||||
|
||||
- name: Fix git permissions on Linux
|
||||
if: ${{ runner.os == 'Linux' }}
|
||||
shell: bash
|
||||
|
||||
45
.github/scripts/execute-tests-under-sanitizer
vendored
Executable file
45
.github/scripts/execute-tests-under-sanitizer
vendored
Executable file
@@ -0,0 +1,45 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -o pipefail
|
||||
|
||||
# Note: This script is intended to be run from the root of the repository.
|
||||
#
|
||||
# This script runs each unit-test separately and generates reports from the currently active sanitizer.
|
||||
# Output is saved in ./.sanitizer-report in the root of the repository
|
||||
|
||||
if [[ -z "$1" ]]; then
|
||||
cat <<EOF
|
||||
|
||||
ERROR
|
||||
-----------------------------------------------------------------------------
|
||||
Path to clio_tests should be passed as first argument to the script.
|
||||
-----------------------------------------------------------------------------
|
||||
|
||||
EOF
|
||||
exit 1
|
||||
fi
|
||||
|
||||
TEST_BINARY=$1
|
||||
|
||||
if [[ ! -f "$TEST_BINARY" ]]; then
|
||||
echo "Test binary not found: $TEST_BINARY"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
TESTS=$($TEST_BINARY --gtest_list_tests | awk '/^ / {print suite $1} !/^ / {suite=$1}')
|
||||
|
||||
OUTPUT_DIR="./.sanitizer-report"
|
||||
mkdir -p "$OUTPUT_DIR"
|
||||
|
||||
for TEST in $TESTS; do
|
||||
OUTPUT_FILE="$OUTPUT_DIR/${TEST//\//_}"
|
||||
export TSAN_OPTIONS="log_path=\"$OUTPUT_FILE\" die_after_fork=0"
|
||||
export ASAN_OPTIONS="log_path=\"$OUTPUT_FILE\""
|
||||
export UBSAN_OPTIONS="log_path=\"$OUTPUT_FILE\""
|
||||
export MallocNanoZone='0' # for MacOSX
|
||||
$TEST_BINARY --gtest_filter="$TEST" > /dev/null 2>&1
|
||||
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "'$TEST' failed a sanitizer check."
|
||||
fi
|
||||
done
|
||||
172
.github/workflows/build.yml
vendored
172
.github/workflows/build.yml
vendored
@@ -9,7 +9,7 @@ on:
|
||||
jobs:
|
||||
check_format:
|
||||
name: Check format
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
steps:
|
||||
@@ -26,7 +26,7 @@ jobs:
|
||||
|
||||
check_docs:
|
||||
name: Check documentation
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
steps:
|
||||
@@ -47,133 +47,44 @@ jobs:
|
||||
matrix:
|
||||
include:
|
||||
- os: heavy
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
build_type: Release
|
||||
conan_profile: gcc
|
||||
build_type: Release
|
||||
container: '{ "image": "rippleci/clio_ci:latest" }'
|
||||
code_coverage: false
|
||||
static: true
|
||||
- os: heavy
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
build_type: Debug
|
||||
conan_profile: gcc
|
||||
build_type: Debug
|
||||
container: '{ "image": "rippleci/clio_ci:latest" }'
|
||||
code_coverage: true
|
||||
static: true
|
||||
- os: heavy
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
build_type: Release
|
||||
conan_profile: clang
|
||||
build_type: Release
|
||||
container: '{ "image": "rippleci/clio_ci:latest" }'
|
||||
code_coverage: false
|
||||
static: true
|
||||
- os: heavy
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
build_type: Debug
|
||||
conan_profile: clang
|
||||
build_type: Debug
|
||||
container: '{ "image": "rippleci/clio_ci:latest" }'
|
||||
code_coverage: false
|
||||
static: true
|
||||
- os: macos15
|
||||
build_type: Release
|
||||
code_coverage: false
|
||||
static: false
|
||||
runs-on: [self-hosted, "${{ matrix.os }}"]
|
||||
uses: ./.github/workflows/build_impl.yml
|
||||
with:
|
||||
runs_on: ${{ matrix.os }}
|
||||
container: ${{ matrix.container }}
|
||||
|
||||
steps:
|
||||
- name: Clean workdir
|
||||
if: ${{ runner.os == 'macOS' }}
|
||||
uses: kuznetsss/workspace-cleanup@1.0
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Prepare runner
|
||||
uses: ./.github/actions/prepare_runner
|
||||
with:
|
||||
disable_ccache: false
|
||||
|
||||
- name: Setup conan
|
||||
uses: ./.github/actions/setup_conan
|
||||
id: conan
|
||||
with:
|
||||
conan_profile: ${{ matrix.conan_profile }}
|
||||
|
||||
- name: Restore cache
|
||||
uses: ./.github/actions/restore_cache
|
||||
id: restore_cache
|
||||
with:
|
||||
conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
|
||||
conan_profile: ${{ steps.conan.outputs.conan_profile }}
|
||||
ccache_dir: ${{ env.CCACHE_DIR }}
|
||||
build_type: ${{ matrix.build_type }}
|
||||
code_coverage: ${{ matrix.code_coverage }}
|
||||
|
||||
- name: Run conan and cmake
|
||||
uses: ./.github/actions/generate
|
||||
with:
|
||||
conan_profile: ${{ steps.conan.outputs.conan_profile }}
|
||||
conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
|
||||
build_type: ${{ matrix.build_type }}
|
||||
code_coverage: ${{ matrix.code_coverage }}
|
||||
static: ${{ matrix.static }}
|
||||
|
||||
- name: Build Clio
|
||||
uses: ./.github/actions/build_clio
|
||||
|
||||
- name: Show ccache's statistics
|
||||
shell: bash
|
||||
id: ccache_stats
|
||||
run: |
|
||||
ccache -s > /tmp/ccache.stats
|
||||
miss_rate=$(cat /tmp/ccache.stats | grep 'Misses' | head -n1 | sed 's/.*(\(.*\)%).*/\1/')
|
||||
echo "miss_rate=${miss_rate}" >> $GITHUB_OUTPUT
|
||||
cat /tmp/ccache.stats
|
||||
|
||||
- name: Strip tests
|
||||
if: ${{ !matrix.code_coverage }}
|
||||
run: strip build/clio_tests && strip build/clio_integration_tests
|
||||
|
||||
- name: Upload clio_server
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: clio_server_${{ runner.os }}_${{ matrix.build_type }}_${{ steps.conan.outputs.conan_profile }}
|
||||
path: build/clio_server
|
||||
|
||||
- name: Upload clio_tests
|
||||
if: ${{ !matrix.code_coverage }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: clio_tests_${{ runner.os }}_${{ matrix.build_type }}_${{ steps.conan.outputs.conan_profile }}
|
||||
path: build/clio_*tests
|
||||
|
||||
- name: Save cache
|
||||
uses: ./.github/actions/save_cache
|
||||
with:
|
||||
conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
|
||||
conan_hash: ${{ steps.restore_cache.outputs.conan_hash }}
|
||||
conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
|
||||
ccache_dir: ${{ env.CCACHE_DIR }}
|
||||
ccache_cache_hit: ${{ steps.restore_cache.outputs.ccache_cache_hit }}
|
||||
ccache_cache_miss_rate: ${{ steps.ccache_stats.outputs.miss_rate }}
|
||||
build_type: ${{ matrix.build_type }}
|
||||
code_coverage: ${{ matrix.code_coverage }}
|
||||
conan_profile: ${{ steps.conan.outputs.conan_profile }}
|
||||
|
||||
# TODO: This is not a part of build process but it is the easiest way to do it here.
|
||||
# It will be refactored in https://github.com/XRPLF/clio/issues/1075
|
||||
- name: Run code coverage
|
||||
if: ${{ matrix.code_coverage }}
|
||||
uses: ./.github/actions/code_coverage
|
||||
|
||||
upload_coverage_report:
|
||||
name: Codecov
|
||||
needs: build
|
||||
uses: ./.github/workflows/upload_coverage_report.yml
|
||||
secrets:
|
||||
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
|
||||
unit_tests: true
|
||||
integration_tests: true
|
||||
clio_server: true
|
||||
|
||||
test:
|
||||
name: Run Tests
|
||||
@@ -183,24 +94,24 @@ jobs:
|
||||
matrix:
|
||||
include:
|
||||
- os: heavy
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
conan_profile: gcc
|
||||
build_type: Release
|
||||
- os: heavy
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
- os: heavy
|
||||
conan_profile: clang
|
||||
build_type: Release
|
||||
- os: heavy
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
- os: heavy
|
||||
conan_profile: clang
|
||||
build_type: Debug
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
- os: macos15
|
||||
conan_profile: apple_clang_16
|
||||
build_type: Release
|
||||
runs-on: [self-hosted, "${{ matrix.os }}"]
|
||||
runs-on: ${{ matrix.os }}
|
||||
container: ${{ matrix.container }}
|
||||
|
||||
steps:
|
||||
@@ -216,3 +127,44 @@ jobs:
|
||||
run: |
|
||||
chmod +x ./clio_tests
|
||||
./clio_tests
|
||||
|
||||
check_config:
|
||||
name: Check Config Description
|
||||
needs: build
|
||||
runs-on: heavy
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: clio_server_Linux_Release_gcc
|
||||
- name: Compare Config Description
|
||||
shell: bash
|
||||
run: |
|
||||
repoConfigFile=docs/config-description.md
|
||||
if ! [ -f ${repoConfigFile} ]; then
|
||||
echo "Config Description markdown file is missing in docs folder"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
chmod +x ./clio_server
|
||||
configDescriptionFile=config_description_new.md
|
||||
./clio_server -d ${configDescriptionFile}
|
||||
|
||||
configDescriptionHash=$(sha256sum ${configDescriptionFile} | cut -d' ' -f1)
|
||||
repoConfigHash=$(sha256sum ${repoConfigFile} | cut -d' ' -f1)
|
||||
|
||||
if [ ${configDescriptionHash} != ${repoConfigHash} ]; then
|
||||
echo "Markdown file is not up to date"
|
||||
diff -u "${repoConfigFile}" "${configDescriptionFile}"
|
||||
rm -f ${configDescriptionFile}
|
||||
exit 1
|
||||
fi
|
||||
rm -f ${configDescriptionFile}
|
||||
exit 0
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -40,7 +40,7 @@ on:
|
||||
jobs:
|
||||
build_and_publish_image:
|
||||
name: Build and publish image
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
|
||||
192
.github/workflows/build_impl.yml
vendored
Normal file
192
.github/workflows/build_impl.yml
vendored
Normal file
@@ -0,0 +1,192 @@
|
||||
name: Reusable build
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
runs_on:
|
||||
description: Runner to run the job on
|
||||
required: true
|
||||
type: string
|
||||
default: heavy
|
||||
|
||||
container:
|
||||
description: "The container object as a JSON string (leave empty to run natively)"
|
||||
required: true
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
conan_profile:
|
||||
description: Conan profile to use
|
||||
required: true
|
||||
type: string
|
||||
|
||||
build_type:
|
||||
description: Build type
|
||||
required: true
|
||||
type: string
|
||||
|
||||
disable_cache:
|
||||
description: Whether ccache and conan cache should be disabled
|
||||
required: false
|
||||
type: boolean
|
||||
default: false
|
||||
|
||||
code_coverage:
|
||||
description: Whether to enable code coverage
|
||||
required: true
|
||||
type: boolean
|
||||
default: false
|
||||
|
||||
static:
|
||||
description: Whether to build static binaries
|
||||
required: true
|
||||
type: boolean
|
||||
default: true
|
||||
|
||||
unit_tests:
|
||||
description: Whether to run unit tests
|
||||
required: true
|
||||
type: boolean
|
||||
default: false
|
||||
|
||||
integration_tests:
|
||||
description: Whether to run integration tests
|
||||
required: true
|
||||
type: boolean
|
||||
default: false
|
||||
|
||||
clio_server:
|
||||
description: Whether to build clio_server
|
||||
required: true
|
||||
type: boolean
|
||||
default: true
|
||||
|
||||
target:
|
||||
description: Build target name
|
||||
required: false
|
||||
type: string
|
||||
default: all
|
||||
|
||||
sanitizer:
|
||||
description: Sanitizer to use
|
||||
required: false
|
||||
type: string
|
||||
default: 'false'
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Build ${{ inputs.container != '' && 'in container' || 'natively' }}
|
||||
runs-on: ${{ inputs.runs_on }}
|
||||
container: ${{ inputs.container != '' && fromJson(inputs.container) || null }}
|
||||
|
||||
steps:
|
||||
- name: Clean workdir
|
||||
if: ${{ runner.os == 'macOS' }}
|
||||
uses: kuznetsss/workspace-cleanup@1.0
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Prepare runner
|
||||
uses: ./.github/actions/prepare_runner
|
||||
with:
|
||||
disable_ccache: ${{ inputs.disable_cache }}
|
||||
|
||||
- name: Setup conan
|
||||
uses: ./.github/actions/setup_conan
|
||||
id: conan
|
||||
with:
|
||||
conan_profile: ${{ inputs.conan_profile }}
|
||||
|
||||
- name: Restore cache
|
||||
if: ${{ !inputs.disable_cache }}
|
||||
uses: ./.github/actions/restore_cache
|
||||
id: restore_cache
|
||||
with:
|
||||
conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
|
||||
conan_profile: ${{ steps.conan.outputs.conan_profile }}
|
||||
ccache_dir: ${{ env.CCACHE_DIR }}
|
||||
build_type: ${{ inputs.build_type }}
|
||||
code_coverage: ${{ inputs.code_coverage }}
|
||||
|
||||
- name: Run conan and cmake
|
||||
uses: ./.github/actions/generate
|
||||
with:
|
||||
conan_profile: ${{ steps.conan.outputs.conan_profile }}
|
||||
conan_cache_hit: ${{ !inputs.disable_cache && steps.restore_cache.outputs.conan_cache_hit }}
|
||||
build_type: ${{ inputs.build_type }}
|
||||
code_coverage: ${{ inputs.code_coverage }}
|
||||
static: ${{ inputs.static }}
|
||||
sanitizer: ${{ inputs.sanitizer }}
|
||||
|
||||
- name: Build Clio
|
||||
uses: ./.github/actions/build_clio
|
||||
with:
|
||||
target: ${{ inputs.target }}
|
||||
|
||||
- name: Show ccache's statistics
|
||||
if: ${{ !inputs.disable_cache }}
|
||||
shell: bash
|
||||
id: ccache_stats
|
||||
run: |
|
||||
ccache -s > /tmp/ccache.stats
|
||||
miss_rate=$(cat /tmp/ccache.stats | grep 'Misses' | head -n1 | sed 's/.*(\(.*\)%).*/\1/')
|
||||
echo "miss_rate=${miss_rate}" >> $GITHUB_OUTPUT
|
||||
cat /tmp/ccache.stats
|
||||
|
||||
- name: Strip unit_tests
|
||||
if: ${{ inputs.unit_tests && !inputs.code_coverage && inputs.sanitizer == 'false' }}
|
||||
run: strip build/clio_tests
|
||||
|
||||
- name: Strip integration_tests
|
||||
if: ${{ inputs.integration_tests && !inputs.code_coverage }}
|
||||
run: strip build/clio_integration_tests
|
||||
|
||||
- name: Upload clio_server
|
||||
if: ${{ inputs.clio_server }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: clio_server_${{ runner.os }}_${{ inputs.build_type }}_${{ steps.conan.outputs.conan_profile }}
|
||||
path: build/clio_server
|
||||
|
||||
- name: Upload clio_tests
|
||||
if: ${{ inputs.unit_tests && !inputs.code_coverage }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: clio_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ steps.conan.outputs.conan_profile }}
|
||||
path: build/clio_tests
|
||||
|
||||
- name: Upload clio_integration_tests
|
||||
if: ${{ inputs.integration_tests && !inputs.code_coverage }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: clio_integration_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ steps.conan.outputs.conan_profile }}
|
||||
path: build/clio_integration_tests
|
||||
|
||||
- name: Save cache
|
||||
if: ${{ !inputs.disable_cache && github.ref == 'refs/heads/develop' }}
|
||||
uses: ./.github/actions/save_cache
|
||||
with:
|
||||
conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
|
||||
conan_hash: ${{ steps.restore_cache.outputs.conan_hash }}
|
||||
conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
|
||||
ccache_dir: ${{ env.CCACHE_DIR }}
|
||||
ccache_cache_hit: ${{ steps.restore_cache.outputs.ccache_cache_hit }}
|
||||
ccache_cache_miss_rate: ${{ steps.ccache_stats.outputs.miss_rate }}
|
||||
build_type: ${{ inputs.build_type }}
|
||||
code_coverage: ${{ inputs.code_coverage }}
|
||||
conan_profile: ${{ steps.conan.outputs.conan_profile }}
|
||||
|
||||
# TODO: This is not a part of build process but it is the easiest way to do it here.
|
||||
# It will be refactored in https://github.com/XRPLF/clio/issues/1075
|
||||
- name: Run code coverage
|
||||
if: ${{ inputs.code_coverage }}
|
||||
uses: ./.github/actions/code_coverage
|
||||
|
||||
upload_coverage_report:
|
||||
if: ${{ inputs.code_coverage }}
|
||||
name: Codecov
|
||||
needs: build
|
||||
uses: ./.github/workflows/upload_coverage_report.yml
|
||||
secrets:
|
||||
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
|
||||
2
.github/workflows/check_libxrpl.yml
vendored
2
.github/workflows/check_libxrpl.yml
vendored
@@ -71,7 +71,7 @@ jobs:
|
||||
name: Create an issue on failure
|
||||
needs: [build, run_tests]
|
||||
if: ${{ always() && contains(needs.*.result, 'failure') }}
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
issues: write
|
||||
|
||||
2
.github/workflows/check_pr_title.yml
vendored
2
.github/workflows/check_pr_title.yml
vendored
@@ -6,7 +6,7 @@ on:
|
||||
|
||||
jobs:
|
||||
check_title:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-latest
|
||||
# permissions:
|
||||
# pull-requests: write
|
||||
steps:
|
||||
|
||||
4
.github/workflows/clang-tidy.yml
vendored
4
.github/workflows/clang-tidy.yml
vendored
@@ -1,7 +1,7 @@
|
||||
name: Clang-tidy check
|
||||
on:
|
||||
schedule:
|
||||
- cron: "0 6 * * 1-5"
|
||||
- cron: "0 9 * * 1-5"
|
||||
workflow_dispatch:
|
||||
pull_request:
|
||||
branches: [develop]
|
||||
@@ -12,7 +12,7 @@ on:
|
||||
|
||||
jobs:
|
||||
clang_tidy:
|
||||
runs-on: [self-hosted, Linux]
|
||||
runs-on: heavy
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
permissions:
|
||||
|
||||
@@ -6,7 +6,7 @@ on:
|
||||
|
||||
jobs:
|
||||
restart_clang_tidy:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
permissions:
|
||||
actions: write
|
||||
|
||||
2
.github/workflows/docs.yml
vendored
2
.github/workflows/docs.yml
vendored
@@ -18,7 +18,7 @@ jobs:
|
||||
environment:
|
||||
name: github-pages
|
||||
url: ${{ steps.deployment.outputs.page_url }}
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-latest
|
||||
continue-on-error: true
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
|
||||
82
.github/workflows/nightly.yml
vendored
82
.github/workflows/nightly.yml
vendored
@@ -1,7 +1,7 @@
|
||||
name: Nightly release
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 5 * * 1-5'
|
||||
- cron: '0 8 * * 1-5'
|
||||
workflow_dispatch:
|
||||
pull_request:
|
||||
paths:
|
||||
@@ -21,68 +21,23 @@ jobs:
|
||||
- os: heavy
|
||||
build_type: Release
|
||||
static: true
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
container: '{ "image": "rippleci/clio_ci:latest" }'
|
||||
- os: heavy
|
||||
build_type: Debug
|
||||
static: true
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
runs-on: [self-hosted, "${{ matrix.os }}"]
|
||||
container: '{ "image": "rippleci/clio_ci:latest" }'
|
||||
uses: ./.github/workflows/build_impl.yml
|
||||
with:
|
||||
runs_on: ${{ matrix.os }}
|
||||
container: ${{ matrix.container }}
|
||||
|
||||
steps:
|
||||
- name: Clean workdir
|
||||
if: ${{ runner.os == 'macOS' }}
|
||||
uses: kuznetsss/workspace-cleanup@1.0
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Prepare runner
|
||||
uses: ./.github/actions/prepare_runner
|
||||
with:
|
||||
disable_ccache: true
|
||||
|
||||
- name: Setup conan
|
||||
uses: ./.github/actions/setup_conan
|
||||
id: conan
|
||||
with:
|
||||
conan_profile: gcc
|
||||
|
||||
- name: Run conan and cmake
|
||||
uses: ./.github/actions/generate
|
||||
with:
|
||||
conan_profile: ${{ steps.conan.outputs.conan_profile }}
|
||||
conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
|
||||
build_type: ${{ matrix.build_type }}
|
||||
code_coverage: false
|
||||
static: ${{ matrix.static }}
|
||||
|
||||
- name: Build Clio
|
||||
uses: ./.github/actions/build_clio
|
||||
|
||||
- name: Strip tests
|
||||
run: strip build/clio_tests && strip build/clio_integration_tests
|
||||
|
||||
- name: Upload clio_tests
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: clio_tests_${{ runner.os }}_${{ matrix.build_type }}
|
||||
path: build/clio_*tests
|
||||
|
||||
- name: Compress clio_server
|
||||
shell: bash
|
||||
run: |
|
||||
cd build
|
||||
tar czf ./clio_server_${{ runner.os }}_${{ matrix.build_type }}.tar.gz ./clio_server
|
||||
|
||||
- name: Upload clio_server
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: clio_server_${{ runner.os }}_${{ matrix.build_type }}
|
||||
path: build/clio_server_${{ runner.os }}_${{ matrix.build_type }}.tar.gz
|
||||
|
||||
unit_tests: true
|
||||
integration_tests: true
|
||||
clio_server: true
|
||||
disable_cache: true
|
||||
|
||||
run_tests:
|
||||
needs: build
|
||||
@@ -91,14 +46,17 @@ jobs:
|
||||
matrix:
|
||||
include:
|
||||
- os: macos15
|
||||
conan_profile: apple_clang_16
|
||||
build_type: Release
|
||||
integration_tests: false
|
||||
- os: heavy
|
||||
conan_profile: gcc
|
||||
build_type: Release
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
integration_tests: true
|
||||
- os: heavy
|
||||
conan_profile: gcc
|
||||
build_type: Debug
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
@@ -122,13 +80,17 @@ jobs:
|
||||
|
||||
- uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: clio_tests_${{ runner.os }}_${{ matrix.build_type }}
|
||||
name: clio_tests_${{ runner.os }}_${{ matrix.build_type }}_${{ matrix.conan_profile }}
|
||||
|
||||
- name: Run clio_tests
|
||||
run: |
|
||||
chmod +x ./clio_tests
|
||||
./clio_tests
|
||||
|
||||
- uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: clio_integration_tests_${{ runner.os }}_${{ matrix.build_type }}_${{ matrix.conan_profile }}
|
||||
|
||||
# To be enabled back once docker in mac runner arrives
|
||||
# https://github.com/XRPLF/clio/issues/1400
|
||||
- name: Run clio_integration_tests
|
||||
@@ -140,7 +102,7 @@ jobs:
|
||||
nightly_release:
|
||||
if: ${{ github.event_name != 'pull_request' }}
|
||||
needs: run_tests
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
GH_REPO: ${{ github.repository }}
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
@@ -189,14 +151,14 @@ jobs:
|
||||
tags: |
|
||||
type=raw,value=nightly
|
||||
type=raw,value=${{ github.sha }}
|
||||
artifact_name: clio_server_Linux_Release
|
||||
artifact_name: clio_server_Linux_Release_gcc
|
||||
strip_binary: true
|
||||
publish_image: ${{ github.event_name != 'pull_request' }}
|
||||
|
||||
create_issue_on_failure:
|
||||
needs: [build, run_tests, nightly_release, build_and_publish_docker_image]
|
||||
if: ${{ always() && contains(needs.*.result, 'failure') && github.event_name != 'pull_request' }}
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
issues: write
|
||||
|
||||
106
.github/workflows/sanitizers.yml
vendored
Normal file
106
.github/workflows/sanitizers.yml
vendored
Normal file
@@ -0,0 +1,106 @@
|
||||
name: Run tests with sanitizers
|
||||
on:
|
||||
schedule:
|
||||
- cron: "0 4 * * 1-5"
|
||||
workflow_dispatch:
|
||||
pull_request:
|
||||
paths:
|
||||
- '.github/workflows/sanitizers.yml'
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Build clio tests
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- sanitizer: tsan
|
||||
compiler: gcc
|
||||
- sanitizer: asan
|
||||
compiler: gcc
|
||||
# - sanitizer: ubsan # todo: enable when heavy runners are available
|
||||
# compiler: gcc
|
||||
uses: ./.github/workflows/build_impl.yml
|
||||
with:
|
||||
runs_on: ubuntu-latest # todo: change to heavy
|
||||
container: '{ "image": "rippleci/clio_ci:latest" }'
|
||||
disable_cache: true
|
||||
conan_profile: ${{ matrix.compiler }}.${{ matrix.sanitizer }}
|
||||
build_type: Release
|
||||
code_coverage: false
|
||||
static: false
|
||||
unit_tests: true
|
||||
integration_tests: false
|
||||
clio_server: false
|
||||
target: clio_tests
|
||||
sanitizer: ${{ matrix.sanitizer }}
|
||||
|
||||
# consider combining this with the previous matrix instead
|
||||
run_tests:
|
||||
needs: build
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- sanitizer: tsan
|
||||
compiler: gcc
|
||||
- sanitizer: asan
|
||||
compiler: gcc
|
||||
# - sanitizer: ubsan # todo: enable when heavy runners are available
|
||||
# compiler: gcc
|
||||
runs-on: ubuntu-latest # todo: change to heavy
|
||||
container:
|
||||
image: rippleci/clio_ci:latest
|
||||
permissions:
|
||||
contents: write
|
||||
issues: write
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: clio_tests_${{ runner.os }}_Release_${{ matrix.compiler }}.${{ matrix.sanitizer }}
|
||||
|
||||
- name: Run clio_tests [${{ matrix.compiler }} / ${{ matrix.sanitizer }}]
|
||||
shell: bash
|
||||
run: |
|
||||
chmod +x ./clio_tests
|
||||
./.github/scripts/execute-tests-under-sanitizer ./clio_tests
|
||||
|
||||
- name: Check for sanitizer report
|
||||
shell: bash
|
||||
id: check_report
|
||||
run: |
|
||||
if ls .sanitizer-report/* 1> /dev/null 2>&1; then
|
||||
echo "found_report=true" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "found_report=false" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
- name: Upload report
|
||||
if: ${{ steps.check_report.outputs.found_report == 'true' }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ matrix.compiler }}_${{ matrix.sanitizer }}_report
|
||||
path: .sanitizer-report/*
|
||||
include-hidden-files: true
|
||||
|
||||
#
|
||||
# todo: enable when we have fixed all currently existing issues from sanitizers
|
||||
#
|
||||
# - name: Create an issue
|
||||
# if: ${{ steps.check_report.outputs.found_report == 'true' }}
|
||||
# uses: ./.github/actions/create_issue
|
||||
# env:
|
||||
# GH_TOKEN: ${{ github.token }}
|
||||
# with:
|
||||
# labels: 'bug'
|
||||
# title: '[${{ matrix.sanitizer }}/${{ matrix.compiler }}] reported issues'
|
||||
# body: >
|
||||
# Clio tests failed one or more sanitizer checks when built with ${{ matrix.compiler }}`.
|
||||
|
||||
# Workflow: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}/
|
||||
# Reports are available as artifacts.
|
||||
2
.github/workflows/upload_coverage_report.yml
vendored
2
.github/workflows/upload_coverage_report.yml
vendored
@@ -9,7 +9,7 @@ on:
|
||||
jobs:
|
||||
upload_report:
|
||||
name: Upload report
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -6,6 +6,7 @@
|
||||
.vscode
|
||||
.python-version
|
||||
.DS_Store
|
||||
.sanitizer-report
|
||||
CMakeUserPresets.json
|
||||
config.json
|
||||
src/util/build/Build.cpp
|
||||
|
||||
@@ -16,6 +16,8 @@ option(coverage "Build test coverage report" FALSE)
|
||||
option(packaging "Create distribution packages" FALSE)
|
||||
option(lint "Run clang-tidy checks during compilation" FALSE)
|
||||
option(static "Statically linked Clio" FALSE)
|
||||
option(snapshot "Build snapshot tool" FALSE)
|
||||
|
||||
# ========================================================================== #
|
||||
set(san "" CACHE STRING "Add sanitizer instrumentation")
|
||||
set(CMAKE_EXPORT_COMPILE_COMMANDS TRUE)
|
||||
@@ -65,15 +67,21 @@ endif ()
|
||||
|
||||
# Enable selected sanitizer if enabled via `san`
|
||||
if (san)
|
||||
set(SUPPORTED_SANITIZERS "address" "thread" "memory" "undefined")
|
||||
list(FIND SUPPORTED_SANITIZERS "${san}" INDEX)
|
||||
if (INDEX EQUAL -1)
|
||||
message(FATAL_ERROR "Error: Unsupported sanitizer '${san}'. Supported values are: ${SUPPORTED_SANITIZERS}.")
|
||||
endif ()
|
||||
|
||||
target_compile_options(
|
||||
clio PUBLIC # Sanitizers recommend minimum of -O1 for reasonable performance
|
||||
clio_options INTERFACE # Sanitizers recommend minimum of -O1 for reasonable performance
|
||||
$<$<CONFIG:Debug>:-O1> ${SAN_FLAG} -fno-omit-frame-pointer
|
||||
)
|
||||
target_compile_definitions(
|
||||
clio PUBLIC $<$<STREQUAL:${san},address>:SANITIZER=ASAN> $<$<STREQUAL:${san},thread>:SANITIZER=TSAN>
|
||||
clio_options INTERFACE $<$<STREQUAL:${san},address>:SANITIZER=ASAN> $<$<STREQUAL:${san},thread>:SANITIZER=TSAN>
|
||||
$<$<STREQUAL:${san},memory>:SANITIZER=MSAN> $<$<STREQUAL:${san},undefined>:SANITIZER=UBSAN>
|
||||
)
|
||||
target_link_libraries(clio INTERFACE ${SAN_FLAG} ${SAN_LIB})
|
||||
target_link_libraries(clio_options INTERFACE ${SAN_FLAG} ${SAN_LIB})
|
||||
endif ()
|
||||
|
||||
# Generate `docs` target for doxygen documentation if enabled Note: use `make docs` to generate the documentation
|
||||
@@ -85,3 +93,7 @@ include(install/install)
|
||||
if (packaging)
|
||||
include(cmake/packaging.cmake) # This file exists only in build runner
|
||||
endif ()
|
||||
|
||||
if (snapshot)
|
||||
add_subdirectory(tools/snapshot)
|
||||
endif ()
|
||||
|
||||
92
cliff.toml
Normal file
92
cliff.toml
Normal file
@@ -0,0 +1,92 @@
|
||||
# git-cliff ~ default configuration file
|
||||
# https://git-cliff.org/docs/configuration
|
||||
#
|
||||
# Lines starting with "#" are comments.
|
||||
# Configuration options are organized into tables and keys.
|
||||
# See documentation for more information on available options.
|
||||
|
||||
[changelog]
|
||||
# template for the changelog header
|
||||
header = """
|
||||
# Changelog\n
|
||||
All notable changes to this project will be documented in this file.\n
|
||||
"""
|
||||
# template for the changelog body
|
||||
# https://keats.github.io/tera/docs/#introduction
|
||||
body = """
|
||||
{% if version %}\
|
||||
## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }}
|
||||
{% else %}\
|
||||
## [unreleased]
|
||||
{% endif %}\
|
||||
{% for group, commits in commits | filter(attribute="merge_commit", value=false) | group_by(attribute="group") %}
|
||||
### {{ group | striptags | trim | upper_first }}
|
||||
{% for commit in commits %}
|
||||
- {% if commit.scope %}*({{ commit.scope }})* {% endif %}\
|
||||
{% if commit.breaking %}[**breaking**] {% endif %}\
|
||||
{{ commit.message | upper_first }} {% if commit.remote.username %}by @{{ commit.remote.username }}{% endif %}\
|
||||
{% endfor %}
|
||||
{% endfor %}\n
|
||||
"""
|
||||
# template for the changelog footer
|
||||
footer = """
|
||||
<!-- generated by git-cliff -->
|
||||
"""
|
||||
# remove the leading and trailing s
|
||||
trim = true
|
||||
# postprocessors
|
||||
postprocessors = [
|
||||
# { pattern = '<REPO>', replace = "https://github.com/orhun/git-cliff" }, # replace repository URL
|
||||
]
|
||||
# render body even when there are no releases to process
|
||||
# render_always = true
|
||||
# output file path
|
||||
output = "CHANGELOG.md"
|
||||
|
||||
[git]
|
||||
# parse the commits based on https://www.conventionalcommits.org
|
||||
conventional_commits = true
|
||||
# filter out the commits that are not conventional
|
||||
filter_unconventional = true
|
||||
# process each line of a commit as an individual commit
|
||||
split_commits = false
|
||||
# regex for preprocessing the commit messages
|
||||
commit_preprocessors = [
|
||||
# Replace issue numbers
|
||||
#{ pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](<REPO>/issues/${2}))"},
|
||||
# Check spelling of the commit with https://github.com/crate-ci/typos
|
||||
# If the spelling is incorrect, it will be automatically fixed.
|
||||
#{ pattern = '.*', replace_command = 'typos --write-changes -' },
|
||||
]
|
||||
# regex for parsing and grouping commits
|
||||
commit_parsers = [
|
||||
{ message = "^feat", group = "<!-- 0 -->🚀 Features" },
|
||||
{ message = "^fix", group = "<!-- 1 -->🐛 Bug Fixes" },
|
||||
{ message = "^doc", group = "<!-- 3 -->📚 Documentation" },
|
||||
{ message = "^perf", group = "<!-- 4 -->⚡ Performance" },
|
||||
{ message = "^refactor", group = "<!-- 2 -->🚜 Refactor" },
|
||||
{ message = "^style.*[Cc]lang-tidy auto fixes", skip = true },
|
||||
{ message = "^style", group = "<!-- 5 -->🎨 Styling" },
|
||||
{ message = "^test", group = "<!-- 6 -->🧪 Testing" },
|
||||
{ message = "^chore\\(release\\): prepare for", skip = true },
|
||||
{ message = "^chore: Commits", skip = true },
|
||||
{ message = "^chore\\(deps.*\\)", skip = true },
|
||||
{ message = "^chore\\(pr\\)", skip = true },
|
||||
{ message = "^chore\\(pull\\)", skip = true },
|
||||
{ message = "^chore|^ci", group = "<!-- 7 -->⚙️ Miscellaneous Tasks" },
|
||||
{ body = ".*security", group = "<!-- 8 -->🛡️ Security" },
|
||||
{ message = "^revert", group = "<!-- 9 -->◀️ Revert" },
|
||||
{ message = ".*", group = "<!-- 10 -->💼 Other" },
|
||||
]
|
||||
# filter out the commits that are not matched by commit parsers
|
||||
filter_commits = false
|
||||
# sort the tags topologically
|
||||
topo_order = false
|
||||
# sort the commits inside sections by oldest/newest order
|
||||
sort_commits = "oldest"
|
||||
|
||||
ignore_tags = "^.*-[b|rc].*"
|
||||
|
||||
[remote.github]
|
||||
owner = "XRPLF"
|
||||
repo = "clio"
|
||||
@@ -39,6 +39,34 @@ if (is_appleclang)
|
||||
list(APPEND COMPILER_FLAGS -Wreorder-init-list)
|
||||
endif ()
|
||||
|
||||
if (san)
|
||||
# When building with sanitizers some compilers will actually produce extra warnings/errors. We don't want this yet, at
|
||||
# least not until we have fixed all runtime issues reported by the sanitizers. Once that is done we can start removing
|
||||
# some of these and trying to fix it in our codebase. We can never remove all of below because most of them are
|
||||
# reported from deep inside libraries like boost or libxrpl.
|
||||
#
|
||||
# TODO: Address in https://github.com/XRPLF/clio/issues/1885
|
||||
list(
|
||||
APPEND
|
||||
COMPILER_FLAGS
|
||||
-Wno-error=tsan # Disables treating TSAN warnings as errors
|
||||
-Wno-tsan # Disables TSAN warnings (thread-safety analysis)
|
||||
-Wno-uninitialized # Disables warnings about uninitialized variables (AddressSanitizer, UndefinedBehaviorSanitizer,
|
||||
# etc.)
|
||||
-Wno-stringop-overflow # Disables warnings about potential string operation overflows (AddressSanitizer)
|
||||
-Wno-unsafe-buffer-usage # Disables warnings about unsafe memory operations (AddressSanitizer)
|
||||
-Wno-frame-larger-than # Disables warnings about stack frame size being too large (AddressSanitizer)
|
||||
-Wno-unused-function # Disables warnings about unused functions (LeakSanitizer, memory-related issues)
|
||||
-Wno-unused-but-set-variable # Disables warnings about unused variables (MemorySanitizer)
|
||||
-Wno-thread-safety-analysis # Disables warnings related to thread safety usage (ThreadSanitizer)
|
||||
-Wno-thread-safety # Disables warnings related to thread safety usage (ThreadSanitizer)
|
||||
-Wno-sign-compare # Disables warnings about signed/unsigned comparison (UndefinedBehaviorSanitizer)
|
||||
-Wno-nonnull # Disables warnings related to null pointer dereferencing (UndefinedBehaviorSanitizer)
|
||||
-Wno-address # Disables warnings about address-related issues (UndefinedBehaviorSanitizer)
|
||||
-Wno-array-bounds # Disables array bounds checks (UndefinedBehaviorSanitizer)
|
||||
)
|
||||
endif ()
|
||||
|
||||
# See https://github.com/cpp-best-practices/cppbestpractices/blob/master/02-Use_the_Tools_Available.md#gcc--clang for
|
||||
# the flags description
|
||||
|
||||
|
||||
@@ -1,3 +1,11 @@
|
||||
target_compile_definitions(clio_options INTERFACE BOOST_STACKTRACE_LINK)
|
||||
target_compile_definitions(clio_options INTERFACE BOOST_STACKTRACE_USE_BACKTRACE)
|
||||
find_package(libbacktrace REQUIRED CONFIG)
|
||||
if ("${san}" STREQUAL "")
|
||||
target_compile_definitions(clio_options INTERFACE BOOST_STACKTRACE_LINK)
|
||||
target_compile_definitions(clio_options INTERFACE BOOST_STACKTRACE_USE_BACKTRACE)
|
||||
find_package(libbacktrace REQUIRED CONFIG)
|
||||
else ()
|
||||
# Some sanitizers (TSAN and ASAN for sure) can't be used with libbacktrace because they have their own backtracing
|
||||
# capabilities and there are conflicts. In any case, this makes sure Clio code knows that backtrace is not available.
|
||||
# See relevant conan profiles for sanitizers where we disable stacktrace in Boost explicitly.
|
||||
target_compile_definitions(clio_options INTERFACE CLIO_WITHOUT_STACKTRACE)
|
||||
message(STATUS "Sanitizer enabled, disabling stacktrace")
|
||||
endif ()
|
||||
|
||||
@@ -19,16 +19,17 @@ class Clio(ConanFile):
|
||||
'packaging': [True, False], # create distribution packages
|
||||
'coverage': [True, False], # build for test coverage report; create custom target `clio_tests-ccov`
|
||||
'lint': [True, False], # run clang-tidy checks during compilation
|
||||
'snapshot': [True, False], # build export/import snapshot tool
|
||||
}
|
||||
|
||||
requires = [
|
||||
'boost/1.82.0',
|
||||
'boost/1.83.0',
|
||||
'cassandra-cpp-driver/2.17.0',
|
||||
'fmt/10.1.1',
|
||||
'protobuf/3.21.9',
|
||||
'grpc/1.50.1',
|
||||
'openssl/1.1.1u',
|
||||
'xrpl/2.4.0-b3',
|
||||
'openssl/1.1.1v',
|
||||
'xrpl/2.4.0',
|
||||
'zlib/1.3.1',
|
||||
'libbacktrace/cci.20210118'
|
||||
]
|
||||
@@ -44,6 +45,7 @@ class Clio(ConanFile):
|
||||
'coverage': False,
|
||||
'lint': False,
|
||||
'docs': False,
|
||||
'snapshot': False,
|
||||
|
||||
'xrpl/*:tests': False,
|
||||
'xrpl/*:rocksdb': False,
|
||||
@@ -92,6 +94,7 @@ class Clio(ConanFile):
|
||||
tc.variables['docs'] = self.options.docs
|
||||
tc.variables['packaging'] = self.options.packaging
|
||||
tc.variables['benchmark'] = self.options.benchmark
|
||||
tc.variables['snapshot'] = self.options.snapshot
|
||||
tc.generate()
|
||||
|
||||
def build(self):
|
||||
|
||||
452
docs/config-description.md
Normal file
452
docs/config-description.md
Normal file
@@ -0,0 +1,452 @@
|
||||
# Clio Config Description
|
||||
This file lists all Clio Configuration definitions in detail.
|
||||
|
||||
## Configuration Details
|
||||
|
||||
### Key: database.type
|
||||
- **Required**: True
|
||||
- **Type**: string
|
||||
- **Default value**: cassandra
|
||||
- **Constraints**: The value must be one of the following: `cassandra`
|
||||
- **Description**: Type of database to use. We currently support Cassandra and Scylladb. We default to Scylladb.
|
||||
### Key: database.cassandra.contact_points
|
||||
- **Required**: True
|
||||
- **Type**: string
|
||||
- **Default value**: localhost
|
||||
- **Constraints**: None
|
||||
- **Description**: A list of IP addresses or hostnames of the initial nodes (Cassandra/Scylladb cluster nodes) that the client will connect to when establishing a connection with the database. If you're running locally, it should be 'localhost' or 127.0.0.1
|
||||
### Key: database.cassandra.secure_connect_bundle
|
||||
- **Required**: False
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: None
|
||||
- **Description**: Configuration file that contains the necessary security credentials and connection details for securely connecting to a Cassandra database cluster.
|
||||
### Key: database.cassandra.port
|
||||
- **Required**: False
|
||||
- **Type**: int
|
||||
- **Default value**: None
|
||||
- **Constraints**: The minimum value is `1`. The maximum value is `65535
|
||||
- **Description**: Port number to connect to the database.
|
||||
### Key: database.cassandra.keyspace
|
||||
- **Required**: True
|
||||
- **Type**: string
|
||||
- **Default value**: clio
|
||||
- **Constraints**: None
|
||||
- **Description**: Keyspace to use for the database.
|
||||
### Key: database.cassandra.replication_factor
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 3
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
|
||||
- **Description**: Number of replicated nodes for Scylladb. Visit this link for more details : https://university.scylladb.com/courses/scylla-essentials-overview/lessons/high-availability/topic/fault-tolerance-replication-factor/
|
||||
### Key: database.cassandra.table_prefix
|
||||
- **Required**: False
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: None
|
||||
- **Description**: Prefix for Database table names.
|
||||
### Key: database.cassandra.max_write_requests_outstanding
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 10000
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: Maximum number of outstanding write requests. Write requests are api calls that write to database
|
||||
### Key: database.cassandra.max_read_requests_outstanding
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 100000
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: Maximum number of outstanding read requests, which reads from database
|
||||
### Key: database.cassandra.threads
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: The number of available CPU cores.
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: Number of threads that will be used for database operations.
|
||||
### Key: database.cassandra.core_connections_per_host
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 1
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
|
||||
- **Description**: Number of core connections per host for Cassandra.
|
||||
### Key: database.cassandra.queue_size_io
|
||||
- **Required**: False
|
||||
- **Type**: int
|
||||
- **Default value**: None
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
|
||||
- **Description**: Queue size for I/O operations in Cassandra.
|
||||
### Key: database.cassandra.write_batch_size
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 20
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
|
||||
- **Description**: Batch size for write operations in Cassandra.
|
||||
### Key: database.cassandra.connect_timeout
|
||||
- **Required**: False
|
||||
- **Type**: int
|
||||
- **Default value**: None
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: The maximum amount of time in seconds the system will wait for a connection to be successfully established with the database.
|
||||
### Key: database.cassandra.request_timeout
|
||||
- **Required**: False
|
||||
- **Type**: int
|
||||
- **Default value**: None
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: The maximum amount of time in seconds the system will wait for a request to be fetched from database.
|
||||
### Key: database.cassandra.username
|
||||
- **Required**: False
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: None
|
||||
- **Description**: The username used for authenticating with the database.
|
||||
### Key: database.cassandra.password
|
||||
- **Required**: False
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: None
|
||||
- **Description**: The password used for authenticating with the database.
|
||||
### Key: database.cassandra.certfile
|
||||
- **Required**: False
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: None
|
||||
- **Description**: The path to the SSL/TLS certificate file used to establish a secure connection between the client and the Cassandra database.
|
||||
### Key: allow_no_etl
|
||||
- **Required**: True
|
||||
- **Type**: boolean
|
||||
- **Default value**: True
|
||||
- **Constraints**: None
|
||||
- **Description**: If True, no ETL nodes will run with Clio.
|
||||
### Key: etl_sources.[].ip
|
||||
- **Required**: False
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: The value must be a valid IP address
|
||||
- **Description**: IP address of the ETL source.
|
||||
### Key: etl_sources.[].ws_port
|
||||
- **Required**: False
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: The minimum value is `1`. The maximum value is `65535
|
||||
- **Description**: WebSocket port of the ETL source.
|
||||
### Key: etl_sources.[].grpc_port
|
||||
- **Required**: False
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: The minimum value is `1`. The maximum value is `65535
|
||||
- **Description**: gRPC port of the ETL source.
|
||||
### Key: forwarding.cache_timeout
|
||||
- **Required**: True
|
||||
- **Type**: double
|
||||
- **Default value**: 0
|
||||
- **Constraints**: The value must be a positive double number
|
||||
- **Description**: Timeout duration for the forwarding cache used in Rippled communication.
|
||||
### Key: forwarding.request_timeout
|
||||
- **Required**: True
|
||||
- **Type**: double
|
||||
- **Default value**: 10
|
||||
- **Constraints**: The value must be a positive double number
|
||||
- **Description**: Timeout duration for the forwarding request used in Rippled communication.
|
||||
### Key: rpc.cache_timeout
|
||||
- **Required**: True
|
||||
- **Type**: double
|
||||
- **Default value**: 0
|
||||
- **Constraints**: The value must be a positive double number
|
||||
- **Description**: Timeout duration for RPC requests.
|
||||
### Key: num_markers
|
||||
- **Required**: False
|
||||
- **Type**: int
|
||||
- **Default value**: None
|
||||
- **Constraints**: The minimum value is `1`. The maximum value is `256`
|
||||
- **Description**: The number of markers is the number of coroutines to download the initial ledger
|
||||
### Key: dos_guard.whitelist.[]
|
||||
- **Required**: False
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: None
|
||||
- **Description**: List of IP addresses to whitelist for DOS protection.
|
||||
### Key: dos_guard.max_fetches
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 1000000
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: Maximum number of fetch operations allowed by DOS guard.
|
||||
### Key: dos_guard.max_connections
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 20
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: Maximum number of concurrent connections allowed by DOS guard.
|
||||
### Key: dos_guard.max_requests
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 20
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: Maximum number of requests allowed by DOS guard.
|
||||
### Key: dos_guard.sweep_interval
|
||||
- **Required**: True
|
||||
- **Type**: double
|
||||
- **Default value**: 1
|
||||
- **Constraints**: The value must be a positive double number
|
||||
- **Description**: Interval in seconds for DOS guard to sweep/clear its state.
|
||||
### Key: workers
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: The number of available CPU cores.
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: Number of threads to process RPC requests.
|
||||
### Key: server.ip
|
||||
- **Required**: True
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: The value must be a valid IP address
|
||||
- **Description**: IP address of the Clio HTTP server.
|
||||
### Key: server.port
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: None
|
||||
- **Constraints**: The minimum value is `1`. The maximum value is `65535
|
||||
- **Description**: Port number of the Clio HTTP server.
|
||||
### Key: server.max_queue_size
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 0
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: Maximum size of the server's request queue. Value of 0 is no limit.
|
||||
### Key: server.local_admin
|
||||
- **Required**: False
|
||||
- **Type**: boolean
|
||||
- **Default value**: None
|
||||
- **Constraints**: None
|
||||
- **Description**: Indicates if the server should run with admin privileges. Only one of local_admin or admin_password can be set.
|
||||
### Key: server.admin_password
|
||||
- **Required**: False
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: None
|
||||
- **Description**: Password for Clio admin-only APIs. Only one of local_admin or admin_password can be set.
|
||||
### Key: server.processing_policy
|
||||
- **Required**: True
|
||||
- **Type**: string
|
||||
- **Default value**: parallel
|
||||
- **Constraints**: The value must be one of the following: `parallel, sequent`
|
||||
- **Description**: Could be "sequent" or "parallel". For the sequent policy, requests from a single client
|
||||
connection are processed one by one, with the next request read only after the previous one is processed. For the parallel policy, Clio will accept
|
||||
all requests and process them in parallel, sending a reply for each request as soon as it is ready.
|
||||
### Key: server.parallel_requests_limit
|
||||
- **Required**: False
|
||||
- **Type**: int
|
||||
- **Default value**: None
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
|
||||
- **Description**: Optional parameter, used only if processing_strategy `parallel`. It limits the number of requests for a single client connection that are processed in parallel. If not specified, the limit is infinite.
|
||||
### Key: server.ws_max_sending_queue_size
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 1500
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: Maximum size of the websocket sending queue.
|
||||
### Key: prometheus.enabled
|
||||
- **Required**: True
|
||||
- **Type**: boolean
|
||||
- **Default value**: False
|
||||
- **Constraints**: None
|
||||
- **Description**: Enable or disable Prometheus metrics.
|
||||
### Key: prometheus.compress_reply
|
||||
- **Required**: True
|
||||
- **Type**: boolean
|
||||
- **Default value**: False
|
||||
- **Constraints**: None
|
||||
- **Description**: Enable or disable compression of Prometheus responses.
|
||||
### Key: io_threads
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 2
|
||||
- **Constraints**: The minimum value is `1`. The maximum value is `65535`
|
||||
- **Description**: Number of I/O threads. Value cannot be less than 1
|
||||
### Key: subscription_workers
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 1
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: The number of worker threads or processes that are responsible for managing and processing subscription-based tasks from rippled
|
||||
### Key: graceful_period
|
||||
- **Required**: True
|
||||
- **Type**: double
|
||||
- **Default value**: 10
|
||||
- **Constraints**: The value must be a positive double number
|
||||
- **Description**: Number of milliseconds server will wait to shutdown gracefully.
|
||||
### Key: cache.num_diffs
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 32
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
|
||||
- **Description**: Number of diffs to cache. For more info, consult readme.md in etc
|
||||
### Key: cache.num_markers
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 48
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
|
||||
- **Description**: Number of markers to cache.
|
||||
### Key: cache.num_cursors_from_diff
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 0
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
|
||||
- **Description**: Num of cursors that are different.
|
||||
### Key: cache.num_cursors_from_account
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 0
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
|
||||
- **Description**: Number of cursors from an account.
|
||||
### Key: cache.page_fetch_size
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 512
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
|
||||
- **Description**: Page fetch size for cache operations.
|
||||
### Key: cache.load
|
||||
- **Required**: True
|
||||
- **Type**: string
|
||||
- **Default value**: async
|
||||
- **Constraints**: The value must be one of the following: `sync, async, none`
|
||||
- **Description**: Cache loading strategy ('sync' or 'async').
|
||||
### Key: log_channels.[].channel
|
||||
- **Required**: False
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: The value must be one of the following: `General, WebServer, Backend, RPC, ETL, Subscriptions, Performance, Migration`
|
||||
- **Description**: Name of the log channel.'RPC', 'ETL', and 'Performance'
|
||||
### Key: log_channels.[].log_level
|
||||
- **Required**: False
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: The value must be one of the following: `trace, debug, info, warning, error, fatal, count`
|
||||
- **Description**: Log level for the specific log channel.`warning`, `error`, `fatal`
|
||||
### Key: log_level
|
||||
- **Required**: True
|
||||
- **Type**: string
|
||||
- **Default value**: info
|
||||
- **Constraints**: The value must be one of the following: `trace, debug, info, warning, error, fatal, count`
|
||||
- **Description**: General logging level of Clio. This level will be applied to all log channels that do not have an explicitly defined logging level.
|
||||
### Key: log_format
|
||||
- **Required**: True
|
||||
- **Type**: string
|
||||
- **Default value**: %TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% %Message%
|
||||
- **Constraints**: None
|
||||
- **Description**: Format string for log messages.
|
||||
### Key: log_to_console
|
||||
- **Required**: True
|
||||
- **Type**: boolean
|
||||
- **Default value**: True
|
||||
- **Constraints**: None
|
||||
- **Description**: Enable or disable logging to console.
|
||||
### Key: log_directory
|
||||
- **Required**: False
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: None
|
||||
- **Description**: Directory path for log files.
|
||||
### Key: log_rotation_size
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 2048
|
||||
- **Constraints**: The minimum value is `1`. The maximum value is `4294967295`
|
||||
- **Description**: Log rotation size in megabytes. When the log file reaches this particular size, a new log file starts.
|
||||
### Key: log_directory_max_size
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 51200
|
||||
- **Constraints**: The minimum value is `1`. The maximum value is `4294967295`
|
||||
- **Description**: Maximum size of the log directory in megabytes.
|
||||
### Key: log_rotation_hour_interval
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 12
|
||||
- **Constraints**: The minimum value is `1`. The maximum value is `4294967295`
|
||||
- **Description**: Interval in hours for log rotation. If the current log file reaches this value in logging, a new log file starts.
|
||||
### Key: log_tag_style
|
||||
- **Required**: True
|
||||
- **Type**: string
|
||||
- **Default value**: none
|
||||
- **Constraints**: The value must be one of the following: `int, uint, null, none, uuid`
|
||||
- **Description**: Style for log tags.
|
||||
### Key: extractor_threads
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 1
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: Number of extractor threads.
|
||||
### Key: read_only
|
||||
- **Required**: True
|
||||
- **Type**: boolean
|
||||
- **Default value**: True
|
||||
- **Constraints**: None
|
||||
- **Description**: Indicates if the server should have read-only privileges.
|
||||
### Key: txn_threshold
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 0
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
|
||||
- **Description**: Transaction threshold value.
|
||||
### Key: start_sequence
|
||||
- **Required**: False
|
||||
- **Type**: int
|
||||
- **Default value**: None
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: Starting ledger index.
|
||||
### Key: finish_sequence
|
||||
- **Required**: False
|
||||
- **Type**: int
|
||||
- **Default value**: None
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: Ending ledger index.
|
||||
### Key: ssl_cert_file
|
||||
- **Required**: False
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: None
|
||||
- **Description**: Path to the SSL certificate file.
|
||||
### Key: ssl_key_file
|
||||
- **Required**: False
|
||||
- **Type**: string
|
||||
- **Default value**: None
|
||||
- **Constraints**: None
|
||||
- **Description**: Path to the SSL key file.
|
||||
### Key: api_version.default
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 1
|
||||
- **Constraints**: The minimum value is `1`. The maximum value is `3`
|
||||
- **Description**: Default API version Clio will run on.
|
||||
### Key: api_version.min
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 1
|
||||
- **Constraints**: The minimum value is `1`. The maximum value is `3`
|
||||
- **Description**: Minimum API version.
|
||||
### Key: api_version.max
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 3
|
||||
- **Constraints**: The minimum value is `1`. The maximum value is `3`
|
||||
- **Description**: Maximum API version.
|
||||
### Key: migration.full_scan_threads
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 2
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: The number of threads used to scan the table.
|
||||
### Key: migration.full_scan_jobs
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 4
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: The number of coroutines used to scan the table.
|
||||
### Key: migration.cursors_per_job
|
||||
- **Required**: True
|
||||
- **Type**: int
|
||||
- **Default value**: 100
|
||||
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
|
||||
- **Description**: The number of cursors each coroutine will scan.
|
||||
|
||||
@@ -1,5 +1,9 @@
|
||||
# Example of clio monitoring infrastructure
|
||||
|
||||
> [!WARNING]
|
||||
> This is only an example of Grafana dashboard for Clio. It was created for demonstration purposes only and may contain errors.
|
||||
> Clio team would not recommend to relate on data from this dashboard or use it for monitoring your Clio instances.
|
||||
|
||||
This directory contains an example of docker based infrastructure to collect and visualise metrics from clio.
|
||||
|
||||
The structure of the directory:
|
||||
|
||||
@@ -20,7 +20,6 @@
|
||||
"graphTooltip": 0,
|
||||
"id": 1,
|
||||
"links": [],
|
||||
"liveNow": false,
|
||||
"panels": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -79,6 +78,7 @@
|
||||
"graphMode": "area",
|
||||
"justifyMode": "auto",
|
||||
"orientation": "auto",
|
||||
"percentChangeColorMode": "standard",
|
||||
"reduceOptions": {
|
||||
"calcs": [
|
||||
"lastNotNull"
|
||||
@@ -90,7 +90,7 @@
|
||||
"textMode": "auto",
|
||||
"wideLayout": true
|
||||
},
|
||||
"pluginVersion": "10.4.0",
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -159,6 +159,7 @@
|
||||
"graphMode": "area",
|
||||
"justifyMode": "auto",
|
||||
"orientation": "auto",
|
||||
"percentChangeColorMode": "standard",
|
||||
"reduceOptions": {
|
||||
"calcs": [
|
||||
"lastNotNull"
|
||||
@@ -170,7 +171,7 @@
|
||||
"textMode": "auto",
|
||||
"wideLayout": true
|
||||
},
|
||||
"pluginVersion": "10.4.0",
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -243,6 +244,7 @@
|
||||
"graphMode": "area",
|
||||
"justifyMode": "auto",
|
||||
"orientation": "auto",
|
||||
"percentChangeColorMode": "standard",
|
||||
"reduceOptions": {
|
||||
"calcs": [
|
||||
"lastNotNull"
|
||||
@@ -254,7 +256,7 @@
|
||||
"textMode": "auto",
|
||||
"wideLayout": true
|
||||
},
|
||||
"pluginVersion": "10.4.0",
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -327,6 +329,7 @@
|
||||
"graphMode": "area",
|
||||
"justifyMode": "auto",
|
||||
"orientation": "auto",
|
||||
"percentChangeColorMode": "standard",
|
||||
"reduceOptions": {
|
||||
"calcs": [
|
||||
"lastNotNull"
|
||||
@@ -338,7 +341,7 @@
|
||||
"textMode": "auto",
|
||||
"wideLayout": true
|
||||
},
|
||||
"pluginVersion": "10.4.0",
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -373,6 +376,7 @@
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"barWidthFactor": 0.6,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 0,
|
||||
"gradientMode": "none",
|
||||
@@ -435,6 +439,7 @@
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -491,6 +496,7 @@
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"barWidthFactor": 0.6,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 0,
|
||||
"gradientMode": "none",
|
||||
@@ -552,6 +558,7 @@
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -586,6 +593,7 @@
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"barWidthFactor": 0.6,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 0,
|
||||
"gradientMode": "none",
|
||||
@@ -647,6 +655,7 @@
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -681,6 +690,7 @@
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"barWidthFactor": 0.6,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 0,
|
||||
"gradientMode": "none",
|
||||
@@ -742,6 +752,7 @@
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -776,6 +787,7 @@
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"barWidthFactor": 0.6,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 0,
|
||||
"gradientMode": "none",
|
||||
@@ -837,6 +849,7 @@
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -872,6 +885,7 @@
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"barWidthFactor": 0.6,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 0,
|
||||
"gradientMode": "none",
|
||||
@@ -934,6 +948,7 @@
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -941,7 +956,7 @@
|
||||
"uid": "PBFA97CFB590B2093"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "rpc_method_duration_us{job=\"clio\"}",
|
||||
"expr": "sum by (method) (increase(rpc_method_duration_us[$__interval]))\n / \n sum by (method,) (increase(rpc_method_total_number{status=\"finished\"}[$__interval]))",
|
||||
"instant": false,
|
||||
"legendFormat": "{{method}}",
|
||||
"range": true,
|
||||
@@ -968,6 +983,7 @@
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"barWidthFactor": 0.6,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 0,
|
||||
"gradientMode": "none",
|
||||
@@ -1029,6 +1045,7 @@
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -1063,6 +1080,7 @@
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"barWidthFactor": 0.6,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 0,
|
||||
"gradientMode": "none",
|
||||
@@ -1124,6 +1142,7 @@
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -1158,6 +1177,7 @@
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"barWidthFactor": 0.6,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 10,
|
||||
"gradientMode": "none",
|
||||
@@ -1223,7 +1243,7 @@
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "10.2.0",
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -1296,6 +1316,7 @@
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"barWidthFactor": 0.6,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 0,
|
||||
"gradientMode": "none",
|
||||
@@ -1357,6 +1378,7 @@
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -1404,6 +1426,7 @@
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"barWidthFactor": 0.6,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 0,
|
||||
"gradientMode": "none",
|
||||
@@ -1465,6 +1488,7 @@
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -1510,6 +1534,7 @@
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"barWidthFactor": 0.6,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 0,
|
||||
"gradientMode": "none",
|
||||
@@ -1572,6 +1597,7 @@
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.4.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
@@ -1590,8 +1616,9 @@
|
||||
"type": "timeseries"
|
||||
}
|
||||
],
|
||||
"preload": false,
|
||||
"refresh": "5s",
|
||||
"schemaVersion": 39,
|
||||
"schemaVersion": 40,
|
||||
"tags": [],
|
||||
"templating": {
|
||||
"list": []
|
||||
|
||||
@@ -21,6 +21,7 @@
|
||||
|
||||
#include "migration/MigrationApplication.hpp"
|
||||
#include "util/build/Build.hpp"
|
||||
#include "util/newconfig/ConfigDescription.hpp"
|
||||
|
||||
#include <boost/program_options/options_description.hpp>
|
||||
#include <boost/program_options/parsers.hpp>
|
||||
@@ -29,6 +30,7 @@
|
||||
#include <boost/program_options/variables_map.hpp>
|
||||
|
||||
#include <cstdlib>
|
||||
#include <filesystem>
|
||||
#include <iostream>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
@@ -42,12 +44,13 @@ CliArgs::parse(int argc, char const* argv[])
|
||||
// clang-format off
|
||||
po::options_description description("Options");
|
||||
description.add_options()
|
||||
("help,h", "print help message and exit")
|
||||
("version,v", "print version and exit")
|
||||
("conf,c", po::value<std::string>()->default_value(kDEFAULT_CONFIG_PATH), "configuration file")
|
||||
("help,h", "Print help message and exit")
|
||||
("version,v", "Print version and exit")
|
||||
("conf,c", po::value<std::string>()->default_value(kDEFAULT_CONFIG_PATH), "Configuration file")
|
||||
("ng-web-server,w", "Use ng-web-server")
|
||||
("migrate", po::value<std::string>(), "start migration helper")
|
||||
("migrate", po::value<std::string>(), "Start migration helper")
|
||||
("verify", "Checks the validity of config values")
|
||||
("config-description,d", po::value<std::string>(), "Generate config description markdown file")
|
||||
;
|
||||
// clang-format on
|
||||
po::positional_options_description positional;
|
||||
@@ -67,6 +70,17 @@ CliArgs::parse(int argc, char const* argv[])
|
||||
return Action{Action::Exit{EXIT_SUCCESS}};
|
||||
}
|
||||
|
||||
if (parsed.count("config-description") != 0u) {
|
||||
std::filesystem::path const filePath = parsed["config-description"].as<std::string>();
|
||||
|
||||
auto const res = util::config::ClioConfigDescription::generateConfigDescriptionToFile(filePath);
|
||||
if (res.has_value())
|
||||
return Action{Action::Exit{EXIT_SUCCESS}};
|
||||
|
||||
std::cerr << res.error().error << std::endl;
|
||||
return Action{Action::Exit{EXIT_FAILURE}};
|
||||
}
|
||||
|
||||
auto configPath = parsed["conf"].as<std::string>();
|
||||
|
||||
if (parsed.count("migrate") != 0u) {
|
||||
|
||||
@@ -23,6 +23,7 @@
|
||||
#include "app/WebHandlers.hpp"
|
||||
#include "data/AmendmentCenter.hpp"
|
||||
#include "data/BackendFactory.hpp"
|
||||
#include "data/LedgerCache.hpp"
|
||||
#include "etl/ETLService.hpp"
|
||||
#include "etl/LoadBalancer.hpp"
|
||||
#include "etl/NetworkValidatedLedgers.hpp"
|
||||
@@ -102,9 +103,12 @@ ClioApplication::run(bool const useNgWebServer)
|
||||
auto whitelistHandler = web::dosguard::WhitelistHandler{config_};
|
||||
auto dosGuard = web::dosguard::DOSGuard{config_, whitelistHandler};
|
||||
auto sweepHandler = web::dosguard::IntervalSweepHandler{config_, ioc, dosGuard};
|
||||
auto cache = data::LedgerCache{};
|
||||
|
||||
// Interface to the database
|
||||
auto backend = data::makeBackend(config_);
|
||||
auto backend = data::makeBackend(config_, cache);
|
||||
|
||||
auto const amendmentCenter = std::make_shared<data::AmendmentCenter const>(backend);
|
||||
|
||||
{
|
||||
auto const migrationInspector = migration::makeMigrationInspector(config_, backend);
|
||||
@@ -117,7 +121,7 @@ ClioApplication::run(bool const useNgWebServer)
|
||||
}
|
||||
|
||||
// Manages clients subscribed to streams
|
||||
auto subscriptions = feed::SubscriptionManager::makeSubscriptionManager(config_, backend);
|
||||
auto subscriptions = feed::SubscriptionManager::makeSubscriptionManager(config_, backend, amendmentCenter);
|
||||
|
||||
// Tracks which ledgers have been validated by the network
|
||||
auto ledgers = etl::NetworkValidatedLedgers::makeValidatedLedgers();
|
||||
@@ -133,7 +137,7 @@ ClioApplication::run(bool const useNgWebServer)
|
||||
|
||||
auto workQueue = rpc::WorkQueue::makeWorkQueue(config_);
|
||||
auto counters = rpc::Counters::makeCounters(workQueue);
|
||||
auto const amendmentCenter = std::make_shared<data::AmendmentCenter const>(backend);
|
||||
|
||||
auto const handlerProvider = std::make_shared<rpc::impl::ProductionHandlerProvider const>(
|
||||
config_, backend, subscriptions, balancer, etl, amendmentCenter, counters
|
||||
);
|
||||
|
||||
@@ -134,6 +134,9 @@ struct Amendments {
|
||||
REGISTER(Credentials);
|
||||
REGISTER(DynamicNFT);
|
||||
REGISTER(PermissionedDomains);
|
||||
REGISTER(fixInvalidTxFlags);
|
||||
REGISTER(fixFrozenLPTokenTransfer);
|
||||
REGISTER(DeepFreeze);
|
||||
|
||||
// Obsolete but supported by libxrpl
|
||||
REGISTER(CryptoConditionsSuite);
|
||||
|
||||
@@ -21,6 +21,7 @@
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/CassandraBackend.hpp"
|
||||
#include "data/LedgerCacheInterface.hpp"
|
||||
#include "data/cassandra/SettingsProvider.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
#include "util/newconfig/ConfigDefinition.hpp"
|
||||
@@ -38,10 +39,11 @@ namespace data {
|
||||
* @brief A factory function that creates the backend based on a config.
|
||||
*
|
||||
* @param config The clio config to use
|
||||
* @param cache The ledger cache to use
|
||||
* @return A shared_ptr<BackendInterface> with the selected implementation
|
||||
*/
|
||||
inline std::shared_ptr<BackendInterface>
|
||||
makeBackend(util::config::ClioConfigDefinition const& config)
|
||||
makeBackend(util::config::ClioConfigDefinition const& config, data::LedgerCacheInterface& cache)
|
||||
{
|
||||
static util::Logger const log{"Backend"}; // NOLINT(readability-identifier-naming)
|
||||
LOG(log.info()) << "Constructing BackendInterface";
|
||||
@@ -53,7 +55,9 @@ makeBackend(util::config::ClioConfigDefinition const& config)
|
||||
|
||||
if (boost::iequals(type, "cassandra")) {
|
||||
auto const cfg = config.getObject("database." + type);
|
||||
backend = std::make_shared<data::cassandra::CassandraBackend>(data::cassandra::SettingsProvider{cfg}, readOnly);
|
||||
backend = std::make_shared<data::cassandra::CassandraBackend>(
|
||||
data::cassandra::SettingsProvider{cfg}, cache, readOnly
|
||||
);
|
||||
}
|
||||
|
||||
if (!backend)
|
||||
|
||||
@@ -87,7 +87,7 @@ BackendInterface::fetchLedgerObject(
|
||||
boost::asio::yield_context yield
|
||||
) const
|
||||
{
|
||||
auto obj = cache_.get(key, sequence);
|
||||
auto obj = cache_.get().get(key, sequence);
|
||||
if (obj) {
|
||||
LOG(gLog.trace()) << "Cache hit - " << ripple::strHex(key);
|
||||
return obj;
|
||||
@@ -126,7 +126,7 @@ BackendInterface::fetchLedgerObjects(
|
||||
results.resize(keys.size());
|
||||
std::vector<ripple::uint256> misses;
|
||||
for (size_t i = 0; i < keys.size(); ++i) {
|
||||
auto obj = cache_.get(keys[i], sequence);
|
||||
auto obj = cache_.get().get(keys[i], sequence);
|
||||
if (obj) {
|
||||
results[i] = *obj;
|
||||
} else {
|
||||
@@ -156,7 +156,7 @@ BackendInterface::fetchSuccessorKey(
|
||||
boost::asio::yield_context yield
|
||||
) const
|
||||
{
|
||||
auto succ = cache_.getSuccessor(key, ledgerSequence);
|
||||
auto succ = cache_.get().getSuccessor(key, ledgerSequence);
|
||||
if (succ) {
|
||||
LOG(gLog.trace()) << "Cache hit - " << ripple::strHex(key);
|
||||
} else {
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
#pragma once
|
||||
|
||||
#include "data/DBHelpers.hpp"
|
||||
#include "data/LedgerCache.hpp"
|
||||
#include "data/LedgerCacheInterface.hpp"
|
||||
#include "data/Types.hpp"
|
||||
#include "etl/CorruptionDetector.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
@@ -40,6 +40,7 @@
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
#include <exception>
|
||||
#include <functional>
|
||||
#include <optional>
|
||||
#include <shared_mutex>
|
||||
#include <string>
|
||||
@@ -139,18 +140,27 @@ class BackendInterface {
|
||||
protected:
|
||||
mutable std::shared_mutex rngMtx_;
|
||||
std::optional<LedgerRange> range_;
|
||||
LedgerCache cache_;
|
||||
std::optional<etl::CorruptionDetector<LedgerCache>> corruptionDetector_;
|
||||
std::reference_wrapper<LedgerCacheInterface> cache_;
|
||||
std::optional<etl::CorruptionDetector> corruptionDetector_;
|
||||
|
||||
public:
|
||||
BackendInterface() = default;
|
||||
/**
|
||||
* @brief Construct a new backend interface instance.
|
||||
*
|
||||
* @param cache The ledger cache to use
|
||||
*/
|
||||
BackendInterface(LedgerCacheInterface& cache) : cache_{cache}
|
||||
{
|
||||
}
|
||||
virtual ~BackendInterface() = default;
|
||||
|
||||
// TODO: Remove this hack. Cache should not be exposed thru BackendInterface
|
||||
// TODO https://github.com/XRPLF/clio/issues/1956: Remove this hack once old ETL is removed.
|
||||
// Cache should not be exposed thru BackendInterface
|
||||
|
||||
/**
|
||||
* @return Immutable cache
|
||||
*/
|
||||
LedgerCache const&
|
||||
LedgerCacheInterface const&
|
||||
cache() const
|
||||
{
|
||||
return cache_;
|
||||
@@ -159,7 +169,7 @@ public:
|
||||
/**
|
||||
* @return Mutable cache
|
||||
*/
|
||||
LedgerCache&
|
||||
LedgerCacheInterface&
|
||||
cache()
|
||||
{
|
||||
return cache_;
|
||||
@@ -171,7 +181,7 @@ public:
|
||||
* @param detector The corruption detector to set
|
||||
*/
|
||||
void
|
||||
setCorruptionDetector(etl::CorruptionDetector<LedgerCache> detector)
|
||||
setCorruptionDetector(etl::CorruptionDetector detector)
|
||||
{
|
||||
corruptionDetector_ = std::move(detector);
|
||||
}
|
||||
@@ -638,6 +648,14 @@ public:
|
||||
virtual void
|
||||
writeAccountTransactions(std::vector<AccountTransactionsData> data) = 0;
|
||||
|
||||
/**
|
||||
* @brief Write a new account transaction.
|
||||
*
|
||||
* @param record An object representing the account transaction
|
||||
*/
|
||||
virtual void
|
||||
writeAccountTransaction(AccountTransactionsData record) = 0;
|
||||
|
||||
/**
|
||||
* @brief Write NFTs transactions.
|
||||
*
|
||||
|
||||
@@ -21,6 +21,7 @@
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/DBHelpers.hpp"
|
||||
#include "data/LedgerCacheInterface.hpp"
|
||||
#include "data/Types.hpp"
|
||||
#include "data/cassandra/Concepts.hpp"
|
||||
#include "data/cassandra/Handle.hpp"
|
||||
@@ -36,6 +37,7 @@
|
||||
#include <boost/asio/spawn.hpp>
|
||||
#include <boost/json/object.hpp>
|
||||
#include <cassandra.h>
|
||||
#include <fmt/core.h>
|
||||
#include <xrpl/basics/Blob.h>
|
||||
#include <xrpl/basics/base_uint.h>
|
||||
#include <xrpl/basics/strHex.h>
|
||||
@@ -44,6 +46,7 @@
|
||||
#include <xrpl/protocol/LedgerHeader.h>
|
||||
#include <xrpl/protocol/nft.h>
|
||||
|
||||
#include <algorithm>
|
||||
#include <atomic>
|
||||
#include <chrono>
|
||||
#include <cstddef>
|
||||
@@ -87,10 +90,12 @@ public:
|
||||
* @brief Create a new cassandra/scylla backend instance.
|
||||
*
|
||||
* @param settingsProvider The settings provider to use
|
||||
* @param cache The ledger cache to use
|
||||
* @param readOnly Whether the database should be in readonly mode
|
||||
*/
|
||||
BasicCassandraBackend(SettingsProviderType settingsProvider, bool readOnly)
|
||||
: settingsProvider_{std::move(settingsProvider)}
|
||||
BasicCassandraBackend(SettingsProviderType settingsProvider, data::LedgerCacheInterface& cache, bool readOnly)
|
||||
: BackendInterface(cache)
|
||||
, settingsProvider_{std::move(settingsProvider)}
|
||||
, schema_{settingsProvider_}
|
||||
, handle_{settingsProvider_.getSettings()}
|
||||
, executor_{settingsProvider_.getSettings(), handle_}
|
||||
@@ -113,13 +118,24 @@ public:
|
||||
try {
|
||||
schema_.prepareStatements(handle_);
|
||||
} catch (std::runtime_error const& ex) {
|
||||
LOG(log_.error()) << "Failed to prepare the statements: " << ex.what() << "; readOnly: " << readOnly;
|
||||
throw;
|
||||
auto const error = fmt::format(
|
||||
"Failed to prepare the statements: {}; readOnly: {}. ReadOnly should be turned off or another Clio "
|
||||
"node with write access to DB should be started first.",
|
||||
ex.what(),
|
||||
readOnly
|
||||
);
|
||||
LOG(log_.error()) << error;
|
||||
throw std::runtime_error(error);
|
||||
}
|
||||
|
||||
LOG(log_.info()) << "Created (revamped) CassandraBackend";
|
||||
}
|
||||
|
||||
/*
|
||||
* @brief Move constructor is deleted because handle_ is shared by reference with executor
|
||||
*/
|
||||
BasicCassandraBackend(BasicCassandraBackend&&) = delete;
|
||||
|
||||
TransactionsAndCursor
|
||||
fetchAccountTransactions(
|
||||
ripple::AccountID const& account,
|
||||
@@ -891,19 +907,31 @@ public:
|
||||
statements.reserve(data.size() * 10); // assume 10 transactions avg
|
||||
|
||||
for (auto& record : data) {
|
||||
std::transform(
|
||||
std::begin(record.accounts),
|
||||
std::end(record.accounts),
|
||||
std::back_inserter(statements),
|
||||
[this, &record](auto&& account) {
|
||||
std::ranges::transform(record.accounts, std::back_inserter(statements), [this, &record](auto&& account) {
|
||||
return schema_->insertAccountTx.bind(
|
||||
std::forward<decltype(account)>(account),
|
||||
std::make_tuple(record.ledgerSequence, record.transactionIndex),
|
||||
record.txHash
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
executor_.write(std::move(statements));
|
||||
}
|
||||
|
||||
void
|
||||
writeAccountTransaction(AccountTransactionsData record) override
|
||||
{
|
||||
std::vector<Statement> statements;
|
||||
statements.reserve(record.accounts.size());
|
||||
|
||||
std::ranges::transform(record.accounts, std::back_inserter(statements), [this, &record](auto&& account) {
|
||||
return schema_->insertAccountTx.bind(
|
||||
std::forward<decltype(account)>(account),
|
||||
std::make_tuple(record.ledgerSequence, record.transactionIndex),
|
||||
record.txHash
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
executor_.write(std::move(statements));
|
||||
}
|
||||
@@ -914,7 +942,7 @@ public:
|
||||
std::vector<Statement> statements;
|
||||
statements.reserve(data.size());
|
||||
|
||||
std::transform(std::cbegin(data), std::cend(data), std::back_inserter(statements), [this](auto const& record) {
|
||||
std::ranges::transform(data, std::back_inserter(statements), [this](auto const& record) {
|
||||
return schema_->insertNFTTx.bind(
|
||||
record.tokenID, std::make_tuple(record.ledgerSequence, record.transactionIndex), record.txHash
|
||||
);
|
||||
@@ -984,7 +1012,7 @@ public:
|
||||
std::vector<Statement> statements;
|
||||
statements.reserve(data.size());
|
||||
for (auto [mptId, holder] : data)
|
||||
statements.push_back(schema_->insertMPTHolder.bind(std::move(mptId), std::move(holder)));
|
||||
statements.push_back(schema_->insertMPTHolder.bind(mptId, holder));
|
||||
|
||||
executor_.write(std::move(statements));
|
||||
}
|
||||
|
||||
@@ -54,7 +54,7 @@ struct AccountTransactionsData {
|
||||
* @param meta The transaction metadata
|
||||
* @param txHash The transaction hash
|
||||
*/
|
||||
AccountTransactionsData(ripple::TxMeta& meta, ripple::uint256 const& txHash)
|
||||
AccountTransactionsData(ripple::TxMeta const& meta, ripple::uint256 const& txHash)
|
||||
: accounts(meta.getAffectedAccounts())
|
||||
, ledgerSequence(meta.getLgrSeq())
|
||||
, transactionIndex(meta.getIndex())
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
#include "data/LedgerCache.hpp"
|
||||
|
||||
#include "data/Types.hpp"
|
||||
#include "etlng/Models.hpp"
|
||||
#include "util/Assert.hpp"
|
||||
|
||||
#include <xrpl/basics/base_uint.h>
|
||||
@@ -87,6 +88,42 @@ LedgerCache::update(std::vector<LedgerObject> const& objs, uint32_t seq, bool is
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
LedgerCache::update(std::vector<etlng::model::Object> const& objs, uint32_t seq)
|
||||
{
|
||||
if (disabled_)
|
||||
return;
|
||||
|
||||
std::scoped_lock const lck{mtx_};
|
||||
if (seq > latestSeq_) {
|
||||
ASSERT(
|
||||
seq == latestSeq_ + 1 || latestSeq_ == 0,
|
||||
"New sequence must be either next or first. seq = {}, latestSeq_ = {}",
|
||||
seq,
|
||||
latestSeq_
|
||||
);
|
||||
latestSeq_ = seq;
|
||||
}
|
||||
|
||||
deleted_.clear(); // previous update's deletes no longer needed
|
||||
|
||||
for (auto const& obj : objs) {
|
||||
if (!obj.data.empty()) {
|
||||
auto& e = map_[obj.key];
|
||||
if (seq > e.seq)
|
||||
e = {.seq = seq, .blob = obj.data};
|
||||
} else {
|
||||
if (map_.contains(obj.key))
|
||||
deleted_[obj.key] = map_[obj.key];
|
||||
|
||||
map_.erase(obj.key);
|
||||
if (!full_)
|
||||
deletes_.insert(obj.key);
|
||||
}
|
||||
}
|
||||
cv_.notify_all();
|
||||
}
|
||||
|
||||
std::optional<LedgerObject>
|
||||
LedgerCache::getSuccessor(ripple::uint256 const& key, uint32_t seq) const
|
||||
{
|
||||
@@ -139,6 +176,29 @@ LedgerCache::get(ripple::uint256 const& key, uint32_t seq) const
|
||||
return {e->second.blob};
|
||||
}
|
||||
|
||||
std::optional<Blob>
|
||||
LedgerCache::getDeleted(ripple::uint256 const& key, uint32_t seq) const
|
||||
{
|
||||
if (disabled_)
|
||||
return std::nullopt;
|
||||
|
||||
std::shared_lock const lck{mtx_};
|
||||
if (seq > latestSeq_)
|
||||
return std::nullopt;
|
||||
|
||||
++objectReqCounter_.get();
|
||||
|
||||
auto e = deleted_.find(key);
|
||||
if (e == deleted_.end())
|
||||
return std::nullopt;
|
||||
|
||||
if (seq < e->second.seq)
|
||||
return std::nullopt;
|
||||
|
||||
++objectHitCounter_.get();
|
||||
return {e->second.blob};
|
||||
}
|
||||
|
||||
void
|
||||
LedgerCache::setDisabled()
|
||||
{
|
||||
|
||||
@@ -19,7 +19,10 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/LedgerCacheInterface.hpp"
|
||||
#include "data/Types.hpp"
|
||||
#include "etlng/Models.hpp"
|
||||
#include "util/prometheus/Bool.hpp"
|
||||
#include "util/prometheus/Counter.hpp"
|
||||
#include "util/prometheus/Label.hpp"
|
||||
#include "util/prometheus/Prometheus.hpp"
|
||||
@@ -27,7 +30,6 @@
|
||||
#include <xrpl/basics/base_uint.h>
|
||||
#include <xrpl/basics/hardened_hash.h>
|
||||
|
||||
#include <atomic>
|
||||
#include <condition_variable>
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
@@ -43,7 +45,7 @@ namespace data {
|
||||
/**
|
||||
* @brief Cache for an entire ledger.
|
||||
*/
|
||||
class LedgerCache {
|
||||
class LedgerCache : public LedgerCacheInterface {
|
||||
struct CacheEntry {
|
||||
uint32_t seq = 0;
|
||||
Blob blob;
|
||||
@@ -72,120 +74,70 @@ class LedgerCache {
|
||||
)};
|
||||
|
||||
std::map<ripple::uint256, CacheEntry> map_;
|
||||
std::map<ripple::uint256, CacheEntry> deleted_;
|
||||
|
||||
mutable std::shared_mutex mtx_;
|
||||
std::condition_variable_any cv_;
|
||||
uint32_t latestSeq_ = 0;
|
||||
std::atomic_bool full_ = false;
|
||||
std::atomic_bool disabled_ = false;
|
||||
util::prometheus::Bool full_{PrometheusService::boolMetric(
|
||||
"ledger_cache_full",
|
||||
util::prometheus::Labels{},
|
||||
"Whether ledger cache full or not"
|
||||
)};
|
||||
util::prometheus::Bool disabled_{PrometheusService::boolMetric(
|
||||
"ledger_cache_disabled",
|
||||
util::prometheus::Labels{},
|
||||
"Whether ledger cache is disabled or not"
|
||||
)};
|
||||
|
||||
// temporary set to prevent background thread from writing already deleted data. not used when cache is full
|
||||
std::unordered_set<ripple::uint256, ripple::hardened_hash<>> deletes_;
|
||||
|
||||
public:
|
||||
/**
|
||||
* @brief Update the cache with new ledger objects.
|
||||
*
|
||||
* @param objs The ledger objects to update cache with
|
||||
* @param seq The sequence to update cache for
|
||||
* @param isBackground Should be set to true when writing old data from a background thread
|
||||
*/
|
||||
void
|
||||
update(std::vector<LedgerObject> const& objs, uint32_t seq, bool isBackground = false);
|
||||
update(std::vector<LedgerObject> const& objs, uint32_t seq, bool isBackground) override;
|
||||
|
||||
void
|
||||
update(std::vector<etlng::model::Object> const& objs, uint32_t seq) override;
|
||||
|
||||
/**
|
||||
* @brief Fetch a cached object by its key and sequence number.
|
||||
*
|
||||
* @param key The key to fetch for
|
||||
* @param seq The sequence to fetch for
|
||||
* @return If found in cache, will return the cached Blob; otherwise nullopt is returned
|
||||
*/
|
||||
std::optional<Blob>
|
||||
get(ripple::uint256 const& key, uint32_t seq) const;
|
||||
get(ripple::uint256 const& key, uint32_t seq) const override;
|
||||
|
||||
std::optional<Blob>
|
||||
getDeleted(ripple::uint256 const& key, uint32_t seq) const override;
|
||||
|
||||
/**
|
||||
* @brief Gets a cached successor.
|
||||
*
|
||||
* Note: This function always returns std::nullopt when @ref isFull() returns false.
|
||||
*
|
||||
* @param key The key to fetch for
|
||||
* @param seq The sequence to fetch for
|
||||
* @return If found in cache, will return the cached successor; otherwise nullopt is returned
|
||||
*/
|
||||
std::optional<LedgerObject>
|
||||
getSuccessor(ripple::uint256 const& key, uint32_t seq) const;
|
||||
getSuccessor(ripple::uint256 const& key, uint32_t seq) const override;
|
||||
|
||||
/**
|
||||
* @brief Gets a cached predcessor.
|
||||
*
|
||||
* Note: This function always returns std::nullopt when @ref isFull() returns false.
|
||||
*
|
||||
* @param key The key to fetch for
|
||||
* @param seq The sequence to fetch for
|
||||
* @return If found in cache, will return the cached predcessor; otherwise nullopt is returned
|
||||
*/
|
||||
std::optional<LedgerObject>
|
||||
getPredecessor(ripple::uint256 const& key, uint32_t seq) const;
|
||||
getPredecessor(ripple::uint256 const& key, uint32_t seq) const override;
|
||||
|
||||
/**
|
||||
* @brief Disables the cache.
|
||||
*/
|
||||
void
|
||||
setDisabled();
|
||||
setDisabled() override;
|
||||
|
||||
/**
|
||||
* @return true if the cache is disabled; false otherwise
|
||||
*/
|
||||
bool
|
||||
isDisabled() const;
|
||||
isDisabled() const override;
|
||||
|
||||
/**
|
||||
* @brief Sets the full flag to true.
|
||||
*
|
||||
* This is used when cache loaded in its entirety at startup of the application. This can be either loaded from DB,
|
||||
* populated together with initial ledger download (on first run) or downloaded from a peer node (specified in
|
||||
* config).
|
||||
*/
|
||||
void
|
||||
setFull();
|
||||
setFull() override;
|
||||
|
||||
/**
|
||||
* @return The latest ledger sequence for which cache is available.
|
||||
*/
|
||||
uint32_t
|
||||
latestLedgerSequence() const;
|
||||
latestLedgerSequence() const override;
|
||||
|
||||
/**
|
||||
* @return true if the cache has all data for the most recent ledger; false otherwise
|
||||
*/
|
||||
bool
|
||||
isFull() const;
|
||||
isFull() const override;
|
||||
|
||||
/**
|
||||
* @return The total size of the cache.
|
||||
*/
|
||||
size_t
|
||||
size() const;
|
||||
size() const override;
|
||||
|
||||
/**
|
||||
* @return A number representing the success rate of hitting an object in the cache versus missing it.
|
||||
*/
|
||||
float
|
||||
getObjectHitRate() const;
|
||||
getObjectHitRate() const override;
|
||||
|
||||
/**
|
||||
* @return A number representing the success rate of hitting a successor in the cache versus missing it.
|
||||
*/
|
||||
float
|
||||
getSuccessorHitRate() const;
|
||||
getSuccessorHitRate() const override;
|
||||
|
||||
/**
|
||||
* @brief Waits until the cache contains a specific sequence.
|
||||
*
|
||||
* @param seq The sequence to wait for
|
||||
*/
|
||||
void
|
||||
waitUntilCacheContainsSeq(uint32_t seq);
|
||||
waitUntilCacheContainsSeq(uint32_t seq) override;
|
||||
};
|
||||
|
||||
} // namespace data
|
||||
|
||||
173
src/data/LedgerCacheInterface.hpp
Normal file
173
src/data/LedgerCacheInterface.hpp
Normal file
@@ -0,0 +1,173 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2025, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/Types.hpp"
|
||||
#include "etlng/Models.hpp"
|
||||
|
||||
#include <xrpl/basics/base_uint.h>
|
||||
#include <xrpl/basics/hardened_hash.h>
|
||||
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
#include <optional>
|
||||
#include <vector>
|
||||
|
||||
namespace data {
|
||||
|
||||
/**
|
||||
* @brief Cache for an entire ledger.
|
||||
*/
|
||||
class LedgerCacheInterface {
|
||||
public:
|
||||
virtual ~LedgerCacheInterface() = default;
|
||||
LedgerCacheInterface() = default;
|
||||
LedgerCacheInterface(LedgerCacheInterface&&) = delete;
|
||||
LedgerCacheInterface(LedgerCacheInterface const&) = delete;
|
||||
LedgerCacheInterface&
|
||||
operator=(LedgerCacheInterface&&) = delete;
|
||||
LedgerCacheInterface&
|
||||
operator=(LedgerCacheInterface const&) = delete;
|
||||
|
||||
/**
|
||||
* @brief Update the cache with new ledger objects.
|
||||
*
|
||||
* @param objs The ledger objects to update cache with
|
||||
* @param seq The sequence to update cache for
|
||||
* @param isBackground Should be set to true when writing old data from a background thread
|
||||
*/
|
||||
virtual void
|
||||
update(std::vector<LedgerObject> const& objs, uint32_t seq, bool isBackground = false) = 0;
|
||||
|
||||
/**
|
||||
* @brief Update the cache with new ledger objects.
|
||||
*
|
||||
* @param objs The ledger objects to update cache with
|
||||
* @param seq The sequence to update cache for
|
||||
*/
|
||||
virtual void
|
||||
update(std::vector<etlng::model::Object> const& objs, uint32_t seq) = 0;
|
||||
|
||||
/**
|
||||
* @brief Fetch a cached object by its key and sequence number.
|
||||
*
|
||||
* @param key The key to fetch for
|
||||
* @param seq The sequence to fetch for
|
||||
* @return If found in cache, will return the cached Blob; otherwise nullopt is returned
|
||||
*/
|
||||
virtual std::optional<Blob>
|
||||
get(ripple::uint256 const& key, uint32_t seq) const = 0;
|
||||
|
||||
/**
|
||||
* @brief Fetch a recently deleted object by its key and sequence number.
|
||||
*
|
||||
* @param key The key to fetch for
|
||||
* @param seq The sequence to fetch for
|
||||
* @return If found in deleted cache, will return the cached Blob; otherwise nullopt is returned
|
||||
*/
|
||||
virtual std::optional<Blob>
|
||||
getDeleted(ripple::uint256 const& key, uint32_t seq) const = 0;
|
||||
|
||||
/**
|
||||
* @brief Gets a cached successor.
|
||||
*
|
||||
* Note: This function always returns std::nullopt when @ref isFull() returns false.
|
||||
*
|
||||
* @param key The key to fetch for
|
||||
* @param seq The sequence to fetch for
|
||||
* @return If found in cache, will return the cached successor; otherwise nullopt is returned
|
||||
*/
|
||||
virtual std::optional<LedgerObject>
|
||||
getSuccessor(ripple::uint256 const& key, uint32_t seq) const = 0;
|
||||
|
||||
/**
|
||||
* @brief Gets a cached predcessor.
|
||||
*
|
||||
* Note: This function always returns std::nullopt when @ref isFull() returns false.
|
||||
*
|
||||
* @param key The key to fetch for
|
||||
* @param seq The sequence to fetch for
|
||||
* @return If found in cache, will return the cached predcessor; otherwise nullopt is returned
|
||||
*/
|
||||
virtual std::optional<LedgerObject>
|
||||
getPredecessor(ripple::uint256 const& key, uint32_t seq) const = 0;
|
||||
|
||||
/**
|
||||
* @brief Disables the cache.
|
||||
*/
|
||||
virtual void
|
||||
setDisabled() = 0;
|
||||
|
||||
/**
|
||||
* @return true if the cache is disabled; false otherwise
|
||||
*/
|
||||
virtual bool
|
||||
isDisabled() const = 0;
|
||||
|
||||
/**
|
||||
* @brief Sets the full flag to true.
|
||||
*
|
||||
* This is used when cache loaded in its entirety at startup of the application. This can be either loaded from DB,
|
||||
* populated together with initial ledger download (on first run) or downloaded from a peer node (specified in
|
||||
* config).
|
||||
*/
|
||||
virtual void
|
||||
setFull() = 0;
|
||||
|
||||
/**
|
||||
* @return The latest ledger sequence for which cache is available.
|
||||
*/
|
||||
virtual uint32_t
|
||||
latestLedgerSequence() const = 0;
|
||||
|
||||
/**
|
||||
* @return true if the cache has all data for the most recent ledger; false otherwise
|
||||
*/
|
||||
virtual bool
|
||||
isFull() const = 0;
|
||||
|
||||
/**
|
||||
* @return The total size of the cache.
|
||||
*/
|
||||
virtual size_t
|
||||
size() const = 0;
|
||||
|
||||
/**
|
||||
* @return A number representing the success rate of hitting an object in the cache versus missing it.
|
||||
*/
|
||||
virtual float
|
||||
getObjectHitRate() const = 0;
|
||||
|
||||
/**
|
||||
* @return A number representing the success rate of hitting a successor in the cache versus missing it.
|
||||
*/
|
||||
virtual float
|
||||
getSuccessorHitRate() const = 0;
|
||||
|
||||
/**
|
||||
* @brief Waits until the cache contains a specific sequence.
|
||||
*
|
||||
* @param seq The sequence to wait for
|
||||
*/
|
||||
virtual void
|
||||
waitUntilCacheContainsSeq(uint32_t seq) = 0;
|
||||
};
|
||||
|
||||
} // namespace data
|
||||
@@ -20,6 +20,7 @@
|
||||
#pragma once
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/LedgerCacheInterface.hpp"
|
||||
#include "etl/CacheLoaderSettings.hpp"
|
||||
#include "etl/impl/CacheLoader.hpp"
|
||||
#include "etl/impl/CursorFromAccountProvider.hpp"
|
||||
@@ -44,13 +45,13 @@ namespace etl {
|
||||
* @tparam CursorProviderType The type of the cursor provider to use
|
||||
* @tparam ExecutionContextType The type of the execution context to use
|
||||
*/
|
||||
template <typename CacheType, typename ExecutionContextType = util::async::CoroExecutionContext>
|
||||
template <typename ExecutionContextType = util::async::CoroExecutionContext>
|
||||
class CacheLoader {
|
||||
using CacheLoaderType = impl::CacheLoaderImpl<CacheType>;
|
||||
using CacheLoaderType = impl::CacheLoaderImpl<data::LedgerCacheInterface>;
|
||||
|
||||
util::Logger log_{"ETL"};
|
||||
std::shared_ptr<BackendInterface> backend_;
|
||||
std::reference_wrapper<CacheType> cache_;
|
||||
std::reference_wrapper<data::LedgerCacheInterface> cache_;
|
||||
|
||||
CacheLoaderSettings settings_;
|
||||
ExecutionContextType ctx_;
|
||||
@@ -67,7 +68,7 @@ public:
|
||||
CacheLoader(
|
||||
util::config::ClioConfigDefinition const& config,
|
||||
std::shared_ptr<BackendInterface> const& backend,
|
||||
CacheType& cache
|
||||
data::LedgerCacheInterface& cache
|
||||
)
|
||||
: backend_{backend}, cache_{cache}, settings_{makeCacheLoaderSettings(config)}, ctx_{settings_.numThreads}
|
||||
{
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/LedgerCacheInterface.hpp"
|
||||
#include "etl/SystemState.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
|
||||
@@ -31,10 +32,9 @@ namespace etl {
|
||||
*
|
||||
* @tparam CacheType The type of the cache to disable on corruption
|
||||
*/
|
||||
template <typename CacheType>
|
||||
class CorruptionDetector {
|
||||
std::reference_wrapper<SystemState> state_;
|
||||
std::reference_wrapper<CacheType> cache_;
|
||||
std::reference_wrapper<data::LedgerCacheInterface> cache_;
|
||||
|
||||
util::Logger log_{"ETL"};
|
||||
|
||||
@@ -45,7 +45,8 @@ public:
|
||||
* @param state The system state
|
||||
* @param cache The cache to disable on corruption
|
||||
*/
|
||||
CorruptionDetector(SystemState& state, CacheType& cache) : state_{std::ref(state)}, cache_{std::ref(cache)}
|
||||
CorruptionDetector(SystemState& state, data::LedgerCacheInterface& cache)
|
||||
: state_{std::ref(state)}, cache_{std::ref(cache)}
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
@@ -20,7 +20,6 @@
|
||||
#include "etl/ETLService.hpp"
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/LedgerCache.hpp"
|
||||
#include "etl/CorruptionDetector.hpp"
|
||||
#include "etl/NetworkValidatedLedgersInterface.hpp"
|
||||
#include "feed/SubscriptionManagerInterface.hpp"
|
||||
@@ -285,6 +284,6 @@ ETLService::ETLService(
|
||||
txnThreshold_ = config.get<std::size_t>("txn_threshold");
|
||||
|
||||
// This should probably be done in the backend factory but we don't have state available until here
|
||||
backend_->setCorruptionDetector(CorruptionDetector<data::LedgerCache>{state_, backend->cache()});
|
||||
backend_->setCorruptionDetector(CorruptionDetector{state_, backend->cache()});
|
||||
}
|
||||
} // namespace etl
|
||||
|
||||
@@ -20,7 +20,6 @@
|
||||
#pragma once
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/LedgerCache.hpp"
|
||||
#include "etl/CacheLoader.hpp"
|
||||
#include "etl/ETLState.hpp"
|
||||
#include "etl/LoadBalancer.hpp"
|
||||
@@ -86,12 +85,11 @@ class ETLService : public ETLServiceTag {
|
||||
// TODO: make these template parameters in ETLService
|
||||
using LoadBalancerType = LoadBalancer;
|
||||
using DataPipeType = etl::impl::ExtractionDataPipe<org::xrpl::rpc::v1::GetLedgerResponse>;
|
||||
using CacheType = data::LedgerCache;
|
||||
using CacheLoaderType = etl::CacheLoader<CacheType>;
|
||||
using CacheLoaderType = etl::CacheLoader<>;
|
||||
using LedgerFetcherType = etl::impl::LedgerFetcher<LoadBalancerType>;
|
||||
using ExtractorType = etl::impl::Extractor<DataPipeType, LedgerFetcherType>;
|
||||
using LedgerLoaderType = etl::impl::LedgerLoader<LoadBalancerType, LedgerFetcherType>;
|
||||
using LedgerPublisherType = etl::impl::LedgerPublisher<CacheType>;
|
||||
using LedgerPublisherType = etl::impl::LedgerPublisher;
|
||||
using AmendmentBlockHandlerType = etl::impl::AmendmentBlockHandler;
|
||||
using TransformerType =
|
||||
etl::impl::Transformer<DataPipeType, LedgerLoaderType, LedgerPublisherType, AmendmentBlockHandlerType>;
|
||||
@@ -138,6 +136,11 @@ public:
|
||||
std::shared_ptr<NetworkValidatedLedgersInterface> ledgers
|
||||
);
|
||||
|
||||
/**
|
||||
* @brief Move constructor is deleted because ETL service shares its fields by reference
|
||||
*/
|
||||
ETLService(ETLService&&) = delete;
|
||||
|
||||
/**
|
||||
* @brief A factory function to spawn new ETLService instances.
|
||||
*
|
||||
|
||||
@@ -31,15 +31,12 @@
|
||||
|
||||
namespace etl {
|
||||
|
||||
std::optional<ETLState>
|
||||
tag_invoke(boost::json::value_to_tag<std::optional<ETLState>>, boost::json::value const& jv)
|
||||
ETLState
|
||||
tag_invoke(boost::json::value_to_tag<ETLState>, boost::json::value const& jv)
|
||||
{
|
||||
ETLState state;
|
||||
auto const& jsonObject = jv.as_object();
|
||||
|
||||
if (jsonObject.contains(JS(error)))
|
||||
return std::nullopt;
|
||||
|
||||
if (jsonObject.contains(JS(result)) && jsonObject.at(JS(result)).as_object().contains(JS(info))) {
|
||||
auto const rippledInfo = jsonObject.at(JS(result)).as_object().at(JS(info)).as_object();
|
||||
if (rippledInfo.contains(JS(network_id)))
|
||||
|
||||
@@ -20,12 +20,14 @@
|
||||
#pragma once
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "rpc/JS.hpp"
|
||||
|
||||
#include <boost/json.hpp>
|
||||
#include <boost/json/conversion.hpp>
|
||||
#include <boost/json/object.hpp>
|
||||
#include <boost/json/value.hpp>
|
||||
#include <boost/json/value_to.hpp>
|
||||
#include <xrpl/protocol/jss.h>
|
||||
|
||||
#include <cstdint>
|
||||
#include <optional>
|
||||
@@ -54,8 +56,9 @@ struct ETLState {
|
||||
return std::nullopt;
|
||||
});
|
||||
|
||||
if (serverInfoRippled)
|
||||
return boost::json::value_to<std::optional<ETLState>>(boost::json::value(*serverInfoRippled));
|
||||
if (serverInfoRippled && not serverInfoRippled->contains(JS(error))) {
|
||||
return boost::json::value_to<ETLState>(boost::json::value(*serverInfoRippled));
|
||||
}
|
||||
|
||||
return std::nullopt;
|
||||
}
|
||||
@@ -67,7 +70,7 @@ struct ETLState {
|
||||
* @param jv The json value to convert
|
||||
* @return The ETLState
|
||||
*/
|
||||
std::optional<ETLState>
|
||||
tag_invoke(boost::json::value_to_tag<std::optional<ETLState>>, boost::json::value const& jv);
|
||||
ETLState
|
||||
tag_invoke(boost::json::value_to_tag<ETLState>, boost::json::value const& jv);
|
||||
|
||||
} // namespace etl
|
||||
|
||||
@@ -19,6 +19,8 @@
|
||||
|
||||
#include "etl/NetworkValidatedLedgers.hpp"
|
||||
|
||||
#include <boost/signals2/connection.hpp>
|
||||
|
||||
#include <chrono>
|
||||
#include <cstdint>
|
||||
#include <memory>
|
||||
@@ -35,25 +37,27 @@ NetworkValidatedLedgers::makeValidatedLedgers()
|
||||
void
|
||||
NetworkValidatedLedgers::push(uint32_t idx)
|
||||
{
|
||||
std::lock_guard const lck(m_);
|
||||
if (!max_ || idx > *max_)
|
||||
max_ = idx;
|
||||
std::lock_guard const lck(mtx_);
|
||||
if (!latest_ || idx > *latest_)
|
||||
latest_ = idx;
|
||||
|
||||
notificationChannel_(idx);
|
||||
cv_.notify_all();
|
||||
}
|
||||
|
||||
std::optional<uint32_t>
|
||||
NetworkValidatedLedgers::getMostRecent()
|
||||
{
|
||||
std::unique_lock lck(m_);
|
||||
cv_.wait(lck, [this]() { return max_; });
|
||||
return max_;
|
||||
std::unique_lock lck(mtx_);
|
||||
cv_.wait(lck, [this]() { return latest_; });
|
||||
return latest_;
|
||||
}
|
||||
|
||||
bool
|
||||
NetworkValidatedLedgers::waitUntilValidatedByNetwork(uint32_t sequence, std::optional<uint32_t> maxWaitMs)
|
||||
{
|
||||
std::unique_lock lck(m_);
|
||||
auto pred = [sequence, this]() -> bool { return (max_ && sequence <= *max_); };
|
||||
std::unique_lock lck(mtx_);
|
||||
auto pred = [sequence, this]() -> bool { return (latest_ && sequence <= *latest_); };
|
||||
if (maxWaitMs) {
|
||||
cv_.wait_for(lck, std::chrono::milliseconds(*maxWaitMs));
|
||||
} else {
|
||||
@@ -62,4 +66,10 @@ NetworkValidatedLedgers::waitUntilValidatedByNetwork(uint32_t sequence, std::opt
|
||||
return pred();
|
||||
}
|
||||
|
||||
boost::signals2::scoped_connection
|
||||
NetworkValidatedLedgers::subscribe(SignalType::slot_type const& subscriber)
|
||||
{
|
||||
return notificationChannel_.connect(subscriber);
|
||||
}
|
||||
|
||||
} // namespace etl
|
||||
|
||||
@@ -21,6 +21,10 @@
|
||||
|
||||
#include "etl/NetworkValidatedLedgersInterface.hpp"
|
||||
|
||||
#include <boost/signals2/connection.hpp>
|
||||
#include <boost/signals2/signal.hpp>
|
||||
#include <boost/signals2/variadic_signal.hpp>
|
||||
|
||||
#include <condition_variable>
|
||||
#include <cstdint>
|
||||
#include <memory>
|
||||
@@ -38,12 +42,13 @@ namespace etl {
|
||||
* remains stopped for the rest of its lifetime.
|
||||
*/
|
||||
class NetworkValidatedLedgers : public NetworkValidatedLedgersInterface {
|
||||
// max sequence validated by network
|
||||
std::optional<uint32_t> max_;
|
||||
std::optional<uint32_t> latest_; // currently known latest sequence validated by network
|
||||
|
||||
mutable std::mutex m_;
|
||||
mutable std::mutex mtx_;
|
||||
std::condition_variable cv_;
|
||||
|
||||
SignalType notificationChannel_;
|
||||
|
||||
public:
|
||||
/**
|
||||
* @brief A factory function for NetworkValidatedLedgers
|
||||
@@ -81,6 +86,9 @@ public:
|
||||
*/
|
||||
bool
|
||||
waitUntilValidatedByNetwork(uint32_t sequence, std::optional<uint32_t> maxWaitMs = {}) final;
|
||||
|
||||
boost::signals2::scoped_connection
|
||||
subscribe(SignalType::slot_type const& subscriber) override;
|
||||
};
|
||||
|
||||
} // namespace etl
|
||||
|
||||
@@ -20,6 +20,10 @@
|
||||
/** @file */
|
||||
#pragma once
|
||||
|
||||
#include <boost/signals2/connection.hpp>
|
||||
#include <boost/signals2/signal.hpp>
|
||||
#include <boost/signals2/variadic_signal.hpp>
|
||||
|
||||
#include <cstdint>
|
||||
#include <optional>
|
||||
namespace etl {
|
||||
@@ -29,6 +33,8 @@ namespace etl {
|
||||
*/
|
||||
class NetworkValidatedLedgersInterface {
|
||||
public:
|
||||
using SignalType = boost::signals2::signal<void(uint32_t)>;
|
||||
|
||||
virtual ~NetworkValidatedLedgersInterface() = default;
|
||||
|
||||
/**
|
||||
@@ -46,7 +52,7 @@ public:
|
||||
*
|
||||
* @return Sequence of most recently validated ledger. empty optional if the datastructure has been stopped
|
||||
*/
|
||||
virtual std::optional<uint32_t>
|
||||
[[nodiscard]] virtual std::optional<uint32_t>
|
||||
getMostRecent() = 0;
|
||||
|
||||
/**
|
||||
@@ -59,6 +65,15 @@ public:
|
||||
*/
|
||||
virtual bool
|
||||
waitUntilValidatedByNetwork(uint32_t sequence, std::optional<uint32_t> maxWaitMs = {}) = 0;
|
||||
|
||||
/**
|
||||
* @brief Allows clients to get notified when a new validated ledger becomes known to Clio
|
||||
*
|
||||
* @param subscriber The slot to connect
|
||||
* @return A connection object that automatically disconnects the subscription once destroyed
|
||||
*/
|
||||
[[nodiscard]] virtual boost::signals2::scoped_connection
|
||||
subscribe(SignalType::slot_type const& subscriber) = 0;
|
||||
};
|
||||
|
||||
} // namespace etl
|
||||
|
||||
@@ -121,7 +121,7 @@ public:
|
||||
|
||||
LOG(log_.trace()) << "Inserting transaction = " << sttx.getTransactionID();
|
||||
|
||||
ripple::TxMeta txMeta{sttx.getTransactionID(), ledger.seq, txn.metadata_blob()};
|
||||
ripple::TxMeta const txMeta{sttx.getTransactionID(), ledger.seq, txn.metadata_blob()};
|
||||
|
||||
auto const [nftTxs, maybeNFT] = getNFTDataFromTx(txMeta, sttx);
|
||||
result.nfTokenTxData.insert(result.nfTokenTxData.end(), nftTxs.begin(), nftTxs.end());
|
||||
|
||||
@@ -21,6 +21,7 @@
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/DBHelpers.hpp"
|
||||
#include "data/LedgerCacheInterface.hpp"
|
||||
#include "data/Types.hpp"
|
||||
#include "etl/SystemState.hpp"
|
||||
#include "feed/SubscriptionManagerInterface.hpp"
|
||||
@@ -38,6 +39,7 @@
|
||||
#include <xrpl/protocol/STObject.h>
|
||||
#include <xrpl/protocol/Serializer.h>
|
||||
|
||||
#include <algorithm>
|
||||
#include <chrono>
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
@@ -64,14 +66,13 @@ namespace etl::impl {
|
||||
* includes reading all of the transactions from the database) is done from the application wide asio io_service, and a
|
||||
* strand is used to ensure ledgers are published in order.
|
||||
*/
|
||||
template <typename CacheType>
|
||||
class LedgerPublisher {
|
||||
util::Logger log_{"ETL"};
|
||||
|
||||
boost::asio::strand<boost::asio::io_context::executor_type> publishStrand_;
|
||||
|
||||
std::shared_ptr<BackendInterface> backend_;
|
||||
std::reference_wrapper<CacheType> cache_;
|
||||
std::reference_wrapper<data::LedgerCacheInterface> cache_;
|
||||
std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions_;
|
||||
std::reference_wrapper<SystemState const> state_; // shared state for ETL
|
||||
|
||||
@@ -94,7 +95,7 @@ public:
|
||||
LedgerPublisher(
|
||||
boost::asio::io_context& ioc,
|
||||
std::shared_ptr<BackendInterface> backend,
|
||||
CacheType& cache,
|
||||
data::LedgerCacheInterface& cache,
|
||||
std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions,
|
||||
SystemState const& state
|
||||
)
|
||||
@@ -205,7 +206,7 @@ public:
|
||||
subscriptions_->pubLedger(lgrInfo, *fees, range, transactions.size());
|
||||
|
||||
// order with transaction index
|
||||
std::sort(transactions.begin(), transactions.end(), [](auto const& t1, auto const& t2) {
|
||||
std::ranges::sort(transactions, [](auto const& t1, auto const& t2) {
|
||||
ripple::SerialIter iter1{t1.metadata.data(), t1.metadata.size()};
|
||||
ripple::STObject const object1(iter1, ripple::sfMetadata);
|
||||
ripple::SerialIter iter2{t2.metadata.data(), t2.metadata.size()};
|
||||
|
||||
@@ -1,8 +1,18 @@
|
||||
add_library(clio_etlng)
|
||||
|
||||
target_sources(
|
||||
clio_etlng PRIVATE impl/AmendmentBlockHandler.cpp impl/AsyncGrpcCall.cpp impl/Extraction.cpp impl/GrpcSource.cpp
|
||||
impl/Loading.cpp impl/TaskManager.cpp
|
||||
clio_etlng
|
||||
PRIVATE impl/AmendmentBlockHandler.cpp
|
||||
impl/AsyncGrpcCall.cpp
|
||||
impl/Extraction.cpp
|
||||
impl/GrpcSource.cpp
|
||||
impl/Loading.cpp
|
||||
impl/Monitor.cpp
|
||||
impl/TaskManager.cpp
|
||||
impl/ext/Cache.cpp
|
||||
impl/ext/Core.cpp
|
||||
impl/ext/NFT.cpp
|
||||
impl/ext/Successor.cpp
|
||||
)
|
||||
|
||||
target_link_libraries(clio_etlng PUBLIC clio_data)
|
||||
|
||||
67
src/etlng/MonitorInterface.hpp
Normal file
67
src/etlng/MonitorInterface.hpp
Normal file
@@ -0,0 +1,67 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2025, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <boost/signals2/connection.hpp>
|
||||
#include <boost/signals2/signal.hpp>
|
||||
#include <boost/signals2/variadic_signal.hpp>
|
||||
|
||||
#include <chrono>
|
||||
#include <cstdint>
|
||||
|
||||
namespace etlng {
|
||||
|
||||
/**
|
||||
* @brief An interface for the monitor service
|
||||
* An implementation of this service is responsible for periodically checking various datasources to detect newly
|
||||
* ingested ledgers.
|
||||
*/
|
||||
class MonitorInterface {
|
||||
public:
|
||||
static constexpr auto kDEFAULT_REPEAT_INTERVAL = std::chrono::seconds{1};
|
||||
using SignalType = boost::signals2::signal<void(uint32_t)>;
|
||||
|
||||
virtual ~MonitorInterface() = default;
|
||||
|
||||
/**
|
||||
* @brief Allows clients to get notified when a new ledger becomes available in Clio's database
|
||||
*
|
||||
* @param subscriber The slot to connect
|
||||
* @return A connection object that automatically disconnects the subscription once destroyed
|
||||
*/
|
||||
[[nodiscard]] virtual boost::signals2::scoped_connection
|
||||
subscribe(SignalType::slot_type const& subscriber) = 0;
|
||||
|
||||
/**
|
||||
* @brief Run the monitor service
|
||||
*
|
||||
* @param repeatInterval The interval between attempts to check the database for new ledgers
|
||||
*/
|
||||
virtual void
|
||||
run(std::chrono::steady_clock::duration repeatInterval = kDEFAULT_REPEAT_INTERVAL) = 0;
|
||||
|
||||
/**
|
||||
* @brief Stops the monitor service
|
||||
*/
|
||||
virtual void
|
||||
stop() = 0;
|
||||
};
|
||||
|
||||
} // namespace etlng
|
||||
99
src/etlng/impl/Monitor.cpp
Normal file
99
src/etlng/impl/Monitor.cpp
Normal file
@@ -0,0 +1,99 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2025, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include "etlng/impl/Monitor.hpp"
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "etl/NetworkValidatedLedgersInterface.hpp"
|
||||
#include "util/Assert.hpp"
|
||||
#include "util/async/AnyExecutionContext.hpp"
|
||||
#include "util/async/AnyOperation.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
|
||||
#include <boost/signals2/connection.hpp>
|
||||
|
||||
#include <chrono>
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <utility>
|
||||
|
||||
namespace etlng::impl {
|
||||
Monitor::Monitor(
|
||||
util::async::AnyExecutionContext ctx,
|
||||
std::shared_ptr<BackendInterface> backend,
|
||||
std::shared_ptr<etl::NetworkValidatedLedgersInterface> validatedLedgers,
|
||||
uint32_t startSequence
|
||||
)
|
||||
: strand_(ctx.makeStrand())
|
||||
, backend_(std::move(backend))
|
||||
, validatedLedgers_(std::move(validatedLedgers))
|
||||
, nextSequence_(startSequence)
|
||||
{
|
||||
}
|
||||
|
||||
Monitor::~Monitor()
|
||||
{
|
||||
stop();
|
||||
}
|
||||
|
||||
void
|
||||
Monitor::run(std::chrono::steady_clock::duration repeatInterval)
|
||||
{
|
||||
ASSERT(not repeatedTask_.has_value(), "Monitor attempted to run more than once");
|
||||
LOG(log_.debug()) << "Starting monitor";
|
||||
|
||||
repeatedTask_ = strand_.executeRepeatedly(repeatInterval, std::bind_front(&Monitor::doWork, this));
|
||||
subscription_ = validatedLedgers_->subscribe(std::bind_front(&Monitor::onNextSequence, this));
|
||||
}
|
||||
|
||||
void
|
||||
Monitor::stop()
|
||||
{
|
||||
if (repeatedTask_.has_value())
|
||||
repeatedTask_->abort();
|
||||
|
||||
repeatedTask_ = std::nullopt;
|
||||
}
|
||||
|
||||
boost::signals2::scoped_connection
|
||||
Monitor::subscribe(SignalType::slot_type const& subscriber)
|
||||
{
|
||||
return notificationChannel_.connect(subscriber);
|
||||
}
|
||||
|
||||
void
|
||||
Monitor::onNextSequence(uint32_t seq)
|
||||
{
|
||||
LOG(log_.debug()) << "rippled published sequence " << seq;
|
||||
repeatedTask_->invoke(); // force-invoke immediately
|
||||
}
|
||||
|
||||
void
|
||||
Monitor::doWork()
|
||||
{
|
||||
if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); rng) {
|
||||
while (rng->maxSequence >= nextSequence_)
|
||||
notificationChannel_(nextSequence_++);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace etlng::impl
|
||||
80
src/etlng/impl/Monitor.hpp
Normal file
80
src/etlng/impl/Monitor.hpp
Normal file
@@ -0,0 +1,80 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2025, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "etl/NetworkValidatedLedgersInterface.hpp"
|
||||
#include "etlng/MonitorInterface.hpp"
|
||||
#include "util/async/AnyExecutionContext.hpp"
|
||||
#include "util/async/AnyOperation.hpp"
|
||||
#include "util/async/AnyStrand.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
|
||||
#include <boost/signals2/connection.hpp>
|
||||
#include <xrpl/protocol/TxFormats.h>
|
||||
|
||||
#include <chrono>
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
|
||||
namespace etlng::impl {
|
||||
|
||||
class Monitor : public MonitorInterface {
|
||||
util::async::AnyStrand strand_;
|
||||
std::shared_ptr<BackendInterface> backend_;
|
||||
std::shared_ptr<etl::NetworkValidatedLedgersInterface> validatedLedgers_;
|
||||
|
||||
uint32_t nextSequence_;
|
||||
std::optional<util::async::AnyOperation<void>> repeatedTask_;
|
||||
std::optional<boost::signals2::scoped_connection> subscription_; // network validated ledgers subscription
|
||||
|
||||
SignalType notificationChannel_;
|
||||
|
||||
util::Logger log_{"ETL"};
|
||||
|
||||
public:
|
||||
Monitor(
|
||||
util::async::AnyExecutionContext ctx,
|
||||
std::shared_ptr<BackendInterface> backend,
|
||||
std::shared_ptr<etl::NetworkValidatedLedgersInterface> validatedLedgers,
|
||||
uint32_t startSequence
|
||||
);
|
||||
~Monitor() override;
|
||||
|
||||
void
|
||||
run(std::chrono::steady_clock::duration repeatInterval) override;
|
||||
|
||||
void
|
||||
stop() override;
|
||||
|
||||
boost::signals2::scoped_connection
|
||||
subscribe(SignalType::slot_type const& subscriber) override;
|
||||
|
||||
private:
|
||||
void
|
||||
onNextSequence(uint32_t seq);
|
||||
|
||||
void
|
||||
doWork();
|
||||
};
|
||||
|
||||
} // namespace etlng::impl
|
||||
59
src/etlng/impl/ext/Cache.cpp
Normal file
59
src/etlng/impl/ext/Cache.cpp
Normal file
@@ -0,0 +1,59 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2025, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include "etlng/impl/ext/Cache.hpp"
|
||||
|
||||
#include "data/LedgerCacheInterface.hpp"
|
||||
#include "etlng/Models.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
|
||||
#include <cstdint>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
namespace etlng::impl {
|
||||
|
||||
CacheExt::CacheExt(data::LedgerCacheInterface& cache) : cache_(cache)
|
||||
{
|
||||
}
|
||||
|
||||
void
|
||||
CacheExt::onLedgerData(model::LedgerData const& data) const
|
||||
{
|
||||
cache_.get().update(data.objects, data.seq);
|
||||
LOG(log_.trace()) << "got data. objects cnt = " << data.objects.size();
|
||||
}
|
||||
|
||||
void
|
||||
CacheExt::onInitialData(model::LedgerData const& data) const
|
||||
{
|
||||
LOG(log_.trace()) << "got initial data. objects cnt = " << data.objects.size();
|
||||
cache_.get().update(data.objects, data.seq);
|
||||
cache_.get().setFull();
|
||||
}
|
||||
|
||||
void
|
||||
CacheExt::onInitialObjects(uint32_t seq, std::vector<model::Object> const& objs, [[maybe_unused]] std::string lastKey)
|
||||
const
|
||||
{
|
||||
LOG(log_.trace()) << "got initial objects cnt = " << objs.size();
|
||||
cache_.get().update(objs, seq);
|
||||
}
|
||||
|
||||
} // namespace etlng::impl
|
||||
51
src/etlng/impl/ext/Cache.hpp
Normal file
51
src/etlng/impl/ext/Cache.hpp
Normal file
@@ -0,0 +1,51 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2025, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/LedgerCacheInterface.hpp"
|
||||
#include "etlng/Models.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
|
||||
#include <cstdint>
|
||||
#include <functional>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
namespace etlng::impl {
|
||||
|
||||
/**
 * @brief ETL extension that mirrors incoming ledger objects into the in-memory ledger cache.
 */
class CacheExt {
    std::reference_wrapper<data::LedgerCacheInterface> cache_;  // non-owning; the cache must outlive this extension

    util::Logger log_{"ETL"};

public:
    CacheExt(data::LedgerCacheInterface& cache);

    /** @brief Updates the cache with the objects of a newly received ledger. */
    void
    onLedgerData(model::LedgerData const& data) const;

    /** @brief Updates the cache with the initial ledger's objects and marks the cache full. */
    void
    onInitialData(model::LedgerData const& data) const;

    /** @brief Updates the cache with one batch of initially downloaded objects; lastKey is unused here. */
    void
    onInitialObjects(uint32_t seq, std::vector<model::Object> const& objs, [[maybe_unused]] std::string lastKey) const;
};
|
||||
|
||||
} // namespace etlng::impl
|
||||
83
src/etlng/impl/ext/Core.cpp
Normal file
83
src/etlng/impl/ext/Core.cpp
Normal file
@@ -0,0 +1,83 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2025, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include "etlng/impl/ext/Core.hpp"
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "etlng/Models.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
|
||||
#include <cstdint>
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
|
||||
namespace etlng::impl {
|
||||
|
||||
CoreExt::CoreExt(std::shared_ptr<BackendInterface> backend) : backend_(std::move(backend))
|
||||
{
|
||||
}
|
||||
|
||||
void
|
||||
CoreExt::onLedgerData(model::LedgerData const& data) const
|
||||
{
|
||||
LOG(log_.debug()) << "Loading ledger data for " << data.seq;
|
||||
backend_->writeLedger(data.header, auto{data.rawHeader});
|
||||
insertTransactions(data);
|
||||
}
|
||||
|
||||
void
|
||||
CoreExt::onInitialData(model::LedgerData const& data) const
|
||||
{
|
||||
LOG(log_.info()) << "Loading initial ledger data for " << data.seq;
|
||||
backend_->writeLedger(data.header, auto{data.rawHeader});
|
||||
insertTransactions(data);
|
||||
}
|
||||
|
||||
void
|
||||
CoreExt::onInitialObject(uint32_t seq, model::Object const& obj) const
|
||||
{
|
||||
LOG(log_.trace()) << "got initial OBJ = " << obj.key << " for seq " << seq;
|
||||
backend_->writeLedgerObject(auto{obj.keyRaw}, seq, auto{obj.dataRaw});
|
||||
}
|
||||
|
||||
void
|
||||
CoreExt::onObject(uint32_t seq, model::Object const& obj) const
|
||||
{
|
||||
LOG(log_.trace()) << "got OBJ = " << obj.key << " for seq " << seq;
|
||||
backend_->writeLedgerObject(auto{obj.keyRaw}, seq, auto{obj.dataRaw});
|
||||
}
|
||||
|
||||
void
|
||||
CoreExt::insertTransactions(model::LedgerData const& data) const
|
||||
{
|
||||
for (auto const& txn : data.transactions) {
|
||||
LOG(log_.trace()) << "Inserting transaction = " << txn.sttx.getTransactionID();
|
||||
|
||||
backend_->writeAccountTransaction({txn.meta, txn.sttx.getTransactionID()});
|
||||
backend_->writeTransaction(
|
||||
auto{txn.key},
|
||||
data.seq,
|
||||
data.header.closeTime.time_since_epoch().count(), // This is why we can't use 'onTransaction'
|
||||
auto{txn.raw},
|
||||
auto{txn.metaRaw}
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace etlng::impl
|
||||
58
src/etlng/impl/ext/Core.hpp
Normal file
58
src/etlng/impl/ext/Core.hpp
Normal file
@@ -0,0 +1,58 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2025, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "etlng/Models.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
|
||||
#include <xrpl/basics/base_uint.h>
|
||||
|
||||
#include <cstdint>
|
||||
#include <memory>
|
||||
|
||||
namespace etlng::impl {
|
||||
|
||||
/**
 * @brief ETL extension that writes core ledger data (headers, objects, transactions) to the backend.
 */
class CoreExt {
    std::shared_ptr<BackendInterface> backend_;  // shared ownership of the database backend

    util::Logger log_{"ETL"};

public:
    // NOTE(review): single-argument, non-explicit constructor — presumably implicit
    // conversion is not relied upon; consider marking explicit.
    CoreExt(std::shared_ptr<BackendInterface> backend);

    /** @brief Writes the header and transactions of a newly received ledger. */
    void
    onLedgerData(model::LedgerData const& data) const;

    /** @brief Writes the header and transactions of the initial ledger. */
    void
    onInitialData(model::LedgerData const& data) const;

    /** @brief Writes one object from the initial ledger download. */
    void
    onInitialObject(uint32_t seq, model::Object const& obj) const;

    /** @brief Writes one object from an incremental ledger update. */
    void
    onObject(uint32_t seq, model::Object const& obj) const;

private:
    /** @brief Writes all transactions plus account-transaction mappings for the given ledger data. */
    void
    insertTransactions(model::LedgerData const& data) const;
};
|
||||
|
||||
} // namespace etlng::impl
|
||||
77
src/etlng/impl/ext/NFT.cpp
Normal file
77
src/etlng/impl/ext/NFT.cpp
Normal file
@@ -0,0 +1,77 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2025, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include "etlng/impl/ext/NFT.hpp"
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/DBHelpers.hpp"
|
||||
#include "etl/NFTHelpers.hpp"
|
||||
#include "etlng/Models.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
|
||||
#include <cstdint>
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
namespace etlng::impl {
|
||||
|
||||
NFTExt::NFTExt(std::shared_ptr<BackendInterface> backend) : backend_(std::move(backend))
|
||||
{
|
||||
}
|
||||
|
||||
void
|
||||
NFTExt::onLedgerData(model::LedgerData const& data) const
|
||||
{
|
||||
writeNFTs(data);
|
||||
}
|
||||
|
||||
void
|
||||
NFTExt::onInitialObject(uint32_t seq, model::Object const& obj) const
|
||||
{
|
||||
LOG(log_.trace()) << "got initial object with key = " << obj.key;
|
||||
backend_->writeNFTs(etl::getNFTDataFromObj(seq, obj.keyRaw, obj.dataRaw));
|
||||
}
|
||||
|
||||
void
|
||||
NFTExt::onInitialData(model::LedgerData const& data) const
|
||||
{
|
||||
LOG(log_.trace()) << "got initial TXS cnt = " << data.transactions.size();
|
||||
writeNFTs(data);
|
||||
}
|
||||
|
||||
void
|
||||
NFTExt::writeNFTs(model::LedgerData const& data) const
|
||||
{
|
||||
std::vector<NFTsData> nfts;
|
||||
std::vector<NFTTransactionsData> nftTxs;
|
||||
|
||||
for (auto const& tx : data.transactions) {
|
||||
auto const [txs, maybeNFT] = etl::getNFTDataFromTx(tx.meta, tx.sttx);
|
||||
nftTxs.insert(nftTxs.end(), txs.begin(), txs.end());
|
||||
if (maybeNFT)
|
||||
nfts.push_back(*maybeNFT);
|
||||
}
|
||||
|
||||
// This is uniqued so that we only write latest modification (as in previous implementation)
|
||||
backend_->writeNFTs(etl::getUniqueNFTsDatas(nfts));
|
||||
backend_->writeNFTTransactions(nftTxs);
|
||||
}
|
||||
|
||||
} // namespace etlng::impl
|
||||
56
src/etlng/impl/ext/NFT.hpp
Normal file
56
src/etlng/impl/ext/NFT.hpp
Normal file
@@ -0,0 +1,56 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2025, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/DBHelpers.hpp"
|
||||
#include "etl/NFTHelpers.hpp"
|
||||
#include "etlng/Models.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
|
||||
#include <cstdint>
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
namespace etlng::impl {
|
||||
|
||||
/**
 * @brief ETL extension that extracts NFT data from ledger objects and transactions and persists it.
 */
class NFTExt {
    std::shared_ptr<BackendInterface> backend_;  // shared ownership of the database backend
    util::Logger log_{"ETL"};

public:
    NFTExt(std::shared_ptr<BackendInterface> backend);

    /** @brief Persists NFT data derived from the transactions of a new ledger. */
    void
    onLedgerData(model::LedgerData const& data) const;

    /** @brief Persists NFT data derived from one object of the initial ledger download. */
    void
    onInitialObject(uint32_t seq, model::Object const& obj) const;

    /** @brief Persists NFT data derived from the initial ledger's transactions. */
    void
    onInitialData(model::LedgerData const& data) const;

private:
    /** @brief Collects NFT rows (uniqued to latest modification) and NFT transactions, then writes both. */
    void
    writeNFTs(model::LedgerData const& data) const;
};
|
||||
|
||||
} // namespace etlng::impl
|
||||
222
src/etlng/impl/ext/Successor.cpp
Normal file
222
src/etlng/impl/ext/Successor.cpp
Normal file
@@ -0,0 +1,222 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2025, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include "etlng/impl/ext/Successor.hpp"
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/DBHelpers.hpp"
|
||||
#include "data/LedgerCacheInterface.hpp"
|
||||
#include "data/Types.hpp"
|
||||
#include "etlng/Models.hpp"
|
||||
#include "util/Assert.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
|
||||
#include <xrpl/basics/base_uint.h>
|
||||
#include <xrpl/basics/strHex.h>
|
||||
|
||||
#include <cstdint>
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <ranges>
|
||||
#include <stdexcept>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
namespace etlng::impl {
|
||||
|
||||
SuccessorExt::SuccessorExt(std::shared_ptr<BackendInterface> backend, data::LedgerCacheInterface& cache)
|
||||
: backend_(std::move(backend)), cache_(cache)
|
||||
{
|
||||
}
|
||||
|
||||
void
|
||||
SuccessorExt::onInitialData(model::LedgerData const& data) const
|
||||
{
|
||||
ASSERT(cache_.get().isFull(), "Cache must be full at this point");
|
||||
ASSERT(data.edgeKeys.has_value(), "Expecting to have edge keys on initial data load");
|
||||
ASSERT(data.objects.empty(), "Should not have objects from initial data");
|
||||
writeSuccessors(data.seq);
|
||||
writeEdgeKeys(data.seq, data.edgeKeys.value());
|
||||
}
|
||||
|
||||
void
|
||||
SuccessorExt::onInitialObjects(
|
||||
uint32_t seq,
|
||||
[[maybe_unused]] std::vector<model::Object> const& objs,
|
||||
std::string lastKey
|
||||
) const
|
||||
{
|
||||
for (auto const& obj : objs) {
|
||||
if (!lastKey.empty())
|
||||
backend_->writeSuccessor(std::move(lastKey), seq, auto{obj.keyRaw});
|
||||
lastKey = obj.keyRaw;
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
SuccessorExt::onLedgerData(model::LedgerData const& data) const
|
||||
{
|
||||
namespace vs = std::views;
|
||||
|
||||
LOG(log_.info()) << "Received ledger data for successor ext; obj cnt = " << data.objects.size()
|
||||
<< "; got successors = " << data.successors.has_value() << "; cache is "
|
||||
<< (cache_.get().isFull() ? "FULL" : "Not full");
|
||||
|
||||
auto filteredObjects = data.objects //
|
||||
| vs::filter([](auto const& obj) { return obj.type != model::Object::ModType::Modified; });
|
||||
|
||||
if (data.successors.has_value()) {
|
||||
for (auto const& successor : data.successors.value())
|
||||
writeIncludedSuccessor(data.seq, successor);
|
||||
|
||||
for (auto const& obj : filteredObjects)
|
||||
writeIncludedSuccessor(data.seq, obj);
|
||||
} else {
|
||||
if (not cache_.get().isFull() or cache_.get().latestLedgerSequence() != data.seq)
|
||||
throw std::logic_error("Cache is not full, but object neighbors were not included");
|
||||
|
||||
for (auto const& obj : filteredObjects)
|
||||
updateSuccessorFromCache(data.seq, obj);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
SuccessorExt::writeIncludedSuccessor(uint32_t seq, model::BookSuccessor const& succ) const
|
||||
{
|
||||
auto firstBook = succ.firstBook;
|
||||
if (firstBook.empty())
|
||||
firstBook = uint256ToString(data::kLAST_KEY);
|
||||
|
||||
backend_->writeSuccessor(auto{succ.bookBase}, seq, std::move(firstBook));
|
||||
}
|
||||
|
||||
void
|
||||
SuccessorExt::writeIncludedSuccessor(uint32_t seq, model::Object const& obj) const
|
||||
{
|
||||
ASSERT(obj.type != model::Object::ModType::Modified, "Attempt to write successor for a modified object");
|
||||
|
||||
// TODO: perhaps make these optionals inside of obj and move value_or here
|
||||
auto pred = obj.predecessor;
|
||||
auto succ = obj.successor;
|
||||
|
||||
if (obj.type == model::Object::ModType::Deleted) {
|
||||
backend_->writeSuccessor(std::move(pred), seq, std::move(succ));
|
||||
} else if (obj.type == model::Object::ModType::Created) {
|
||||
backend_->writeSuccessor(std::move(pred), seq, auto{obj.keyRaw});
|
||||
backend_->writeSuccessor(auto{obj.keyRaw}, seq, std::move(succ));
|
||||
}
|
||||
}
|
||||
|
||||
// Derives and writes the successor-list changes implied by a single created or
// deleted object, using only the ledger cache to find its neighbors.
void
SuccessorExt::updateSuccessorFromCache(uint32_t seq, model::Object const& obj) const
{
    // Neighbors of obj in key order; sentinel keys stand in at the ends of the ledger.
    auto const lb =
        cache_.get().getPredecessor(obj.key, seq).value_or(data::LedgerObject{.key = data::kFIRST_KEY, .blob = {}});
    auto const ub =
        cache_.get().getSuccessor(obj.key, seq).value_or(data::LedgerObject{.key = data::kLAST_KEY, .blob = {}});

    auto checkBookBase = false;
    auto const isDeleted = obj.data.empty();  // empty data marks a deletion

    if (isDeleted) {
        // Unlink obj: its predecessor now points straight at its successor.
        backend_->writeSuccessor(uint256ToString(lb.key), seq, uint256ToString(ub.key));
    } else {
        // Splice obj in between its neighbors.
        backend_->writeSuccessor(uint256ToString(lb.key), seq, uint256ToString(obj.key));
        backend_->writeSuccessor(uint256ToString(obj.key), seq, uint256ToString(ub.key));
    }

    if (isDeleted) {
        // For deletions the object's previous state must come from the prior sequence.
        auto const old = cache_.get().getDeleted(obj.key, seq - 1);
        ASSERT(old.has_value(), "Deleted object {} must be in cache", ripple::strHex(obj.key));

        checkBookBase = isBookDir(obj.key, *old);
    } else {
        checkBookBase = isBookDir(obj.key, obj.data);
    }

    if (checkBookBase) {
        // The object is a book directory page (per isBookDir), so the book base
        // entry may need to be re-pointed as well.
        auto const current = cache_.get().get(obj.key, seq);
        auto const bookBase = getBookBase(obj.key);

        if (isDeleted and not current.has_value()) {
            // Page is gone: point the base at whatever now follows it (or the sentinel).
            updateBookSuccessor(cache_.get().getSuccessor(bookBase, seq), seq, bookBase);
        } else if (current.has_value()) {
            auto const successor = cache_.get().getSuccessor(bookBase, seq);
            ASSERT(successor.has_value(), "Book base must have a successor for seq = {}", seq);

            // Only rewrite the base link when this object is the book's first page.
            if (successor->key == obj.key) {
                updateBookSuccessor(successor, seq, bookBase);
            }
        }
    }
}
|
||||
|
||||
void
|
||||
SuccessorExt::updateBookSuccessor(
|
||||
std::optional<data::LedgerObject> const& maybeSuccessor,
|
||||
auto seq,
|
||||
ripple::uint256 const& bookBase
|
||||
) const
|
||||
{
|
||||
if (maybeSuccessor.has_value()) {
|
||||
backend_->writeSuccessor(uint256ToString(bookBase), seq, uint256ToString(maybeSuccessor->key));
|
||||
} else {
|
||||
backend_->writeSuccessor(uint256ToString(bookBase), seq, uint256ToString(data::kLAST_KEY));
|
||||
}
|
||||
}
|
||||
|
||||
// Walks the entire cache in key order, starting from the first-key sentinel,
// and writes the complete successor chain for `seq`, including base links for
// book directory pages.
void
SuccessorExt::writeSuccessors(uint32_t seq) const
{
    ripple::uint256 prev = data::kFIRST_KEY;
    while (auto cur = cache_.get().getSuccessor(prev, seq)) {
        // First iteration: link the sentinel first key to the first real object.
        if (prev == data::kFIRST_KEY)
            backend_->writeSuccessor(uint256ToString(prev), seq, uint256ToString(cur->key));

        if (isBookDir(cur->key, cur->blob)) {
            auto base = getBookBase(cur->key);

            // make sure the base is not an actual object
            if (not cache_.get().get(base, seq)) {
                auto succ = cache_.get().getSuccessor(base, seq);
                ASSERT(succ.has_value(), "Book base {} must have a successor", ripple::strHex(base));

                // Only write the base link when cur is the first page of its book.
                if (succ->key == cur->key)
                    backend_->writeSuccessor(uint256ToString(base), seq, uint256ToString(cur->key));
            }
        }

        prev = cur->key;
    }

    // Terminate the chain with the sentinel last key.
    backend_->writeSuccessor(uint256ToString(prev), seq, uint256ToString(data::kLAST_KEY));
}
|
||||
|
||||
void
|
||||
SuccessorExt::writeEdgeKeys(std::uint32_t seq, auto const& edgeKeys) const
|
||||
{
|
||||
for (auto const& key : edgeKeys) {
|
||||
auto succ = cache_.get().getSuccessor(*ripple::uint256::fromVoidChecked(key), seq);
|
||||
if (succ)
|
||||
backend_->writeSuccessor(auto{key}, seq, uint256ToString(succ->key));
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace etlng::impl
|
||||
82
src/etlng/impl/ext/Successor.hpp
Normal file
82
src/etlng/impl/ext/Successor.hpp
Normal file
@@ -0,0 +1,82 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2025, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/LedgerCacheInterface.hpp"
|
||||
#include "data/Types.hpp"
|
||||
#include "etlng/Models.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
|
||||
#include <xrpl/basics/base_uint.h>
|
||||
#include <xrpl/basics/strHex.h>
|
||||
|
||||
#include <cstdint>
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
namespace etlng::impl {
|
||||
|
||||
/**
 * @brief ETL extension that maintains the successor (key ordering) tables in the backend.
 *
 * Successor entries link ledger object keys in ascending order; book base
 * entries additionally point order book bases at their first directory page.
 */
class SuccessorExt {
    std::shared_ptr<BackendInterface> backend_;                 // where successor rows are written
    std::reference_wrapper<data::LedgerCacheInterface> cache_;  // non-owning; used to look up object neighbors

    util::Logger log_{"ETL"};

public:
    SuccessorExt(std::shared_ptr<BackendInterface> backend, data::LedgerCacheInterface& cache);

    /** @brief Writes the full successor chain and edge-key links once the initial ledger is fully cached. */
    void
    onInitialData(model::LedgerData const& data) const;

    /** @brief Links consecutive keys within one batch of initially downloaded objects. */
    void
    onInitialObjects(uint32_t seq, [[maybe_unused]] std::vector<model::Object> const& objs, std::string lastKey) const;

    /** @brief Applies successor updates for one new ledger, from included neighbors or from the cache. */
    void
    onLedgerData(model::LedgerData const& data) const;

private:
    /** @brief Writes a book base -> first page link shipped with the ledger data. */
    void
    writeIncludedSuccessor(uint32_t seq, model::BookSuccessor const& succ) const;

    /** @brief Writes the links implied by one created or deleted object using its included neighbors. */
    void
    writeIncludedSuccessor(uint32_t seq, model::Object const& obj) const;

    /** @brief Derives the links for one created or deleted object from the ledger cache. */
    void
    updateSuccessorFromCache(uint32_t seq, model::Object const& obj) const;

    /** @brief Re-points a book base at the given successor, or at the last-key sentinel when absent. */
    void
    updateBookSuccessor(
        std::optional<data::LedgerObject> const& maybeSuccessor,
        auto seq,
        ripple::uint256 const& bookBase
    ) const;

    /** @brief Walks the full cache and writes the complete successor chain for seq. */
    void
    writeSuccessors(uint32_t seq) const;

    /** @brief Links each edge key of the initial download to its in-cache successor. */
    void
    writeEdgeKeys(std::uint32_t seq, auto const& edgeKeys) const;
};
|
||||
|
||||
} // namespace etlng::impl
|
||||
@@ -191,7 +191,7 @@ SubscriptionManager::unsubBook(ripple::Book const& book, SubscriberSharedPtr con
|
||||
void
|
||||
SubscriptionManager::pubTransaction(data::TransactionAndMetadata const& txMeta, ripple::LedgerHeader const& lgrInfo)
|
||||
{
|
||||
transactionFeed_.pub(txMeta, lgrInfo, backend_);
|
||||
transactionFeed_.pub(txMeta, lgrInfo, backend_, amendmentCenter_);
|
||||
}
|
||||
|
||||
boost::json::object
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/AmendmentCenterInterface.hpp"
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/Types.hpp"
|
||||
#include "feed/SubscriptionManagerInterface.hpp"
|
||||
@@ -60,6 +61,7 @@ namespace feed {
|
||||
*/
|
||||
class SubscriptionManager : public SubscriptionManagerInterface {
|
||||
std::shared_ptr<data::BackendInterface const> backend_;
|
||||
std::shared_ptr<data::AmendmentCenterInterface const> amendmentCenter_;
|
||||
util::async::AnyExecutionContext ctx_;
|
||||
impl::ForwardFeed manifestFeed_;
|
||||
impl::ForwardFeed validationsFeed_;
|
||||
@@ -74,12 +76,14 @@ public:
|
||||
*
|
||||
* @param config The configuration to use
|
||||
* @param backend The backend to use
|
||||
* @param amendmentCenter The amendmentCenter to use
|
||||
* @return A shared pointer to a new instance of SubscriptionManager
|
||||
*/
|
||||
static std::shared_ptr<SubscriptionManager>
|
||||
makeSubscriptionManager(
|
||||
util::config::ClioConfigDefinition const& config,
|
||||
std::shared_ptr<data::BackendInterface const> const& backend
|
||||
std::shared_ptr<data::BackendInterface const> const& backend,
|
||||
std::shared_ptr<data::AmendmentCenterInterface const> const& amendmentCenter
|
||||
)
|
||||
{
|
||||
auto const workersNum = config.get<uint64_t>("subscription_workers");
|
||||
@@ -87,7 +91,9 @@ public:
|
||||
util::Logger const logger{"Subscriptions"};
|
||||
LOG(logger.info()) << "Starting subscription manager with " << workersNum << " workers";
|
||||
|
||||
return std::make_shared<feed::SubscriptionManager>(util::async::PoolExecutionContext(workersNum), backend);
|
||||
return std::make_shared<feed::SubscriptionManager>(
|
||||
util::async::PoolExecutionContext(workersNum), backend, amendmentCenter
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -95,12 +101,15 @@ public:
|
||||
*
|
||||
* @param executor The executor to use to publish the feeds
|
||||
* @param backend The backend to use
|
||||
* @param amendmentCenter The amendmentCenter to use
|
||||
*/
|
||||
SubscriptionManager(
|
||||
util::async::AnyExecutionContext&& executor,
|
||||
std::shared_ptr<data::BackendInterface const> const& backend
|
||||
std::shared_ptr<data::BackendInterface const> const& backend,
|
||||
std::shared_ptr<data::AmendmentCenterInterface const> const& amendmentCenter
|
||||
)
|
||||
: backend_(backend)
|
||||
, amendmentCenter_(amendmentCenter)
|
||||
, ctx_(std::move(executor))
|
||||
, manifestFeed_(ctx_, "manifest")
|
||||
, validationsFeed_(ctx_, "validations")
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
|
||||
#include "feed/impl/TransactionFeed.hpp"
|
||||
|
||||
#include "data/AmendmentCenterInterface.hpp"
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/Types.hpp"
|
||||
#include "feed/Types.hpp"
|
||||
@@ -174,7 +175,8 @@ void
|
||||
TransactionFeed::pub(
|
||||
data::TransactionAndMetadata const& txMeta,
|
||||
ripple::LedgerHeader const& lgrInfo,
|
||||
std::shared_ptr<data::BackendInterface const> const& backend
|
||||
std::shared_ptr<data::BackendInterface const> const& backend,
|
||||
std::shared_ptr<data::AmendmentCenterInterface const> const& amendmentCenter
|
||||
)
|
||||
{
|
||||
auto [tx, meta] = rpc::deserializeTxPlusMeta(txMeta, lgrInfo.seq);
|
||||
@@ -187,7 +189,7 @@ TransactionFeed::pub(
|
||||
if (account != amount.issue().account) {
|
||||
auto fetchFundsSynchronous = [&]() {
|
||||
data::synchronous([&](boost::asio::yield_context yield) {
|
||||
ownerFunds = rpc::accountFunds(*backend, lgrInfo.seq, amount, account, yield);
|
||||
ownerFunds = rpc::accountFunds(*backend, *amendmentCenter, lgrInfo.seq, amount, account, yield);
|
||||
});
|
||||
};
|
||||
data::retryOnTimeout(fetchFundsSynchronous);
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/AmendmentCenterInterface.hpp"
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/Types.hpp"
|
||||
#include "feed/Types.hpp"
|
||||
@@ -94,6 +95,11 @@ public:
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Move constructor is deleted because TransactionSlot takes TransactionFeed by reference
|
||||
*/
|
||||
TransactionFeed(TransactionFeed&&) = delete;
|
||||
|
||||
/**
|
||||
* @brief Subscribe to the transaction feed.
|
||||
* @param subscriber
|
||||
@@ -180,7 +186,8 @@ public:
|
||||
void
|
||||
pub(data::TransactionAndMetadata const& txMeta,
|
||||
ripple::LedgerHeader const& lgrInfo,
|
||||
std::shared_ptr<data::BackendInterface const> const& backend);
|
||||
std::shared_ptr<data::BackendInterface const> const& backend,
|
||||
std::shared_ptr<data::AmendmentCenterInterface const> const& amendmentCenter);
|
||||
|
||||
/**
|
||||
* @brief Get the number of subscribers of the transaction feed.
|
||||
|
||||
@@ -52,7 +52,10 @@ try {
|
||||
if (not app::parseConfig(run.configPath))
|
||||
return EXIT_FAILURE;
|
||||
|
||||
util::LogService::init(gClioConfig);
|
||||
if (auto const initSuccess = util::LogService::init(gClioConfig); not initSuccess) {
|
||||
std::cerr << initSuccess.error() << std::endl;
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
app::ClioApplication clio{gClioConfig};
|
||||
return clio.run(run.useNgWebServer);
|
||||
},
|
||||
@@ -60,7 +63,10 @@ try {
|
||||
if (not app::parseConfig(migrate.configPath))
|
||||
return EXIT_FAILURE;
|
||||
|
||||
util::LogService::init(gClioConfig);
|
||||
if (auto const initSuccess = util::LogService::init(gClioConfig); not initSuccess) {
|
||||
std::cerr << initSuccess.error() << std::endl;
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
app::MigratorApplication migrator{gClioConfig, migrate.subCmd};
|
||||
return migrator.run();
|
||||
}
|
||||
|
||||
@@ -41,7 +41,7 @@ MigratorApplication::MigratorApplication(util::config::ClioConfigDefinition cons
|
||||
{
|
||||
PrometheusService::init(config);
|
||||
|
||||
auto expectedMigrationManager = migration::impl::makeMigrationManager(config);
|
||||
auto expectedMigrationManager = migration::impl::makeMigrationManager(config, cache_);
|
||||
|
||||
if (not expectedMigrationManager) {
|
||||
throw std::runtime_error("Failed to create migration manager: " + expectedMigrationManager.error());
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/LedgerCache.hpp"
|
||||
#include "migration/MigrationManagerInterface.hpp"
|
||||
#include "util/newconfig/ConfigDefinition.hpp"
|
||||
|
||||
@@ -76,6 +77,7 @@ class MigratorApplication {
|
||||
std::string option_;
|
||||
std::shared_ptr<migration::MigrationManagerInterface> migrationManager_;
|
||||
MigrateSubCmd cmd_;
|
||||
data::LedgerCache cache_;
|
||||
|
||||
public:
|
||||
/**
|
||||
|
||||
@@ -20,9 +20,8 @@
|
||||
#pragma once
|
||||
|
||||
#include "data/CassandraBackend.hpp"
|
||||
#include "data/LedgerCacheInterface.hpp"
|
||||
#include "data/cassandra/SettingsProvider.hpp"
|
||||
#include "data/cassandra/Types.hpp"
|
||||
#include "migration/MigratiorStatus.hpp"
|
||||
#include "migration/cassandra/impl/CassandraMigrationSchema.hpp"
|
||||
#include "migration/cassandra/impl/Spec.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
@@ -49,9 +48,13 @@ public:
|
||||
* @brief Construct a new Cassandra Migration Backend object. The backend is not readonly.
|
||||
*
|
||||
* @param settingsProvider The settings provider
|
||||
* @param cache The ledger cache to use
|
||||
*/
|
||||
explicit CassandraMigrationBackend(data::cassandra::SettingsProvider settingsProvider)
|
||||
: data::cassandra::CassandraBackend{auto{settingsProvider}, false /* not readonly */}
|
||||
explicit CassandraMigrationBackend(
|
||||
data::cassandra::SettingsProvider settingsProvider,
|
||||
data::LedgerCacheInterface& cache
|
||||
)
|
||||
: data::cassandra::CassandraBackend{auto{settingsProvider}, cache, false /* not readonly */}
|
||||
, settingsProvider_(std::move(settingsProvider))
|
||||
, migrationSchema_{settingsProvider_}
|
||||
{
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
|
||||
#include "migration/impl/MigrationManagerFactory.hpp"
|
||||
|
||||
#include "data/LedgerCacheInterface.hpp"
|
||||
#include "data/cassandra/SettingsProvider.hpp"
|
||||
#include "migration/MigrationManagerInterface.hpp"
|
||||
#include "migration/cassandra/CassandraMigrationBackend.hpp"
|
||||
@@ -35,7 +36,7 @@
|
||||
namespace migration::impl {
|
||||
|
||||
std::expected<std::shared_ptr<MigrationManagerInterface>, std::string>
|
||||
makeMigrationManager(util::config::ClioConfigDefinition const& config)
|
||||
makeMigrationManager(util::config::ClioConfigDefinition const& config, data::LedgerCacheInterface& cache)
|
||||
{
|
||||
static util::Logger const log{"Migration"}; // NOLINT(readability-identifier-naming)
|
||||
LOG(log.info()) << "Constructing MigrationManager";
|
||||
@@ -48,11 +49,10 @@ makeMigrationManager(util::config::ClioConfigDefinition const& config)
|
||||
}
|
||||
|
||||
auto const cfg = config.getObject("database." + type);
|
||||
|
||||
auto migrationCfg = config.getObject("migration");
|
||||
|
||||
return std::make_shared<cassandra::CassandraMigrationManager>(
|
||||
std::make_shared<cassandra::CassandraMigrationBackend>(data::cassandra::SettingsProvider{cfg}),
|
||||
std::make_shared<cassandra::CassandraMigrationBackend>(data::cassandra::SettingsProvider{cfg}, cache),
|
||||
std::move(migrationCfg)
|
||||
);
|
||||
}
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/LedgerCacheInterface.hpp"
|
||||
#include "migration/MigrationManagerInterface.hpp"
|
||||
#include "util/newconfig/ConfigDefinition.hpp"
|
||||
|
||||
@@ -33,9 +34,10 @@ namespace migration::impl {
|
||||
*
|
||||
* @param config The configuration of the migration application, it contains the database connection configuration and
|
||||
* other migration specific configurations
|
||||
* @param cache The ledger cache to use
|
||||
* @return A shared pointer to the MigrationManagerInterface if the creation was successful, otherwise an error message
|
||||
*/
|
||||
std::expected<std::shared_ptr<MigrationManagerInterface>, std::string>
|
||||
makeMigrationManager(util::config::ClioConfigDefinition const& config);
|
||||
makeMigrationManager(util::config::ClioConfigDefinition const& config, data::LedgerCacheInterface& cache);
|
||||
|
||||
} // namespace migration::impl
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
|
||||
#include "rpc/AMMHelpers.hpp"
|
||||
|
||||
#include "data/AmendmentCenterInterface.hpp"
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "rpc/RPCHelpers.hpp"
|
||||
|
||||
@@ -38,6 +39,7 @@ namespace rpc {
|
||||
std::pair<ripple::STAmount, ripple::STAmount>
|
||||
getAmmPoolHolds(
|
||||
BackendInterface const& backend,
|
||||
data::AmendmentCenterInterface const& amendmentCenter,
|
||||
std::uint32_t sequence,
|
||||
ripple::AccountID const& ammAccountID,
|
||||
ripple::Issue const& issue1,
|
||||
@@ -46,10 +48,12 @@ getAmmPoolHolds(
|
||||
boost::asio::yield_context yield
|
||||
)
|
||||
{
|
||||
auto const assetInBalance =
|
||||
accountHolds(backend, sequence, ammAccountID, issue1.currency, issue1.account, freezeHandling, yield);
|
||||
auto const assetOutBalance =
|
||||
accountHolds(backend, sequence, ammAccountID, issue2.currency, issue2.account, freezeHandling, yield);
|
||||
auto const assetInBalance = accountHolds(
|
||||
backend, amendmentCenter, sequence, ammAccountID, issue1.currency, issue1.account, freezeHandling, yield
|
||||
);
|
||||
auto const assetOutBalance = accountHolds(
|
||||
backend, amendmentCenter, sequence, ammAccountID, issue2.currency, issue2.account, freezeHandling, yield
|
||||
);
|
||||
return std::make_pair(assetInBalance, assetOutBalance);
|
||||
}
|
||||
|
||||
@@ -65,7 +69,9 @@ getAmmLpHolds(
|
||||
)
|
||||
{
|
||||
auto const lptCurrency = ammLPTCurrency(cur1, cur2);
|
||||
return accountHolds(backend, sequence, lpAccount, lptCurrency, ammAccount, true, yield);
|
||||
|
||||
// not using accountHolds because we don't need to check if the associated tokens of the LP are frozen
|
||||
return ammAccountHolds(backend, sequence, lpAccount, lptCurrency, ammAccount, true, yield);
|
||||
}
|
||||
|
||||
ripple::STAmount
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/AmendmentCenterInterface.hpp"
|
||||
#include "data/BackendInterface.hpp"
|
||||
|
||||
#include <boost/asio/spawn.hpp>
|
||||
@@ -37,6 +38,7 @@ namespace rpc {
|
||||
* @brief getAmmPoolHolds returns the balances of the amm asset pair
|
||||
*
|
||||
* @param backend The backend to use
|
||||
* @param amendmentCenter The amendmentCenter to use
|
||||
* @param sequence The sequence number to use
|
||||
* @param ammAccountID The amm account
|
||||
* @param issue1 The first issue
|
||||
@@ -48,6 +50,7 @@ namespace rpc {
|
||||
std::pair<ripple::STAmount, ripple::STAmount>
|
||||
getAmmPoolHolds(
|
||||
BackendInterface const& backend,
|
||||
data::AmendmentCenterInterface const& amendmentCenter,
|
||||
std::uint32_t sequence,
|
||||
ripple::AccountID const& ammAccountID,
|
||||
ripple::Issue const& issue1,
|
||||
|
||||
@@ -19,12 +19,15 @@
|
||||
|
||||
#include "rpc/RPCHelpers.hpp"
|
||||
|
||||
#include "data/AmendmentCenter.hpp"
|
||||
#include "data/AmendmentCenterInterface.hpp"
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/Types.hpp"
|
||||
#include "rpc/Errors.hpp"
|
||||
#include "rpc/JS.hpp"
|
||||
#include "rpc/common/Types.hpp"
|
||||
#include "util/AccountUtils.hpp"
|
||||
#include "util/Assert.hpp"
|
||||
#include "util/Profiler.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
#include "web/Context.hpp"
|
||||
@@ -382,7 +385,7 @@ insertDeliverMaxAlias(boost::json::object& txJson, std::uint32_t const apiVersio
|
||||
{
|
||||
if (txJson.contains(JS(TransactionType)) and txJson.at(JS(TransactionType)).is_string() and
|
||||
txJson.at(JS(TransactionType)).as_string() == JS(Payment) and txJson.contains(JS(Amount))) {
|
||||
txJson[JS(DeliverMax)] = txJson[JS(Amount)];
|
||||
txJson.insert_or_assign(JS(DeliverMax), txJson[JS(Amount)]);
|
||||
if (apiVersion > 1)
|
||||
txJson.erase(JS(Amount));
|
||||
}
|
||||
@@ -899,6 +902,26 @@ isGlobalFrozen(
|
||||
return sle.isFlag(ripple::lsfGlobalFreeze);
|
||||
}
|
||||
|
||||
bool
|
||||
fetchAndCheckAnyFlagsExists(
|
||||
BackendInterface const& backend,
|
||||
std::uint32_t sequence,
|
||||
ripple::Keylet const& keylet,
|
||||
std::vector<std::uint32_t> const& flags,
|
||||
boost::asio::yield_context yield
|
||||
)
|
||||
{
|
||||
auto const blob = backend.fetchLedgerObject(keylet.key, sequence, yield);
|
||||
|
||||
if (!blob)
|
||||
return false;
|
||||
|
||||
ripple::SerialIter it{blob->data(), blob->size()};
|
||||
ripple::SLE const sle{it, keylet.key};
|
||||
|
||||
return std::ranges::any_of(flags, [sle](std::uint32_t flag) { return sle.isFlag(flag); });
|
||||
}
|
||||
|
||||
bool
|
||||
isFrozen(
|
||||
BackendInterface const& backend,
|
||||
@@ -912,35 +935,57 @@ isFrozen(
|
||||
if (ripple::isXRP(currency))
|
||||
return false;
|
||||
|
||||
auto key = ripple::keylet::account(issuer).key;
|
||||
auto blob = backend.fetchLedgerObject(key, sequence, yield);
|
||||
|
||||
if (!blob)
|
||||
return false;
|
||||
|
||||
ripple::SerialIter it{blob->data(), blob->size()};
|
||||
ripple::SLE const sle{it, key};
|
||||
|
||||
if (sle.isFlag(ripple::lsfGlobalFreeze))
|
||||
if (fetchAndCheckAnyFlagsExists(
|
||||
backend, sequence, ripple::keylet::account(issuer), {ripple::lsfGlobalFreeze}, yield
|
||||
))
|
||||
return true;
|
||||
|
||||
if (issuer != account) {
|
||||
key = ripple::keylet::line(account, issuer, currency).key;
|
||||
blob = backend.fetchLedgerObject(key, sequence, yield);
|
||||
auto const trustLineKeylet = ripple::keylet::line(account, issuer, currency);
|
||||
return issuer != account &&
|
||||
fetchAndCheckAnyFlagsExists(
|
||||
backend,
|
||||
sequence,
|
||||
trustLineKeylet,
|
||||
{(issuer > account) ? ripple::lsfHighFreeze : ripple::lsfLowFreeze},
|
||||
yield
|
||||
);
|
||||
}
|
||||
|
||||
if (!blob)
|
||||
bool
|
||||
isDeepFrozen(
|
||||
BackendInterface const& backend,
|
||||
std::uint32_t sequence,
|
||||
ripple::AccountID const& account,
|
||||
ripple::Currency const& currency,
|
||||
ripple::AccountID const& issuer,
|
||||
boost::asio::yield_context yield
|
||||
)
|
||||
{
|
||||
if (ripple::isXRP(currency))
|
||||
return false;
|
||||
|
||||
ripple::SerialIter issuerIt{blob->data(), blob->size()};
|
||||
ripple::SLE const issuerLine{issuerIt, key};
|
||||
|
||||
auto frozen = (issuer > account) ? ripple::lsfHighFreeze : ripple::lsfLowFreeze;
|
||||
|
||||
if (issuerLine.isFlag(frozen))
|
||||
return true;
|
||||
}
|
||||
|
||||
if (issuer == account)
|
||||
return false;
|
||||
|
||||
auto const trustLineKeylet = ripple::keylet::line(account, issuer, currency);
|
||||
|
||||
return fetchAndCheckAnyFlagsExists(
|
||||
backend, sequence, trustLineKeylet, {ripple::lsfHighDeepFreeze, ripple::lsfLowDeepFreeze}, yield
|
||||
);
|
||||
}
|
||||
|
||||
bool
|
||||
isLPTokenFrozen(
|
||||
BackendInterface const& backend,
|
||||
std::uint32_t sequence,
|
||||
ripple::AccountID const& account,
|
||||
ripple::Issue const& asset,
|
||||
ripple::Issue const& asset2,
|
||||
boost::asio::yield_context yield
|
||||
)
|
||||
{
|
||||
return isFrozen(backend, sequence, account, asset.currency, asset.account, yield) ||
|
||||
isFrozen(backend, sequence, account, asset2.currency, asset2.account, yield);
|
||||
}
|
||||
|
||||
ripple::XRPAmount
|
||||
@@ -981,6 +1026,7 @@ xrpLiquid(
|
||||
ripple::STAmount
|
||||
accountFunds(
|
||||
BackendInterface const& backend,
|
||||
data::AmendmentCenterInterface const& amendmentCenter,
|
||||
std::uint32_t const sequence,
|
||||
ripple::STAmount const& amount,
|
||||
ripple::AccountID const& id,
|
||||
@@ -991,12 +1037,58 @@ accountFunds(
|
||||
return amount;
|
||||
}
|
||||
|
||||
return accountHolds(backend, sequence, id, amount.getCurrency(), amount.getIssuer(), true, yield);
|
||||
return accountHolds(backend, amendmentCenter, sequence, id, amount.getCurrency(), amount.getIssuer(), true, yield);
|
||||
}
|
||||
|
||||
ripple::STAmount
|
||||
ammAccountHolds(
|
||||
BackendInterface const& backend,
|
||||
std::uint32_t sequence,
|
||||
ripple::AccountID const& account,
|
||||
ripple::Currency const& currency,
|
||||
ripple::AccountID const& issuer,
|
||||
bool const zeroIfFrozen,
|
||||
boost::asio::yield_context yield
|
||||
)
|
||||
{
|
||||
ripple::STAmount amount;
|
||||
ASSERT(!ripple::isXRP(currency), "LPToken currency can never be XRP");
|
||||
if (ripple::isXRP(currency))
|
||||
return {xrpLiquid(backend, sequence, account, yield)};
|
||||
|
||||
auto const key = ripple::keylet::line(account, issuer, currency).key;
|
||||
auto const blob = backend.fetchLedgerObject(key, sequence, yield);
|
||||
|
||||
if (!blob) {
|
||||
amount.setIssue(ripple::Issue(currency, issuer));
|
||||
amount.clear();
|
||||
return amount;
|
||||
}
|
||||
|
||||
ripple::SerialIter it{blob->data(), blob->size()};
|
||||
ripple::SLE const sle{it, key};
|
||||
|
||||
if (zeroIfFrozen &&
|
||||
(isFrozen(backend, sequence, account, currency, issuer, yield) ||
|
||||
isDeepFrozen(backend, sequence, account, currency, issuer, yield))) {
|
||||
amount.setIssue(ripple::Issue(currency, issuer));
|
||||
amount.clear();
|
||||
} else {
|
||||
amount = sle.getFieldAmount(ripple::sfBalance);
|
||||
if (account > issuer) {
|
||||
// Put balance in account terms.
|
||||
amount.negate();
|
||||
}
|
||||
amount.setIssuer(issuer);
|
||||
}
|
||||
|
||||
return amount;
|
||||
}
|
||||
|
||||
ripple::STAmount
|
||||
accountHolds(
|
||||
BackendInterface const& backend,
|
||||
data::AmendmentCenterInterface const& amendmentCenter,
|
||||
std::uint32_t sequence,
|
||||
ripple::AccountID const& account,
|
||||
ripple::Currency const& currency,
|
||||
@@ -1018,19 +1110,61 @@ accountHolds(
|
||||
return amount;
|
||||
}
|
||||
|
||||
auto const allowBalance = [&]() {
|
||||
if (!zeroIfFrozen)
|
||||
return true;
|
||||
|
||||
if (isFrozen(backend, sequence, account, currency, issuer, yield))
|
||||
return false;
|
||||
|
||||
if (amendmentCenter.isEnabled(yield, data::Amendments::fixFrozenLPTokenTransfer, sequence)) {
|
||||
auto const issuerBlob = backend.fetchLedgerObject(ripple::keylet::account(issuer).key, sequence, yield);
|
||||
|
||||
if (!issuerBlob)
|
||||
return false;
|
||||
|
||||
ripple::SLE const issuerSle{
|
||||
ripple::SerialIter{issuerBlob->data(), issuerBlob->size()}, ripple::keylet::account(issuer).key
|
||||
};
|
||||
|
||||
// if the issuer is an amm account, then currency is lptoken, so we will need to check if the
|
||||
// assets in the pool are frozen as well
|
||||
if (issuerSle.isFieldPresent(ripple::sfAMMID)) {
|
||||
auto const ammKeylet = ripple::keylet::amm(issuerSle[ripple::sfAMMID]);
|
||||
auto const ammBlob = backend.fetchLedgerObject(ammKeylet.key, sequence, yield);
|
||||
|
||||
if (!ammBlob)
|
||||
return false;
|
||||
|
||||
ripple::SLE const ammSle{ripple::SerialIter{ammBlob->data(), ammBlob->size()}, ammKeylet.key};
|
||||
|
||||
return !isLPTokenFrozen(
|
||||
backend,
|
||||
sequence,
|
||||
account,
|
||||
ammSle[ripple::sfAsset].get<ripple::Issue>(),
|
||||
ammSle[ripple::sfAsset2].get<ripple::Issue>(),
|
||||
yield
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}();
|
||||
|
||||
if (allowBalance) {
|
||||
ripple::SerialIter it{blob->data(), blob->size()};
|
||||
ripple::SLE const sle{it, key};
|
||||
|
||||
if (zeroIfFrozen && isFrozen(backend, sequence, account, currency, issuer, yield)) {
|
||||
amount.setIssue(ripple::Issue(currency, issuer));
|
||||
amount.clear();
|
||||
} else {
|
||||
amount = sle.getFieldAmount(ripple::sfBalance);
|
||||
if (account > issuer) {
|
||||
// Put balance in account terms.
|
||||
amount.negate();
|
||||
}
|
||||
amount.setIssuer(issuer);
|
||||
} else {
|
||||
amount.setIssue(ripple::Issue(currency, issuer));
|
||||
amount.clear();
|
||||
}
|
||||
|
||||
return amount;
|
||||
@@ -1064,6 +1198,7 @@ postProcessOrderBook(
|
||||
ripple::Book const& book,
|
||||
ripple::AccountID const& takerID,
|
||||
data::BackendInterface const& backend,
|
||||
data::AmendmentCenterInterface const& amendmentCenter,
|
||||
std::uint32_t const ledgerSequence,
|
||||
boost::asio::yield_context yield
|
||||
)
|
||||
@@ -1106,7 +1241,14 @@ postProcessOrderBook(
|
||||
firstOwnerOffer = false;
|
||||
} else {
|
||||
saOwnerFunds = accountHolds(
|
||||
backend, ledgerSequence, uOfferOwnerID, book.out.currency, book.out.account, true, yield
|
||||
backend,
|
||||
amendmentCenter,
|
||||
ledgerSequence,
|
||||
uOfferOwnerID,
|
||||
book.out.currency,
|
||||
book.out.account,
|
||||
true,
|
||||
yield
|
||||
);
|
||||
|
||||
if (saOwnerFunds < beast::zero)
|
||||
|
||||
@@ -24,6 +24,7 @@
|
||||
* This file contains a variety of utility functions used when executing the handlers.
|
||||
*/
|
||||
|
||||
#include "data/AmendmentCenterInterface.hpp"
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/Types.hpp"
|
||||
#include "rpc/Errors.hpp"
|
||||
@@ -427,10 +428,75 @@ isFrozen(
|
||||
boost::asio::yield_context yield
|
||||
);
|
||||
|
||||
/**
|
||||
* @brief Fetches a ledger object and checks if any of the specified flag is set on the account.
|
||||
*
|
||||
* @param backend The backend to use
|
||||
* @param sequence The sequence
|
||||
* @param keylet The keylet representing the object
|
||||
* @param flags The flags to check on the fetched `SLE`.
|
||||
* @param yield The coroutine context
|
||||
* @return true if any of the flag in flags are set for this account; false otherwise
|
||||
*/
|
||||
bool
|
||||
fetchAndCheckAnyFlagsExists(
|
||||
BackendInterface const& backend,
|
||||
std::uint32_t sequence,
|
||||
ripple::Keylet const& keylet,
|
||||
std::vector<std::uint32_t> const& flags,
|
||||
boost::asio::yield_context yield
|
||||
);
|
||||
|
||||
/**
|
||||
* @brief Whether the trustline is deep frozen.
|
||||
*
|
||||
* For deep freeze, (unlike regular freeze) we do not care which account has the high/low deep freeze flag.
|
||||
* We only care about if the trustline is deep frozen or not.
|
||||
*
|
||||
* @param backend The backend to use
|
||||
* @param sequence The sequence
|
||||
* @param account The account
|
||||
* @param currency The currency
|
||||
* @param issuer The issuer
|
||||
* @param yield The coroutine context
|
||||
* @return true if the account is deep frozen; false otherwise
|
||||
*/
|
||||
bool
|
||||
isDeepFrozen(
|
||||
BackendInterface const& backend,
|
||||
std::uint32_t sequence,
|
||||
ripple::AccountID const& account,
|
||||
ripple::Currency const& currency,
|
||||
ripple::AccountID const& issuer,
|
||||
boost::asio::yield_context yield
|
||||
);
|
||||
|
||||
/**
|
||||
* @brief Whether the account that owns a LPToken is frozen for the assets in the pool
|
||||
*
|
||||
* @param backend The backend to use
|
||||
* @param sequence The sequence
|
||||
* @param account The account
|
||||
* @param asset The first asset in the pool
|
||||
* @param asset2 The second asset in the pool
|
||||
* @param yield The coroutine context
|
||||
* @return true if account is frozen for one of the assets
|
||||
*/
|
||||
bool
|
||||
isLPTokenFrozen(
|
||||
BackendInterface const& backend,
|
||||
std::uint32_t sequence,
|
||||
ripple::AccountID const& account,
|
||||
ripple::Issue const& asset,
|
||||
ripple::Issue const& asset2,
|
||||
boost::asio::yield_context yield
|
||||
);
|
||||
|
||||
/**
|
||||
* @brief Get the account funds
|
||||
*
|
||||
* @param backend The backend to use
|
||||
* @param amendmentCenter The amendmentCenter to use
|
||||
* @param sequence The sequence
|
||||
* @param amount The amount
|
||||
* @param id The account ID
|
||||
@@ -440,6 +506,7 @@ isFrozen(
|
||||
ripple::STAmount
|
||||
accountFunds(
|
||||
BackendInterface const& backend,
|
||||
data::AmendmentCenterInterface const& amendmentCenter,
|
||||
std::uint32_t sequence,
|
||||
ripple::STAmount const& amount,
|
||||
ripple::AccountID const& id,
|
||||
@@ -450,6 +517,7 @@ accountFunds(
|
||||
* @brief Get the amount that an account holds
|
||||
*
|
||||
* @param backend The backend to use
|
||||
* @param amendmentCenter The amendmentCenter to use
|
||||
* @param sequence The sequence
|
||||
* @param account The account
|
||||
* @param currency The currency
|
||||
@@ -461,6 +529,7 @@ accountFunds(
|
||||
ripple::STAmount
|
||||
accountHolds(
|
||||
BackendInterface const& backend,
|
||||
data::AmendmentCenterInterface const& amendmentCenter,
|
||||
std::uint32_t sequence,
|
||||
ripple::AccountID const& account,
|
||||
ripple::Currency const& currency,
|
||||
@@ -469,6 +538,29 @@ accountHolds(
|
||||
boost::asio::yield_context yield
|
||||
);
|
||||
|
||||
/**
|
||||
* @brief Get the amount that an LPToken owner holds
|
||||
*
|
||||
* @param backend The backend to use
|
||||
* @param sequence The sequence
|
||||
* @param account The account
|
||||
* @param currency The currency
|
||||
* @param issuer The issuer
|
||||
* @param zeroIfFrozen Whether to return zero if frozen
|
||||
* @param yield The coroutine context
|
||||
* @return The amount account holds
|
||||
*/
|
||||
ripple::STAmount
|
||||
ammAccountHolds(
|
||||
BackendInterface const& backend,
|
||||
std::uint32_t sequence,
|
||||
ripple::AccountID const& account,
|
||||
ripple::Currency const& currency,
|
||||
ripple::AccountID const& issuer,
|
||||
bool const zeroIfFrozen,
|
||||
boost::asio::yield_context yield
|
||||
);
|
||||
|
||||
/**
|
||||
* @brief Get the transfer rate
|
||||
*
|
||||
@@ -510,6 +602,7 @@ xrpLiquid(
|
||||
* @param book The book
|
||||
* @param takerID The taker ID
|
||||
* @param backend The backend to use
|
||||
* @param amendmentCenter The amendmentCenter to use
|
||||
* @param ledgerSequence The ledger sequence
|
||||
* @param yield The coroutine context
|
||||
* @return The post processed order book
|
||||
@@ -520,6 +613,7 @@ postProcessOrderBook(
|
||||
ripple::Book const& book,
|
||||
ripple::AccountID const& takerID,
|
||||
data::BackendInterface const& backend,
|
||||
data::AmendmentCenterInterface const& amendmentCenter,
|
||||
std::uint32_t ledgerSequence,
|
||||
boost::asio::yield_context yield
|
||||
);
|
||||
|
||||
@@ -116,6 +116,7 @@ public:
|
||||
"manifest",
|
||||
"channel_authorize",
|
||||
"channel_verify",
|
||||
"simulate",
|
||||
};
|
||||
|
||||
return kPROXIED_COMMANDS.contains(method);
|
||||
|
||||
@@ -86,14 +86,14 @@ ProductionHandlerProvider::ProductionHandlerProvider(
|
||||
{"account_objects", {.handler = AccountObjectsHandler{backend}}},
|
||||
{"account_offers", {.handler = AccountOffersHandler{backend}}},
|
||||
{"account_tx", {.handler = AccountTxHandler{backend}}},
|
||||
{"amm_info", {.handler = AMMInfoHandler{backend}}},
|
||||
{"amm_info", {.handler = AMMInfoHandler{backend, amendmentCenter}}},
|
||||
{"book_changes", {.handler = BookChangesHandler{backend}}},
|
||||
{"book_offers", {.handler = BookOffersHandler{backend}}},
|
||||
{"book_offers", {.handler = BookOffersHandler{backend, amendmentCenter}}},
|
||||
{"deposit_authorized", {.handler = DepositAuthorizedHandler{backend}}},
|
||||
{"feature", {.handler = FeatureHandler{backend, amendmentCenter}}},
|
||||
{"gateway_balances", {.handler = GatewayBalancesHandler{backend}}},
|
||||
{"get_aggregate_price", {.handler = GetAggregatePriceHandler{backend}}},
|
||||
{"ledger", {.handler = LedgerHandler{backend}}},
|
||||
{"ledger", {.handler = LedgerHandler{backend, amendmentCenter}}},
|
||||
{"ledger_data", {.handler = LedgerDataHandler{backend}}},
|
||||
{"ledger_entry", {.handler = LedgerEntryHandler{backend}}},
|
||||
{"ledger_index", {.handler = LedgerIndexHandler{backend}, .isClioOnly = true}}, // clio only
|
||||
@@ -110,7 +110,7 @@ ProductionHandlerProvider::ProductionHandlerProvider(
|
||||
{"server_info", {.handler = ServerInfoHandler{backend, subscriptionManager, balancer, etl, counters}}},
|
||||
{"transaction_entry", {.handler = TransactionEntryHandler{backend}}},
|
||||
{"tx", {.handler = TxHandler{backend, etl}}},
|
||||
{"subscribe", {.handler = SubscribeHandler{backend, subscriptionManager}}},
|
||||
{"subscribe", {.handler = SubscribeHandler{backend, amendmentCenter, subscriptionManager}}},
|
||||
{"unsubscribe", {.handler = UnsubscribeHandler{subscriptionManager}}},
|
||||
{"version", {.handler = VersionHandler{config}}},
|
||||
}
|
||||
|
||||
@@ -149,8 +149,9 @@ AMMInfoHandler::process(AMMInfoHandler::Input input, Context const& ctx) const
|
||||
issue2 = amm[sfAsset2].get<Issue>();
|
||||
}
|
||||
|
||||
auto const [asset1Balance, asset2Balance] =
|
||||
getAmmPoolHolds(*sharedPtrBackend_, lgrInfo.seq, ammAccountID, issue1, issue2, false, ctx.yield);
|
||||
auto const [asset1Balance, asset2Balance] = getAmmPoolHolds(
|
||||
*sharedPtrBackend_, *amendmentCenter_, lgrInfo.seq, ammAccountID, issue1, issue2, false, ctx.yield
|
||||
);
|
||||
auto const lptAMMBalance = input.accountID
|
||||
? getAmmLpHolds(*sharedPtrBackend_, lgrInfo.seq, amm, *input.accountID, ctx.yield)
|
||||
: amm[sfLPTokenBalance];
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/AmendmentCenterInterface.hpp"
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "rpc/common/Specs.hpp"
|
||||
|
||||
@@ -42,6 +43,7 @@ namespace rpc {
|
||||
*/
|
||||
class AMMInfoHandler {
|
||||
std::shared_ptr<BackendInterface> sharedPtrBackend_;
|
||||
std::shared_ptr<data::AmendmentCenterInterface const> amendmentCenter_;
|
||||
|
||||
public:
|
||||
/**
|
||||
@@ -82,8 +84,13 @@ public:
|
||||
* @brief Construct a new AMMInfoHandler object
|
||||
*
|
||||
* @param sharedPtrBackend The backend to use
|
||||
* @param amendmentCenter The amendmentCenter to use
|
||||
*/
|
||||
AMMInfoHandler(std::shared_ptr<BackendInterface> const& sharedPtrBackend) : sharedPtrBackend_(sharedPtrBackend)
|
||||
AMMInfoHandler(
|
||||
std::shared_ptr<BackendInterface> const& sharedPtrBackend,
|
||||
std::shared_ptr<data::AmendmentCenterInterface const> const& amendmentCenter
|
||||
)
|
||||
: sharedPtrBackend_(sharedPtrBackend), amendmentCenter_{amendmentCenter}
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
@@ -86,6 +86,8 @@ AccountLinesHandler::addLine(
|
||||
bool const lineNoRipplePeer = (flags & (not viewLowest ? ripple::lsfLowNoRipple : ripple::lsfHighNoRipple)) != 0u;
|
||||
bool const lineFreeze = (flags & (viewLowest ? ripple::lsfLowFreeze : ripple::lsfHighFreeze)) != 0u;
|
||||
bool const lineFreezePeer = (flags & (not viewLowest ? ripple::lsfLowFreeze : ripple::lsfHighFreeze)) != 0u;
|
||||
bool const lineDeepFreeze = (flags & (viewLowest ? ripple::lsfLowDeepFreeze : ripple::lsfHighFreeze)) != 0u;
|
||||
bool const lineDeepFreezePeer = (flags & (not viewLowest ? ripple::lsfLowDeepFreeze : ripple::lsfHighFreeze)) != 0u;
|
||||
|
||||
ripple::STAmount const& saBalance = balance;
|
||||
ripple::STAmount const& saLimit = lineLimit;
|
||||
@@ -100,6 +102,12 @@ AccountLinesHandler::addLine(
|
||||
line.qualityIn = lineQualityIn;
|
||||
line.qualityOut = lineQualityOut;
|
||||
|
||||
if (lineNoRipple)
|
||||
line.noRipple = true;
|
||||
|
||||
if (lineNoRipplePeer)
|
||||
line.noRipplePeer = true;
|
||||
|
||||
if (lineAuth)
|
||||
line.authorized = true;
|
||||
|
||||
@@ -112,8 +120,12 @@ AccountLinesHandler::addLine(
|
||||
if (lineFreezePeer)
|
||||
line.freezePeer = true;
|
||||
|
||||
line.noRipple = lineNoRipple;
|
||||
line.noRipplePeer = lineNoRipplePeer;
|
||||
if (lineDeepFreeze)
|
||||
line.deepFreeze = true;
|
||||
|
||||
if (lineDeepFreezePeer)
|
||||
line.deepFreezePeer = true;
|
||||
|
||||
lines.push_back(line);
|
||||
}
|
||||
|
||||
@@ -249,8 +261,11 @@ tag_invoke(
|
||||
{JS(quality_out), line.qualityOut},
|
||||
};
|
||||
|
||||
obj[JS(no_ripple)] = line.noRipple;
|
||||
obj[JS(no_ripple_peer)] = line.noRipplePeer;
|
||||
if (line.noRipple)
|
||||
obj[JS(no_ripple)] = *(line.noRipple);
|
||||
|
||||
if (line.noRipplePeer)
|
||||
obj[JS(no_ripple_peer)] = *(line.noRipplePeer);
|
||||
|
||||
if (line.authorized)
|
||||
obj[JS(authorized)] = *(line.authorized);
|
||||
@@ -264,6 +279,12 @@ tag_invoke(
|
||||
if (line.freezePeer)
|
||||
obj[JS(freeze_peer)] = *(line.freezePeer);
|
||||
|
||||
if (line.deepFreeze)
|
||||
obj[JS(deep_freeze)] = *(line.deepFreeze);
|
||||
|
||||
if (line.deepFreezePeer)
|
||||
obj[JS(deep_freeze_peer)] = *(line.deepFreezePeer);
|
||||
|
||||
jv = std::move(obj);
|
||||
}
|
||||
|
||||
|
||||
@@ -70,12 +70,14 @@ public:
|
||||
std::string limitPeer;
|
||||
uint32_t qualityIn{};
|
||||
uint32_t qualityOut{};
|
||||
bool noRipple{};
|
||||
bool noRipplePeer{};
|
||||
std::optional<bool> noRipple;
|
||||
std::optional<bool> noRipplePeer;
|
||||
std::optional<bool> authorized;
|
||||
std::optional<bool> peerAuthorized;
|
||||
std::optional<bool> freeze;
|
||||
std::optional<bool> freezePeer;
|
||||
std::optional<bool> deepFreeze;
|
||||
std::optional<bool> deepFreezePeer;
|
||||
};
|
||||
|
||||
/**
|
||||
|
||||
@@ -72,7 +72,13 @@ BookOffersHandler::process(Input input, Context const& ctx) const
|
||||
output.ledgerHash = ripple::strHex(lgrInfo.hash);
|
||||
output.ledgerIndex = lgrInfo.seq;
|
||||
output.offers = postProcessOrderBook(
|
||||
offers, book, input.taker ? *(input.taker) : beast::zero, *sharedPtrBackend_, lgrInfo.seq, ctx.yield
|
||||
offers,
|
||||
book,
|
||||
input.taker ? *(input.taker) : beast::zero,
|
||||
*sharedPtrBackend_,
|
||||
*amendmentCenter_,
|
||||
lgrInfo.seq,
|
||||
ctx.yield
|
||||
);
|
||||
|
||||
return output;
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/AmendmentCenterInterface.hpp"
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "rpc/Errors.hpp"
|
||||
#include "rpc/JS.hpp"
|
||||
@@ -51,6 +52,7 @@ namespace rpc {
|
||||
*/
|
||||
class BookOffersHandler {
|
||||
std::shared_ptr<BackendInterface> sharedPtrBackend_;
|
||||
std::shared_ptr<data::AmendmentCenterInterface const> amendmentCenter_;
|
||||
|
||||
public:
|
||||
static constexpr auto kLIMIT_MIN = 1;
|
||||
@@ -91,8 +93,13 @@ public:
|
||||
* @brief Construct a new BookOffersHandler object
|
||||
*
|
||||
* @param sharedPtrBackend The backend to use
|
||||
* @param amendmentCenter The amendmentCenter to use
|
||||
*/
|
||||
BookOffersHandler(std::shared_ptr<BackendInterface> const& sharedPtrBackend) : sharedPtrBackend_(sharedPtrBackend)
|
||||
BookOffersHandler(
|
||||
std::shared_ptr<BackendInterface> const& sharedPtrBackend,
|
||||
std::shared_ptr<data::AmendmentCenterInterface const> const& amendmentCenter
|
||||
)
|
||||
: sharedPtrBackend_(sharedPtrBackend), amendmentCenter_{amendmentCenter}
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
@@ -134,6 +134,7 @@ LedgerHandler::process(LedgerHandler::Input input, Context const& ctx) const
|
||||
if (account != amount.getIssuer()) {
|
||||
auto const ownerFunds = accountHolds(
|
||||
*sharedPtrBackend_,
|
||||
*amendmentCenter_,
|
||||
lgrInfo.seq,
|
||||
account,
|
||||
amount.getCurrency(),
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/AmendmentCenterInterface.hpp"
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "rpc/JS.hpp"
|
||||
#include "rpc/common/Checkers.hpp"
|
||||
@@ -35,6 +36,7 @@
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
|
||||
namespace rpc {
|
||||
|
||||
@@ -45,6 +47,7 @@ namespace rpc {
|
||||
*/
|
||||
class LedgerHandler {
|
||||
std::shared_ptr<BackendInterface> sharedPtrBackend_;
|
||||
std::shared_ptr<data::AmendmentCenterInterface const> amendmentCenter_;
|
||||
|
||||
public:
|
||||
/**
|
||||
@@ -89,8 +92,13 @@ public:
|
||||
* @brief Construct a new LedgerHandler object
|
||||
*
|
||||
* @param sharedPtrBackend The backend to use
|
||||
* @param amendmentCenter The amendmentCenter to use
|
||||
*/
|
||||
LedgerHandler(std::shared_ptr<BackendInterface> const& sharedPtrBackend) : sharedPtrBackend_(sharedPtrBackend)
|
||||
LedgerHandler(
|
||||
std::shared_ptr<BackendInterface> const& sharedPtrBackend,
|
||||
std::shared_ptr<data::AmendmentCenterInterface const> amendmentCenter
|
||||
)
|
||||
: sharedPtrBackend_(sharedPtrBackend), amendmentCenter_(std::move(amendmentCenter))
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
|
||||
#include "rpc/handlers/Subscribe.hpp"
|
||||
|
||||
#include "data/AmendmentCenterInterface.hpp"
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/Types.hpp"
|
||||
#include "feed/SubscriptionManagerInterface.hpp"
|
||||
@@ -55,9 +56,10 @@ namespace rpc {
|
||||
|
||||
SubscribeHandler::SubscribeHandler(
|
||||
std::shared_ptr<BackendInterface> const& sharedPtrBackend,
|
||||
std::shared_ptr<data::AmendmentCenterInterface const> const& amendmentCenter,
|
||||
std::shared_ptr<feed::SubscriptionManagerInterface> const& subscriptions
|
||||
)
|
||||
: sharedPtrBackend_(sharedPtrBackend), subscriptions_(subscriptions)
|
||||
: sharedPtrBackend_(sharedPtrBackend), amendmentCenter_(amendmentCenter), subscriptions_(subscriptions)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -216,8 +218,9 @@ SubscribeHandler::subscribeToBooks(
|
||||
// https://github.com/XRPLF/xrpl-dev-portal/issues/1818
|
||||
auto const takerID = internalBook.taker ? accountFromStringStrict(*(internalBook.taker)) : beast::zero;
|
||||
|
||||
auto const orderBook =
|
||||
postProcessOrderBook(offers, book, *takerID, *sharedPtrBackend_, rng->maxSequence, yield);
|
||||
auto const orderBook = postProcessOrderBook(
|
||||
offers, book, *takerID, *sharedPtrBackend_, *amendmentCenter_, rng->maxSequence, yield
|
||||
);
|
||||
std::copy(orderBook.begin(), orderBook.end(), std::back_inserter(snapshots));
|
||||
};
|
||||
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/AmendmentCenterInterface.hpp"
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "feed/SubscriptionManagerInterface.hpp"
|
||||
#include "feed/Types.hpp"
|
||||
@@ -53,6 +54,7 @@ namespace rpc {
|
||||
|
||||
class SubscribeHandler {
|
||||
std::shared_ptr<BackendInterface> sharedPtrBackend_;
|
||||
std::shared_ptr<data::AmendmentCenterInterface const> amendmentCenter_;
|
||||
std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions_;
|
||||
|
||||
public:
|
||||
@@ -98,10 +100,12 @@ public:
|
||||
* @brief Construct a new BaseSubscribeHandler object
|
||||
*
|
||||
* @param sharedPtrBackend The backend to use
|
||||
* @param amendmentCenter The amendmentCenter to use
|
||||
* @param subscriptions The subscription manager to use
|
||||
*/
|
||||
SubscribeHandler(
|
||||
std::shared_ptr<BackendInterface> const& sharedPtrBackend,
|
||||
std::shared_ptr<data::AmendmentCenterInterface const> const& amendmentCenter,
|
||||
std::shared_ptr<feed::SubscriptionManagerInterface> const& subscriptions
|
||||
);
|
||||
|
||||
|
||||
65
src/util/Assert.cpp
Normal file
65
src/util/Assert.cpp
Normal file
@@ -0,0 +1,65 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2025, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include "util/Assert.hpp"
|
||||
|
||||
#include "util/log/Logger.hpp"
|
||||
|
||||
#include <cstdlib>
|
||||
#include <iostream>
|
||||
#include <string_view>
|
||||
#include <utility>
|
||||
|
||||
namespace util::impl {
|
||||
|
||||
OnAssert::ActionType OnAssert::action;
|
||||
|
||||
void
|
||||
OnAssert::call(std::string_view message)
|
||||
{
|
||||
if (not OnAssert::action) {
|
||||
resetAction();
|
||||
}
|
||||
OnAssert::action(message);
|
||||
}
|
||||
|
||||
void
|
||||
OnAssert::setAction(ActionType newAction)
|
||||
{
|
||||
OnAssert::action = std::move(newAction);
|
||||
}
|
||||
|
||||
void
|
||||
OnAssert::resetAction()
|
||||
{
|
||||
OnAssert::action = [](std::string_view m) { OnAssert::defaultAction(m); };
|
||||
}
|
||||
|
||||
void
|
||||
OnAssert::defaultAction(std::string_view message)
|
||||
{
|
||||
if (LogService::enabled()) {
|
||||
LOG(LogService::fatal()) << message;
|
||||
} else {
|
||||
std::cerr << message;
|
||||
}
|
||||
std::exit(EXIT_FAILURE); // std::abort does not flush gcovr output and causes uncovered lines
|
||||
}
|
||||
|
||||
} // namespace util::impl
|
||||
@@ -20,18 +20,43 @@
|
||||
#pragma once
|
||||
|
||||
#include "util/SourceLocation.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
|
||||
#include <boost/log/core/core.hpp>
|
||||
|
||||
#include <functional>
|
||||
#include <string_view>
|
||||
#ifndef CLIO_WITHOUT_STACKTRACE
|
||||
#include <boost/stacktrace.hpp>
|
||||
#include <boost/stacktrace/stacktrace.hpp>
|
||||
#endif // CLIO_WITHOUT_STACKTRACE
|
||||
#include <fmt/core.h>
|
||||
#include <fmt/format.h>
|
||||
|
||||
#include <cstdlib>
|
||||
#include <iostream>
|
||||
|
||||
namespace util {
|
||||
namespace util::impl {
|
||||
|
||||
class OnAssert {
|
||||
public:
|
||||
using ActionType = std::function<void(std::string_view)>;
|
||||
|
||||
private:
|
||||
static ActionType action;
|
||||
|
||||
public:
|
||||
static void
|
||||
call(std::string_view message);
|
||||
|
||||
static void
|
||||
setAction(ActionType newAction);
|
||||
|
||||
static void
|
||||
resetAction();
|
||||
|
||||
private:
|
||||
static void
|
||||
defaultAction(std::string_view message);
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Assert that a condition is true
|
||||
@@ -55,6 +80,7 @@ assertImpl(
|
||||
)
|
||||
{
|
||||
if (!condition) {
|
||||
#ifndef CLIO_WITHOUT_STACKTRACE
|
||||
auto const resultMessage = fmt::format(
|
||||
"Assertion '{}' failed at {}:{}:\n{}\nStacktrace:\n{}",
|
||||
expression,
|
||||
@@ -63,16 +89,21 @@ assertImpl(
|
||||
fmt::format(format, std::forward<Args>(args)...),
|
||||
boost::stacktrace::to_string(boost::stacktrace::stacktrace())
|
||||
);
|
||||
if (boost::log::core::get()->get_logging_enabled()) {
|
||||
LOG(LogService::fatal()) << resultMessage;
|
||||
} else {
|
||||
std::cerr << resultMessage;
|
||||
}
|
||||
std::exit(EXIT_FAILURE); // std::abort does not flush gcovr output and causes uncovered lines
|
||||
#else
|
||||
auto const resultMessage = fmt::format(
|
||||
"Assertion '{}' failed at {}:{}:\n{}",
|
||||
expression,
|
||||
location.file_name(),
|
||||
location.line(),
|
||||
fmt::format(format, std::forward<Args>(args)...)
|
||||
);
|
||||
#endif
|
||||
|
||||
OnAssert::call(resultMessage);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace util
|
||||
} // namespace util::impl
|
||||
|
||||
#define ASSERT(condition, ...) \
|
||||
util::assertImpl(CURRENT_SRC_LOCATION, #condition, static_cast<bool>(condition), __VA_ARGS__)
|
||||
util::impl::assertImpl(CURRENT_SRC_LOCATION, #condition, static_cast<bool>(condition), __VA_ARGS__)
|
||||
|
||||
@@ -2,7 +2,8 @@ add_library(clio_util)
|
||||
|
||||
target_sources(
|
||||
clio_util
|
||||
PRIVATE build/Build.cpp
|
||||
PRIVATE Assert.cpp
|
||||
build/Build.cpp
|
||||
config/Config.cpp
|
||||
CoroutineGroup.cpp
|
||||
log/Logger.cpp
|
||||
@@ -34,20 +35,17 @@ target_sources(
|
||||
newconfig/ConfigDefinition.cpp
|
||||
newconfig/ConfigFileJson.cpp
|
||||
newconfig/ObjectView.cpp
|
||||
newconfig/Types.cpp
|
||||
newconfig/ValueView.cpp
|
||||
)
|
||||
|
||||
# This must be above the target_link_libraries call otherwise backtrace doesn't work
|
||||
if ("${san}" STREQUAL "")
|
||||
target_link_libraries(clio_util PUBLIC Boost::stacktrace_backtrace dl libbacktrace::libbacktrace)
|
||||
endif ()
|
||||
|
||||
target_link_libraries(
|
||||
clio_util
|
||||
PUBLIC Boost::headers
|
||||
Boost::stacktrace_backtrace
|
||||
dl
|
||||
libbacktrace::libbacktrace
|
||||
fmt::fmt
|
||||
openssl::openssl
|
||||
xrpl::libxrpl
|
||||
Threads::Threads
|
||||
clio_options
|
||||
clio_util PUBLIC Boost::headers fmt::fmt openssl::openssl xrpl::libxrpl Threads::Threads clio_options
|
||||
)
|
||||
|
||||
# FIXME: needed on gcc-12, clang-16 and AppleClang for now (known boost 1.82 issue for some compilers)
|
||||
|
||||
@@ -56,13 +56,15 @@ CoroutineGroup::spawn(boost::asio::yield_context yield, std::function<void(boost
|
||||
}
|
||||
|
||||
std::optional<std::function<void()>>
|
||||
CoroutineGroup::registerForeign()
|
||||
CoroutineGroup::registerForeign(boost::asio::yield_context yield)
|
||||
{
|
||||
if (isFull())
|
||||
return std::nullopt;
|
||||
|
||||
++childrenCounter_;
|
||||
return [this]() { onCoroutineCompleted(); };
|
||||
// It is important to spawn onCoroutineCompleted() to the same coroutine as will be calling asyncWait().
|
||||
// timer_ here is not thread safe, so without spawn there could be a data race.
|
||||
return [this, yield]() { boost::asio::spawn(yield, [this](auto&&) { onCoroutineCompleted(); }); };
|
||||
}
|
||||
|
||||
void
|
||||
|
||||
@@ -73,10 +73,11 @@ public:
|
||||
* @note A foreign coroutine is still counted as a child one, i.e. calling this method increases the size of the
|
||||
* group.
|
||||
*
|
||||
* @param yield The yield context owning the coroutine group.
|
||||
* @return A callback to call on foreign coroutine completes or std::nullopt if the group is already full.
|
||||
*/
|
||||
std::optional<std::function<void()>>
|
||||
registerForeign();
|
||||
registerForeign(boost::asio::yield_context yield);
|
||||
|
||||
/**
|
||||
* @brief Wait for all the coroutines in the group to finish
|
||||
|
||||
@@ -21,7 +21,9 @@
|
||||
|
||||
#include "util/log/Logger.hpp"
|
||||
|
||||
#ifndef CLIO_WITHOUT_STACKTRACE
|
||||
#include <boost/stacktrace/stacktrace.hpp>
|
||||
#endif // CLIO_WITHOUT_STACKTRACE
|
||||
|
||||
#include <cstdlib>
|
||||
#include <exception>
|
||||
@@ -33,11 +35,15 @@ namespace {
|
||||
void
|
||||
terminationHandler()
|
||||
{
|
||||
#ifndef CLIO_WITHOUT_STACKTRACE
|
||||
try {
|
||||
LOG(LogService::fatal()) << "Exit on terminate. Backtrace:\n" << boost::stacktrace::stacktrace();
|
||||
} catch (...) {
|
||||
LOG(LogService::fatal()) << "Exit on terminate. Can't get backtrace.";
|
||||
}
|
||||
#else
|
||||
LOG(LogService::fatal()) << "Exit on terminate. Stacktrace disabled.";
|
||||
#endif // CLIO_WITHOUT_STACKTRACE
|
||||
std::abort();
|
||||
}
|
||||
|
||||
|
||||
@@ -19,7 +19,6 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "util/async/Concepts.hpp"
|
||||
#include "util/async/Error.hpp"
|
||||
#include "util/async/impl/ErasedOperation.hpp"
|
||||
|
||||
@@ -78,7 +77,7 @@ public:
|
||||
* Used to cancel the timer for scheduled operations and request the operation to be stopped as soon as possible
|
||||
*/
|
||||
void
|
||||
abort() noexcept
|
||||
abort()
|
||||
{
|
||||
operation_.abort();
|
||||
}
|
||||
@@ -107,6 +106,18 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Force-invoke the operation
|
||||
* @note The action is scheduled on the underlying context/strand
|
||||
* @warning The code of the user-provided action is expected to take care of thread-safety unless this operation is
|
||||
* scheduled through a strand
|
||||
*/
|
||||
void
|
||||
invoke()
|
||||
{
|
||||
operation_.invoke();
|
||||
}
|
||||
|
||||
private:
|
||||
impl::ErasedOperation operation_;
|
||||
};
|
||||
|
||||
@@ -131,14 +131,43 @@ public:
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Schedule a repeating operation on the execution context
|
||||
*
|
||||
* @param interval The interval at which the operation should be repeated
|
||||
* @param fn The block of code to execute; no args allowed and return type must be void
|
||||
* @return A repeating stoppable operation that can be used to wait for its cancellation
|
||||
*/
|
||||
[[nodiscard]] auto
|
||||
executeRepeatedly(SomeStdDuration auto interval, SomeHandlerWithoutStopToken auto&& fn)
|
||||
{
|
||||
using RetType = std::decay_t<decltype(fn())>;
|
||||
static_assert(not std::is_same_v<RetType, std::any>);
|
||||
|
||||
auto const millis = std::chrono::duration_cast<std::chrono::milliseconds>(interval);
|
||||
return AnyOperation<RetType>( //
|
||||
pimpl_->executeRepeatedly(
|
||||
millis,
|
||||
[fn = std::forward<decltype(fn)>(fn)] -> std::any {
|
||||
fn();
|
||||
return {};
|
||||
}
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
private:
|
||||
struct Concept {
|
||||
virtual ~Concept() = default;
|
||||
|
||||
[[nodiscard]] virtual impl::ErasedOperation
|
||||
execute(std::function<std::any(AnyStopToken)>, std::optional<std::chrono::milliseconds> timeout = std::nullopt)
|
||||
const = 0;
|
||||
execute(
|
||||
std::function<std::any(AnyStopToken)>,
|
||||
std::optional<std::chrono::milliseconds> timeout = std::nullopt
|
||||
) = 0;
|
||||
[[nodiscard]] virtual impl::ErasedOperation execute(std::function<std::any()>) = 0;
|
||||
[[nodiscard]] virtual impl::ErasedOperation
|
||||
executeRepeatedly(std::chrono::milliseconds, std::function<std::any()>) = 0;
|
||||
};
|
||||
|
||||
template <typename StrandType>
|
||||
@@ -152,8 +181,7 @@ private:
|
||||
}
|
||||
|
||||
[[nodiscard]] impl::ErasedOperation
|
||||
execute(std::function<std::any(AnyStopToken)> fn, std::optional<std::chrono::milliseconds> timeout)
|
||||
const override
|
||||
execute(std::function<std::any(AnyStopToken)> fn, std::optional<std::chrono::milliseconds> timeout) override
|
||||
{
|
||||
return strand.execute(std::move(fn), timeout);
|
||||
}
|
||||
@@ -163,6 +191,12 @@ private:
|
||||
{
|
||||
return strand.execute(std::move(fn));
|
||||
}
|
||||
|
||||
impl::ErasedOperation
|
||||
executeRepeatedly(std::chrono::milliseconds interval, std::function<std::any()> fn) override
|
||||
{
|
||||
return strand.executeRepeatedly(interval, std::move(fn));
|
||||
}
|
||||
};
|
||||
|
||||
private:
|
||||
|
||||
@@ -75,6 +75,14 @@ concept SomeOperationWithData = SomeOperation<T> and requires(T v) {
|
||||
{ v.get() };
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Specifies the interface for an operation that can force-invoked
|
||||
*/
|
||||
template <typename T>
|
||||
concept SomeForceInvocableOperation = SomeOperation<T> and requires(T v) {
|
||||
{ v.invoke() };
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Specifies the interface for an operation that can be stopped
|
||||
*/
|
||||
|
||||
@@ -32,6 +32,7 @@
|
||||
#include <concepts>
|
||||
#include <condition_variable>
|
||||
#include <expected>
|
||||
#include <functional>
|
||||
#include <future>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
@@ -227,6 +228,7 @@ using ScheduledOperation = impl::BasicScheduledOperation<CtxType, OpType>;
|
||||
template <typename CtxType>
|
||||
class RepeatingOperation : public util::MoveTracker {
|
||||
util::Repeat repeat_;
|
||||
std::function<void()> action_;
|
||||
|
||||
public:
|
||||
/**
|
||||
@@ -237,10 +239,11 @@ public:
|
||||
* @param interval Time to wait before repeating the user-provided block of code
|
||||
* @param fn The function to execute repeatedly
|
||||
*/
|
||||
RepeatingOperation(auto& executor, std::chrono::steady_clock::duration interval, std::invocable auto&& fn)
|
||||
: repeat_(executor)
|
||||
template <std::invocable FnType>
|
||||
RepeatingOperation(auto& executor, std::chrono::steady_clock::duration interval, FnType&& fn)
|
||||
: repeat_(executor), action_([fn = std::forward<FnType>(fn), &executor] { boost::asio::post(executor, fn); })
|
||||
{
|
||||
repeat_.start(interval, std::forward<decltype(fn)>(fn));
|
||||
repeat_.start(interval, action_);
|
||||
}
|
||||
|
||||
~RepeatingOperation() override
|
||||
@@ -266,6 +269,18 @@ public:
|
||||
{
|
||||
repeat_.stop();
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Force-invoke the operation
|
||||
* @note The action is scheduled on the underlying context/strand
|
||||
* @warning The code of the user-provided action is expected to take care of thread-safety unless this operation is
|
||||
* scheduled through a strand
|
||||
*/
|
||||
void
|
||||
invoke()
|
||||
{
|
||||
action_();
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace util::async
|
||||
|
||||
@@ -61,8 +61,8 @@ struct AsioPoolStrandContext {
|
||||
using Executor = boost::asio::strand<boost::asio::thread_pool::executor_type>;
|
||||
using Timer = SteadyTimer<Executor>;
|
||||
|
||||
Executor const&
|
||||
getExecutor() const
|
||||
Executor&
|
||||
getExecutor()
|
||||
{
|
||||
return executor;
|
||||
}
|
||||
@@ -272,6 +272,7 @@ public:
|
||||
|
||||
/**
|
||||
* @brief Schedule a repeating operation on the execution context
|
||||
* @warning The code of the user-provided action is expected to be thread-safe
|
||||
*
|
||||
* @param interval The interval at which the operation should be repeated
|
||||
* @param fn The block of code to execute; no args allowed and return type must be void
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
#pragma once
|
||||
|
||||
#include "util/async/Concepts.hpp"
|
||||
#include "util/async/Operation.hpp"
|
||||
#include "util/async/context/impl/Cancellation.hpp"
|
||||
#include "util/async/context/impl/Execution.hpp"
|
||||
#include "util/async/context/impl/Timer.hpp"
|
||||
@@ -52,6 +53,7 @@ public:
|
||||
using StopToken = typename StopSourceType::Token;
|
||||
using Timer =
|
||||
typename ParentContextType::ContextHolderType::Timer; // timers are associated with the parent context
|
||||
using RepeatedOperation = RepeatingOperation<BasicStrand>;
|
||||
|
||||
BasicStrand(ParentContextType& parent, auto&& strand)
|
||||
: parentContext_{std::ref(parent)}, context_{std::forward<decltype(strand)>(strand)}
|
||||
@@ -64,8 +66,10 @@ public:
|
||||
BasicStrand(BasicStrand const&) = delete;
|
||||
|
||||
[[nodiscard]] auto
|
||||
execute(SomeHandlerWith<StopToken> auto&& fn, std::optional<std::chrono::milliseconds> timeout = std::nullopt) const
|
||||
noexcept(kIS_NOEXCEPT)
|
||||
execute(
|
||||
SomeHandlerWith<StopToken> auto&& fn,
|
||||
std::optional<std::chrono::milliseconds> timeout = std::nullopt
|
||||
) noexcept(kIS_NOEXCEPT)
|
||||
{
|
||||
return DispatcherType::dispatch(
|
||||
context_,
|
||||
@@ -89,7 +93,7 @@ public:
|
||||
}
|
||||
|
||||
[[nodiscard]] auto
|
||||
execute(SomeHandlerWith<StopToken> auto&& fn, SomeStdDuration auto timeout) const noexcept(kIS_NOEXCEPT)
|
||||
execute(SomeHandlerWith<StopToken> auto&& fn, SomeStdDuration auto timeout) noexcept(kIS_NOEXCEPT)
|
||||
{
|
||||
return execute(
|
||||
std::forward<decltype(fn)>(fn),
|
||||
@@ -98,7 +102,7 @@ public:
|
||||
}
|
||||
|
||||
[[nodiscard]] auto
|
||||
execute(SomeHandlerWithoutStopToken auto&& fn) const noexcept(kIS_NOEXCEPT)
|
||||
execute(SomeHandlerWithoutStopToken auto&& fn) noexcept(kIS_NOEXCEPT)
|
||||
{
|
||||
return DispatcherType::dispatch(
|
||||
context_,
|
||||
@@ -114,6 +118,16 @@ public:
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
[[nodiscard]] auto
|
||||
executeRepeatedly(SomeStdDuration auto interval, SomeHandlerWithoutStopToken auto&& fn) noexcept(kIS_NOEXCEPT)
|
||||
{
|
||||
if constexpr (not std::is_same_v<decltype(TimerContextProvider::getContext(*this)), decltype(*this)>) {
|
||||
return TimerContextProvider::getContext(*this).executeRepeatedly(interval, std::forward<decltype(fn)>(fn));
|
||||
} else {
|
||||
return RepeatedOperation(impl::extractAssociatedExecutor(*this), interval, std::forward<decltype(fn)>(fn));
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace util::async::impl
|
||||
|
||||
@@ -71,6 +71,12 @@ public:
|
||||
pimpl_->abort();
|
||||
}
|
||||
|
||||
void
|
||||
invoke()
|
||||
{
|
||||
pimpl_->invoke();
|
||||
}
|
||||
|
||||
private:
|
||||
struct Concept {
|
||||
virtual ~Concept() = default;
|
||||
@@ -81,6 +87,8 @@ private:
|
||||
get() = 0;
|
||||
virtual void
|
||||
abort() = 0;
|
||||
virtual void
|
||||
invoke() = 0;
|
||||
};
|
||||
|
||||
template <SomeOperation OpType>
|
||||
@@ -133,6 +141,16 @@ private:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
invoke() override
|
||||
{
|
||||
if constexpr (not SomeForceInvocableOperation<OpType>) {
|
||||
ASSERT(false, "Called invoke() on an operation that can't be force-invoked");
|
||||
} else {
|
||||
operation.invoke();
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
private:
|
||||
|
||||
@@ -28,8 +28,6 @@
|
||||
|
||||
#include <boost/algorithm/string/predicate.hpp>
|
||||
#include <boost/date_time/posix_time/posix_time_duration.hpp>
|
||||
#include <boost/filesystem/operations.hpp>
|
||||
#include <boost/filesystem/path.hpp>
|
||||
#include <boost/log/attributes/attribute_value_set.hpp>
|
||||
#include <boost/log/core/core.hpp>
|
||||
#include <boost/log/expressions/filter.hpp>
|
||||
@@ -48,18 +46,20 @@
|
||||
#include <boost/log/utility/setup/console.hpp>
|
||||
#include <boost/log/utility/setup/file.hpp>
|
||||
#include <boost/log/utility/setup/formatter_parser.hpp>
|
||||
#include <fmt/core.h>
|
||||
|
||||
#include <algorithm>
|
||||
#include <array>
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
#include <filesystem>
|
||||
#include <ios>
|
||||
#include <iostream>
|
||||
#include <optional>
|
||||
#include <ostream>
|
||||
#include <stdexcept>
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
#include <system_error>
|
||||
#include <unordered_map>
|
||||
#include <utility>
|
||||
|
||||
@@ -111,7 +111,7 @@ getSeverityLevel(std::string_view logLevel)
|
||||
std::unreachable();
|
||||
}
|
||||
|
||||
void
|
||||
std::expected<void, std::string>
|
||||
LogService::init(config::ClioConfigDefinition const& config)
|
||||
{
|
||||
namespace keywords = boost::log::keywords;
|
||||
@@ -132,9 +132,15 @@ LogService::init(config::ClioConfigDefinition const& config)
|
||||
|
||||
auto const logDir = config.maybeValue<std::string>("log_directory");
|
||||
if (logDir) {
|
||||
boost::filesystem::path dirPath{logDir.value()};
|
||||
if (!boost::filesystem::exists(dirPath))
|
||||
boost::filesystem::create_directories(dirPath);
|
||||
std::filesystem::path dirPath{logDir.value()};
|
||||
if (not std::filesystem::exists(dirPath)) {
|
||||
if (std::error_code error; not std::filesystem::create_directories(dirPath, error)) {
|
||||
return std::unexpected{
|
||||
fmt::format("Couldn't create logs directory '{}': {}", dirPath.string(), error.message())
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
auto const rotationPeriod = config.get<uint32_t>("log_rotation_hour_interval");
|
||||
|
||||
// the below are taken from user in MB, but boost::log::add_file_log needs it to be in bytes
|
||||
@@ -169,8 +175,9 @@ LogService::init(config::ClioConfigDefinition const& config)
|
||||
for (auto it = overrides.begin<util::config::ObjectView>(); it != overrides.end<util::config::ObjectView>(); ++it) {
|
||||
auto const& channelConfig = *it;
|
||||
auto const name = channelConfig.get<std::string>("channel");
|
||||
if (std::count(std::begin(Logger::kCHANNELS), std::end(Logger::kCHANNELS), name) == 0)
|
||||
throw std::runtime_error("Can't override settings for log channel " + name + ": invalid channel");
|
||||
if (std::ranges::count(Logger::kCHANNELS, name) == 0) { // TODO: use std::ranges::contains when available
|
||||
return std::unexpected{fmt::format("Can't override settings for log channel {}: invalid channel", name)};
|
||||
}
|
||||
|
||||
minSeverity[name] = getSeverityLevel(channelConfig.get<std::string>("log_level"));
|
||||
}
|
||||
@@ -189,6 +196,13 @@ LogService::init(config::ClioConfigDefinition const& config)
|
||||
filter = boost::log::filter{std::move(logFilter)};
|
||||
boost::log::core::get()->set_filter(filter);
|
||||
LOG(LogService::info()) << "Default log level = " << defaultSeverity;
|
||||
return {};
|
||||
}
|
||||
|
||||
bool
|
||||
LogService::enabled()
|
||||
{
|
||||
return boost::log::core::get()->get_logging_enabled();
|
||||
}
|
||||
|
||||
Logger::Pump
|
||||
|
||||
@@ -46,6 +46,7 @@
|
||||
|
||||
#include <array>
|
||||
#include <cstddef>
|
||||
#include <expected>
|
||||
#include <optional>
|
||||
#include <ostream>
|
||||
#include <string>
|
||||
@@ -278,8 +279,9 @@ public:
|
||||
* @brief Global log core initialization from a @ref Config
|
||||
*
|
||||
* @param config The configuration to use
|
||||
* @return Void on success, error message on failure
|
||||
*/
|
||||
static void
|
||||
[[nodiscard]] static std::expected<void, std::string>
|
||||
init(config::ClioConfigDefinition const& config);
|
||||
|
||||
/**
|
||||
@@ -365,6 +367,14 @@ public:
|
||||
{
|
||||
return alertLog.warn(loc);
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Whether the LogService is enabled or not
|
||||
*
|
||||
* @return true if the LogService is enabled, false otherwise
|
||||
*/
|
||||
[[nodiscard]] static bool
|
||||
enabled();
|
||||
};
|
||||
|
||||
}; // namespace util
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user