Mirror of https://github.com/XRPLF/clio.git (synced 2025-11-04 11:55:51 +00:00)
Compare commits
185 Commits
| SHA1 |
|---|
| 2312c25d70 |
| 8d76a05dfd |
| db4fc6a07d |
| 565aadd3b2 |
| 41967c88bd |
| 317b3e234b |
| 7eff1e6a9e |
| 099052ad3f |
| c69df885d3 |
| cb5671b917 |
| 58ec0ed8d8 |
| 1dff0421f2 |
| 208b3e8f8a |
| 8859250d53 |
| 409dcd106c |
| 12a3feddb7 |
| 9a392ca072 |
| f5d494be23 |
| e029a9b3df |
| 702ee0324e |
| df3f1865ae |
| 8d875702eb |
| 67e451ec23 |
| 92789d5a91 |
| 5e7ff66ba6 |
| 1b1a5e4068 |
| 73477fb9d4 |
| 8ac1ff7699 |
| 1bba437085 |
| 41fc67748a |
| 26842374de |
| a46d700390 |
| a34d565ea4 |
| c57fe1e6e4 |
| 8a08c5e6ce |
| 5d2694d36c |
| 98ff72be66 |
| 915a8beb40 |
| f7db030ad7 |
| 86e2cd1cc4 |
| f0613c945f |
| d11e7bc60e |
| b909b8879d |
| 918a92eeee |
| c9e8330e0a |
| f577139f70 |
| 491cd58f93 |
| 25296f8ffa |
| 4b178805de |
| fcebd715ba |
| 531e1dad6d |
| 3c008b6bb4 |
| 624f7ff6d5 |
| e503dffc9a |
| cd1aa8fb70 |
| b5fe22da18 |
| cd6289b79a |
| f5e6c9576e |
| 427ba47716 |
| 7b043025e8 |
| 67c989081d |
| 2fd16cd582 |
| 89af8fe500 |
| 1753c95910 |
| e7702e9c11 |
| e549657766 |
| 7c2742036b |
| 73f375f20d |
| 3e200d8b9d |
| 81fe617816 |
| 75354fbecd |
| 540e938223 |
| 6ef6ca9e65 |
| 35b9a066e3 |
| 957028699b |
| 12e6fcc97e |
| f9d9879513 |
| 278f7b1b58 |
| fbedeff697 |
| f64d8ecb77 |
| 3e38ea9b48 |
| 7834b63b55 |
| 2cf849dd12 |
| c47b96bc68 |
| 9659d98140 |
| f1698c55ff |
| 91c00e781a |
| c0d52723c9 |
| 590c07ad84 |
| 48c8d85d0c |
| 36a9f40a60 |
| 1460d590f1 |
| 698718a02a |
| 0a9dbe1cc1 |
| cce7aa2893 |
| 820b32c6d7 |
| efe5d08205 |
| 285d4e6e9b |
| f2a89b095d |
| 7d4e3619b0 |
| c4b87d2a0a |
| 2d0253bc4a |
| 017cf2adc9 |
| 64b50b419f |
| fc3e60f17f |
| 8dc7f16ef1 |
| 15a441b084 |
| 3c4903a339 |
| b53cfd0ec1 |
| c41399ef8e |
| 7bef13f913 |
| 4ff2953257 |
| 475e309f25 |
| a7074dbf0f |
| 66691c45a0 |
| fe4f95dabd |
| f62fadc9f9 |
| afb0c7fee2 |
| fd73b90416 |
| 541bf4395f |
| 63c80f2b7d |
| 385d99c56e |
| b5da61931f |
| 6af86367fd |
| 9dc322fc7b |
| c77154a5e6 |
| fc3ba07f2e |
| 229ba69b5d |
| 524d096777 |
| 815dfd672e |
| a4b3877cb2 |
| 6bb5804bb8 |
| 67d99457f2 |
| 0e25c0cabc |
| 6b61984e0e |
| 891fd1e7bf |
| de6c17797f |
| 0add6c6d90 |
| e6cdb141c5 |
| c435466fb0 |
| f8df654d8e |
| f3e754398e |
| d04331d244 |
| 1c82d379d9 |
| f083c82557 |
| b6d5ec5cf7 |
| c62e9d56b8 |
| 2a5d73730f |
| cffda52ba6 |
| cf081e7e25 |
| f351a4cc79 |
| d60654c3dc |
| 9b0b4f5ad7 |
| a21011799b |
| 2f40cde7f5 |
| 02a75356fb |
| b761fffa2d |
| c3be155f8d |
| 11192c362e |
| 2c18fd5465 |
| da76907279 |
| 1b42466a0d |
| 87f1c06b5b |
| d0c6b65870 |
| 3343c1fa6b |
| c8e3da6470 |
| c409f8b2d6 |
| 13a9aef579 |
| af4fde9a3a |
| 0282504f18 |
| bea905adcd |
| 7a9a1656f9 |
| 0ede0ed351 |
| ee6018186e |
| 293af3f3b0 |
| 9600637edd |
| 7d0753f1da |
| b04e090cbb |
| 7088ebad97 |
| 1d33b8e88a |
| 44c07e7332 |
| dbb8d9eedd |
| bc9ca41bc1 |
| e4736bf9d8 |
| 7360c4fd0e |
`.clang-tidy` (45 lines changed)

```diff
@@ -8,6 +8,7 @@ Checks: '-*,
 bugprone-chained-comparison,
 bugprone-compare-pointer-to-member-virtual-function,
 bugprone-copy-constructor-init,
 bugprone-crtp-constructor-accessibility,
 bugprone-dangling-handle,
 bugprone-dynamic-static-initializers,
 bugprone-empty-catch,
@@ -33,9 +34,11 @@ Checks: '-*,
 bugprone-non-zero-enum-to-bool-conversion,
 bugprone-optional-value-conversion,
 bugprone-parent-virtual-call,
 bugprone-pointer-arithmetic-on-polymorphic-object,
 bugprone-posix-return,
 bugprone-redundant-branch-condition,
 bugprone-reserved-identifier,
 bugprone-return-const-ref-from-parameter,
 bugprone-shared-ptr-array-mismatch,
 bugprone-signal-handler,
 bugprone-signed-char-misuse,
@@ -55,6 +58,7 @@ Checks: '-*,
 bugprone-suspicious-realloc-usage,
 bugprone-suspicious-semicolon,
 bugprone-suspicious-string-compare,
 bugprone-suspicious-stringview-data-usage,
 bugprone-swapped-arguments,
 bugprone-switch-missing-default-case,
 bugprone-terminating-continue,
@@ -97,10 +101,12 @@ Checks: '-*,
 modernize-make-unique,
 modernize-pass-by-value,
 modernize-type-traits,
 modernize-use-designated-initializers,
 modernize-use-emplace,
 modernize-use-equals-default,
 modernize-use-equals-delete,
 modernize-use-override,
 modernize-use-ranges,
 modernize-use-starts-ends-with,
 modernize-use-std-numbers,
 modernize-use-using,
@@ -121,9 +127,12 @@ Checks: '-*,
 readability-convert-member-functions-to-static,
 readability-duplicate-include,
 readability-else-after-return,
 readability-enum-initial-value,
 readability-implicit-bool-conversion,
 readability-inconsistent-declaration-parameter-name,
 readability-identifier-naming,
 readability-make-member-function-const,
 readability-math-missing-parentheses,
 readability-misleading-indentation,
 readability-non-const-parameter,
 readability-redundant-casting,
@@ -135,11 +144,45 @@ Checks: '-*,
 readability-simplify-boolean-expr,
 readability-static-accessed-through-instance,
 readability-static-definition-in-anonymous-namespace,
-readability-suspicious-call-argument
+readability-suspicious-call-argument,
+readability-use-std-min-max
 '
 
 CheckOptions:
 readability-braces-around-statements.ShortStatementLines: 2
+readability-identifier-naming.MacroDefinitionCase: UPPER_CASE
+readability-identifier-naming.ClassCase: CamelCase
+readability-identifier-naming.StructCase: CamelCase
+readability-identifier-naming.UnionCase: CamelCase
+readability-identifier-naming.EnumCase: CamelCase
+readability-identifier-naming.EnumConstantCase: CamelCase
+readability-identifier-naming.ScopedEnumConstantCase: CamelCase
+readability-identifier-naming.GlobalConstantCase: UPPER_CASE
+readability-identifier-naming.GlobalConstantPrefix: 'k'
+readability-identifier-naming.GlobalVariableCase: CamelCase
+readability-identifier-naming.GlobalVariablePrefix: 'g'
+readability-identifier-naming.ConstexprFunctionCase: camelBack
+readability-identifier-naming.ConstexprMethodCase: camelBack
+readability-identifier-naming.ClassMethodCase: camelBack
+readability-identifier-naming.ClassMemberCase: camelBack
+readability-identifier-naming.ClassConstantCase: UPPER_CASE
+readability-identifier-naming.ClassConstantPrefix: 'k'
+readability-identifier-naming.StaticConstantCase: UPPER_CASE
+readability-identifier-naming.StaticConstantPrefix: 'k'
+readability-identifier-naming.StaticVariableCase: UPPER_CASE
+readability-identifier-naming.StaticVariablePrefix: 'k'
+readability-identifier-naming.ConstexprVariableCase: UPPER_CASE
+readability-identifier-naming.ConstexprVariablePrefix: 'k'
+readability-identifier-naming.LocalConstantCase: camelBack
+readability-identifier-naming.LocalVariableCase: camelBack
+readability-identifier-naming.TemplateParameterCase: CamelCase
+readability-identifier-naming.ParameterCase: camelBack
+readability-identifier-naming.FunctionCase: camelBack
+readability-identifier-naming.MemberCase: camelBack
+readability-identifier-naming.PrivateMemberSuffix: _
+readability-identifier-naming.ProtectedMemberSuffix: _
+readability-identifier-naming.PublicMemberSuffix: ''
+readability-identifier-naming.FunctionIgnoredRegexp: '.*tag_invoke.*'
 bugprone-unsafe-functions.ReportMoreUnsafeFunctions: true
 bugprone-unused-return-value.CheckedReturnTypes: ::std::error_code;::std::error_condition;::std::errc
 misc-include-cleaner.IgnoreHeaders: '.*/(detail|impl)/.*;.*(expected|unexpected).*;.*ranges_lower_bound\.h;time.h;stdlib.h'
```
```diff
@@ -26,12 +26,12 @@ sources="src tests"
 formatter="clang-format -i"
 version=$($formatter --version | grep -o '[0-9\.]*')
 
-if [[ "18.0.0" > "$version" ]]; then
+if [[ "19.0.0" > "$version" ]]; then
     cat <<EOF
 
 ERROR
 -----------------------------------------------------------------------------
-A minimum of version 18 of `which clang-format` is required.
+A minimum of version 19 of `which clang-format` is required.
 Your version is $version.
 Please fix paths and run again.
 -----------------------------------------------------------------------------
 
@@ -42,7 +42,7 @@ verify_tag_signed() {
 while read local_ref local_oid remote_ref remote_oid; do
     # Check some things if we're pushing a branch called "release/"
     if echo "$remote_ref" | grep ^refs\/heads\/release\/ &> /dev/null ; then
-        version=$(echo $remote_ref | awk -F/ '{print $NF}')
+        version=$(git tag --points-at HEAD)
        echo "Looks like you're trying to push a $version release..."
        echo "Making sure you've signed and tagged it."
        if verify_commit_signed && verify_tag && verify_tag_signed ; then
```
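To see how the bumped version gate behaves against a locally installed binary, the check can be replayed outside the hook; this is an illustrative sketch rather than repository code:

```bash
#!/bin/bash
# Replay the pre-commit hook's clang-format version gate (assumes clang-format is on PATH).
version=$(clang-format --version | grep -o '[0-9\.]*')

# Note: `>` inside [[ ]] compares strings lexicographically, not numerically; this
# happens to work while clang-format major versions all have two digits.
if [[ "19.0.0" > "$version" ]]; then
    echo "clang-format $version is too old; version 19 or newer is required."
    exit 1
fi
echo "clang-format $version satisfies the hook's minimum."
```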
`.github/actions/build_clio/action.yml` (6 lines changed)

```diff
@@ -4,12 +4,18 @@ inputs:
   target:
     description: Build target name
     default: all
+  substract_threads:
+    description: An option for the action get_number_of_threads. See get_number_of_threads
+    required: true
+    default: '0'
 runs:
   using: composite
   steps:
     - name: Get number of threads
       uses: ./.github/actions/get_number_of_threads
       id: number_of_threads
+      with:
+        substract_threads: ${{ inputs.substract_threads }}
 
     - name: Build Clio
       shell: bash
```
`.github/actions/generate/action.yml` (20 lines changed)

```diff
@@ -12,6 +12,10 @@ inputs:
     description: Build type for third-party libraries and clio. Could be 'Release', 'Debug'
     required: true
     default: 'Release'
+  build_integration_tests:
+    description: Whether to build integration tests
+    required: true
+    default: 'true'
   code_coverage:
     description: Whether conan's coverage option should be on or not
     required: true
@@ -20,6 +24,10 @@ inputs:
     description: Whether Clio is to be statically linked
     required: true
     default: 'false'
+  sanitizer:
+    description: Sanitizer to use
+    required: true
+    default: 'false' # false, tsan, asan or ubsan
 runs:
   using: composite
   steps:
@@ -33,14 +41,20 @@ runs:
         BUILD_OPTION: "${{ inputs.conan_cache_hit == 'true' && 'missing' || '' }}"
         CODE_COVERAGE: "${{ inputs.code_coverage == 'true' && 'True' || 'False' }}"
         STATIC_OPTION: "${{ inputs.static == 'true' && 'True' || 'False' }}"
+        INTEGRATION_TESTS_OPTION: "${{ inputs.build_integration_tests == 'true' && 'True' || 'False' }}"
       run: |
         cd build
-        conan install .. -of . -b $BUILD_OPTION -s build_type=${{ inputs.build_type }} -o clio:static="${STATIC_OPTION}" -o clio:tests=True -o clio:integration_tests=True -o clio:lint=False -o clio:coverage="${CODE_COVERAGE}" --profile ${{ inputs.conan_profile }}
+        conan install .. -of . -b $BUILD_OPTION -s build_type=${{ inputs.build_type }} -o clio:static="${STATIC_OPTION}" -o clio:tests=True -o clio:integration_tests="${INTEGRATION_TESTS_OPTION}" -o clio:lint=False -o clio:coverage="${CODE_COVERAGE}" --profile ${{ inputs.conan_profile }}
 
     - name: Run cmake
       shell: bash
       env:
+        BUILD_TYPE: "${{ inputs.build_type }}"
+        SANITIZER_OPTION: |
+          ${{ inputs.sanitizer == 'tsan' && '-Dsan=thread' ||
+              inputs.sanitizer == 'ubsan' && '-Dsan=undefined' ||
+              inputs.sanitizer == 'asan' && '-Dsan=address' ||
+              '' }}
       run: |
         cd build
-        cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=${{ inputs.build_type }} ${{ inputs.extra_cmake_args }} .. -G Ninja
+        cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE="${BUILD_TYPE}" ${SANITIZER_OPTION} .. -G Ninja
```
`.github/actions/get_number_of_threads/action.yml` (14 lines changed)

```diff
@@ -1,5 +1,10 @@
 name: Get number of threads
 description: Determines number of threads to use on macOS and Linux
+inputs:
+  substract_threads:
+    description: How many threads to substract from the calculated number
+    required: true
+    default: '0'
 outputs:
   threads_number:
     description: Number of threads to use
@@ -19,8 +24,11 @@ runs:
     shell: bash
     run: echo "num=$(($(nproc) - 2))" >> $GITHUB_OUTPUT
 
-  - name: Export output variable
-    shell: bash
+  - name: Shift and export number of threads
     id: number_of_threads_export
     shell: bash
     run: |
-      echo "num=${{ steps.mac_threads.outputs.num || steps.linux_threads.outputs.num }}" >> $GITHUB_OUTPUT
+      num_of_threads=${{ steps.mac_threads.outputs.num || steps.linux_threads.outputs.num }}
+      shift_by=${{ inputs.substract_threads }}
+      shifted=$((num_of_threads - shift_by))
+      echo "num=$(( shifted > 1 ? shifted : 1 ))" >> $GITHUB_OUTPUT
```
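The shift-and-clamp arithmetic in the new step is plain shell; a minimal sketch with hypothetical numbers shows the floor of 1 in action:

```bash
#!/bin/bash
# Sketch of the action's thread math (values are hypothetical, not CI output).
num_of_threads=4   # e.g. what nproc minus the 2 reserved by the Linux step produced
shift_by=6         # e.g. the substract_threads input
shifted=$((num_of_threads - shift_by))
echo "num=$(( shifted > 1 ? shifted : 1 ))"   # prints num=1: never zero or negative
```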
`.github/actions/prepare_runner/action.yml` (28 lines changed)

```diff
@@ -11,9 +11,35 @@ runs:
     if: ${{ runner.os == 'macOS' }}
     shell: bash
     run: |
-      brew install llvm@14 pkg-config ninja bison cmake ccache jq gh conan@1
+      brew install llvm@14 pkg-config ninja bison ccache jq gh conan@1 ca-certificates
       echo "/opt/homebrew/opt/conan@1/bin" >> $GITHUB_PATH
 
+  - name: Install CMake 3.31.6 on mac
+    if: ${{ runner.os == 'macOS' }}
+    shell: bash
+    run: |
+      # Uninstall any existing cmake
+      brew uninstall cmake --ignore-dependencies || true
+
+      # Download specific cmake formula
+      FORMULA_URL="https://raw.githubusercontent.com/Homebrew/homebrew-core/b4e46db74e74a8c1650b38b1da222284ce1ec5ce/Formula/c/cmake.rb"
+      FORMULA_EXPECTED_SHA256="c7ec95d86f0657638835441871e77541165e0a2581b53b3dd657cf13ad4228d4"
+
+      mkdir -p /tmp/homebrew-formula
+      curl -s -L $FORMULA_URL -o /tmp/homebrew-formula/cmake.rb
+
+      # Verify the downloaded formula
+      ACTUAL_SHA256=$(shasum -a 256 /tmp/homebrew-formula/cmake.rb | cut -d ' ' -f 1)
+      if [ "$ACTUAL_SHA256" != "$FORMULA_EXPECTED_SHA256" ]; then
+        echo "Error: Formula checksum mismatch"
+        echo "Expected: $FORMULA_EXPECTED_SHA256"
+        echo "Actual: $ACTUAL_SHA256"
+        exit 1
+      fi
+
+      # Install cmake from the specific formula with force flag
+      brew install --force /tmp/homebrew-formula/cmake.rb
 
   - name: Fix git permissions on Linux
     if: ${{ runner.os == 'Linux' }}
     shell: bash
```
`.github/actions/setup_conan/action.yml` (4 lines changed)

```diff
@@ -15,10 +15,10 @@ runs:
     if: ${{ runner.os == 'macOS' }}
     shell: bash
     env:
-      CONAN_PROFILE: apple_clang_15
+      CONAN_PROFILE: apple_clang_16
     id: conan_setup_mac
     run: |
-      echo "Creating $CONAN_PROFILE conan profile";
+      echo "Creating $CONAN_PROFILE conan profile"
       conan profile new $CONAN_PROFILE --detect --force
       conan profile update settings.compiler.libcxx=libc++ $CONAN_PROFILE
       conan profile update settings.compiler.cppstd=20 $CONAN_PROFILE
```
`.github/scripts/execute-tests-under-sanitizer` (new executable file, 45 lines)

```diff
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+set -o pipefail
+
+# Note: This script is intended to be run from the root of the repository.
+#
+# This script runs each unit-test separately and generates reports from the currently active sanitizer.
+# Output is saved in ./.sanitizer-report in the root of the repository
+
+if [[ -z "$1" ]]; then
+    cat <<EOF
+
+ERROR
+-----------------------------------------------------------------------------
+Path to clio_tests should be passed as first argument to the script.
+-----------------------------------------------------------------------------
+
+EOF
+    exit 1
+fi
+
+TEST_BINARY=$1
+
+if [[ ! -f "$TEST_BINARY" ]]; then
+    echo "Test binary not found: $TEST_BINARY"
+    exit 1
+fi
+
+TESTS=$($TEST_BINARY --gtest_list_tests | awk '/^ / {print suite $1} !/^ / {suite=$1}')
+
+OUTPUT_DIR="./.sanitizer-report"
+mkdir -p "$OUTPUT_DIR"
+
+for TEST in $TESTS; do
+    OUTPUT_FILE="$OUTPUT_DIR/${TEST//\//_}"
+    export TSAN_OPTIONS="log_path=\"$OUTPUT_FILE\" die_after_fork=0"
+    export ASAN_OPTIONS="log_path=\"$OUTPUT_FILE\""
+    export UBSAN_OPTIONS="log_path=\"$OUTPUT_FILE\""
+    export MallocNanoZone='0' # for MacOSX
+    $TEST_BINARY --gtest_filter="$TEST" > /dev/null 2>&1
+
+    if [ $? -ne 0 ]; then
+        echo "'$TEST' failed a sanitizer check."
+    fi
+done
```
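Assuming a sanitizer-instrumented `clio_tests` binary already exists (the `build/` path below is illustrative), the script takes the binary as its only argument and leaves one log per failing test under `./.sanitizer-report`:

```bash
# Run from the repository root; report files are keyed by test name
# (sanitizers append a PID suffix to the configured log_path).
./.github/scripts/execute-tests-under-sanitizer ./build/clio_tests
ls .sanitizer-report/                      # one file per test that tripped a check
less ".sanitizer-report/MySuite.MyTest"*   # hypothetical report name
```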
`.github/workflows/build.yml` (200 lines changed)

```diff
@@ -9,7 +9,7 @@ on:
 jobs:
   check_format:
     name: Check format
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     container:
       image: rippleci/clio_ci:latest
     steps:
@@ -26,7 +26,7 @@ jobs:
 
   check_docs:
     name: Check documentation
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     container:
       image: rippleci/clio_ci:latest
     steps:
@@ -47,140 +47,44 @@ jobs:
       matrix:
         include:
           - os: heavy
-            container:
-              image: rippleci/clio_ci:latest
-            build_type: Release
             conan_profile: gcc
+            build_type: Release
+            container: '{ "image": "rippleci/clio_ci:latest" }'
             code_coverage: false
             static: true
           - os: heavy
-            container:
-              image: rippleci/clio_ci:latest
-            build_type: Debug
             conan_profile: gcc
+            build_type: Debug
+            container: '{ "image": "rippleci/clio_ci:latest" }'
             code_coverage: true
             static: true
           - os: heavy
-            container:
-              image: rippleci/clio_ci:latest
-            build_type: Release
             conan_profile: clang
+            build_type: Release
+            container: '{ "image": "rippleci/clio_ci:latest" }'
             code_coverage: false
             static: true
           - os: heavy
-            container:
-              image: rippleci/clio_ci:latest
-            build_type: Debug
             conan_profile: clang
+            build_type: Debug
+            container: '{ "image": "rippleci/clio_ci:latest" }'
             code_coverage: false
             static: true
-          - os: macos14
+          - os: macos15
             build_type: Release
             code_coverage: false
             static: false
-    runs-on: [self-hosted, "${{ matrix.os }}"]
-    container: ${{ matrix.container }}
-
-    steps:
-      - name: Clean workdir
-        if: ${{ runner.os == 'macOS' }}
-        uses: kuznetsss/workspace-cleanup@1.0
-
-      - uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-
-      - name: Prepare runner
-        uses: ./.github/actions/prepare_runner
-        with:
-          disable_ccache: false
-
-      - name: Setup conan
-        uses: ./.github/actions/setup_conan
-        id: conan
-        with:
-          conan_profile: ${{ matrix.conan_profile }}
-
-      - name: Restore cache
-        uses: ./.github/actions/restore_cache
-        id: restore_cache
-        with:
-          conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
-          conan_profile: ${{ steps.conan.outputs.conan_profile }}
-          ccache_dir: ${{ env.CCACHE_DIR }}
-          build_type: ${{ matrix.build_type }}
-          code_coverage: ${{ matrix.code_coverage }}
-
-      - name: Run conan and cmake
-        uses: ./.github/actions/generate
-        with:
-          conan_profile: ${{ steps.conan.outputs.conan_profile }}
-          conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
-          build_type: ${{ matrix.build_type }}
-          code_coverage: ${{ matrix.code_coverage }}
-          static: ${{ matrix.static }}
-
-      - name: Build Clio
-        uses: ./.github/actions/build_clio
-
-      - name: Show ccache's statistics
-        shell: bash
-        id: ccache_stats
-        run: |
-          ccache -s > /tmp/ccache.stats
-          miss_rate=$(cat /tmp/ccache.stats | grep 'Misses' | head -n1 | sed 's/.*(\(.*\)%).*/\1/')
-          echo "miss_rate=${miss_rate}" >> $GITHUB_OUTPUT
-          cat /tmp/ccache.stats
-
-      - name: Strip tests
-        if: ${{ !matrix.code_coverage }}
-        run: strip build/clio_tests && strip build/clio_integration_tests
-
-      - name: Upload clio_server
-        uses: actions/upload-artifact@v4
-        with:
-          name: clio_server_${{ runner.os }}_${{ matrix.build_type }}_${{ steps.conan.outputs.conan_profile }}
-          path: build/clio_server
-
-      - name: Upload clio_tests
-        if: ${{ !matrix.code_coverage }}
-        uses: actions/upload-artifact@v4
-        with:
-          name: clio_tests_${{ runner.os }}_${{ matrix.build_type }}_${{ steps.conan.outputs.conan_profile }}
-          path: build/clio_*tests
-
-      - name: Upload test data
-        if: ${{ !matrix.code_coverage }}
-        uses: actions/upload-artifact@v4
-        with:
-          name: clio_test_data_${{ runner.os }}_${{ matrix.build_type }}_${{ steps.conan.outputs.conan_profile }}
-          path: build/tests/unit/test_data
-
-      - name: Save cache
-        uses: ./.github/actions/save_cache
-        with:
-          conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
-          conan_hash: ${{ steps.restore_cache.outputs.conan_hash }}
-          conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
-          ccache_dir: ${{ env.CCACHE_DIR }}
-          ccache_cache_hit: ${{ steps.restore_cache.outputs.ccache_cache_hit }}
-          ccache_cache_miss_rate: ${{ steps.ccache_stats.outputs.miss_rate }}
-          build_type: ${{ matrix.build_type }}
-          code_coverage: ${{ matrix.code_coverage }}
-          conan_profile: ${{ steps.conan.outputs.conan_profile }}
-
-      # TODO: This is not a part of build process but it is the easiest way to do it here.
-      # It will be refactored in https://github.com/XRPLF/clio/issues/1075
-      - name: Run code coverage
-        if: ${{ matrix.code_coverage }}
-        uses: ./.github/actions/code_coverage
-
-  upload_coverage_report:
-    name: Codecov
-    needs: build
-    uses: ./.github/workflows/upload_coverage_report.yml
-    secrets:
-      CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
+    uses: ./.github/workflows/build_impl.yml
+    with:
+      runs_on: ${{ matrix.os }}
+      container: ${{ matrix.container }}
+      conan_profile: ${{ matrix.conan_profile }}
+      build_type: ${{ matrix.build_type }}
+      code_coverage: ${{ matrix.code_coverage }}
+      static: ${{ matrix.static }}
+      unit_tests: true
+      integration_tests: true
+      clio_server: true
 
   test:
     name: Run Tests
@@ -190,24 +94,24 @@ jobs:
       matrix:
         include:
           - os: heavy
             container:
               image: rippleci/clio_ci:latest
             conan_profile: gcc
             build_type: Release
           - os: heavy
             container:
               image: rippleci/clio_ci:latest
             conan_profile: clang
             build_type: Release
           - os: heavy
             container:
               image: rippleci/clio_ci:latest
             conan_profile: clang
             build_type: Debug
-          - os: macos14
-            conan_profile: apple_clang_15
+          - os: macos15
+            conan_profile: apple_clang_16
             build_type: Release
-    runs-on: [self-hosted, "${{ matrix.os }}"]
+    runs-on: ${{ matrix.os }}
     container: ${{ matrix.container }}
 
     steps:
@@ -219,12 +123,48 @@ jobs:
         with:
           name: clio_tests_${{ runner.os }}_${{ matrix.build_type }}_${{ matrix.conan_profile }}
 
-      - uses: actions/download-artifact@v4
-        with:
-          name: clio_test_data_${{ runner.os }}_${{ matrix.build_type }}_${{ matrix.conan_profile }}
-          path: tests/unit/test_data
-
       - name: Run clio_tests
         run: |
           chmod +x ./clio_tests
           ./clio_tests
 
+  check_config:
+    name: Check Config Description
+    needs: build
+    runs-on: heavy
+    container:
+      image: rippleci/clio_ci:latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/download-artifact@v4
+        with:
+          name: clio_server_Linux_Release_gcc
+      - name: Compare Config Description
+        shell: bash
+        run: |
+          repoConfigFile=docs/config-description.md
+          if ! [ -f ${repoConfigFile} ]; then
+            echo "Config Description markdown file is missing in docs folder"
+            exit 1
+          fi
+
+          chmod +x ./clio_server
+          configDescriptionFile=config_description_new.md
+          ./clio_server -d ${configDescriptionFile}
+
+          configDescriptionHash=$(sha256sum ${configDescriptionFile} | cut -d' ' -f1)
+          repoConfigHash=$(sha256sum ${repoConfigFile} | cut -d' ' -f1)
+
+          if [ ${configDescriptionHash} != ${repoConfigHash} ]; then
+            echo "Markdown file is not up to date"
+            diff -u "${repoConfigFile}" "${configDescriptionFile}"
+            rm -f ${configDescriptionFile}
+            exit 1
+          fi
+          rm -f ${configDescriptionFile}
+          exit 0
```
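The `check_config` job relies on `clio_server -d` writing the generated config description to a file; the same check can be approximated locally before pushing (the binary path below is illustrative):

```bash
# Regenerate the config description and compare it against the committed copy.
./build/clio_server -d config_description_new.md
diff -u docs/config-description.md config_description_new.md && echo "up to date"
rm -f config_description_new.md
```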
```diff
@@ -40,7 +40,7 @@ on:
 jobs:
   build_and_publish_image:
     name: Build and publish image
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
```
`.github/workflows/build_impl.yml` (new file, 192 lines)

```diff
@@ -0,0 +1,192 @@
+name: Reusable build
+on:
+  workflow_call:
+    inputs:
+      runs_on:
+        description: Runner to run the job on
+        required: true
+        type: string
+        default: heavy
+
+      container:
+        description: "The container object as a JSON string (leave empty to run natively)"
+        required: true
+        type: string
+        default: ""
+
+      conan_profile:
+        description: Conan profile to use
+        required: true
+        type: string
+
+      build_type:
+        description: Build type
+        required: true
+        type: string
+
+      disable_cache:
+        description: Whether ccache and conan cache should be disabled
+        required: false
+        type: boolean
+        default: false
+
+      code_coverage:
+        description: Whether to enable code coverage
+        required: true
+        type: boolean
+        default: false
+
+      static:
+        description: Whether to build static binaries
+        required: true
+        type: boolean
+        default: true
+
+      unit_tests:
+        description: Whether to run unit tests
+        required: true
+        type: boolean
+        default: false
+
+      integration_tests:
+        description: Whether to run integration tests
+        required: true
+        type: boolean
+        default: false
+
+      clio_server:
+        description: Whether to build clio_server
+        required: true
+        type: boolean
+        default: true
+
+      target:
+        description: Build target name
+        required: false
+        type: string
+        default: all
+
+      sanitizer:
+        description: Sanitizer to use
+        required: false
+        type: string
+        default: 'false'
+
+jobs:
+  build:
+    name: Build ${{ inputs.container != '' && 'in container' || 'natively' }}
+    runs-on: ${{ inputs.runs_on }}
+    container: ${{ inputs.container != '' && fromJson(inputs.container) || null }}
+
+    steps:
+      - name: Clean workdir
+        if: ${{ runner.os == 'macOS' }}
+        uses: kuznetsss/workspace-cleanup@1.0
+
+      - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      - name: Prepare runner
+        uses: ./.github/actions/prepare_runner
+        with:
+          disable_ccache: ${{ inputs.disable_cache }}
+
+      - name: Setup conan
+        uses: ./.github/actions/setup_conan
+        id: conan
+        with:
+          conan_profile: ${{ inputs.conan_profile }}
+
+      - name: Restore cache
+        if: ${{ !inputs.disable_cache }}
+        uses: ./.github/actions/restore_cache
+        id: restore_cache
+        with:
+          conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
+          conan_profile: ${{ steps.conan.outputs.conan_profile }}
+          ccache_dir: ${{ env.CCACHE_DIR }}
+          build_type: ${{ inputs.build_type }}
+          code_coverage: ${{ inputs.code_coverage }}
+
+      - name: Run conan and cmake
+        uses: ./.github/actions/generate
+        with:
+          conan_profile: ${{ steps.conan.outputs.conan_profile }}
+          conan_cache_hit: ${{ !inputs.disable_cache && steps.restore_cache.outputs.conan_cache_hit }}
+          build_type: ${{ inputs.build_type }}
+          code_coverage: ${{ inputs.code_coverage }}
+          static: ${{ inputs.static }}
+          sanitizer: ${{ inputs.sanitizer }}
+
+      - name: Build Clio
+        uses: ./.github/actions/build_clio
+        with:
+          target: ${{ inputs.target }}
+
+      - name: Show ccache's statistics
+        if: ${{ !inputs.disable_cache }}
+        shell: bash
+        id: ccache_stats
+        run: |
+          ccache -s > /tmp/ccache.stats
+          miss_rate=$(cat /tmp/ccache.stats | grep 'Misses' | head -n1 | sed 's/.*(\(.*\)%).*/\1/')
+          echo "miss_rate=${miss_rate}" >> $GITHUB_OUTPUT
+          cat /tmp/ccache.stats
+
+      - name: Strip unit_tests
+        if: ${{ inputs.unit_tests && !inputs.code_coverage && inputs.sanitizer == 'false' }}
+        run: strip build/clio_tests
+
+      - name: Strip integration_tests
+        if: ${{ inputs.integration_tests && !inputs.code_coverage }}
+        run: strip build/clio_integration_tests
+
+      - name: Upload clio_server
+        if: ${{ inputs.clio_server }}
+        uses: actions/upload-artifact@v4
+        with:
+          name: clio_server_${{ runner.os }}_${{ inputs.build_type }}_${{ steps.conan.outputs.conan_profile }}
+          path: build/clio_server
+
+      - name: Upload clio_tests
+        if: ${{ inputs.unit_tests && !inputs.code_coverage }}
+        uses: actions/upload-artifact@v4
+        with:
+          name: clio_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ steps.conan.outputs.conan_profile }}
+          path: build/clio_tests
+
+      - name: Upload clio_integration_tests
+        if: ${{ inputs.integration_tests && !inputs.code_coverage }}
+        uses: actions/upload-artifact@v4
+        with:
+          name: clio_integration_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ steps.conan.outputs.conan_profile }}
+          path: build/clio_integration_tests
+
+      - name: Save cache
+        if: ${{ !inputs.disable_cache && github.ref == 'refs/heads/develop' }}
+        uses: ./.github/actions/save_cache
+        with:
+          conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
+          conan_hash: ${{ steps.restore_cache.outputs.conan_hash }}
+          conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
+          ccache_dir: ${{ env.CCACHE_DIR }}
+          ccache_cache_hit: ${{ steps.restore_cache.outputs.ccache_cache_hit }}
+          ccache_cache_miss_rate: ${{ steps.ccache_stats.outputs.miss_rate }}
+          build_type: ${{ inputs.build_type }}
+          code_coverage: ${{ inputs.code_coverage }}
+          conan_profile: ${{ steps.conan.outputs.conan_profile }}
+
+      # TODO: This is not a part of build process but it is the easiest way to do it here.
+      # It will be refactored in https://github.com/XRPLF/clio/issues/1075
+      - name: Run code coverage
+        if: ${{ inputs.code_coverage }}
+        uses: ./.github/actions/code_coverage
+
+  upload_coverage_report:
+    if: ${{ inputs.code_coverage }}
+    name: Codecov
+    needs: build
+    uses: ./.github/workflows/upload_coverage_report.yml
+    secrets:
+      CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
```
`.github/workflows/check_libxrpl.yml` (2 lines changed)

```diff
@@ -71,7 +71,7 @@ jobs:
     name: Create an issue on failure
     needs: [build, run_tests]
     if: ${{ always() && contains(needs.*.result, 'failure') }}
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     permissions:
       contents: write
       issues: write
```
`.github/workflows/check_pr_title.yml` (4 lines changed)

```diff
@@ -6,11 +6,11 @@ on:
 
 jobs:
   check_title:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     # permissions:
     #   pull-requests: write
     steps:
-      - uses: ytanikin/PRConventionalCommits@1.2.0
+      - uses: ytanikin/PRConventionalCommits@1.3.0
        with:
          task_types: '["build","feat","fix","docs","test","ci","style","refactor","perf","chore"]'
          add_label: false
```
`.github/workflows/clang-tidy.yml` (8 lines changed)

```diff
@@ -1,7 +1,7 @@
 name: Clang-tidy check
 on:
   schedule:
-    - cron: "0 6 * * 1-5"
+    - cron: "0 9 * * 1-5"
   workflow_dispatch:
   pull_request:
     branches: [develop]
@@ -12,7 +12,7 @@ on:
 
 jobs:
   clang_tidy:
-    runs-on: [self-hosted, Linux]
+    runs-on: heavy
     container:
       image: rippleci/clio_ci:latest
     permissions:
@@ -60,7 +60,7 @@ jobs:
       shell: bash
       id: run_clang_tidy
       run: |
-        run-clang-tidy-18 -p build -j ${{ steps.number_of_threads.outputs.threads_number }} -fix -quiet 1>output.txt
+        run-clang-tidy-19 -p build -j ${{ steps.number_of_threads.outputs.threads_number }} -fix -quiet 1>output.txt
 
     - name: Check format
       if: ${{ steps.run_clang_tidy.outcome != 'success' }}
@@ -99,7 +99,7 @@ jobs:
 
     - name: Create PR with fixes
       if: ${{ steps.run_clang_tidy.outcome != 'success' }}
-      uses: peter-evans/create-pull-request@v6
+      uses: peter-evans/create-pull-request@v7
       env:
         GH_REPO: ${{ github.repository }}
         GH_TOKEN: ${{ github.token }}
```

```diff
@@ -6,7 +6,7 @@ on:
 
 jobs:
   restart_clang_tidy:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
 
     permissions:
       actions: write
```
`.github/workflows/docs.yml` (2 lines changed)

```diff
@@ -18,7 +18,7 @@ jobs:
     environment:
       name: github-pages
       url: ${{ steps.deployment.outputs.page_url }}
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     continue-on-error: true
     container:
       image: rippleci/clio_ci:latest
```
`.github/workflows/nightly.yml` (105 lines changed)

```diff
@@ -1,7 +1,7 @@
 name: Nightly release
 on:
   schedule:
-    - cron: '0 5 * * 1-5'
+    - cron: '0 8 * * 1-5'
   workflow_dispatch:
   pull_request:
     paths:
@@ -15,80 +15,29 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - os: macos14
+          - os: macos15
             build_type: Release
             static: false
           - os: heavy
             build_type: Release
             static: true
-            container:
-              image: rippleci/clio_ci:latest
+            container: '{ "image": "rippleci/clio_ci:latest" }'
           - os: heavy
             build_type: Debug
             static: true
-            container:
-              image: rippleci/clio_ci:latest
+            container: '{ "image": "rippleci/clio_ci:latest" }'
-    runs-on: [self-hosted, "${{ matrix.os }}"]
-    container: ${{ matrix.container }}
-
-    steps:
-      - name: Clean workdir
-        if: ${{ runner.os == 'macOS' }}
-        uses: kuznetsss/workspace-cleanup@1.0
-
-      - uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-
-      - name: Prepare runner
-        uses: ./.github/actions/prepare_runner
-        with:
-          disable_ccache: true
-
-      - name: Setup conan
-        uses: ./.github/actions/setup_conan
-        id: conan
-        with:
-          conan_profile: gcc
-
-      - name: Run conan and cmake
-        uses: ./.github/actions/generate
-        with:
-          conan_profile: ${{ steps.conan.outputs.conan_profile }}
-          conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
-          build_type: ${{ matrix.build_type }}
-          static: ${{ matrix.static }}
-
-      - name: Build Clio
-        uses: ./.github/actions/build_clio
-
-      - name: Strip tests
-        run: strip build/clio_tests && strip build/clio_integration_tests
-
-      - name: Upload clio_tests
-        uses: actions/upload-artifact@v4
-        with:
-          name: clio_tests_${{ runner.os }}_${{ matrix.build_type }}
-          path: build/clio_*tests
-
-      - name: Upload test data
-        uses: actions/upload-artifact@v4
-        with:
-          name: clio_test_data_${{ runner.os }}_${{ matrix.build_type }}
-          path: build/tests/unit/test_data
-
-      - name: Compress clio_server
-        shell: bash
-        run: |
-          cd build
-          tar czf ./clio_server_${{ runner.os }}_${{ matrix.build_type }}.tar.gz ./clio_server
-
-      - name: Upload clio_server
-        uses: actions/upload-artifact@v4
-        with:
-          name: clio_server_${{ runner.os }}_${{ matrix.build_type }}
-          path: build/clio_server_${{ runner.os }}_${{ matrix.build_type }}.tar.gz
+    uses: ./.github/workflows/build_impl.yml
+    with:
+      runs_on: ${{ matrix.os }}
+      container: ${{ matrix.container }}
+      conan_profile: gcc
+      build_type: ${{ matrix.build_type }}
+      code_coverage: false
+      static: ${{ matrix.static }}
+      unit_tests: true
+      integration_tests: true
+      clio_server: true
+      disable_cache: true
 
   run_tests:
     needs: build
@@ -96,15 +45,18 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - os: macos14
+          - os: macos15
+            conan_profile: apple_clang_16
             build_type: Release
+            integration_tests: false
           - os: heavy
+            conan_profile: gcc
             build_type: Release
             container:
               image: rippleci/clio_ci:latest
+            integration_tests: true
           - os: heavy
+            conan_profile: gcc
             build_type: Debug
             container:
               image: rippleci/clio_ci:latest
@@ -128,18 +80,17 @@ jobs:
 
       - uses: actions/download-artifact@v4
         with:
-          name: clio_tests_${{ runner.os }}_${{ matrix.build_type }}
-
-      - uses: actions/download-artifact@v4
-        with:
-          name: clio_test_data_${{ runner.os }}_${{ matrix.build_type }}
-          path: tests/unit/test_data
+          name: clio_tests_${{ runner.os }}_${{ matrix.build_type }}_${{ matrix.conan_profile }}
 
       - name: Run clio_tests
         run: |
           chmod +x ./clio_tests
           ./clio_tests
 
+      - uses: actions/download-artifact@v4
+        with:
+          name: clio_integration_tests_${{ runner.os }}_${{ matrix.build_type }}_${{ matrix.conan_profile }}
 
       # To be enabled back once docker in mac runner arrives
       # https://github.com/XRPLF/clio/issues/1400
       - name: Run clio_integration_tests
@@ -151,7 +102,7 @@ jobs:
   nightly_release:
     if: ${{ github.event_name != 'pull_request' }}
     needs: run_tests
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     env:
       GH_REPO: ${{ github.repository }}
       GH_TOKEN: ${{ github.token }}
@@ -200,14 +151,14 @@ jobs:
       tags: |
         type=raw,value=nightly
         type=raw,value=${{ github.sha }}
-      artifact_name: clio_server_Linux_Release
+      artifact_name: clio_server_Linux_Release_gcc
       strip_binary: true
       publish_image: ${{ github.event_name != 'pull_request' }}
 
   create_issue_on_failure:
     needs: [build, run_tests, nightly_release, build_and_publish_docker_image]
     if: ${{ always() && contains(needs.*.result, 'failure') && github.event_name != 'pull_request' }}
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     permissions:
       contents: write
       issues: write
```
`.github/workflows/sanitizers.yml` (new file, 106 lines)

```diff
@@ -0,0 +1,106 @@
+name: Run tests with sanitizers
+on:
+  schedule:
+    - cron: "0 4 * * 1-5"
+  workflow_dispatch:
+  pull_request:
+    paths:
+      - '.github/workflows/sanitizers.yml'
+
+jobs:
+  build:
+    name: Build clio tests
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          - sanitizer: tsan
+            compiler: gcc
+          - sanitizer: asan
+            compiler: gcc
+          # - sanitizer: ubsan # todo: enable when heavy runners are available
+          #   compiler: gcc
+    uses: ./.github/workflows/build_impl.yml
+    with:
+      runs_on: ubuntu-latest # todo: change to heavy
+      container: '{ "image": "rippleci/clio_ci:latest" }'
+      disable_cache: true
+      conan_profile: ${{ matrix.compiler }}.${{ matrix.sanitizer }}
+      build_type: Release
+      code_coverage: false
+      static: false
+      unit_tests: true
+      integration_tests: false
+      clio_server: false
+      target: clio_tests
+      sanitizer: ${{ matrix.sanitizer }}
+
+  # consider combining this with the previous matrix instead
+  run_tests:
+    needs: build
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          - sanitizer: tsan
+            compiler: gcc
+          - sanitizer: asan
+            compiler: gcc
+          # - sanitizer: ubsan # todo: enable when heavy runners are available
+          #   compiler: gcc
+    runs-on: ubuntu-latest # todo: change to heavy
+    container:
+      image: rippleci/clio_ci:latest
+    permissions:
+      contents: write
+      issues: write
+
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      - uses: actions/download-artifact@v4
+        with:
+          name: clio_tests_${{ runner.os }}_Release_${{ matrix.compiler }}.${{ matrix.sanitizer }}
+
+      - name: Run clio_tests [${{ matrix.compiler }} / ${{ matrix.sanitizer }}]
+        shell: bash
+        run: |
+          chmod +x ./clio_tests
+          ./.github/scripts/execute-tests-under-sanitizer ./clio_tests
+
+      - name: Check for sanitizer report
+        shell: bash
+        id: check_report
+        run: |
+          if ls .sanitizer-report/* 1> /dev/null 2>&1; then
+            echo "found_report=true" >> $GITHUB_OUTPUT
+          else
+            echo "found_report=false" >> $GITHUB_OUTPUT
+          fi
+
+      - name: Upload report
+        if: ${{ steps.check_report.outputs.found_report == 'true' }}
+        uses: actions/upload-artifact@v4
+        with:
+          name: ${{ matrix.compiler }}_${{ matrix.sanitizer }}_report
+          path: .sanitizer-report/*
+          include-hidden-files: true
+
+      #
+      # todo: enable when we have fixed all currently existing issues from sanitizers
+      #
+      # - name: Create an issue
+      #   if: ${{ steps.check_report.outputs.found_report == 'true' }}
+      #   uses: ./.github/actions/create_issue
+      #   env:
+      #     GH_TOKEN: ${{ github.token }}
+      #   with:
+      #     labels: 'bug'
+      #     title: '[${{ matrix.sanitizer }}/${{ matrix.compiler }}] reported issues'
+      #     body: >
+      #       Clio tests failed one or more sanitizer checks when built with ${{ matrix.compiler }}.
+
+      #       Workflow: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}/
+      #       Reports are available as artifacts.
```
`.github/workflows/upload_coverage_report.yml` (4 lines changed)

```diff
@@ -9,7 +9,7 @@ on:
 jobs:
   upload_report:
     name: Upload report
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
         with:
@@ -23,7 +23,7 @@ jobs:
 
       - name: Upload coverage report
         if: ${{ hashFiles('build/coverage_report.xml') != '' }}
-        uses: wandalen/wretry.action@v3.5.0
+        uses: wandalen/wretry.action@v3.7.3
         with:
           action: codecov/codecov-action@v4
           with: |
```
`.gitignore` (1 line changed)

```diff
@@ -6,6 +6,7 @@
 .vscode
 .python-version
 .DS_Store
+.sanitizer-report
 CMakeUserPresets.json
 config.json
 src/util/build/Build.cpp
```
```diff
@@ -16,6 +16,8 @@ option(coverage "Build test coverage report" FALSE)
 option(packaging "Create distribution packages" FALSE)
 option(lint "Run clang-tidy checks during compilation" FALSE)
 option(static "Statically linked Clio" FALSE)
+option(snapshot "Build snapshot tool" FALSE)
+
 # ========================================================================== #
 set(san "" CACHE STRING "Add sanitizer instrumentation")
 set(CMAKE_EXPORT_COMPILE_COMMANDS TRUE)
@@ -65,15 +67,21 @@ endif ()
 
 # Enable selected sanitizer if enabled via `san`
 if (san)
+  set(SUPPORTED_SANITIZERS "address" "thread" "memory" "undefined")
+  list(FIND SUPPORTED_SANITIZERS "${san}" INDEX)
+  if (INDEX EQUAL -1)
+    message(FATAL_ERROR "Error: Unsupported sanitizer '${san}'. Supported values are: ${SUPPORTED_SANITIZERS}.")
+  endif ()
+
   target_compile_options(
-    clio PUBLIC # Sanitizers recommend minimum of -O1 for reasonable performance
-                $<$<CONFIG:Debug>:-O1> ${SAN_FLAG} -fno-omit-frame-pointer
+    clio_options INTERFACE # Sanitizers recommend minimum of -O1 for reasonable performance
+                           $<$<CONFIG:Debug>:-O1> ${SAN_FLAG} -fno-omit-frame-pointer
   )
   target_compile_definitions(
-    clio PUBLIC $<$<STREQUAL:${san},address>:SANITIZER=ASAN> $<$<STREQUAL:${san},thread>:SANITIZER=TSAN>
-                $<$<STREQUAL:${san},memory>:SANITIZER=MSAN> $<$<STREQUAL:${san},undefined>:SANITIZER=UBSAN>
+    clio_options INTERFACE $<$<STREQUAL:${san},address>:SANITIZER=ASAN> $<$<STREQUAL:${san},thread>:SANITIZER=TSAN>
+                           $<$<STREQUAL:${san},memory>:SANITIZER=MSAN> $<$<STREQUAL:${san},undefined>:SANITIZER=UBSAN>
   )
-  target_link_libraries(clio INTERFACE ${SAN_FLAG} ${SAN_LIB})
+  target_link_libraries(clio_options INTERFACE ${SAN_FLAG} ${SAN_LIB})
 endif ()
 
 # Generate `docs` target for doxygen documentation if enabled Note: use `make docs` to generate the documentation
@@ -85,3 +93,7 @@ include(install/install)
 if (packaging)
   include(cmake/packaging.cmake) # This file exists only in build runner
 endif ()
+
+if (snapshot)
+  add_subdirectory(tools/snapshot)
+endif ()
```
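Driving the `san` cache variable by hand mirrors what the `generate` action does with its `SANITIZER_OPTION`; a hedged sketch (paths assume the usual conan-generated layout):

```bash
# Configure an address-sanitized build; san accepts address, thread, memory or undefined,
# and any other value now fails at configure time thanks to the SUPPORTED_SANITIZERS check.
cd build
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
      -DCMAKE_BUILD_TYPE=Release -Dsan=address .. -G Ninja
cmake --build . --target clio_tests
```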
````diff
@@ -21,7 +21,7 @@ git config --local core.hooksPath .githooks
 ```
 
 ## Git hooks dependencies
-The pre-commit hook requires `clang-format >= 18.0.0` and `cmake-format` to be installed on your machine.
+The pre-commit hook requires `clang-format >= 19.0.0` and `cmake-format` to be installed on your machine.
 `clang-format` can be installed using `brew` on macOS and default package manager on Linux.
 `cmake-format` can be installed using `pip`.
 The hook will also attempt to automatically use `doxygen` to verify that everything public in the codebase is covered by doc comments. If `doxygen` is not installed, the hook will raise a warning suggesting to install `doxygen` for future commits.
@@ -105,7 +105,7 @@ The button for that is near the bottom of the PR's page on GitHub.
 This is a non-exhaustive list of recommended style guidelines. These are not always strictly enforced and serve as a way to keep the codebase coherent.
 
 ## Formatting
-Code must conform to `clang-format` version 18, unless the result would be unreasonably difficult to read or maintain.
+Code must conform to `clang-format` version 19, unless the result would be unreasonably difficult to read or maintain.
 In most cases the pre-commit hook will take care of formatting and will fix any issues automatically.
 To manually format your code, use `clang-format -i <your changed files>` for C++ files and `cmake-format -i <your changed files>` for CMake files.
````
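As a concrete illustration of the manual route described above (file names are placeholders):

```bash
# Format changed files by hand when bypassing the pre-commit hook.
clang-format -i src/app/SomeFile.cpp src/app/SomeFile.hpp   # requires clang-format >= 19
cmake-format -i CMakeLists.txt cmake/SomeModule.cmake       # cmake-format comes from pip
```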
```diff
@@ -28,7 +28,6 @@ Below are some useful docs to learn more about Clio.
 **For Developers**:
 
 - [How to build Clio](./docs/build-clio.md)
 - [Metrics and static analysis](./docs/metrics-and-static-analysis.md)
 - [Coverage report](./docs/coverage-report.md)
 
 **For Operators**:
```
```diff
@@ -188,10 +188,10 @@ public:
 static auto
 generateData()
 {
-    constexpr auto TOTAL = 10'000;
+    constexpr auto kTOTAL = 10'000;
     std::vector<uint64_t> data;
-    data.reserve(TOTAL);
-    for (auto i = 0; i < TOTAL; ++i)
+    data.reserve(kTOTAL);
+    for (auto i = 0; i < kTOTAL; ++i)
         data.push_back(util::Random::uniform(1, 100'000'000));
 
     return data;
@@ -208,7 +208,7 @@ benchmarkThreads(benchmark::State& state)
 }
 
 template <typename CtxType>
-void
+static void
 benchmarkExecutionContextBatched(benchmark::State& state)
 {
     auto data = generateData();
@@ -219,7 +219,7 @@ benchmarkExecutionContextBatched(benchmark::State& state)
 }
 
 template <typename CtxType>
-void
+static void
 benchmarkAnyExecutionContextBatched(benchmark::State& state)
 {
     auto data = generateData();
```
```diff
@@ -23,19 +23,19 @@
 
 namespace util::build {
 
-static constexpr char versionString[] = "@CLIO_VERSION@";
+static constexpr char versionString[] = "@CLIO_VERSION@";  // NOLINT(readability-identifier-naming)
 
 std::string const&
 getClioVersionString()
 {
-    static std::string const value = versionString;
+    static std::string const value = versionString;  // NOLINT(readability-identifier-naming)
     return value;
 }
 
 std::string const&
 getClioFullVersionString()
 {
-    static std::string const value = "clio-" + getClioVersionString();
+    static std::string const value = "clio-" + getClioVersionString();  // NOLINT(readability-identifier-naming)
     return value;
 }
```
```diff
@@ -8,7 +8,7 @@ if (lint)
   endif ()
   message(STATUS "Using clang-tidy from CLIO_CLANG_TIDY_BIN")
 else ()
-  find_program(_CLANG_TIDY_BIN NAMES "clang-tidy-18" "clang-tidy" REQUIRED)
+  find_program(_CLANG_TIDY_BIN NAMES "clang-tidy-19" "clang-tidy" REQUIRED)
 endif ()
 
 if (NOT _CLANG_TIDY_BIN)
```
```diff
@@ -39,6 +39,34 @@ if (is_appleclang)
   list(APPEND COMPILER_FLAGS -Wreorder-init-list)
 endif ()
 
+if (san)
+  # When building with sanitizers some compilers will actually produce extra warnings/errors. We don't want this yet, at
+  # least not until we have fixed all runtime issues reported by the sanitizers. Once that is done we can start removing
+  # some of these and trying to fix it in our codebase. We can never remove all of below because most of them are
+  # reported from deep inside libraries like boost or libxrpl.
+  #
+  # TODO: Address in https://github.com/XRPLF/clio/issues/1885
+  list(
+    APPEND
+    COMPILER_FLAGS
+    -Wno-error=tsan # Disables treating TSAN warnings as errors
+    -Wno-tsan # Disables TSAN warnings (thread-safety analysis)
+    -Wno-uninitialized # Disables warnings about uninitialized variables (AddressSanitizer, UndefinedBehaviorSanitizer,
+                       # etc.)
+    -Wno-stringop-overflow # Disables warnings about potential string operation overflows (AddressSanitizer)
+    -Wno-unsafe-buffer-usage # Disables warnings about unsafe memory operations (AddressSanitizer)
+    -Wno-frame-larger-than # Disables warnings about stack frame size being too large (AddressSanitizer)
+    -Wno-unused-function # Disables warnings about unused functions (LeakSanitizer, memory-related issues)
+    -Wno-unused-but-set-variable # Disables warnings about unused variables (MemorySanitizer)
+    -Wno-thread-safety-analysis # Disables warnings related to thread safety usage (ThreadSanitizer)
+    -Wno-thread-safety # Disables warnings related to thread safety usage (ThreadSanitizer)
+    -Wno-sign-compare # Disables warnings about signed/unsigned comparison (UndefinedBehaviorSanitizer)
+    -Wno-nonnull # Disables warnings related to null pointer dereferencing (UndefinedBehaviorSanitizer)
+    -Wno-address # Disables warnings about address-related issues (UndefinedBehaviorSanitizer)
+    -Wno-array-bounds # Disables array bounds checks (UndefinedBehaviorSanitizer)
+  )
+endif ()
 
 # See https://github.com/cpp-best-practices/cppbestpractices/blob/master/02-Use_the_Tools_Available.md#gcc--clang for
 # the flags description
```
```diff
@@ -1,3 +1,11 @@
-target_compile_definitions(clio_options INTERFACE BOOST_STACKTRACE_LINK)
-target_compile_definitions(clio_options INTERFACE BOOST_STACKTRACE_USE_BACKTRACE)
-find_package(libbacktrace REQUIRED CONFIG)
+if ("${san}" STREQUAL "")
+  target_compile_definitions(clio_options INTERFACE BOOST_STACKTRACE_LINK)
+  target_compile_definitions(clio_options INTERFACE BOOST_STACKTRACE_USE_BACKTRACE)
+  find_package(libbacktrace REQUIRED CONFIG)
+else ()
+  # Some sanitizers (TSAN and ASAN for sure) can't be used with libbacktrace because they have their own backtracing
+  # capabilities and there are conflicts. In any case, this makes sure Clio code knows that backtrace is not available.
+  # See relevant conan profiles for sanitizers where we disable stacktrace in Boost explicitly.
+  target_compile_definitions(clio_options INTERFACE CLIO_WITHOUT_STACKTRACE)
+  message(STATUS "Sanitizer enabled, disabling stacktrace")
+endif ()
```
10
conanfile.py
10
conanfile.py
@@ -19,16 +19,18 @@ class Clio(ConanFile):
        'packaging': [True, False],  # create distribution packages
        'coverage': [True, False],  # build for test coverage report; create custom target `clio_tests-ccov`
        'lint': [True, False],  # run clang-tidy checks during compilation
        'snapshot': [True, False],  # build export/import snapshot tool
    }

    requires = [
        'boost/1.82.0',
        'boost/1.83.0',
        'cassandra-cpp-driver/2.17.0',
        'fmt/10.1.1',
        'protobuf/3.21.9',
        'grpc/1.50.1',
        'openssl/1.1.1u',
        'xrpl/2.3.0-b1',
        'openssl/1.1.1v',
        'xrpl/2.4.0',
        'zlib/1.3.1',
        'libbacktrace/cci.20210118'
    ]

@@ -43,6 +45,7 @@ class Clio(ConanFile):
        'coverage': False,
        'lint': False,
        'docs': False,
        'snapshot': False,

        'xrpl/*:tests': False,
        'xrpl/*:rocksdb': False,
@@ -91,6 +94,7 @@ class Clio(ConanFile):
        tc.variables['docs'] = self.options.docs
        tc.variables['packaging'] = self.options.packaging
        tc.variables['benchmark'] = self.options.benchmark
        tc.variables['snapshot'] = self.options.snapshot
        tc.generate()

    def build(self):
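The new `snapshot` option can be toggled at `conan install` time like the existing ones; a minimal sketch (flags other than `-o snapshot=True` mirror the conan commands used elsewhere in these docs):

```sh
conan install . --output-folder build --build missing --settings build_type=Release -o snapshot=True
```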
@@ -4,12 +4,13 @@ This image contains an environment to build [Clio](https://github.com/XRPLF/clio).
It is used in [Clio Github Actions](https://github.com/XRPLF/clio/actions) but can also be used to compile Clio locally.

The image is based on Ubuntu 20.04 and contains:
- clang 16
- clang 16.0.6
- gcc 12.3
- doxygen 1.10
- doxygen 1.12
- gh 2.40
- ccache 4.8.3
- conan
- ccache 4.10.2
- conan 1.62
- and some other useful tools

Conan is set up to build Clio without any additional steps. There are two preset conan profiles: `clang` and `gcc` to use corresponding compiler.
Conan is set up to build Clio without any additional steps. There are two preset conan profiles: `clang` and `gcc` to use the corresponding compiler. By default, conan is set up to use `gcc`.
Sanitizer builds for `ASAN`, `TSAN` and `UBSAN` are enabled via conan profiles for each of the supported compilers. These can be selected using the following pattern (all lowercase): `[compiler].[sanitizer]` (e.g. `--profile gcc.tsan`).
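For example, a ThreadSanitizer build with gcc is selected by naming the profile (flags other than `--profile` mirror the conan commands used elsewhere in these docs):

```sh
conan install .. --output-folder . --build missing --settings build_type=Release --profile gcc.tsan
```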
9 docker/ci/conan/clang.asan Normal file
@@ -0,0 +1,9 @@
include(clang)

[options]
boost:extra_b2_flags="cxxflags=\"-fsanitize=address\" linkflags=\"-fsanitize=address\""
boost:without_stacktrace=True
[env]
CFLAGS="-fsanitize=address"
CXXFLAGS="-fsanitize=address"
LDFLAGS="-fsanitize=address"

9 docker/ci/conan/clang.tsan Normal file
@@ -0,0 +1,9 @@
include(clang)

[options]
boost:extra_b2_flags="cxxflags=\"-fsanitize=thread\" linkflags=\"-fsanitize=thread\""
boost:without_stacktrace=True
[env]
CFLAGS="-fsanitize=thread"
CXXFLAGS="-fsanitize=thread"
LDFLAGS="-fsanitize=thread"

9 docker/ci/conan/clang.ubsan Normal file
@@ -0,0 +1,9 @@
include(clang)

[options]
boost:extra_b2_flags="cxxflags=\"-fsanitize=undefined\" linkflags=\"-fsanitize=undefined\""
boost:without_stacktrace=True
[env]
CFLAGS="-fsanitize=undefined"
CXXFLAGS="-fsanitize=undefined"
LDFLAGS="-fsanitize=undefined"

9 docker/ci/conan/gcc.asan Normal file
@@ -0,0 +1,9 @@
include(gcc)

[options]
boost:extra_b2_flags="cxxflags=\"-fsanitize=address\" linkflags=\"-fsanitize=address\""
boost:without_stacktrace=True
[env]
CFLAGS="-fsanitize=address"
CXXFLAGS="-fsanitize=address"
LDFLAGS="-fsanitize=address"

9 docker/ci/conan/gcc.tsan Normal file
@@ -0,0 +1,9 @@
include(gcc)

[options]
boost:extra_b2_flags="cxxflags=\"-fsanitize=thread\" linkflags=\"-fsanitize=thread\""
boost:without_stacktrace=True
[env]
CFLAGS="-fsanitize=thread"
CXXFLAGS="-fsanitize=thread"
LDFLAGS="-fsanitize=thread"

9 docker/ci/conan/gcc.ubsan Normal file
@@ -0,0 +1,9 @@
include(gcc)

[options]
boost:extra_b2_flags="cxxflags=\"-fsanitize=undefined\" linkflags=\"-fsanitize=undefined\""
boost:without_stacktrace=True
[env]
CFLAGS="-fsanitize=undefined"
CXXFLAGS="-fsanitize=undefined"
LDFLAGS="-fsanitize=undefined"
@@ -7,7 +7,7 @@ USER root
WORKDIR /root

ENV CCACHE_VERSION=4.10.2 \
    LLVM_TOOLS_VERSION=18 \
    LLVM_TOOLS_VERSION=19 \
    GH_VERSION=2.40.0 \
    DOXYGEN_VERSION=1.12.0

@@ -98,3 +98,10 @@ RUN conan profile new clang --detect \
    && conan profile update "conf.tools.build:compiler_executables={\"c\": \"/usr/bin/clang-16\", \"cpp\": \"/usr/bin/clang++-16\"}" clang

RUN echo "include(gcc)" >> .conan/profiles/default

COPY conan/gcc.asan /root/.conan/profiles
COPY conan/gcc.tsan /root/.conan/profiles
COPY conan/gcc.ubsan /root/.conan/profiles
COPY conan/clang.asan /root/.conan/profiles
COPY conan/clang.tsan /root/.conan/profiles
COPY conan/clang.ubsan /root/.conan/profiles
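Since the profiles are copied into `/root/.conan/profiles`, they should show up in conan's profile listing inside the image (the `clio-ci` image tag is illustrative):

```sh
docker run --rm clio-ci conan profile list
```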
@@ -181,3 +181,20 @@ Sometimes, during development, you need to build against a custom version of `libxrpl`
4. Build Clio as you would have before.

See [Building Clio](#building-clio) for details.

## Using `clang-tidy` for static analysis

The minimum [clang-tidy](https://clang.llvm.org/extra/clang-tidy/) version required is 19.0.

Clang-tidy can be run by CMake when building the project. To achieve this, you just need to provide the option `-o lint=True` for the `conan install` command:

```sh
conan install .. --output-folder . --build missing --settings build_type=Release -o tests=True -o lint=True
```

By default, CMake will try to find `clang-tidy` automatically on your system.
To force CMake to use your desired binary, set the `CLIO_CLANG_TIDY_BIN` environment variable to the path of the `clang-tidy` binary. For example:

```sh
export CLIO_CLANG_TIDY_BIN=/opt/homebrew/opt/llvm@19/bin/clang-tidy
```
452 docs/config-description.md Normal file
@@ -0,0 +1,452 @@
# Clio Config Description
This file lists all Clio configuration definitions in detail.

## Configuration Details

### Key: database.type
- **Required**: True
- **Type**: string
- **Default value**: cassandra
- **Constraints**: The value must be one of the following: `cassandra`
- **Description**: Type of database to use. Clio currently supports Cassandra and ScyllaDB; both are configured with the `cassandra` type.
### Key: database.cassandra.contact_points
- **Required**: True
- **Type**: string
- **Default value**: localhost
- **Constraints**: None
- **Description**: A list of IP addresses or hostnames of the initial nodes (Cassandra/ScyllaDB cluster nodes) that the client will connect to when establishing a connection with the database. If you're running locally, it should be 'localhost' or 127.0.0.1.
### Key: database.cassandra.secure_connect_bundle
- **Required**: False
- **Type**: string
- **Default value**: None
- **Constraints**: None
- **Description**: Configuration file that contains the necessary security credentials and connection details for securely connecting to a Cassandra database cluster.
### Key: database.cassandra.port
- **Required**: False
- **Type**: int
- **Default value**: None
- **Constraints**: The minimum value is `1`. The maximum value is `65535`
- **Description**: Port number to connect to the database.
### Key: database.cassandra.keyspace
- **Required**: True
- **Type**: string
- **Default value**: clio
- **Constraints**: None
- **Description**: Keyspace to use for the database.
### Key: database.cassandra.replication_factor
- **Required**: True
- **Type**: int
- **Default value**: 3
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
- **Description**: Number of replicated nodes for ScyllaDB. Visit this link for more details: https://university.scylladb.com/courses/scylla-essentials-overview/lessons/high-availability/topic/fault-tolerance-replication-factor/
### Key: database.cassandra.table_prefix
- **Required**: False
- **Type**: string
- **Default value**: None
- **Constraints**: None
- **Description**: Prefix for database table names.
### Key: database.cassandra.max_write_requests_outstanding
- **Required**: True
- **Type**: int
- **Default value**: 10000
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
- **Description**: Maximum number of outstanding write requests. Write requests are API calls that write to the database.
### Key: database.cassandra.max_read_requests_outstanding
- **Required**: True
- **Type**: int
- **Default value**: 100000
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
- **Description**: Maximum number of outstanding read requests, which read from the database.
### Key: database.cassandra.threads
- **Required**: True
- **Type**: int
- **Default value**: The number of available CPU cores.
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
- **Description**: Number of threads that will be used for database operations.
### Key: database.cassandra.core_connections_per_host
- **Required**: True
- **Type**: int
- **Default value**: 1
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
- **Description**: Number of core connections per host for Cassandra.
### Key: database.cassandra.queue_size_io
- **Required**: False
- **Type**: int
- **Default value**: None
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
- **Description**: Queue size for I/O operations in Cassandra.
### Key: database.cassandra.write_batch_size
- **Required**: True
- **Type**: int
- **Default value**: 20
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
- **Description**: Batch size for write operations in Cassandra.
### Key: database.cassandra.connect_timeout
- **Required**: False
- **Type**: int
- **Default value**: None
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
- **Description**: The maximum amount of time in seconds the system will wait for a connection to be successfully established with the database.
### Key: database.cassandra.request_timeout
- **Required**: False
- **Type**: int
- **Default value**: None
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
- **Description**: The maximum amount of time in seconds the system will wait for a request to be fetched from the database.
### Key: database.cassandra.username
- **Required**: False
- **Type**: string
- **Default value**: None
- **Constraints**: None
- **Description**: The username used for authenticating with the database.
### Key: database.cassandra.password
- **Required**: False
- **Type**: string
- **Default value**: None
- **Constraints**: None
- **Description**: The password used for authenticating with the database.
### Key: database.cassandra.certfile
- **Required**: False
- **Type**: string
- **Default value**: None
- **Constraints**: None
- **Description**: The path to the SSL/TLS certificate file used to establish a secure connection between the client and the Cassandra database.
### Key: allow_no_etl
- **Required**: True
- **Type**: boolean
- **Default value**: True
- **Constraints**: None
- **Description**: If True, Clio will run even if it is not connected to any ETL source.
### Key: etl_sources.[].ip
- **Required**: False
- **Type**: string
- **Default value**: None
- **Constraints**: The value must be a valid IP address
- **Description**: IP address of the ETL source.
### Key: etl_sources.[].ws_port
- **Required**: False
- **Type**: string
- **Default value**: None
- **Constraints**: The minimum value is `1`. The maximum value is `65535`
- **Description**: WebSocket port of the ETL source.
### Key: etl_sources.[].grpc_port
- **Required**: False
- **Type**: string
- **Default value**: None
- **Constraints**: The minimum value is `1`. The maximum value is `65535`
- **Description**: gRPC port of the ETL source.
### Key: forwarding.cache_timeout
- **Required**: True
- **Type**: double
- **Default value**: 0
- **Constraints**: The value must be a positive double number
- **Description**: Timeout duration for the forwarding cache used in Rippled communication.
### Key: forwarding.request_timeout
- **Required**: True
- **Type**: double
- **Default value**: 10
- **Constraints**: The value must be a positive double number
- **Description**: Timeout duration for the forwarding request used in Rippled communication.
### Key: rpc.cache_timeout
- **Required**: True
- **Type**: double
- **Default value**: 0
- **Constraints**: The value must be a positive double number
- **Description**: Timeout duration for RPC requests.
### Key: num_markers
- **Required**: False
- **Type**: int
- **Default value**: None
- **Constraints**: The minimum value is `1`. The maximum value is `256`
- **Description**: The number of markers, i.e. the number of coroutines used to download the initial ledger.
### Key: dos_guard.whitelist.[]
- **Required**: False
- **Type**: string
- **Default value**: None
- **Constraints**: None
- **Description**: List of IP addresses to whitelist for DOS protection.
### Key: dos_guard.max_fetches
- **Required**: True
- **Type**: int
- **Default value**: 1000000
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
- **Description**: Maximum number of fetch operations allowed by DOS guard.
### Key: dos_guard.max_connections
- **Required**: True
- **Type**: int
- **Default value**: 20
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
- **Description**: Maximum number of concurrent connections allowed by DOS guard.
### Key: dos_guard.max_requests
- **Required**: True
- **Type**: int
- **Default value**: 20
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
- **Description**: Maximum number of requests allowed by DOS guard.
### Key: dos_guard.sweep_interval
- **Required**: True
- **Type**: double
- **Default value**: 1
- **Constraints**: The value must be a positive double number
- **Description**: Interval in seconds for DOS guard to sweep/clear its state.
### Key: workers
- **Required**: True
- **Type**: int
- **Default value**: The number of available CPU cores.
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
- **Description**: Number of threads to process RPC requests.
### Key: server.ip
- **Required**: True
- **Type**: string
- **Default value**: None
- **Constraints**: The value must be a valid IP address
- **Description**: IP address of the Clio HTTP server.
### Key: server.port
- **Required**: True
- **Type**: int
- **Default value**: None
- **Constraints**: The minimum value is `1`. The maximum value is `65535`
- **Description**: Port number of the Clio HTTP server.
### Key: server.max_queue_size
- **Required**: True
- **Type**: int
- **Default value**: 0
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
- **Description**: Maximum size of the server's request queue. A value of 0 means no limit.
### Key: server.local_admin
- **Required**: False
- **Type**: boolean
- **Default value**: None
- **Constraints**: None
- **Description**: Indicates if the server should run with admin privileges. Only one of local_admin or admin_password can be set.
### Key: server.admin_password
- **Required**: False
- **Type**: string
- **Default value**: None
- **Constraints**: None
- **Description**: Password for Clio admin-only APIs. Only one of local_admin or admin_password can be set.
### Key: server.processing_policy
- **Required**: True
- **Type**: string
- **Default value**: parallel
- **Constraints**: The value must be one of the following: `parallel, sequent`
- **Description**: Could be "sequent" or "parallel". For the sequent policy, requests from a single client connection are processed one by one, with the next request read only after the previous one is processed. For the parallel policy, Clio will accept all requests and process them in parallel, sending a reply for each request as soon as it is ready.
### Key: server.parallel_requests_limit
- **Required**: False
- **Type**: int
- **Default value**: None
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
- **Description**: Optional parameter, used only if processing_policy is `parallel`. It limits the number of requests for a single client connection that are processed in parallel. If not specified, the limit is infinite.
### Key: server.ws_max_sending_queue_size
- **Required**: True
- **Type**: int
- **Default value**: 1500
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
- **Description**: Maximum size of the websocket sending queue.
### Key: prometheus.enabled
- **Required**: True
- **Type**: boolean
- **Default value**: False
- **Constraints**: None
- **Description**: Enable or disable Prometheus metrics.
### Key: prometheus.compress_reply
- **Required**: True
- **Type**: boolean
- **Default value**: False
- **Constraints**: None
- **Description**: Enable or disable compression of Prometheus responses.
### Key: io_threads
- **Required**: True
- **Type**: int
- **Default value**: 2
- **Constraints**: The minimum value is `1`. The maximum value is `65535`
- **Description**: Number of I/O threads. The value cannot be less than 1.
### Key: subscription_workers
- **Required**: True
- **Type**: int
- **Default value**: 1
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
- **Description**: The number of worker threads or processes that are responsible for managing and processing subscription-based tasks from rippled.
### Key: graceful_period
- **Required**: True
- **Type**: double
- **Default value**: 10
- **Constraints**: The value must be a positive double number
- **Description**: Number of seconds the server will wait to shut down gracefully.
### Key: cache.num_diffs
- **Required**: True
- **Type**: int
- **Default value**: 32
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
- **Description**: Number of diffs to cache. For more info, consult the readme.md in etc.
### Key: cache.num_markers
- **Required**: True
- **Type**: int
- **Default value**: 48
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
- **Description**: Number of markers to cache.
### Key: cache.num_cursors_from_diff
- **Required**: True
- **Type**: int
- **Default value**: 0
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
- **Description**: Number of cursors to generate from ledger diffs.
### Key: cache.num_cursors_from_account
- **Required**: True
- **Type**: int
- **Default value**: 0
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
- **Description**: Number of cursors to generate from accounts.
### Key: cache.page_fetch_size
- **Required**: True
- **Type**: int
- **Default value**: 512
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
- **Description**: Page fetch size for cache operations.
### Key: cache.load
- **Required**: True
- **Type**: string
- **Default value**: async
- **Constraints**: The value must be one of the following: `sync, async, none`
- **Description**: Cache loading strategy ('sync', 'async', or 'none').
### Key: log_channels.[].channel
- **Required**: False
- **Type**: string
- **Default value**: None
- **Constraints**: The value must be one of the following: `General, WebServer, Backend, RPC, ETL, Subscriptions, Performance, Migration`
- **Description**: Name of the log channel.
### Key: log_channels.[].log_level
- **Required**: False
- **Type**: string
- **Default value**: None
- **Constraints**: The value must be one of the following: `trace, debug, info, warning, error, fatal, count`
- **Description**: Log level for the specific log channel.
### Key: log_level
- **Required**: True
- **Type**: string
- **Default value**: info
- **Constraints**: The value must be one of the following: `trace, debug, info, warning, error, fatal, count`
- **Description**: General logging level of Clio. This level will be applied to all log channels that do not have an explicitly defined logging level.
### Key: log_format
- **Required**: True
- **Type**: string
- **Default value**: `%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% %Message%`
- **Constraints**: None
- **Description**: Format string for log messages.
### Key: log_to_console
- **Required**: True
- **Type**: boolean
- **Default value**: True
- **Constraints**: None
- **Description**: Enable or disable logging to console.
### Key: log_directory
- **Required**: False
- **Type**: string
- **Default value**: None
- **Constraints**: None
- **Description**: Directory path for log files.
### Key: log_rotation_size
- **Required**: True
- **Type**: int
- **Default value**: 2048
- **Constraints**: The minimum value is `1`. The maximum value is `4294967295`
- **Description**: Log rotation size in megabytes. When the log file reaches this size, a new log file starts.
### Key: log_directory_max_size
- **Required**: True
- **Type**: int
- **Default value**: 51200
- **Constraints**: The minimum value is `1`. The maximum value is `4294967295`
- **Description**: Maximum size of the log directory in megabytes.
### Key: log_rotation_hour_interval
- **Required**: True
- **Type**: int
- **Default value**: 12
- **Constraints**: The minimum value is `1`. The maximum value is `4294967295`
- **Description**: Interval in hours for log rotation. If the current log file has been in use for this many hours, a new log file starts.
### Key: log_tag_style
- **Required**: True
- **Type**: string
- **Default value**: none
- **Constraints**: The value must be one of the following: `int, uint, null, none, uuid`
- **Description**: Style for log tags.
### Key: extractor_threads
- **Required**: True
- **Type**: int
- **Default value**: 1
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
- **Description**: Number of extractor threads.
### Key: read_only
- **Required**: True
- **Type**: boolean
- **Default value**: True
- **Constraints**: None
- **Description**: Indicates if the server should have read-only privileges.
### Key: txn_threshold
- **Required**: True
- **Type**: int
- **Default value**: 0
- **Constraints**: The minimum value is `0`. The maximum value is `65535`
- **Description**: Transaction threshold value.
### Key: start_sequence
- **Required**: False
- **Type**: int
- **Default value**: None
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
- **Description**: Starting ledger index.
### Key: finish_sequence
- **Required**: False
- **Type**: int
- **Default value**: None
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
- **Description**: Ending ledger index.
### Key: ssl_cert_file
- **Required**: False
- **Type**: string
- **Default value**: None
- **Constraints**: None
- **Description**: Path to the SSL certificate file.
### Key: ssl_key_file
- **Required**: False
- **Type**: string
- **Default value**: None
- **Constraints**: None
- **Description**: Path to the SSL key file.
### Key: api_version.default
- **Required**: True
- **Type**: int
- **Default value**: 1
- **Constraints**: The minimum value is `1`. The maximum value is `3`
- **Description**: Default API version Clio will run on.
### Key: api_version.min
- **Required**: True
- **Type**: int
- **Default value**: 1
- **Constraints**: The minimum value is `1`. The maximum value is `3`
- **Description**: Minimum API version.
### Key: api_version.max
- **Required**: True
- **Type**: int
- **Default value**: 3
- **Constraints**: The minimum value is `1`. The maximum value is `3`
- **Description**: Maximum API version.
### Key: migration.full_scan_threads
- **Required**: True
- **Type**: int
- **Default value**: 2
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
- **Description**: The number of threads used to scan the table.
### Key: migration.full_scan_jobs
- **Required**: True
- **Type**: int
- **Default value**: 4
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
- **Description**: The number of coroutines used to scan the table.
### Key: migration.cursors_per_job
- **Required**: True
- **Type**: int
- **Default value**: 100
- **Constraints**: The minimum value is `0`. The maximum value is `4294967295`
- **Description**: The number of cursors each coroutine will scan.
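A file in this format can be regenerated via the `--config-description` flag added to the CLI later in this changeset (the `clio_server` binary name is an assumption):

```sh
./clio_server --config-description docs/config-description.md
```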
@@ -1,43 +0,0 @@
/*
 * This is an example configuration file. Please do not use without modifying to suit your needs.
 */
{
  "database": {
    "type": "cassandra",
    "cassandra": {
      // This option can be used to setup a secure connect bundle connection
      "secure_connect_bundle": "[path/to/zip. ignore if using contact_points]",
      // The following options are used only if using contact_points
      "contact_points": "[ip. ignore if using secure_connect_bundle]",
      "port": "[port. ignore if using_secure_connect_bundle]",
      // Authentication settings
      "username": "[username, if any]",
      "password": "[password, if any]",
      // Other common settings
      "keyspace": "clio",
      "max_write_requests_outstanding": 25000,
      "max_read_requests_outstanding": 30000,
      "threads": 8
    }
  },
  "etl_sources": [
    {
      "ip": "[rippled ip]",
      "ws_port": "6006",
      "grpc_port": "50051"
    }
  ],
  "dos_guard": {
    "whitelist": [
      "127.0.0.1"
    ]
  },
  "server": {
    "ip": "0.0.0.0",
    "port": 8080
  },
  "log_level": "debug",
  "log_file": "./clio.log",
  "extractor_threads": 8,
  "read_only": false
}
@@ -39,6 +39,9 @@
  "cache_timeout": 0.250, // in seconds, could be 0, which means no cache
  "request_timeout": 10.0 // time for Clio to wait for rippled to reply on a forwarded request (default is 10 seconds)
},
"rpc": {
  "cache_timeout": 0.5 // in seconds, could be 0, which means no cache for rpc
},
"dos_guard": {
  // Comma-separated list of IPs to exclude from rate limiting
  "whitelist": [
@@ -67,7 +70,15 @@
  "admin_password": "xrp",
  // If local_admin is true, Clio will consider requests coming from 127.0.0.1 as admin requests
  // It's true by default unless admin_password is set; 'local_admin': true and 'admin_password' cannot be set at the same time
  "local_admin": false
  "local_admin": false,
  "processing_policy": "parallel", // Could be "sequent" or "parallel".
  // For the sequent policy, requests from one client connection are processed one by one, and the next one is not read before
  // the previous one is processed. For the parallel policy, Clio takes all requests, processes them in parallel, and
  // sends a reply for each request whenever it is ready.
  "parallel_requests_limit": 10, // Optional parameter, used only if "processing_policy" is "parallel". It limits the number of requests for one client connection processed in parallel. Infinite if not specified.
  // Max number of responses to queue up before they are sent successfully. If a client's waiting queue is too long, the server will close the connection.
  "ws_max_sending_queue_size": 1500,
  "__ng_web_server": false // Use the ng web server. This is a temporary setting which will be deleted after switching to the ng web server
},
// Time in seconds for graceful shutdown. Defaults to 10 seconds. Not fully implemented yet.
"graceful_period": 10.0,
@@ -1,5 +1,9 @@
# Example of clio monitoring infrastructure

> [!WARNING]
> This is only an example of a Grafana dashboard for Clio. It was created for demonstration purposes only and may contain errors.
> The Clio team does not recommend relying on data from this dashboard, or using it for monitoring your Clio instances.

This directory contains an example of docker-based infrastructure to collect and visualise metrics from Clio.

The structure of the directory:
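Assuming the directory ships a standard compose file (not shown in this excerpt), the example stack can be brought up with:

```sh
cd docs/examples/infrastructure
docker compose up -d
```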
@@ -20,7 +20,6 @@
"graphTooltip": 0,
"id": 1,
"links": [],
"liveNow": false,
"panels": [
{
"datasource": {
@@ -79,6 +78,7 @@
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"percentChangeColorMode": "standard",
"reduceOptions": {
"calcs": [
"lastNotNull"
@@ -90,7 +90,7 @@
"textMode": "auto",
"wideLayout": true
},
"pluginVersion": "10.4.0",
"pluginVersion": "11.4.0",
"targets": [
{
"datasource": {
@@ -159,6 +159,7 @@
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"percentChangeColorMode": "standard",
"reduceOptions": {
"calcs": [
"lastNotNull"
@@ -170,7 +171,7 @@
"textMode": "auto",
"wideLayout": true
},
"pluginVersion": "10.4.0",
"pluginVersion": "11.4.0",
"targets": [
{
"datasource": {
@@ -243,6 +244,7 @@
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"percentChangeColorMode": "standard",
"reduceOptions": {
"calcs": [
"lastNotNull"
@@ -254,7 +256,7 @@
"textMode": "auto",
"wideLayout": true
},
"pluginVersion": "10.4.0",
"pluginVersion": "11.4.0",
"targets": [
{
"datasource": {
@@ -327,6 +329,7 @@
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"percentChangeColorMode": "standard",
"reduceOptions": {
"calcs": [
"lastNotNull"
@@ -338,7 +341,7 @@
"textMode": "auto",
"wideLayout": true
},
"pluginVersion": "10.4.0",
"pluginVersion": "11.4.0",
"targets": [
{
"datasource": {
@@ -373,6 +376,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -435,6 +439,7 @@
"sort": "none"
}
},
"pluginVersion": "11.4.0",
"targets": [
{
"datasource": {
@@ -491,6 +496,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -552,6 +558,7 @@
"sort": "none"
}
},
"pluginVersion": "11.4.0",
"targets": [
{
"datasource": {
@@ -586,6 +593,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -647,6 +655,7 @@
"sort": "none"
}
},
"pluginVersion": "11.4.0",
"targets": [
{
"datasource": {
@@ -681,6 +690,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -742,6 +752,7 @@
"sort": "none"
}
},
"pluginVersion": "11.4.0",
"targets": [
{
"datasource": {
@@ -776,6 +787,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -837,6 +849,7 @@
"sort": "none"
}
},
"pluginVersion": "11.4.0",
"targets": [
{
"datasource": {
@@ -872,6 +885,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -934,6 +948,7 @@
"sort": "none"
}
},
"pluginVersion": "11.4.0",
"targets": [
{
"datasource": {
@@ -941,7 +956,7 @@
"uid": "PBFA97CFB590B2093"
},
"editorMode": "code",
"expr": "rpc_method_duration_us{job=\"clio\"}",
"expr": "sum by (method) (increase(rpc_method_duration_us[$__interval]))\n / \n sum by (method,) (increase(rpc_method_total_number{status=\"finished\"}[$__interval]))",
"instant": false,
"legendFormat": "{{method}}",
"range": true,
@@ -968,6 +983,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -1029,6 +1045,7 @@
"sort": "none"
}
},
"pluginVersion": "11.4.0",
"targets": [
{
"datasource": {
@@ -1063,6 +1080,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -1124,6 +1142,7 @@
"sort": "none"
}
},
"pluginVersion": "11.4.0",
"targets": [
{
"datasource": {
@@ -1158,6 +1177,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
@@ -1223,7 +1243,7 @@
"sort": "none"
}
},
"pluginVersion": "10.2.0",
"pluginVersion": "11.4.0",
"targets": [
{
"datasource": {
@@ -1296,6 +1316,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -1357,6 +1378,7 @@
"sort": "none"
}
},
"pluginVersion": "11.4.0",
"targets": [
{
"datasource": {
@@ -1404,6 +1426,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -1465,6 +1488,7 @@
"sort": "none"
}
},
"pluginVersion": "11.4.0",
"targets": [
{
"datasource": {
@@ -1510,6 +1534,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -1572,6 +1597,7 @@
"sort": "none"
}
},
"pluginVersion": "11.4.0",
"targets": [
{
"datasource": {
@@ -1590,8 +1616,9 @@
"type": "timeseries"
}
],
"preload": false,
"refresh": "5s",
"schemaVersion": 39,
"schemaVersion": 40,
"tags": [],
"templating": {
"list": []
@@ -1,30 +0,0 @@
# Metrics and static analysis

## Prometheus metrics collection

Clio natively supports [Prometheus](https://prometheus.io/) metrics collection. It accepts Prometheus requests on the port configured in the `server` section of the config.

Prometheus metrics are enabled by default, and replies to `/metrics` are compressed. To disable compression, and have human readable metrics, add `"prometheus": { "enabled": true, "compress_reply": false }` to Clio's config.

To completely disable Prometheus metrics add `"prometheus": { "enabled": false }` to Clio's config.

It is important to know that Clio responds to Prometheus request only if they are admin requests. If you are using the admin password feature, the same password should be provided in the Authorization header of Prometheus requests.

You can find an example docker-compose file, with Prometheus and Grafana configs, in [examples/infrastructure](../docs/examples/infrastructure/).

## Using `clang-tidy` for static analysis

The minimum [clang-tidy](https://clang.llvm.org/extra/clang-tidy/) version required is 17.0.

Clang-tidy can be run by Cmake when building the project. To achieve this, you just need to provide the option `-o lint=True` for the `conan install` command:

```sh
conan install .. --output-folder . --build missing --settings build_type=Release -o tests=True -o lint=True
```

By default Cmake will try to find `clang-tidy` automatically in your system.
To force Cmake to use your desired binary, set the `CLIO_CLANG_TIDY_BIN` environment variable to the path of the `clang-tidy` binary. For example:

```sh
export CLIO_CLANG_TIDY_BIN=/opt/homebrew/opt/llvm@17/bin/clang-tidy
```
@@ -80,3 +80,15 @@ Clio will fall back to hardcoded defaults when these values are not specified in the config file.

> [!TIP]
> See the [example-config.json](../docs/examples/config/example-config.json) for more details.

## Prometheus metrics collection

Clio natively supports [Prometheus](https://prometheus.io/) metrics collection. It accepts Prometheus requests on the port configured in the `server` section of the config.

Prometheus metrics are enabled by default, and replies to `/metrics` are compressed. To disable compression and get human-readable metrics, add `"prometheus": { "enabled": true, "compress_reply": false }` to Clio's config.

To completely disable Prometheus metrics, add `"prometheus": { "enabled": false }` to Clio's config.

It is important to know that Clio responds to Prometheus requests only if they are admin requests. If you are using the admin password feature, the same password should be provided in the Authorization header of Prometheus requests.

You can find an example docker-compose file, with Prometheus and Grafana configs, in [examples/infrastructure](../docs/examples/infrastructure/).
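As a quick sanity check, metrics can be pulled with curl (host and port are illustrative; with `admin_password` set, the same password must additionally be supplied in the Authorization header, per the note above):

```sh
curl -s http://localhost:8080/metrics | head
```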
@@ -1,8 +1,10 @@
add_subdirectory(util)
add_subdirectory(data)
add_subdirectory(etl)
add_subdirectory(etlng)
add_subdirectory(feed)
add_subdirectory(rpc)
add_subdirectory(web)
add_subdirectory(migration)
add_subdirectory(app)
add_subdirectory(main)
@@ -1,4 +1,4 @@
add_library(clio_app)
target_sources(clio_app PRIVATE CliArgs.cpp ClioApplication.cpp)
target_sources(clio_app PRIVATE CliArgs.cpp ClioApplication.cpp Stopper.cpp WebHandlers.cpp)

target_link_libraries(clio_app PUBLIC clio_etl clio_feed clio_web clio_rpc)
target_link_libraries(clio_app PUBLIC clio_etl clio_etlng clio_feed clio_web clio_rpc clio_migration)
@@ -19,7 +19,9 @@

#include "app/CliArgs.hpp"

#include "migration/MigrationApplication.hpp"
#include "util/build/Build.hpp"
#include "util/newconfig/ConfigDescription.hpp"

#include <boost/program_options/options_description.hpp>
#include <boost/program_options/parsers.hpp>
@@ -28,6 +30,7 @@
#include <boost/program_options/variables_map.hpp>

#include <cstdlib>
#include <filesystem>
#include <iostream>
#include <string>
#include <utility>
@@ -41,9 +44,13 @@ CliArgs::parse(int argc, char const* argv[])
    // clang-format off
    po::options_description description("Options");
    description.add_options()
        ("help,h", "print help message and exit")
        ("version,v", "print version and exit")
        ("conf,c", po::value<std::string>()->default_value(defaultConfigPath), "configuration file")
        ("help,h", "Print help message and exit")
        ("version,v", "Print version and exit")
        ("conf,c", po::value<std::string>()->default_value(kDEFAULT_CONFIG_PATH), "Configuration file")
        ("ng-web-server,w", "Use ng-web-server")
        ("migrate", po::value<std::string>(), "Start migration helper")
        ("verify", "Checks the validity of config values")
        ("config-description,d", po::value<std::string>(), "Generate config description markdown file")
    ;
    // clang-format on
    po::positional_options_description positional;
@@ -63,8 +70,31 @@ CliArgs::parse(int argc, char const* argv[])
        return Action{Action::Exit{EXIT_SUCCESS}};
    }

    if (parsed.count("config-description") != 0u) {
        std::filesystem::path const filePath = parsed["config-description"].as<std::string>();

        auto const res = util::config::ClioConfigDescription::generateConfigDescriptionToFile(filePath);
        if (res.has_value())
            return Action{Action::Exit{EXIT_SUCCESS}};

        std::cerr << res.error().error << std::endl;
        return Action{Action::Exit{EXIT_FAILURE}};
    }

    auto configPath = parsed["conf"].as<std::string>();
    return Action{Action::Run{std::move(configPath)}};

    if (parsed.count("migrate") != 0u) {
        auto const opt = parsed["migrate"].as<std::string>();
        if (opt == "status")
            return Action{Action::Migrate{.configPath = std::move(configPath), .subCmd = MigrateSubCmd::status()}};
        return Action{Action::Migrate{.configPath = std::move(configPath), .subCmd = MigrateSubCmd::migration(opt)}};
    }

    if (parsed.count("verify") != 0u)
        return Action{Action::VerifyConfig{.configPath = std::move(configPath)}};

    return Action{Action::Run{.configPath = std::move(configPath), .useNgWebServer = parsed.count("ng-web-server") != 0}};
}

} // namespace app
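Taken together, the new actions can be invoked like this (a sketch; the `clio_server` binary name is an assumption):

```sh
./clio_server --verify --conf /etc/opt/clio/config.json          # check config validity
./clio_server --migrate status --conf /etc/opt/clio/config.json  # query migration status
```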
@@ -19,6 +19,7 @@

#pragma once

#include "migration/MigrationApplication.hpp"
#include "util/OverloadSet.hpp"

#include <string>
@@ -34,7 +35,7 @@ public:
    /**
     * @brief Default configuration path.
     */
    static constexpr char defaultConfigPath[] = "/etc/opt/clio/config.json";
    static constexpr char kDEFAULT_CONFIG_PATH[] = "/etc/opt/clio/config.json";

    /**
     * @brief An action parsed from the command line.
@@ -43,14 +44,24 @@ public:
    public:
        /** @brief Run action. */
        struct Run {
            /** @brief Configuration file path. */
            std::string configPath;
            std::string configPath;  ///< Configuration file path.
            bool useNgWebServer;     ///< Whether to use the ng web server.
        };

        /** @brief Exit action. */
        struct Exit {
            /** @brief Exit code. */
            int exitCode;
            int exitCode;  ///< Exit code.
        };

        /** @brief Migration action. */
        struct Migrate {
            std::string configPath;
            MigrateSubCmd subCmd;
        };

        /** @brief Verify Config action. */
        struct VerifyConfig {
            std::string configPath;
        };

        /**
@@ -59,7 +70,8 @@ public:
         * @param action Run action.
         */
        template <typename ActionType>
            requires std::is_same_v<ActionType, Run> or std::is_same_v<ActionType, Exit>
            requires std::is_same_v<ActionType, Run> or std::is_same_v<ActionType, Exit> or
                         std::is_same_v<ActionType, Migrate> or std::is_same_v<ActionType, VerifyConfig>
        explicit Action(ActionType&& action) : action_(std::forward<ActionType>(action))
        {
        }
@@ -79,7 +91,7 @@ public:
        }

    private:
        std::variant<Run, Exit> action_;
        std::variant<Run, Exit, Migrate, VerifyConfig> action_;
    };

    /**
@@ -19,32 +19,44 @@

#include "app/ClioApplication.hpp"

#include "app/Stopper.hpp"
#include "app/WebHandlers.hpp"
#include "data/AmendmentCenter.hpp"
#include "data/BackendFactory.hpp"
#include "data/LedgerCache.hpp"
#include "etl/ETLService.hpp"
#include "etl/LoadBalancer.hpp"
#include "etl/NetworkValidatedLedgers.hpp"
#include "etlng/LoadBalancer.hpp"
#include "etlng/LoadBalancerInterface.hpp"
#include "feed/SubscriptionManager.hpp"
#include "migration/MigrationInspectorFactory.hpp"
#include "rpc/Counters.hpp"
#include "rpc/RPCEngine.hpp"
#include "rpc/WorkQueue.hpp"
#include "rpc/common/impl/HandlerProvider.hpp"
#include "util/build/Build.hpp"
#include "util/config/Config.hpp"
#include "util/log/Logger.hpp"
#include "util/newconfig/ConfigDefinition.hpp"
#include "util/prometheus/Prometheus.hpp"
#include "web/AdminVerificationStrategy.hpp"
#include "web/RPCServerHandler.hpp"
#include "web/Server.hpp"
#include "web/dosguard/DOSGuard.hpp"
#include "web/dosguard/IntervalSweepHandler.hpp"
#include "web/dosguard/Weights.hpp"
#include "web/dosguard/WhitelistHandler.hpp"
#include "web/ng/RPCServerHandler.hpp"
#include "web/ng/Server.hpp"

#include <boost/asio/io_context.hpp>

#include <cstdint>
#include <cstdlib>
#include <memory>
#include <optional>
#include <thread>
#include <utility>
#include <vector>

namespace app {
@@ -72,20 +84,18 @@ start(boost::asio::io_context& ioc, std::uint32_t numThreads)

} // namespace

ClioApplication::ClioApplication(util::Config const& config) : config_(config), signalsHandler_{config_}
ClioApplication::ClioApplication(util::config::ClioConfigDefinition const& config)
    : config_(config), signalsHandler_{config_}
{
    LOG(util::LogService::info()) << "Clio version: " << util::build::getClioFullVersionString();
    PrometheusService::init(config);
    signalsHandler_.subscribeToStop([this]() { appStopper_.stop(); });
}

int
ClioApplication::run()
ClioApplication::run(bool const useNgWebServer)
{
    auto const threads = config_.valueOr("io_threads", 2);
    if (threads <= 0) {
        LOG(util::LogService::fatal()) << "io_threads is less than 1";
        return EXIT_FAILURE;
    }
    auto const threads = config_.get<uint16_t>("io_threads");
    LOG(util::LogService::info()) << "Number of io threads = " << threads;

    // IO context to handle all incoming requests, as well as other things.
@@ -94,42 +104,102 @@ ClioApplication::run()

    // Rate limiter, to prevent abuse
    auto whitelistHandler = web::dosguard::WhitelistHandler{config_};
    auto dosGuard = web::dosguard::DOSGuard{config_, whitelistHandler};
    auto const dosguardWeights = web::dosguard::Weights::make(config_);
    auto dosGuard = web::dosguard::DOSGuard{config_, whitelistHandler, dosguardWeights};
    auto sweepHandler = web::dosguard::IntervalSweepHandler{config_, ioc, dosGuard};
    auto cache = data::LedgerCache{};

    // Interface to the database
    auto backend = data::make_Backend(config_);
    auto backend = data::makeBackend(config_, cache);

    auto const amendmentCenter = std::make_shared<data::AmendmentCenter const>(backend);

    {
        auto const migrationInspector = migration::makeMigrationInspector(config_, backend);
        // Check if any migration is blocking Clio server starting.
        if (migrationInspector->isBlockingClio() and backend->hardFetchLedgerRangeNoThrow()) {
            LOG(util::LogService::error())
                << "Existing Migration is blocking Clio, Please complete the database migration first.";
            return EXIT_FAILURE;
        }
    }

    // Manages clients subscribed to streams
    auto subscriptionsRunner = feed::SubscriptionManagerRunner(config_, backend);

    auto const subscriptions = subscriptionsRunner.getManager();
    auto subscriptions = feed::SubscriptionManager::makeSubscriptionManager(config_, backend, amendmentCenter);

    // Tracks which ledgers have been validated by the network
    auto ledgers = etl::NetworkValidatedLedgers::make_ValidatedLedgers();
    auto ledgers = etl::NetworkValidatedLedgers::makeValidatedLedgers();

    // Handles the connection to one or more rippled nodes.
    // ETL uses the balancer to extract data.
    // The server uses the balancer to forward RPCs to a rippled node.
    // The balancer itself publishes to streams (transactions_proposed and accounts_proposed)
    auto balancer = etl::LoadBalancer::make_LoadBalancer(config_, ioc, backend, subscriptions, ledgers);
    auto balancer = [&] -> std::shared_ptr<etlng::LoadBalancerInterface> {
        if (config_.get<bool>("__ng_etl"))
            return etlng::LoadBalancer::makeLoadBalancer(config_, ioc, backend, subscriptions, ledgers);

        return etl::LoadBalancer::makeLoadBalancer(config_, ioc, backend, subscriptions, ledgers);
    }();

    // ETL is responsible for writing and publishing to streams. In read-only mode, ETL only publishes
    auto etl = etl::ETLService::make_ETLService(config_, ioc, backend, subscriptions, balancer, ledgers);
    auto etl = etl::ETLService::makeETLService(config_, ioc, backend, subscriptions, balancer, ledgers);

    auto workQueue = rpc::WorkQueue::makeWorkQueue(config_);
    auto counters = rpc::Counters::makeCounters(workQueue);

    auto workQueue = rpc::WorkQueue::make_WorkQueue(config_);
    auto counters = rpc::Counters::make_Counters(workQueue);
    auto const amendmentCenter = std::make_shared<data::AmendmentCenter const>(backend);
    auto const handlerProvider = std::make_shared<rpc::impl::ProductionHandlerProvider const>(
        config_, backend, subscriptions, balancer, etl, amendmentCenter, counters
    );

    using RPCEngineType = rpc::RPCEngine<rpc::Counters>;
    auto const rpcEngine =
        rpc::RPCEngine::make_RPCEngine(backend, balancer, dosGuard, workQueue, counters, handlerProvider);
        RPCEngineType::makeRPCEngine(config_, backend, balancer, dosGuard, workQueue, counters, handlerProvider);

    if (useNgWebServer or config_.get<bool>("server.__ng_web_server")) {
        web::ng::RPCServerHandler<RPCEngineType> handler{config_, backend, rpcEngine, etl, dosGuard};

        auto expectedAdminVerifier = web::makeAdminVerificationStrategy(config_);
        if (not expectedAdminVerifier.has_value()) {
            LOG(util::LogService::error()) << "Error creating admin verifier: " << expectedAdminVerifier.error();
            return EXIT_FAILURE;
        }
        auto const adminVerifier = std::move(expectedAdminVerifier).value();

        auto httpServer = web::ng::makeServer(config_, OnConnectCheck{dosGuard}, DisconnectHook{dosGuard}, ioc);

        if (not httpServer.has_value()) {
            LOG(util::LogService::error()) << "Error creating web server: " << httpServer.error();
            return EXIT_FAILURE;
        }

        httpServer->onGet("/metrics", MetricsHandler{adminVerifier});
        httpServer->onGet("/health", HealthCheckHandler{});
        auto requestHandler = RequestHandler{adminVerifier, handler};
        httpServer->onPost("/", requestHandler);
        httpServer->onWs(std::move(requestHandler));

        auto const maybeError = httpServer->run();
        if (maybeError.has_value()) {
            LOG(util::LogService::error()) << "Error starting web server: " << *maybeError;
            return EXIT_FAILURE;
        }

        appStopper_.setOnStop(
            Stopper::makeOnStopCallback(httpServer.value(), *balancer, *etl, *subscriptions, *backend, ioc)
        );

        // Blocks until stopped.
        // When stopped, shared_ptrs fall out of scope
        // Calls destructors on all resources, and destructs in order
        start(ioc, threads);

        return EXIT_SUCCESS;
    }

    // Init the web server
    auto handler =
        std::make_shared<web::RPCServerHandler<rpc::RPCEngine, etl::ETLService>>(config_, backend, rpcEngine, etl);
    auto const httpServer = web::make_HttpServer(config_, ioc, dosGuard, handler);
    auto handler = std::make_shared<web::RPCServerHandler<RPCEngineType>>(config_, backend, rpcEngine, etl, dosGuard);

    auto const httpServer = web::makeHttpServer(config_, ioc, dosGuard, handler);

    // Blocks until stopped.
    // When stopped, shared_ptrs fall out of scope
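The ng code path above is opted into with the `-w`/`--ng-web-server` CLI flag or the `server.__ng_web_server` config setting; for example (the binary name is an assumption):

```sh
./clio_server --conf /etc/opt/clio/config.json -w
```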
@@ -19,8 +19,9 @@

#pragma once

#include "app/Stopper.hpp"
#include "util/SignalsHandler.hpp"
#include "util/config//Config.hpp"
#include "util/newconfig/ConfigDefinition.hpp"

namespace app {

@@ -28,8 +29,9 @@ namespace app {
 * @brief The main application class
 */
class ClioApplication {
    util::Config const& config_;
    util::config::ClioConfigDefinition const& config_;
    util::SignalsHandler signalsHandler_;
    Stopper appStopper_;

public:
    /**
@@ -37,15 +39,17 @@ public:
     *
     * @param config The configuration of the application
     */
    ClioApplication(util::Config const& config);
    ClioApplication(util::config::ClioConfigDefinition const& config);

    /**
     * @brief Run the application
     *
     * @param useNgWebServer Whether to use the new web server
     *
     * @return exit code
     */
    int
    run();
    run(bool useNgWebServer);
};

} // namespace app
52
src/app/Stopper.cpp
Normal file
52
src/app/Stopper.cpp
Normal file
@@ -0,0 +1,52 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2025, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include "app/Stopper.hpp"
|
||||
|
||||
#include <boost/asio/spawn.hpp>
|
||||
|
||||
#include <functional>
|
||||
#include <thread>
|
||||
#include <utility>
|
||||
|
||||
namespace app {
|
||||
|
||||
Stopper::~Stopper()
|
||||
{
|
||||
if (worker_.joinable())
|
||||
worker_.join();
|
||||
}
|
||||
|
||||
void
|
||||
Stopper::setOnStop(std::function<void(boost::asio::yield_context)> cb)
|
||||
{
|
||||
boost::asio::spawn(ctx_, std::move(cb));
|
||||
}
|
||||
|
||||
void
|
||||
Stopper::stop()
|
||||
{
|
||||
// Do nothing if worker_ is already running
|
||||
if (worker_.joinable())
|
||||
return;
|
||||
|
||||
worker_ = std::thread{[this]() { ctx_.run(); }};
|
||||
}
|
||||
|
||||
} // namespace app

src/app/Stopper.hpp (new file, 115 lines)
@@ -0,0 +1,115 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2024, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include "data/BackendInterface.hpp"
#include "etlng/ETLServiceInterface.hpp"
#include "etlng/LoadBalancerInterface.hpp"
#include "feed/SubscriptionManagerInterface.hpp"
#include "util/CoroutineGroup.hpp"
#include "util/log/Logger.hpp"
#include "web/ng/Server.hpp"

#include <boost/asio/executor_work_guard.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/spawn.hpp>

#include <functional>
#include <thread>

namespace app {

/**
 * @brief Application stopper class. On stop it will create a new thread to run all the shutdown tasks.
 */
class Stopper {
    boost::asio::io_context ctx_;
    std::thread worker_;

public:
    /**
     * @brief Destroy the Stopper object
     */
    ~Stopper();

    /**
     * @brief Set the callback to be called when the application is stopped.
     *
     * @param cb The callback to be called on application stop.
     */
    void
    setOnStop(std::function<void(boost::asio::yield_context)> cb);

    /**
     * @brief Stop the application and run the shutdown tasks.
     */
    void
    stop();

    /**
     * @brief Create a callback to be called on application stop.
     *
     * @param server The server to stop.
     * @param balancer The load balancer to stop.
     * @param etl The ETL service to stop.
     * @param subscriptions The subscription manager to stop.
     * @param backend The backend to stop.
     * @param ioc The io_context to stop.
     * @return The callback to be called on application stop.
     */
    template <web::ng::SomeServer ServerType>
    static std::function<void(boost::asio::yield_context)>
    makeOnStopCallback(
        ServerType& server,
        etlng::LoadBalancerInterface& balancer,
        etlng::ETLServiceInterface& etl,
        feed::SubscriptionManagerInterface& subscriptions,
        data::BackendInterface& backend,
        boost::asio::io_context& ioc
    )
    {
        return [&](boost::asio::yield_context yield) {
            util::CoroutineGroup coroutineGroup{yield};
            coroutineGroup.spawn(yield, [&server](auto innerYield) {
                server.stop(innerYield);
                LOG(util::LogService::info()) << "Server stopped";
            });
            coroutineGroup.spawn(yield, [&balancer](auto innerYield) {
                balancer.stop(innerYield);
                LOG(util::LogService::info()) << "LoadBalancer stopped";
            });
            coroutineGroup.asyncWait(yield);

            etl.stop();
            LOG(util::LogService::info()) << "ETL stopped";

            subscriptions.stop();
            LOG(util::LogService::info()) << "SubscriptionManager stopped";

            backend.waitForWritesToFinish();
            LOG(util::LogService::info()) << "Backend writes finished";

            ioc.stop();
            LOG(util::LogService::info()) << "io_context stopped";
        };
    }
};

} // namespace app
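The callback above encodes the intended shutdown order: the web server and the load balancer are stopped concurrently (they are independent of each other), and only then the ETL, the subscriptions, the backend flush, and finally the io_context. A minimal usage sketch; the signal-handler subscription API here is assumed, not part of this diff:

// Sketch: driving the Stopper. setOnStop() only queues the coroutine on ctx_;
// nothing runs until stop() starts the worker thread that calls ctx_.run().
app::Stopper stopper;
stopper.setOnStop(
    app::Stopper::makeOnStopCallback(*server, *balancer, *etl, *subscriptions, *backend, ioc)
);

// e.g. from a SIGINT/SIGTERM handler; calling stop() twice is harmless
// because a joinable worker_ means shutdown already started.
signalsHandler.subscribeToStop([&stopper]() { stopper.stop(); });  // subscription API assumed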

src/app/VerifyConfig.hpp (new file, 58 lines)
@@ -0,0 +1,58 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2025, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include "util/newconfig/ConfigDefinition.hpp"
#include "util/newconfig/ConfigFileJson.hpp"

#include <cstdlib>
#include <iostream>
#include <string_view>

namespace app {

/**
 * @brief Verifies user's config values are correct
 *
 * @param configPath The path to config
 * @return true if config values are all correct, false otherwise
 */
inline bool
parseConfig(std::string_view configPath)
{
    using namespace util::config;

    auto const json = ConfigFileJson::makeConfigFileJson(configPath);
    if (!json.has_value()) {
        std::cerr << "Error parsing json from config: " << configPath << "\n" << json.error().error << std::endl;
        return false;
    }
    auto const errors = gClioConfig.parse(json.value());
    if (errors.has_value()) {
        for (auto const& err : errors.value()) {
            std::cerr << "Issues found in provided config '" << configPath << "':\n";
            std::cerr << err.error << std::endl;
        }
        return false;
    }
    return true;
}

} // namespace app
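parseConfig() reports problems on stderr and leaves the parsed values in the global gClioConfig, so a caller only needs the boolean. A sketch of the expected call site (the surrounding main() and argument checking are assumed, not shown in this diff):

#include "app/VerifyConfig.hpp"

#include <cstdlib>

int
main(int, char* argv[])
{
    // Bail out early if the config file is missing, malformed, or has bad values.
    if (not app::parseConfig(argv[1]))
        return EXIT_FAILURE;

    // ... safe to read util::config::gClioConfig from here on ...
    return EXIT_SUCCESS;
}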

src/app/WebHandlers.cpp (new file, 111 lines)
@@ -0,0 +1,111 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2024, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include "app/WebHandlers.hpp"

#include "util/Assert.hpp"
#include "util/prometheus/Http.hpp"
#include "web/AdminVerificationStrategy.hpp"
#include "web/SubscriptionContextInterface.hpp"
#include "web/dosguard/DOSGuardInterface.hpp"
#include "web/ng/Connection.hpp"
#include "web/ng/Request.hpp"
#include "web/ng/Response.hpp"

#include <boost/asio/spawn.hpp>
#include <boost/beast/http/status.hpp>

#include <memory>
#include <optional>
#include <utility>

namespace app {

OnConnectCheck::OnConnectCheck(web::dosguard::DOSGuardInterface& dosguard) : dosguard_{dosguard}
{
}

std::expected<void, web::ng::Response>
OnConnectCheck::operator()(web::ng::Connection const& connection)
{
    dosguard_.get().increment(connection.ip());
    if (not dosguard_.get().isOk(connection.ip())) {
        return std::unexpected{
            web::ng::Response{boost::beast::http::status::too_many_requests, "Too many requests", connection}
        };
    }

    return {};
}

DisconnectHook::DisconnectHook(web::dosguard::DOSGuardInterface& dosguard) : dosguard_{dosguard}
{
}

void
DisconnectHook::operator()(web::ng::Connection const& connection)
{
    dosguard_.get().decrement(connection.ip());
}

MetricsHandler::MetricsHandler(std::shared_ptr<web::AdminVerificationStrategy> adminVerifier)
    : adminVerifier_{std::move(adminVerifier)}
{
}

web::ng::Response
MetricsHandler::operator()(
    web::ng::Request const& request,
    web::ng::ConnectionMetadata& connectionMetadata,
    web::SubscriptionContextPtr,
    boost::asio::yield_context
)
{
    auto const maybeHttpRequest = request.asHttpRequest();
    ASSERT(maybeHttpRequest.has_value(), "Got not a http request in Get");
    auto const& httpRequest = maybeHttpRequest->get();

    // FIXME(#1702): Using web server thread to handle prometheus request. Better to post on work queue.
    auto maybeResponse = util::prometheus::handlePrometheusRequest(
        httpRequest, adminVerifier_->isAdmin(httpRequest, connectionMetadata.ip())
    );
    ASSERT(maybeResponse.has_value(), "Got unexpected request for Prometheus");
    return web::ng::Response{std::move(maybeResponse).value(), request};
}

web::ng::Response
HealthCheckHandler::operator()(
    web::ng::Request const& request,
    web::ng::ConnectionMetadata&,
    web::SubscriptionContextPtr,
    boost::asio::yield_context
)
{
    static auto constexpr kHEALTH_CHECK_HTML = R"html(
    <!DOCTYPE html>
    <html>
        <head><title>Test page for Clio</title></head>
        <body><h1>Clio Test</h1><p>This page shows Clio http(s) connectivity is working.</p></body>
    </html>
    )html";

    return web::ng::Response{boost::beast::http::status::ok, kHEALTH_CHECK_HTML, request};
}

} // namespace app

src/app/WebHandlers.hpp (new file, 200 lines)
@@ -0,0 +1,200 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2024, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include "rpc/Errors.hpp"
#include "util/log/Logger.hpp"
#include "web/AdminVerificationStrategy.hpp"
#include "web/SubscriptionContextInterface.hpp"
#include "web/dosguard/DOSGuardInterface.hpp"
#include "web/ng/Connection.hpp"
#include "web/ng/Request.hpp"
#include "web/ng/Response.hpp"

#include <boost/asio/spawn.hpp>
#include <boost/beast/http/status.hpp>
#include <boost/json/array.hpp>
#include <boost/json/parse.hpp>

#include <exception>
#include <functional>
#include <memory>
#include <utility>

namespace app {

/**
 * @brief A function object that checks if the connection is allowed to proceed.
 */
class OnConnectCheck {
    std::reference_wrapper<web::dosguard::DOSGuardInterface> dosguard_;

public:
    /**
     * @brief Construct a new OnConnectCheck object
     *
     * @param dosguard The DOSGuardInterface to use for checking the connection.
     */
    OnConnectCheck(web::dosguard::DOSGuardInterface& dosguard);

    /**
     * @brief Check if the connection is allowed to proceed.
     *
     * @param connection The connection to check.
     * @return A response if the connection is not allowed to proceed or void otherwise.
     */
    std::expected<void, web::ng::Response>
    operator()(web::ng::Connection const& connection);
};

/**
 * @brief A function object to be called when a connection is disconnected.
 */
class DisconnectHook {
    std::reference_wrapper<web::dosguard::DOSGuardInterface> dosguard_;

public:
    /**
     * @brief Construct a new DisconnectHook object
     *
     * @param dosguard The DOSGuardInterface to use for disconnecting the connection.
     */
    DisconnectHook(web::dosguard::DOSGuardInterface& dosguard);

    /**
     * @brief The call of the function object.
     *
     * @param connection The connection which has disconnected.
     */
    void
    operator()(web::ng::Connection const& connection);
};

/**
 * @brief A function object that handles the metrics endpoint.
 */
class MetricsHandler {
    std::shared_ptr<web::AdminVerificationStrategy> adminVerifier_;

public:
    /**
     * @brief Construct a new MetricsHandler object
     *
     * @param adminVerifier The AdminVerificationStrategy to use for verifying the connection for admin access.
     */
    MetricsHandler(std::shared_ptr<web::AdminVerificationStrategy> adminVerifier);

    /**
     * @brief The call of the function object.
     *
     * @param request The request to handle.
     * @param connectionMetadata The connection metadata.
     * @return The response to the request.
     */
    web::ng::Response
    operator()(
        web::ng::Request const& request,
        web::ng::ConnectionMetadata& connectionMetadata,
        web::SubscriptionContextPtr,
        boost::asio::yield_context
    );
};

/**
 * @brief A function object that handles the health check endpoint.
 */
class HealthCheckHandler {
public:
    /**
     * @brief The call of the function object.
     *
     * @param request The request to handle.
     * @return The response to the request
     */
    web::ng::Response
    operator()(
        web::ng::Request const& request,
        web::ng::ConnectionMetadata&,
        web::SubscriptionContextPtr,
        boost::asio::yield_context
    );
};

/**
 * @brief A function object that handles the websocket endpoint.
 *
 * @tparam RpcHandlerType The type of the RPC handler.
 */
template <typename RpcHandlerType>
class RequestHandler {
    util::Logger webServerLog_{"WebServer"};
    std::shared_ptr<web::AdminVerificationStrategy> adminVerifier_;
    std::reference_wrapper<RpcHandlerType> rpcHandler_;

public:
    /**
     * @brief Construct a new RequestHandler object
     *
     * @param adminVerifier The AdminVerificationStrategy to use for verifying the connection for admin access.
     * @param rpcHandler The RPC handler to use for handling the request.
     */
    RequestHandler(std::shared_ptr<web::AdminVerificationStrategy> adminVerifier, RpcHandlerType& rpcHandler)
        : adminVerifier_(std::move(adminVerifier)), rpcHandler_(rpcHandler)
    {
    }

    /**
     * @brief The call of the function object.
     *
     * @param request The request to handle.
     * @param connectionMetadata The connection metadata.
     * @param subscriptionContext The subscription context.
     * @param yield The yield context.
     * @return The response to the request.
     */
    web::ng::Response
    operator()(
        web::ng::Request const& request,
        web::ng::ConnectionMetadata& connectionMetadata,
        web::SubscriptionContextPtr subscriptionContext,
        boost::asio::yield_context yield
    )
    {
        LOG(webServerLog_.info()) << connectionMetadata.tag()
                                  << "Received request from ip = " << connectionMetadata.ip()
                                  << " - posting to WorkQueue";

        connectionMetadata.setIsAdmin([this, &request, &connectionMetadata]() {
            return adminVerifier_->isAdmin(request.httpHeaders(), connectionMetadata.ip());
        });

        try {
            return rpcHandler_(request, connectionMetadata, std::move(subscriptionContext), yield);
        } catch (std::exception const&) {
            return web::ng::Response{
                boost::beast::http::status::internal_server_error,
                rpc::makeError(rpc::RippledError::rpcINTERNAL),
                request
            };
        }
    }
};

} // namespace app
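RequestHandler is templated on RpcHandlerType, so anything callable with the signature of its operator() can stand in for the real RPC engine. A stub like the following is enough to exercise the admin check and the exception path in a test; it is illustrative only (including the Response-from-string construction, which is assumed to behave as in HealthCheckHandler above):

// Hypothetical test double; not part of this diff.
struct EchoRpcHandler {
    web::ng::Response
    operator()(
        web::ng::Request const& request,
        web::ng::ConnectionMetadata&,
        web::SubscriptionContextPtr,
        boost::asio::yield_context
    )
    {
        return web::ng::Response{boost::beast::http::status::ok, "{}", request};
    }
};

EchoRpcHandler echo;
auto handler = app::RequestHandler<EchoRpcHandler>{adminVerifier, echo};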

@@ -50,10 +50,10 @@
namespace {

std::unordered_set<std::string>&
-SUPPORTED_AMENDMENTS()
+supportedAmendments()
{
-    static std::unordered_set<std::string> amendments = {};
-    return amendments;
+    static std::unordered_set<std::string> kAMENDMENTS = {};
+    return kAMENDMENTS;
}

bool
@@ -72,8 +72,8 @@ namespace impl {

WritingAmendmentKey::WritingAmendmentKey(std::string amendmentName) : AmendmentKey{std::move(amendmentName)}
{
-    ASSERT(not SUPPORTED_AMENDMENTS().contains(name), "Attempt to register the same amendment twice");
-    SUPPORTED_AMENDMENTS().insert(name);
+    ASSERT(not supportedAmendments().contains(name), "Attempt to register the same amendment twice");
+    supportedAmendments().insert(name);
}

} // namespace impl
@@ -90,7 +90,7 @@ AmendmentKey::operator std::string_view() const

AmendmentKey::operator ripple::uint256() const
{
-    return Amendment::GetAmendmentId(name);
+    return Amendment::getAmendmentId(name);
}

AmendmentCenter::AmendmentCenter(std::shared_ptr<data::BackendInterface> const& backend) : backend_{backend}
@@ -103,9 +103,9 @@ AmendmentCenter::AmendmentCenter(std::shared_ptr<data::BackendInterface> const&
    auto const& [name, support] = p;
    return Amendment{
        .name = name,
-        .feature = Amendment::GetAmendmentId(name),
+        .feature = Amendment::getAmendmentId(name),
        .isSupportedByXRPL = support != ripple::AmendmentSupport::Unsupported,
-        .isSupportedByClio = rg::find(SUPPORTED_AMENDMENTS(), name) != rg::end(SUPPORTED_AMENDMENTS()),
+        .isSupportedByClio = rg::find(supportedAmendments(), name) != rg::end(supportedAmendments()),
        .isRetired = support == ripple::AmendmentSupport::Retired
    };
}),
@@ -180,7 +180,7 @@ AmendmentCenter::operator[](AmendmentKey const& key) const
}

ripple::uint256
-Amendment::GetAmendmentId(std::string_view name)
+Amendment::getAmendmentId(std::string_view name)
{
    return ripple::sha512Half(ripple::Slice(name.data(), name.size()));
}
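As getAmendmentId() shows, an amendment's 256-bit on-ledger ID is simply the SHA-512-half of its ASCII name, which is why REGISTER() only ever needs the name. A standalone sketch; the include paths follow libxrpl conventions and the digest is printed rather than hard-coded, since we don't assert ledger constants we haven't verified:

#include <xrpl/basics/Slice.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/protocol/digest.h>

#include <iostream>
#include <string_view>

int
main()
{
    std::string_view const name = "DeepFreeze";
    // Same computation as Amendment::getAmendmentId above.
    auto const id = ripple::sha512Half(ripple::Slice(name.data(), name.size()));
    std::cout << name << " -> " << ripple::to_string(id) << "\n";
}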

@@ -67,6 +67,7 @@ struct Amendments {
    // Most of the time it's going to be no changes at all.

    /** @cond */
+    // NOLINTBEGIN(readability-identifier-naming)
    REGISTER(OwnerPaysFee);
    REGISTER(Flow);
    REGISTER(FlowCross);
@@ -124,6 +125,18 @@ struct Amendments {
    REGISTER(NFTokenMintOffer);
    REGISTER(fixReducedOffersV2);
    REGISTER(fixEnforceNFTokenTrustline);
+    REGISTER(fixInnerObjTemplate2);
+    REGISTER(fixNFTokenPageLinks);
+    REGISTER(InvariantsV1_1);
+    REGISTER(MPTokensV1);
+    REGISTER(fixAMMv1_2);
+    REGISTER(AMMClawback);
+    REGISTER(Credentials);
+    REGISTER(DynamicNFT);
+    REGISTER(PermissionedDomains);
+    REGISTER(fixInvalidTxFlags);
+    REGISTER(fixFrozenLPTokenTransfer);
+    REGISTER(DeepFreeze);

    // Obsolete but supported by libxrpl
    REGISTER(CryptoConditionsSuite);
@@ -147,6 +160,7 @@ struct Amendments {
    REGISTER(fix1512);
    REGISTER(fix1523);
    REGISTER(fix1528);
+    // NOLINTEND(readability-identifier-naming)
    /** @endcond */
};

@@ -36,7 +36,7 @@ namespace data {

namespace {

-std::vector<std::int64_t> const histogramBuckets{1, 2, 5, 10, 20, 50, 100, 200, 500, 700, 1000};
+std::vector<std::int64_t> const kHISTOGRAM_BUCKETS{1, 2, 5, 10, 20, 50, 100, 200, 500, 700, 1000};

std::int64_t
durationInMillisecondsSince(std::chrono::steady_clock::time_point const startTime)
@@ -69,13 +69,13 @@ BackendCounters::BackendCounters()
    , readDurationHistogram_(PrometheusService::histogramInt(
        "backend_duration_milliseconds_histogram",
        Labels({Label{"operation", "read"}}),
-        histogramBuckets,
+        kHISTOGRAM_BUCKETS,
        "The duration of backend read operations including retries"
    ))
    , writeDurationHistogram_(PrometheusService::histogramInt(
        "backend_duration_milliseconds_histogram",
        Labels({Label{"operation", "write"}}),
-        histogramBuckets,
+        kHISTOGRAM_BUCKETS,
        "The duration of backend write operations including retries"
    ))
{
@@ -21,9 +21,10 @@

#include "data/BackendInterface.hpp"
#include "data/CassandraBackend.hpp"
+#include "data/LedgerCacheInterface.hpp"
#include "data/cassandra/SettingsProvider.hpp"
-#include "util/config/Config.hpp"
#include "util/log/Logger.hpp"
+#include "util/newconfig/ConfigDefinition.hpp"

#include <boost/algorithm/string.hpp>
#include <boost/algorithm/string/predicate.hpp>
@@ -38,22 +39,25 @@ namespace data {
 * @brief A factory function that creates the backend based on a config.
 *
 * @param config The clio config to use
+ * @param cache The ledger cache to use
 * @return A shared_ptr<BackendInterface> with the selected implementation
 */
inline std::shared_ptr<BackendInterface>
-make_Backend(util::Config const& config)
+makeBackend(util::config::ClioConfigDefinition const& config, data::LedgerCacheInterface& cache)
{
-    static util::Logger const log{"Backend"};
+    static util::Logger const log{"Backend"};  // NOLINT(readability-identifier-naming)
    LOG(log.info()) << "Constructing BackendInterface";

-    auto const readOnly = config.valueOr("read_only", false);
+    auto const readOnly = config.get<bool>("read_only");

-    auto const type = config.value<std::string>("database.type");
+    auto const type = config.get<std::string>("database.type");
    std::shared_ptr<BackendInterface> backend = nullptr;

    if (boost::iequals(type, "cassandra")) {
-        auto cfg = config.section("database." + type);
-        backend = std::make_shared<data::cassandra::CassandraBackend>(data::cassandra::SettingsProvider{cfg}, readOnly);
+        auto const cfg = config.getObject("database." + type);
+        backend = std::make_shared<data::cassandra::CassandraBackend>(
+            data::cassandra::SettingsProvider{cfg}, cache, readOnly
+        );
    }

    if (!backend)
@@ -87,7 +87,7 @@ BackendInterface::fetchLedgerObject(
    boost::asio::yield_context yield
) const
{
-    auto obj = cache_.get(key, sequence);
+    auto obj = cache_.get().get(key, sequence);
    if (obj) {
        LOG(gLog.trace()) << "Cache hit - " << ripple::strHex(key);
        return obj;
@@ -126,7 +126,7 @@ BackendInterface::fetchLedgerObjects(
    results.resize(keys.size());
    std::vector<ripple::uint256> misses;
    for (size_t i = 0; i < keys.size(); ++i) {
-        auto obj = cache_.get(keys[i], sequence);
+        auto obj = cache_.get().get(keys[i], sequence);
        if (obj) {
            results[i] = *obj;
        } else {
@@ -156,7 +156,7 @@ BackendInterface::fetchSuccessorKey(
    boost::asio::yield_context yield
) const
{
-    auto succ = cache_.getSuccessor(key, ledgerSequence);
+    auto succ = cache_.get().getSuccessor(key, ledgerSequence);
    if (succ) {
        LOG(gLog.trace()) << "Cache hit - " << ripple::strHex(key);
    } else {
@@ -176,9 +176,9 @@ BackendInterface::fetchSuccessorObject(
    if (succ) {
        auto obj = fetchLedgerObject(*succ, ledgerSequence, yield);
        if (!obj)
-            return {{*succ, {}}};
+            return {{.key = *succ, .blob = {}}};

-        return {{*succ, *obj}};
+        return {{.key = *succ, .blob = *obj}};
    }
    return {};
}
@@ -267,7 +267,7 @@ std::optional<LedgerRange>
BackendInterface::fetchLedgerRange() const
{
    std::shared_lock const lck(rngMtx_);
-    return range;
+    return range_;
}

void
@@ -276,16 +276,16 @@ BackendInterface::updateRange(uint32_t newMax)
    std::scoped_lock const lck(rngMtx_);

    ASSERT(
-        !range || newMax >= range->maxSequence,
+        !range_ || newMax >= range_->maxSequence,
        "Range shouldn't exist yet or newMax should be greater. newMax = {}, range->maxSequence = {}",
        newMax,
-        range->maxSequence
+        range_->maxSequence
    );

-    if (!range) {
-        range = {newMax, newMax};
+    if (!range_) {
+        range_ = {.minSequence = newMax, .maxSequence = newMax};
    } else {
-        range->maxSequence = newMax;
+        range_->maxSequence = newMax;
    }
}

@@ -296,10 +296,10 @@ BackendInterface::setRange(uint32_t min, uint32_t max, bool force)

    if (!force) {
        ASSERT(min <= max, "Range min must be less than or equal to max");
-        ASSERT(not range.has_value(), "Range was already set");
+        ASSERT(not range_.has_value(), "Range was already set");
    }

-    range = {min, max};
+    range_ = {.minSequence = min, .maxSequence = max};
}
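The two functions encode a simple invariant: setRange() may only initialize the range (unless forced), and updateRange() may only move the upper bound forward. The intended call pattern, sketched rather than taken from this diff:

// Typical ETL usage of the range API (illustrative sequence).
backend->setRange(1000, 1000, false);  // first ledger loaded: {1000, 1000}
backend->updateRange(1001);            // range_ is now {1000, 1001}
backend->updateRange(1002);            // range_ is now {1000, 1002}
// backend->updateRange(999);          // would trip the ASSERT above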

LedgerPage
@@ -320,10 +320,10 @@ BackendInterface::fetchLedgerPage(
    ripple::uint256 const& curCursor = [&]() {
        if (!keys.empty())
            return keys.back();
-        return (cursor ? *cursor : firstKey);
+        return (cursor ? *cursor : kFIRST_KEY);
    }();

-    std::uint32_t const seq = outOfOrder ? range->maxSequence : ledgerSequence;
+    std::uint32_t const seq = outOfOrder ? range_->maxSequence : ledgerSequence;
    auto succ = fetchSuccessorKey(curCursor, seq, yield);

    if (!succ) {
@@ -20,7 +20,7 @@
#pragma once

#include "data/DBHelpers.hpp"
-#include "data/LedgerCache.hpp"
+#include "data/LedgerCacheInterface.hpp"
#include "data/Types.hpp"
#include "etl/CorruptionDetector.hpp"
#include "util/log/Logger.hpp"
@@ -40,6 +40,7 @@
#include <cstddef>
#include <cstdint>
#include <exception>
+#include <functional>
#include <optional>
#include <shared_mutex>
#include <string>
@@ -65,7 +66,7 @@ public:
    }
};

-static constexpr std::size_t DEFAULT_WAIT_BETWEEN_RETRY = 500;
+static constexpr std::size_t kDEFAULT_WAIT_BETWEEN_RETRY = 500;
/**
 * @brief A helper function that catches DatabaseTimeout exceptions and retries indefinitely.
 *
@@ -76,9 +77,9 @@ static constexpr std::size_t DEFAULT_WAIT_BETWEEN_RETRY = 500;
 */
template <typename FnType>
auto
-retryOnTimeout(FnType func, size_t waitMs = DEFAULT_WAIT_BETWEEN_RETRY)
+retryOnTimeout(FnType func, size_t waitMs = kDEFAULT_WAIT_BETWEEN_RETRY)
{
-    static util::Logger const log{"Backend"};
+    static util::Logger const log{"Backend"};  // NOLINT(readability-identifier-naming)

    while (true) {
        try {
@@ -138,19 +139,28 @@ synchronousAndRetryOnTimeout(FnType&& func)
class BackendInterface {
protected:
    mutable std::shared_mutex rngMtx_;
-    std::optional<LedgerRange> range;
-    LedgerCache cache_;
-    std::optional<etl::CorruptionDetector<LedgerCache>> corruptionDetector_;
+    std::optional<LedgerRange> range_;
+    std::reference_wrapper<LedgerCacheInterface> cache_;
+    std::optional<etl::CorruptionDetector> corruptionDetector_;

public:
-    BackendInterface() = default;
+    /**
+     * @brief Construct a new backend interface instance.
+     *
+     * @param cache The ledger cache to use
+     */
+    BackendInterface(LedgerCacheInterface& cache) : cache_{cache}
+    {
+    }
    virtual ~BackendInterface() = default;

-    // TODO: Remove this hack. Cache should not be exposed thru BackendInterface
+    // TODO https://github.com/XRPLF/clio/issues/1956: Remove this hack once old ETL is removed.
+    // Cache should not be exposed thru BackendInterface

    /**
     * @return Immutable cache
     */
-    LedgerCache const&
+    LedgerCacheInterface const&
    cache() const
    {
        return cache_;
@@ -159,7 +169,7 @@ public:
    /**
     * @return Mutable cache
     */
-    LedgerCache&
+    LedgerCacheInterface&
    cache()
    {
        return cache_;
@@ -171,7 +181,7 @@ public:
     * @param detector The corruption detector to set
     */
    void
-    setCorruptionDetector(etl::CorruptionDetector<LedgerCache> detector)
+    setCorruptionDetector(etl::CorruptionDetector detector)
    {
        corruptionDetector_ = std::move(detector);
    }
@@ -364,6 +374,25 @@ public:
        boost::asio::yield_context yield
    ) const = 0;

+    /**
+     * @brief Fetches all holders' balances for a MPTIssuanceID
+     *
+     * @param mptID MPTIssuanceID you wish to query.
+     * @param limit Paging limit.
+     * @param cursorIn Optional cursor to allow us to pick up from where we last left off.
+     * @param ledgerSequence The ledger sequence to fetch for
+     * @param yield Currently executing coroutine.
+     * @return std::vector<Blob> of MPToken balances and an optional marker
+     */
+    virtual MPTHoldersAndCursor
+    fetchMPTHolders(
+        ripple::uint192 const& mptID,
+        std::uint32_t const limit,
+        std::optional<ripple::AccountID> const& cursorIn,
+        std::uint32_t const ledgerSequence,
+        boost::asio::yield_context yield
+    ) const = 0;

    /**
     * @brief Fetches a specific ledger object.
     *
@@ -529,6 +558,16 @@ public:
        boost::asio::yield_context yield
    ) const;

+    /**
+     * @brief Fetches the status of migrator by name.
+     *
+     * @param migratorName The name of the migrator
+     * @param yield The coroutine context
+     * @return The status of the migrator if found; nullopt otherwise
+     */
+    virtual std::optional<std::string>
+    fetchMigratorStatus(std::string const& migratorName, boost::asio::yield_context yield) const = 0;

    /**
     * @brief Synchronously fetches the ledger range from DB.
     *
@@ -609,6 +648,14 @@ public:
    virtual void
    writeAccountTransactions(std::vector<AccountTransactionsData> data) = 0;

+    /**
+     * @brief Write a new account transaction.
+     *
+     * @param record An object representing the account transaction
+     */
+    virtual void
+    writeAccountTransaction(AccountTransactionsData record) = 0;

    /**
     * @brief Write NFTs transactions.
     *
@@ -617,6 +664,14 @@ public:
    virtual void
    writeNFTTransactions(std::vector<NFTTransactionsData> const& data) = 0;

+    /**
+     * @brief Write accounts that started holding onto a MPT.
+     *
+     * @param data A vector of MPT ID and account pairs
+     */
+    virtual void
+    writeMPTHolders(std::vector<MPTHolderData> const& data) = 0;

    /**
     * @brief Write a new successor.
     *
@@ -646,6 +701,21 @@ public:
    bool
    finishWrites(std::uint32_t ledgerSequence);

+    /**
+     * @brief Wait for all pending writes to finish.
+     */
+    virtual void
+    waitForWritesToFinish() = 0;

+    /**
+     * @brief Mark the migration status of a migrator as Migrated in the database
+     *
+     * @param migratorName The name of the migrator
+     * @param status The status to set
+     */
+    virtual void
+    writeMigratorStatus(std::string const& migratorName, std::string const& status) = 0;

    /**
     * @return true if database is overwhelmed; false otherwise
     */
@@ -21,7 +21,9 @@

#include "data/BackendInterface.hpp"
#include "data/DBHelpers.hpp"
+#include "data/LedgerCacheInterface.hpp"
#include "data/Types.hpp"
#include "data/cassandra/Concepts.hpp"
#include "data/cassandra/Handle.hpp"
#include "data/cassandra/Schema.hpp"
#include "data/cassandra/SettingsProvider.hpp"
@@ -35,6 +37,7 @@
#include <boost/asio/spawn.hpp>
#include <boost/json/object.hpp>
#include <cassandra.h>
+#include <fmt/core.h>
#include <xrpl/basics/Blob.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/basics/strHex.h>
@@ -43,6 +46,7 @@
#include <xrpl/protocol/LedgerHeader.h>
#include <xrpl/protocol/nft.h>

#include <algorithm>
#include <atomic>
#include <chrono>
#include <cstddef>
@@ -72,28 +76,32 @@ class BasicCassandraBackend : public BackendInterface {

    SettingsProviderType settingsProvider_;
    Schema<SettingsProviderType> schema_;

-    std::atomic_uint32_t ledgerSequence_ = 0u;

protected:
    Handle handle_;

    // have to be mutable because BackendInterface constness :(
    mutable ExecutionStrategyType executor_;

+    std::atomic_uint32_t ledgerSequence_ = 0u;

public:
    /**
     * @brief Create a new cassandra/scylla backend instance.
     *
     * @param settingsProvider The settings provider to use
+     * @param cache The ledger cache to use
     * @param readOnly Whether the database should be in readonly mode
     */
-    BasicCassandraBackend(SettingsProviderType settingsProvider, bool readOnly)
-        : settingsProvider_{std::move(settingsProvider)}
+    BasicCassandraBackend(SettingsProviderType settingsProvider, data::LedgerCacheInterface& cache, bool readOnly)
+        : BackendInterface(cache)
+        , settingsProvider_{std::move(settingsProvider)}
        , schema_{settingsProvider_}
        , handle_{settingsProvider_.getSettings()}
        , executor_{settingsProvider_.getSettings(), handle_}
    {
        if (auto const res = handle_.connect(); not res)
-            throw std::runtime_error("Could not connect to databse: " + res.error());
+            throw std::runtime_error("Could not connect to database: " + res.error());

        if (not readOnly) {
            if (auto const res = handle_.execute(schema_.createKeyspace); not res) {
@@ -110,13 +118,24 @@ public:
        try {
            schema_.prepareStatements(handle_);
        } catch (std::runtime_error const& ex) {
-            LOG(log_.error()) << "Failed to prepare the statements: " << ex.what() << "; readOnly: " << readOnly;
-            throw;
+            auto const error = fmt::format(
+                "Failed to prepare the statements: {}; readOnly: {}. ReadOnly should be turned off or another Clio "
+                "node with write access to DB should be started first.",
+                ex.what(),
+                readOnly
+            );
+            LOG(log_.error()) << error;
+            throw std::runtime_error(error);
        }

        LOG(log_.info()) << "Created (revamped) CassandraBackend";
    }

+    /*
+     * @brief Move constructor is deleted because handle_ is shared by reference with executor
+     */
+    BasicCassandraBackend(BasicCassandraBackend&&) = delete;

    TransactionsAndCursor
    fetchAccountTransactions(
        ripple::AccountID const& account,
@@ -128,7 +147,7 @@ public:
    {
        auto rng = fetchLedgerRange();
        if (!rng)
-            return {{}, {}};
+            return {.txns = {}, .cursor = {}};

        Statement const statement = [this, forward, &account]() {
            if (forward)
@@ -185,13 +204,18 @@ public:
        return {txns, {}};
    }

+    void
+    waitForWritesToFinish() override
+    {
+        executor_.sync();
+    }

    bool
    doFinishWrites() override
    {
        // wait for other threads to finish their writes
-        executor_.sync();
+        waitForWritesToFinish();

-        if (!range) {
+        if (!range_) {
            executor_.writeSync(schema_->updateLedgerRange, ledgerSequence_, false, ledgerSequence_);
        }

@@ -399,7 +423,7 @@ public:
    {
        auto rng = fetchLedgerRange();
        if (!rng)
-            return {{}, {}};
+            return {.txns = {}, .cursor = {}};

        Statement const statement = [this, forward, &tokenID]() {
            if (forward)
@@ -547,6 +571,45 @@ public:
        return ret;
    }

+    MPTHoldersAndCursor
+    fetchMPTHolders(
+        ripple::uint192 const& mptID,
+        std::uint32_t const limit,
+        std::optional<ripple::AccountID> const& cursorIn,
+        std::uint32_t const ledgerSequence,
+        boost::asio::yield_context yield
+    ) const override
+    {
+        auto const holderEntries = executor_.read(
+            yield, schema_->selectMPTHolders, mptID, cursorIn.value_or(ripple::AccountID(0)), Limit{limit}
+        );
+
+        auto const& holderResults = holderEntries.value();
+        if (not holderResults.hasRows()) {
+            LOG(log_.debug()) << "No rows returned";
+            return {};
+        }
+
+        std::vector<ripple::uint256> mptKeys;
+        std::optional<ripple::AccountID> cursor;
+        for (auto const [holder] : extract<ripple::AccountID>(holderResults)) {
+            mptKeys.push_back(ripple::keylet::mptoken(mptID, holder).key);
+            cursor = holder;
+        }
+
+        auto mptObjects = doFetchLedgerObjects(mptKeys, ledgerSequence, yield);
+
+        auto it = std::remove_if(mptObjects.begin(), mptObjects.end(), [](Blob const& mpt) { return mpt.empty(); });
+
+        mptObjects.erase(it, mptObjects.end());
+
+        ASSERT(mptKeys.size() <= limit, "Number of keys can't exceed the limit");
+        if (mptKeys.size() == limit)
+            return {mptObjects, cursor};
+
+        return {mptObjects, {}};
+    }

    std::optional<Blob>
    doFetchLedgerObject(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context yield)
        const override
@@ -577,7 +640,6 @@ public:
            return seq;
        }
        LOG(log_.debug()) << "Could not fetch ledger object sequence - no rows";
-
        } else {
            LOG(log_.error()) << "Could not fetch ledger object sequence: " << res.error();
        }
@@ -608,7 +670,7 @@ public:
    {
        if (auto const res = executor_.read(yield, schema_->selectSuccessor, key, ledgerSequence); res) {
            if (auto const result = res->template get<ripple::uint256>(); result) {
-                if (*result == lastKey)
+                if (*result == kLAST_KEY)
                    return std::nullopt;
                return result;
            }
@@ -796,12 +858,32 @@ public:
        return results;
    }

+    std::optional<std::string>
+    fetchMigratorStatus(std::string const& migratorName, boost::asio::yield_context yield) const override
+    {
+        auto const res = executor_.read(yield, schema_->selectMigratorStatus, Text(migratorName));
+        if (not res) {
+            LOG(log_.error()) << "Could not fetch migrator status: " << res.error();
+            return {};
+        }
+
+        auto const& results = res.value();
+        if (not results) {
+            return {};
+        }
+
+        for (auto [statusString] : extract<std::string>(results))
+            return statusString;
+
+        return {};
+    }

    void
    doWriteLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob) override
    {
        LOG(log_.trace()) << " Writing ledger object " << key.size() << ":" << seq << " [" << blob.size() << " bytes]";

-        if (range)
+        if (range_)
            executor_.write(schema_->insertDiff, seq, key);

        executor_.write(schema_->insertObject, std::move(key), seq, std::move(blob));
@@ -825,30 +907,42 @@ public:
        statements.reserve(data.size() * 10);  // assume 10 transactions avg

        for (auto& record : data) {
-            std::transform(
-                std::begin(record.accounts),
-                std::end(record.accounts),
-                std::back_inserter(statements),
-                [this, &record](auto&& account) {
-                    return schema_->insertAccountTx.bind(
-                        std::forward<decltype(account)>(account),
-                        std::make_tuple(record.ledgerSequence, record.transactionIndex),
-                        record.txHash
-                    );
-                }
-            );
+            std::ranges::transform(record.accounts, std::back_inserter(statements), [this, &record](auto&& account) {
+                return schema_->insertAccountTx.bind(
+                    std::forward<decltype(account)>(account),
+                    std::make_tuple(record.ledgerSequence, record.transactionIndex),
+                    record.txHash
+                );
+            });
        }

        executor_.write(std::move(statements));
    }

+    void
+    writeAccountTransaction(AccountTransactionsData record) override
+    {
+        std::vector<Statement> statements;
+        statements.reserve(record.accounts.size());
+
+        std::ranges::transform(record.accounts, std::back_inserter(statements), [this, &record](auto&& account) {
+            return schema_->insertAccountTx.bind(
+                std::forward<decltype(account)>(account),
+                std::make_tuple(record.ledgerSequence, record.transactionIndex),
+                record.txHash
+            );
+        });
+
+        executor_.write(std::move(statements));
+    }

    void
    writeNFTTransactions(std::vector<NFTTransactionsData> const& data) override
    {
        std::vector<Statement> statements;
        statements.reserve(data.size());

-        std::transform(std::cbegin(data), std::cend(data), std::back_inserter(statements), [this](auto const& record) {
+        std::ranges::transform(data, std::back_inserter(statements), [this](auto const& record) {
            return schema_->insertNFTTx.bind(
                record.tokenID, std::make_tuple(record.ledgerSequence, record.transactionIndex), record.txHash
            );
@@ -881,27 +975,45 @@ public:
        statements.reserve(data.size() * 3);

        for (NFTsData const& record : data) {
-            statements.push_back(
-                schema_->insertNFT.bind(record.tokenID, record.ledgerSequence, record.owner, record.isBurned)
-            );
+            if (!record.onlyUriChanged) {
+                statements.push_back(
+                    schema_->insertNFT.bind(record.tokenID, record.ledgerSequence, record.owner, record.isBurned)
+                );

-            // If `uri` is set (and it can be set to an empty uri), we know this
-            // is a net-new NFT. That is, this NFT has not been seen before by
-            // us _OR_ it is in the extreme edge case of a re-minted NFT ID with
-            // the same NFT ID as an already-burned token. In this case, we need
-            // to record the URI and link to the issuer_nf_tokens table.
-            if (record.uri) {
-                statements.push_back(schema_->insertIssuerNFT.bind(
-                    ripple::nft::getIssuer(record.tokenID),
-                    static_cast<uint32_t>(ripple::nft::getTaxon(record.tokenID)),
-                    record.tokenID
-                ));
+                // If `uri` is set (and it can be set to an empty uri), we know this
+                // is a net-new NFT. That is, this NFT has not been seen before by
+                // us _OR_ it is in the extreme edge case of a re-minted NFT ID with
+                // the same NFT ID as an already-burned token. In this case, we need
+                // to record the URI and link to the issuer_nf_tokens table.
+                if (record.uri) {
+                    statements.push_back(schema_->insertIssuerNFT.bind(
+                        ripple::nft::getIssuer(record.tokenID),
+                        static_cast<uint32_t>(ripple::nft::getTaxon(record.tokenID)),
+                        record.tokenID
+                    ));
+                    statements.push_back(
+                        schema_->insertNFTURI.bind(record.tokenID, record.ledgerSequence, record.uri.value())
+                    );
+                }
+            } else {
+                // only uri changed, we update the uri table only
+                statements.push_back(
+                    schema_->insertNFTURI.bind(record.tokenID, record.ledgerSequence, record.uri.value())
+                );
+            }
        }

        executor_.writeEach(std::move(statements));
    }

+    void
+    writeMPTHolders(std::vector<MPTHolderData> const& data) override
+    {
+        std::vector<Statement> statements;
+        statements.reserve(data.size());
+        for (auto [mptId, holder] : data)
+            statements.push_back(schema_->insertMPTHolder.bind(mptId, holder));
+
+        executor_.write(std::move(statements));
+    }

@@ -912,6 +1024,14 @@ public:
        // probably was used in PG to start a transaction or smth.
    }

+    void
+    writeMigratorStatus(std::string const& migratorName, std::string const& status) override
+    {
+        executor_.writeSync(
+            schema_->insertMigratorStatus, data::cassandra::Text{migratorName}, data::cassandra::Text(status)
+        );
+    }

    bool
    isTooBusy() const override
    {
@@ -54,7 +54,7 @@ struct AccountTransactionsData {
     * @param meta The transaction metadata
     * @param txHash The transaction hash
     */
-    AccountTransactionsData(ripple::TxMeta& meta, ripple::uint256 const& txHash)
+    AccountTransactionsData(ripple::TxMeta const& meta, ripple::uint256 const& txHash)
        : accounts(meta.getAffectedAccounts())
        , ledgerSequence(meta.getLgrSeq())
        , transactionIndex(meta.getIndex())
@@ -107,6 +107,7 @@ struct NFTsData {
    ripple::AccountID owner;
    std::optional<ripple::Blob> uri;
    bool isBurned = false;
+    bool onlyUriChanged = false;  // Whether only the URI was changed

    /**
     * @brief Construct a new NFTsData object
@@ -170,6 +171,31 @@ struct NFTsData {
        : tokenID(tokenID), ledgerSequence(ledgerSequence), owner(owner), uri(uri)
    {
    }

+    /**
+     * @brief Construct a new NFTsData object with only the URI changed
+     *
+     * @param tokenID The token ID
+     * @param meta The transaction metadata
+     * @param uri The new URI
+     *
+     */
+    NFTsData(ripple::uint256 const& tokenID, ripple::TxMeta const& meta, ripple::Blob const& uri)
+        : tokenID(tokenID)
+        , ledgerSequence(meta.getLgrSeq())
+        , transactionIndex(meta.getIndex())
+        , uri(uri)
+        , onlyUriChanged(true)
+    {
+    }
};

+/**
+ * @brief Represents an MPT and holder pair
+ */
+struct MPTHolderData {
+    ripple::uint192 mptID;
+    ripple::AccountID holder;
+};

/**
@@ -182,11 +208,11 @@ template <typename T>
inline bool
isOffer(T const& object)
{
-    static constexpr short OFFER_OFFSET = 0x006f;
-    static constexpr short SHIFT = 8;
+    static constexpr short kOFFER_OFFSET = 0x006f;
+    static constexpr short kSHIFT = 8;

-    short offer_bytes = (object[1] << SHIFT) | object[2];
-    return offer_bytes == OFFER_OFFSET;
+    short offerBytes = (object[1] << kSHIFT) | object[2];
+    return offerBytes == kOFFER_OFFSET;
}
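The check works because bytes 1 and 2 of a serialized ledger object hold the 16-bit, big-endian entry type, and 0x006f ('o') marks an offer. A tiny illustration with a made-up blob prefix (only bytes 1 and 2 matter to isOffer()):

#include <cassert>
#include <vector>

// Illustrative blob, not a real ledger object.
std::vector<unsigned char> const object = {0x11, 0x00, 0x6f};
short const entryType = (object[1] << 8) | object[2];  // 0x006f
// isOffer(object) would return true for this prefix.
static_assert(0x006f == 0x006f);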

/**
@@ -215,9 +241,9 @@ template <typename T>
inline bool
isDirNode(T const& object)
{
-    static constexpr short DIR_NODE_SPACE_KEY = 0x0064;
+    static constexpr short kDIR_NODE_SPACE_KEY = 0x0064;
    short const spaceKey = (object.data()[1] << 8) | object.data()[2];
-    return spaceKey == DIR_NODE_SPACE_KEY;
+    return spaceKey == kDIR_NODE_SPACE_KEY;
}

/**
@@ -265,12 +291,12 @@ template <typename T>
inline ripple::uint256
getBookBase(T const& key)
{
-    static constexpr size_t KEY_SIZE = 24;
+    static constexpr size_t kEY_SIZE = 24;

    ASSERT(key.size() == ripple::uint256::size(), "Invalid key size {}", key.size());

    ripple::uint256 ret;
-    for (size_t i = 0; i < KEY_SIZE; ++i)
+    for (size_t i = 0; i < kEY_SIZE; ++i)
        ret.data()[i] = key.data()[i];

    return ret;
@@ -289,4 +315,4 @@ uint256ToString(ripple::uint256 const& input)
}

/** @brief The ripple epoch start timestamp. Midnight on 1st January 2000. */
-static constexpr std::uint32_t rippleEpochStart = 946684800;
+static constexpr std::uint32_t kRIPPLE_EPOCH_START = 946684800;
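XRPL timestamps count seconds since midnight 2000-01-01 UTC, and 946684800 is exactly that instant in Unix time, so converting is a single addition. A self-contained sketch:

#include <cstdint>
#include <ctime>

// Ripple time -> Unix time; 946684800 is kRIPPLE_EPOCH_START from above.
std::time_t
toUnixTime(std::uint32_t rippleTime)
{
    return static_cast<std::time_t>(rippleTime) + 946684800;
}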

@@ -20,6 +20,7 @@
#include "data/LedgerCache.hpp"

#include "data/Types.hpp"
+#include "etlng/Models.hpp"
#include "util/Assert.hpp"

#include <xrpl/basics/base_uint.h>
@@ -75,7 +76,7 @@ LedgerCache::update(std::vector<LedgerObject> const& objs, uint32_t seq, bool is

            auto& e = map_[obj.key];
            if (seq > e.seq) {
-                e = {seq, obj.blob};
+                e = {.seq = seq, .blob = obj.blob};
            }
        } else {
            map_.erase(obj.key);
@@ -87,6 +88,42 @@ LedgerCache::update(std::vector<LedgerObject> const& objs, uint32_t seq, bool is
        }
    }
}

+void
+LedgerCache::update(std::vector<etlng::model::Object> const& objs, uint32_t seq)
+{
+    if (disabled_)
+        return;
+
+    std::scoped_lock const lck{mtx_};
+    if (seq > latestSeq_) {
+        ASSERT(
+            seq == latestSeq_ + 1 || latestSeq_ == 0,
+            "New sequence must be either next or first. seq = {}, latestSeq_ = {}",
+            seq,
+            latestSeq_
+        );
+        latestSeq_ = seq;
+    }
+
+    deleted_.clear();  // previous update's deletes no longer needed
+
+    for (auto const& obj : objs) {
+        if (!obj.data.empty()) {
+            auto& e = map_[obj.key];
+            if (seq > e.seq)
+                e = {.seq = seq, .blob = obj.data};
+        } else {
+            if (map_.contains(obj.key))
+                deleted_[obj.key] = map_[obj.key];
+
+            map_.erase(obj.key);
+            if (!full_)
+                deletes_.insert(obj.key);
+        }
+    }
+    cv_.notify_all();
+}

std::optional<LedgerObject>
LedgerCache::getSuccessor(ripple::uint256 const& key, uint32_t seq) const
{
@@ -101,7 +138,7 @@ LedgerCache::getSuccessor(ripple::uint256 const& key, uint32_t seq) const
    if (e == map_.end())
        return {};
    ++successorHitCounter_.get();
-    return {{e->first, e->second.blob}};
+    return {{.key = e->first, .blob = e->second.blob}};
}

std::optional<LedgerObject>
@@ -117,7 +154,7 @@ LedgerCache::getPredecessor(ripple::uint256 const& key, uint32_t seq) const
    if (e == map_.begin())
        return {};
    --e;
-    return {{e->first, e->second.blob}};
+    return {{.key = e->first, .blob = e->second.blob}};
}

std::optional<Blob>
@@ -139,6 +176,29 @@ LedgerCache::get(ripple::uint256 const& key, uint32_t seq) const
    return {e->second.blob};
}

+std::optional<Blob>
+LedgerCache::getDeleted(ripple::uint256 const& key, uint32_t seq) const
+{
+    if (disabled_)
+        return std::nullopt;
+
+    std::shared_lock const lck{mtx_};
+    if (seq > latestSeq_)
+        return std::nullopt;
+
+    ++objectReqCounter_.get();
+
+    auto e = deleted_.find(key);
+    if (e == deleted_.end())
+        return std::nullopt;
+
+    if (seq < e->second.seq)
+        return std::nullopt;
+
+    ++objectHitCounter_.get();
+    return {e->second.blob};
+}

void
LedgerCache::setDisabled()
{
@@ -19,7 +19,10 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/LedgerCacheInterface.hpp"
|
||||
#include "data/Types.hpp"
|
||||
#include "etlng/Models.hpp"
|
||||
#include "util/prometheus/Bool.hpp"
|
||||
#include "util/prometheus/Counter.hpp"
|
||||
#include "util/prometheus/Label.hpp"
|
||||
#include "util/prometheus/Prometheus.hpp"
|
||||
@@ -27,7 +30,6 @@
|
||||
#include <xrpl/basics/base_uint.h>
|
||||
#include <xrpl/basics/hardened_hash.h>
|
||||
|
||||
#include <atomic>
|
||||
#include <condition_variable>
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
@@ -43,7 +45,7 @@ namespace data {

/**
 * @brief Cache for an entire ledger.
 */
class LedgerCache {
class LedgerCache : public LedgerCacheInterface {
    struct CacheEntry {
        uint32_t seq = 0;
        Blob blob;
@@ -72,120 +74,70 @@ class LedgerCache {
    )};

    std::map<ripple::uint256, CacheEntry> map_;
    std::map<ripple::uint256, CacheEntry> deleted_;

    mutable std::shared_mutex mtx_;
    std::condition_variable_any cv_;
    uint32_t latestSeq_ = 0;
    std::atomic_bool full_ = false;
    std::atomic_bool disabled_ = false;
    util::prometheus::Bool full_{PrometheusService::boolMetric(
        "ledger_cache_full",
        util::prometheus::Labels{},
        "Whether ledger cache full or not"
    )};
    util::prometheus::Bool disabled_{PrometheusService::boolMetric(
        "ledger_cache_disabled",
        util::prometheus::Labels{},
        "Whether ledger cache is disabled or not"
    )};

    // temporary set to prevent background thread from writing already deleted data. not used when cache is full
    std::unordered_set<ripple::uint256, ripple::hardened_hash<>> deletes_;
public:
    /**
     * @brief Update the cache with new ledger objects.
     *
     * @param objs The ledger objects to update cache with
     * @param seq The sequence to update cache for
     * @param isBackground Should be set to true when writing old data from a background thread
     */
    void
    update(std::vector<LedgerObject> const& objs, uint32_t seq, bool isBackground = false);
    update(std::vector<LedgerObject> const& objs, uint32_t seq, bool isBackground) override;

    void
    update(std::vector<etlng::model::Object> const& objs, uint32_t seq) override;

    /**
     * @brief Fetch a cached object by its key and sequence number.
     *
     * @param key The key to fetch for
     * @param seq The sequence to fetch for
     * @return If found in cache, will return the cached Blob; otherwise nullopt is returned
     */
    std::optional<Blob>
    get(ripple::uint256 const& key, uint32_t seq) const;
    get(ripple::uint256 const& key, uint32_t seq) const override;

    std::optional<Blob>
    getDeleted(ripple::uint256 const& key, uint32_t seq) const override;

    /**
     * @brief Gets a cached successor.
     *
     * Note: This function always returns std::nullopt when @ref isFull() returns false.
     *
     * @param key The key to fetch for
     * @param seq The sequence to fetch for
     * @return If found in cache, will return the cached successor; otherwise nullopt is returned
     */
    std::optional<LedgerObject>
    getSuccessor(ripple::uint256 const& key, uint32_t seq) const;
    getSuccessor(ripple::uint256 const& key, uint32_t seq) const override;

    /**
     * @brief Gets a cached predecessor.
     *
     * Note: This function always returns std::nullopt when @ref isFull() returns false.
     *
     * @param key The key to fetch for
     * @param seq The sequence to fetch for
     * @return If found in cache, will return the cached predecessor; otherwise nullopt is returned
     */
    std::optional<LedgerObject>
    getPredecessor(ripple::uint256 const& key, uint32_t seq) const;
    getPredecessor(ripple::uint256 const& key, uint32_t seq) const override;

    /**
     * @brief Disables the cache.
     */
    void
    setDisabled();
    setDisabled() override;

    /**
     * @return true if the cache is disabled; false otherwise
     */
    bool
    isDisabled() const;
    isDisabled() const override;

    /**
     * @brief Sets the full flag to true.
     *
     * This is used when the cache is loaded in its entirety at startup of the application. The cache can either be
     * loaded from the DB, populated together with the initial ledger download (on first run), or downloaded from a
     * peer node (specified in config).
     */
    void
    setFull();
    setFull() override;

    /**
     * @return The latest ledger sequence for which cache is available.
     */
    uint32_t
    latestLedgerSequence() const;
    latestLedgerSequence() const override;

    /**
     * @return true if the cache has all data for the most recent ledger; false otherwise
     */
    bool
    isFull() const;
    isFull() const override;

    /**
     * @return The total size of the cache.
     */
    size_t
    size() const;
    size() const override;

    /**
     * @return A number representing the success rate of hitting an object in the cache versus missing it.
     */
    float
    getObjectHitRate() const;
    getObjectHitRate() const override;

    /**
     * @return A number representing the success rate of hitting a successor in the cache versus missing it.
     */
    float
    getSuccessorHitRate() const;
    getSuccessorHitRate() const override;

    /**
     * @brief Waits until the cache contains a specific sequence.
     *
     * @param seq The sequence to wait for
     */
    void
    waitUntilCacheContainsSeq(uint32_t seq);
    waitUntilCacheContainsSeq(uint32_t seq) override;
};

} // namespace data
src/data/LedgerCacheInterface.hpp (new file, 173 lines)
@@ -0,0 +1,173 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2025, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include "data/Types.hpp"
#include "etlng/Models.hpp"

#include <xrpl/basics/base_uint.h>
#include <xrpl/basics/hardened_hash.h>

#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

namespace data {

/**
 * @brief Cache for an entire ledger.
 */
class LedgerCacheInterface {
public:
    virtual ~LedgerCacheInterface() = default;
    LedgerCacheInterface() = default;
    LedgerCacheInterface(LedgerCacheInterface&&) = delete;
    LedgerCacheInterface(LedgerCacheInterface const&) = delete;
    LedgerCacheInterface&
    operator=(LedgerCacheInterface&&) = delete;
    LedgerCacheInterface&
    operator=(LedgerCacheInterface const&) = delete;

    /**
     * @brief Update the cache with new ledger objects.
     *
     * @param objs The ledger objects to update cache with
     * @param seq The sequence to update cache for
     * @param isBackground Should be set to true when writing old data from a background thread
     */
    virtual void
    update(std::vector<LedgerObject> const& objs, uint32_t seq, bool isBackground = false) = 0;

    /**
     * @brief Update the cache with new ledger objects.
     *
     * @param objs The ledger objects to update cache with
     * @param seq The sequence to update cache for
     */
    virtual void
    update(std::vector<etlng::model::Object> const& objs, uint32_t seq) = 0;

    /**
     * @brief Fetch a cached object by its key and sequence number.
     *
     * @param key The key to fetch for
     * @param seq The sequence to fetch for
     * @return If found in cache, will return the cached Blob; otherwise nullopt is returned
     */
    virtual std::optional<Blob>
    get(ripple::uint256 const& key, uint32_t seq) const = 0;

    /**
     * @brief Fetch a recently deleted object by its key and sequence number.
     *
     * @param key The key to fetch for
     * @param seq The sequence to fetch for
     * @return If found in deleted cache, will return the cached Blob; otherwise nullopt is returned
     */
    virtual std::optional<Blob>
    getDeleted(ripple::uint256 const& key, uint32_t seq) const = 0;

    /**
     * @brief Gets a cached successor.
     *
     * Note: This function always returns std::nullopt when @ref isFull() returns false.
     *
     * @param key The key to fetch for
     * @param seq The sequence to fetch for
     * @return If found in cache, will return the cached successor; otherwise nullopt is returned
     */
    virtual std::optional<LedgerObject>
    getSuccessor(ripple::uint256 const& key, uint32_t seq) const = 0;

    /**
     * @brief Gets a cached predecessor.
     *
     * Note: This function always returns std::nullopt when @ref isFull() returns false.
     *
     * @param key The key to fetch for
     * @param seq The sequence to fetch for
     * @return If found in cache, will return the cached predecessor; otherwise nullopt is returned
     */
    virtual std::optional<LedgerObject>
    getPredecessor(ripple::uint256 const& key, uint32_t seq) const = 0;

    /**
     * @brief Disables the cache.
     */
    virtual void
    setDisabled() = 0;

    /**
     * @return true if the cache is disabled; false otherwise
     */
    virtual bool
    isDisabled() const = 0;

    /**
     * @brief Sets the full flag to true.
     *
     * This is used when the cache is loaded in its entirety at startup of the application. The cache can either be
     * loaded from the DB, populated together with the initial ledger download (on first run), or downloaded from a
     * peer node (specified in config).
     */
    virtual void
    setFull() = 0;

    /**
     * @return The latest ledger sequence for which cache is available.
     */
    virtual uint32_t
    latestLedgerSequence() const = 0;

    /**
     * @return true if the cache has all data for the most recent ledger; false otherwise
     */
    virtual bool
    isFull() const = 0;

    /**
     * @return The total size of the cache.
     */
    virtual size_t
    size() const = 0;

    /**
     * @return A number representing the success rate of hitting an object in the cache versus missing it.
     */
    virtual float
    getObjectHitRate() const = 0;

    /**
     * @return A number representing the success rate of hitting a successor in the cache versus missing it.
     */
    virtual float
    getSuccessorHitRate() const = 0;

    /**
     * @brief Waits until the cache contains a specific sequence.
     *
     * @param seq The sequence to wait for
     */
    virtual void
    waitUntilCacheContainsSeq(uint32_t seq) = 0;
};

} // namespace data
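Since consumers can now hold the abstraction rather than the concrete `LedgerCache`, substituting a mock cache in tests becomes straightforward. A hedged sketch of what a consumer might look like (the function is illustrative, not part of this change):

```
// Illustrative sketch only: report cache health through the interface.
void
logCacheHealth(data::LedgerCacheInterface const& cache, util::Logger& log)
{
    LOG(log.info()) << "full=" << cache.isFull() << " disabled=" << cache.isDisabled()
                    << " size=" << cache.size() << " latestSeq=" << cache.latestLedgerSequence();
}
```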
@@ -262,3 +262,15 @@ CREATE TABLE clio.nf_token_transactions (
```

The `nf_token_transactions` table is the NFT counterpart of `account_tx`: it was motivated by the same needs and fills the same role for NFTs. It drives the `nft_history` API.

### migrator_status

```
CREATE TABLE clio.migrator_status (
    migrator_name TEXT, # The name of the migrator
    status TEXT,        # The status of the migrator
    PRIMARY KEY (migrator_name)
)
```

The `migrator_status` table stores the status of each migrator in this database. If a migrator's status is `migrated`, this database has finished the data migration handled by that migrator.
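For illustration, a status check against this table could look as follows (the migrator name is hypothetical):

```
SELECT status FROM clio.migrator_status WHERE migrator_name = 'ExampleMigrator';
```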
@@ -233,6 +233,14 @@ struct NFTsAndCursor {
    std::optional<ripple::uint256> cursor;
};

/**
 * @brief Represents an array of MPTokens
 */
struct MPTHoldersAndCursor {
    std::vector<Blob> mptokens;
    std::optional<ripple::AccountID> cursor;
};

/**
 * @brief Stores a range of sequences as a min and max pair.
 */
@@ -258,7 +266,7 @@ struct Amendment {
     * @return The amendment Id as uint256
     */
    static ripple::uint256
    GetAmendmentId(std::string_view const name);
    getAmendmentId(std::string_view const name);

    /**
     * @brief Equality comparison operator
@@ -304,8 +312,8 @@ struct AmendmentKey {
    operator<=>(AmendmentKey const& other) const = default;
};

constexpr ripple::uint256 firstKey{"0000000000000000000000000000000000000000000000000000000000000000"};
constexpr ripple::uint256 lastKey{"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"};
constexpr ripple::uint256 hi192{"0000000000000000000000000000000000000000000000001111111111111111"};
constexpr ripple::uint256 kFIRST_KEY{"0000000000000000000000000000000000000000000000000000000000000000"};
constexpr ripple::uint256 kLAST_KEY{"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"};
constexpr ripple::uint256 kHI192{"0000000000000000000000000000000000000000000000001111111111111111"};

} // namespace data
@@ -60,7 +60,7 @@ Handle::connect() const
Handle::FutureType
Handle::asyncConnect(std::string_view keyspace) const
{
    return cass_session_connect_keyspace(session_, cluster_, keyspace.data());
    return cass_session_connect_keyspace_n(session_, cluster_, keyspace.data(), keyspace.size());
}

Handle::MaybeErrorType
@@ -155,7 +155,7 @@ Handle::asyncExecute(std::vector<StatementType> const& statements, std::function
Handle::PreparedStatementType
Handle::prepare(std::string_view query) const
{
    Handle::FutureType const future = cass_session_prepare(session_, query.data());
    Handle::FutureType const future = cass_session_prepare_n(session_, query.data(), query.size());
    auto const rc = future.await();
    if (rc)
        return cass_future_get_prepared(future);
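The move to the `_n` variants matters because `std::string_view` is not guaranteed to be null-terminated, so passing only `.data()` can read past the view. A sketch of the failure mode (illustrative, not from this change):

```
// Illustrative sketch only: a view sliced from a larger buffer has no '\0'
// at its end, so a length-taking API is required for correctness.
std::string const buffer = "SELECT x FROM t /* unrelated trailing text */";
std::string_view const query = std::string_view{buffer}.substr(0, 15);  // "SELECT x FROM t"

// cass_session_prepare(session, query.data());                  // may over-read into the tail
// cass_session_prepare_n(session, query.data(), query.size());  // bounded by an explicit length
```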
@@ -74,7 +74,7 @@ public:
                'class': 'SimpleStrategy',
                'replication_factor': '{}'
            }}
            AND durable_writes = true
            AND durable_writes = True
        )",
        settingsProvider_.get().getKeyspace(),
        settingsProvider_.get().getReplicationFactor()
@@ -257,6 +257,31 @@ public:
            qualifiedTableName(settingsProvider_.get(), "nf_token_transactions")
        ));

        statements.emplace_back(fmt::format(
            R"(
                CREATE TABLE IF NOT EXISTS {}
                (
                    mpt_id blob,
                    holder blob,
                    PRIMARY KEY (mpt_id, holder)
                )
                WITH CLUSTERING ORDER BY (holder ASC)
            )",
            qualifiedTableName(settingsProvider_.get(), "mp_token_holders")
        ));

        statements.emplace_back(fmt::format(
            R"(
                CREATE TABLE IF NOT EXISTS {}
                (
                    migrator_name TEXT,
                    status TEXT,
                    PRIMARY KEY (migrator_name)
                )
            )",
            qualifiedTableName(settingsProvider_.get(), "migrator_status")
        ));

        return statements;
    }();

@@ -393,6 +418,17 @@ public:
        ));
    }();

    PreparedStatement insertMPTHolder = [this]() {
        return handle_.get().prepare(fmt::format(
            R"(
                INSERT INTO {}
                (mpt_id, holder)
                VALUES (?, ?)
            )",
            qualifiedTableName(settingsProvider_.get(), "mp_token_holders")
        ));
    }();

    PreparedStatement insertLedgerHeader = [this]() {
        return handle_.get().prepare(fmt::format(
            R"(
@@ -436,12 +472,23 @@ public:
            R"(
                UPDATE {}
                SET sequence = ?
                WHERE is_latest = false
                WHERE is_latest = False
            )",
            qualifiedTableName(settingsProvider_.get(), "ledger_range")
        ));
    }();

    PreparedStatement insertMigratorStatus = [this]() {
        return handle_.get().prepare(fmt::format(
            R"(
                INSERT INTO {}
                (migrator_name, status)
                VALUES (?, ?)
            )",
            qualifiedTableName(settingsProvider_.get(), "migrator_status")
        ));
    }();

    //
    // Select queries
    //
@@ -687,6 +734,20 @@ public:
        ));
    }();

    PreparedStatement selectMPTHolders = [this]() {
        return handle_.get().prepare(fmt::format(
            R"(
                SELECT holder
                FROM {}
                WHERE mpt_id = ?
                AND holder > ?
                ORDER BY holder ASC
                LIMIT ?
            )",
            qualifiedTableName(settingsProvider_.get(), "mp_token_holders")
        ));
    }();

    PreparedStatement selectLedgerByHash = [this]() {
        return handle_.get().prepare(fmt::format(
            R"(
@@ -715,7 +776,7 @@ public:
            R"(
                SELECT sequence
                FROM {}
                WHERE is_latest = true
                WHERE is_latest = True
            )",
            qualifiedTableName(settingsProvider_.get(), "ledger_range")
        ));
@@ -726,10 +787,22 @@ public:
            R"(
                SELECT sequence
                FROM {}
                WHERE is_latest in (True, False)
            )",
            qualifiedTableName(settingsProvider_.get(), "ledger_range")
        ));
    }();

    PreparedStatement selectMigratorStatus = [this]() {
        return handle_.get().prepare(fmt::format(
            R"(
                SELECT status
                FROM {}
                WHERE migrator_name = ?
            )",
            qualifiedTableName(settingsProvider_.get(), "migrator_status")
        ));
    }();
};
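The `(mpt_id, holder)` primary key with ascending clustering makes `selectMPTHolders` a keyset-pagination query. A hedged usage sketch (the `schema` instance, values, and variable names are hypothetical):

```
// Illustrative sketch only: fetch one page of holders for an MPT issuance.
// `lastHolder` is the final holder of the previous page (or the zero account
// for the first page); Limit is the strong type from data/cassandra/Types.hpp.
auto const statement = schema.selectMPTHolders.bind(mptID, lastHolder, Limit{20});
```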

/**
@@ -22,10 +22,7 @@
#include "data/cassandra/Types.hpp"
#include "data/cassandra/impl/Cluster.hpp"
#include "util/Constants.hpp"
#include "util/config/Config.hpp"

#include <boost/json/conversion.hpp>
#include <boost/json/value.hpp>
#include "util/newconfig/ObjectView.hpp"

#include <cerrno>
#include <chrono>
@@ -36,43 +33,17 @@
#include <ios>
#include <iterator>
#include <optional>
#include <stdexcept>
#include <string>
#include <string_view>
#include <system_error>

namespace data::cassandra {

namespace impl {
inline Settings::ContactPoints
tag_invoke(boost::json::value_to_tag<Settings::ContactPoints>, boost::json::value const& value)
{
    if (not value.is_object()) {
        throw std::runtime_error("Feed entire Cassandra section to parse Settings::ContactPoints instead");
    }

    util::Config const obj{value};
    Settings::ContactPoints out;

    out.contactPoints = obj.valueOrThrow<std::string>("contact_points", "`contact_points` must be a string");
    out.port = obj.maybeValue<uint16_t>("port");

    return out;
}

inline Settings::SecureConnectionBundle
tag_invoke(boost::json::value_to_tag<Settings::SecureConnectionBundle>, boost::json::value const& value)
{
    if (not value.is_string())
        throw std::runtime_error("`secure_connect_bundle` must be a string");
    return Settings::SecureConnectionBundle{value.as_string().data()};
}
} // namespace impl

SettingsProvider::SettingsProvider(util::Config const& cfg)
SettingsProvider::SettingsProvider(util::config::ObjectView const& cfg)
    : config_{cfg}
    , keyspace_{cfg.valueOr<std::string>("keyspace", "clio")}
    , keyspace_{cfg.get<std::string>("keyspace")}
    , tablePrefix_{cfg.maybeValue<std::string>("table_prefix")}
    , replicationFactor_{cfg.valueOr<uint16_t>("replication_factor", 3)}
    , replicationFactor_{cfg.get<uint16_t>("replication_factor")}
    , settings_{parseSettings()}
{
}
@@ -86,8 +57,8 @@ SettingsProvider::getSettings() const
std::optional<std::string>
SettingsProvider::parseOptionalCertificate() const
{
    if (auto const certPath = config_.maybeValue<std::string>("certfile"); certPath) {
        auto const path = std::filesystem::path(*certPath);
    if (auto const certPath = config_.getValueView("certfile"); certPath.hasValue()) {
        auto const path = std::filesystem::path(certPath.asString());
        std::ifstream fileStream(path.string(), std::ios::in);
        if (!fileStream) {
            throw std::system_error(errno, std::generic_category(), "Opening certificate " + path.string());
@@ -108,30 +79,34 @@ Settings
SettingsProvider::parseSettings() const
{
    auto settings = Settings::defaultSettings();
    if (auto const bundle = config_.maybeValue<Settings::SecureConnectionBundle>("secure_connect_bundle"); bundle) {
        settings.connectionInfo = *bundle;

    // all config values used in settings are under the "database.cassandra" prefix
    if (config_.getValueView("secure_connect_bundle").hasValue()) {
        auto const bundle = Settings::SecureConnectionBundle{(config_.get<std::string>("secure_connect_bundle"))};
        settings.connectionInfo = bundle;
    } else {
        settings.connectionInfo =
            config_.valueOrThrow<Settings::ContactPoints>("Missing contact_points in Cassandra config");
        Settings::ContactPoints out;
        out.contactPoints = config_.get<std::string>("contact_points");
        out.port = config_.maybeValue<uint32_t>("port");
        settings.connectionInfo = out;
    }

    settings.threads = config_.valueOr<uint32_t>("threads", settings.threads);
    settings.maxWriteRequestsOutstanding =
        config_.valueOr<uint32_t>("max_write_requests_outstanding", settings.maxWriteRequestsOutstanding);
    settings.maxReadRequestsOutstanding =
        config_.valueOr<uint32_t>("max_read_requests_outstanding", settings.maxReadRequestsOutstanding);
    settings.coreConnectionsPerHost =
        config_.valueOr<uint32_t>("core_connections_per_host", settings.coreConnectionsPerHost);
    settings.threads = config_.get<uint32_t>("threads");
    settings.maxWriteRequestsOutstanding = config_.get<uint32_t>("max_write_requests_outstanding");
    settings.maxReadRequestsOutstanding = config_.get<uint32_t>("max_read_requests_outstanding");
    settings.coreConnectionsPerHost = config_.get<uint32_t>("core_connections_per_host");
    settings.queueSizeIO = config_.maybeValue<uint32_t>("queue_size_io");
    settings.writeBatchSize = config_.valueOr<std::size_t>("write_batch_size", settings.writeBatchSize);
    settings.writeBatchSize = config_.get<std::size_t>("write_batch_size");

    auto const connectTimeoutSecond = config_.maybeValue<uint32_t>("connect_timeout");
    if (connectTimeoutSecond)
        settings.connectionTimeout = std::chrono::milliseconds{*connectTimeoutSecond * util::MILLISECONDS_PER_SECOND};
    if (config_.getValueView("connect_timeout").hasValue()) {
        auto const connectTimeoutSecond = config_.get<uint32_t>("connect_timeout");
        settings.connectionTimeout = std::chrono::milliseconds{connectTimeoutSecond * util::kMILLISECONDS_PER_SECOND};
    }

    auto const requestTimeoutSecond = config_.maybeValue<uint32_t>("request_timeout");
    if (requestTimeoutSecond)
        settings.requestTimeout = std::chrono::milliseconds{*requestTimeoutSecond * util::MILLISECONDS_PER_SECOND};
    if (config_.getValueView("request_timeout").hasValue()) {
        auto const requestTimeoutSecond = config_.get<uint32_t>("request_timeout");
        settings.requestTimeout = std::chrono::milliseconds{requestTimeoutSecond * util::kMILLISECONDS_PER_SECOND};
    }

    settings.certificate = parseOptionalCertificate();
    settings.username = config_.maybeValue<std::string>("username");
@@ -19,10 +19,9 @@

#pragma once

#include "data/cassandra/Handle.hpp"
#include "data/cassandra/Types.hpp"
#include "util/config/Config.hpp"
#include "util/log/Logger.hpp"
#include "data/cassandra/impl/Cluster.hpp"
#include "util/newconfig/ObjectView.hpp"

#include <cstdint>
#include <optional>
@@ -34,7 +33,7 @@ namespace data::cassandra {
 * @brief Provides settings for @ref BasicCassandraBackend.
 */
class SettingsProvider {
    util::Config config_;
    util::config::ObjectView config_;

    std::string keyspace_;
    std::optional<std::string> tablePrefix_;
@@ -47,7 +46,7 @@ public:
     *
     * @param cfg The config of Clio to use
     */
    explicit SettingsProvider(util::Config const& cfg);
    explicit SettingsProvider(util::config::ObjectView const& cfg);

    /**
     * @return The cluster settings
@@ -21,6 +21,8 @@

#include <cstdint>
#include <expected>
#include <string>
#include <utility>

namespace data::cassandra {

@@ -55,6 +57,26 @@ struct Limit {
    int32_t limit;
};

/**
 * @brief A strong type wrapper for string.
 *
 * This is unfortunately needed right now to support TEXT properly, because clio uses std::string to represent BLOB;
 * to bind TEXT with a string we need to go through this type.
 */
struct Text {
    std::string text;

    /**
     * @brief Construct a new Text object from string type
     *
     * @param text The text to wrap
     */
    explicit Text(std::string text) : text{std::move(text)}
    {
    }
};

class Handle;
class CassandraError;
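Because `Statement::bind` writes plain `std::string` as BLOB, TEXT columns must go through the wrapper. A hedged example (the prepared statement and values are hypothetical):

```
// Illustrative sketch only: wrap strings in Text so they bind as TEXT, not BLOB.
auto const statement = insertMigratorStatus.bind(Text{"ExampleMigrator"}, Text{"migrated"});
```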
@@ -23,6 +23,7 @@
#include "data/cassandra/Handle.hpp"
#include "data/cassandra/Types.hpp"
#include "data/cassandra/impl/RetryPolicy.hpp"
#include "util/Mutex.hpp"
#include "util/log/Logger.hpp"

#include <boost/asio.hpp>
@@ -64,8 +65,8 @@ class AsyncExecutor : public std::enable_shared_from_this<AsyncExecutor<Statemen
    RetryCallbackType onRetry_;

    // does not exist during initial construction, hence optional
    std::optional<FutureWithCallbackType> future_;
    std::mutex mtx_;
    using OptionalFuture = std::optional<FutureWithCallbackType>;
    util::Mutex<OptionalFuture> future_;

public:
    /**
@@ -127,8 +128,8 @@ private:
            self = nullptr; // explicitly decrement refcount
        };

        std::scoped_lock const lck{mtx_};
        future_.emplace(handle.asyncExecute(data_, std::move(handler)));
        auto future = future_.template lock<std::scoped_lock>();
        future->emplace(handle.asyncExecute(data_, std::move(handler)));
    }
};
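The switch to `util::Mutex` ties the lock to the data it guards, so the optional future can no longer be read or written without holding the mutex. A minimal sketch of the pattern, assuming the `lock()` API used above:

```
// Illustrative sketch only: the guarded value is reachable solely through the
// handle returned by lock(), which holds the lock for the handle's lifetime.
util::Mutex<std::optional<int>> guarded;
{
    auto handle = guarded.lock<std::scoped_lock>();
    handle->emplace(42);  // same pointer-like access pattern as future_ above
}  // lock released here
```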
@@ -31,14 +31,14 @@
#include <vector>

namespace {
constexpr auto batchDeleter = [](CassBatch* ptr) { cass_batch_free(ptr); };
constexpr auto kBATCH_DELETER = [](CassBatch* ptr) { cass_batch_free(ptr); };
} // namespace

namespace data::cassandra::impl {

// TODO: Use an appropriate value instead of CASS_BATCH_TYPE_LOGGED for different use cases
Batch::Batch(std::vector<Statement> const& statements)
    : ManagedObject{cass_batch_new(CASS_BATCH_TYPE_LOGGED), batchDeleter}
    : ManagedObject{cass_batch_new(CASS_BATCH_TYPE_LOGGED), kBATCH_DELETER}
{
    cass_batch_set_is_idempotent(*this, cass_true);

@@ -33,13 +33,13 @@

namespace {

constexpr auto clusterDeleter = [](CassCluster* ptr) { cass_cluster_free(ptr); };
constexpr auto kCLUSTER_DELETER = [](CassCluster* ptr) { cass_cluster_free(ptr); };

}; // namespace

namespace data::cassandra::impl {

Cluster::Cluster(Settings const& settings) : ManagedObject{cass_cluster_new(), clusterDeleter}
Cluster::Cluster(Settings const& settings) : ManagedObject{cass_cluster_new(), kCLUSTER_DELETER}
{
    using std::to_string;
@@ -25,6 +25,8 @@
#include <cassandra.h>

#include <chrono>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>
#include <string_view>
@@ -39,10 +41,10 @@ namespace data::cassandra::impl {
 * @brief Bundles all cassandra settings in one place.
 */
struct Settings {
    static constexpr std::size_t DEFAULT_CONNECTION_TIMEOUT = 10000;
    static constexpr uint32_t DEFAULT_MAX_WRITE_REQUESTS_OUTSTANDING = 10'000;
    static constexpr uint32_t DEFAULT_MAX_READ_REQUESTS_OUTSTANDING = 100'000;
    static constexpr std::size_t DEFAULT_BATCH_SIZE = 20;
    static constexpr std::size_t kDEFAULT_CONNECTION_TIMEOUT = 10000;
    static constexpr uint32_t kDEFAULT_MAX_WRITE_REQUESTS_OUTSTANDING = 10'000;
    static constexpr uint32_t kDEFAULT_MAX_READ_REQUESTS_OUTSTANDING = 100'000;
    static constexpr std::size_t kDEFAULT_BATCH_SIZE = 20;

    /**
     * @brief Represents the configuration of contact points for cassandra.
@@ -63,7 +65,7 @@ struct Settings {
    bool enableLog = false;

    /** @brief Connect timeout specified in milliseconds */
    std::chrono::milliseconds connectionTimeout = std::chrono::milliseconds{DEFAULT_CONNECTION_TIMEOUT};
    std::chrono::milliseconds connectionTimeout = std::chrono::milliseconds{kDEFAULT_CONNECTION_TIMEOUT};

    /** @brief Request timeout specified in milliseconds */
    std::chrono::milliseconds requestTimeout = std::chrono::milliseconds{0}; // no timeout at all
@@ -75,16 +77,16 @@ struct Settings {
    uint32_t threads = std::thread::hardware_concurrency();

    /** @brief The maximum number of outstanding write requests at any given moment */
    uint32_t maxWriteRequestsOutstanding = DEFAULT_MAX_WRITE_REQUESTS_OUTSTANDING;
    uint32_t maxWriteRequestsOutstanding = kDEFAULT_MAX_WRITE_REQUESTS_OUTSTANDING;

    /** @brief The maximum number of outstanding read requests at any given moment */
    uint32_t maxReadRequestsOutstanding = DEFAULT_MAX_READ_REQUESTS_OUTSTANDING;
    uint32_t maxReadRequestsOutstanding = kDEFAULT_MAX_READ_REQUESTS_OUTSTANDING;

    /** @brief The number of connections per host to always keep active */
    uint32_t coreConnectionsPerHost = 1u;

    /** @brief Size of batches when writing */
    std::size_t writeBatchSize = DEFAULT_BATCH_SIZE;
    std::size_t writeBatchSize = kDEFAULT_BATCH_SIZE;

    /** @brief Size of the IO queue */
    std::optional<uint32_t> queueSizeIO = std::nullopt; // NOLINT(readability-redundant-member-init)
@@ -33,7 +33,7 @@
namespace data::cassandra::impl {

class Collection : public ManagedObject<CassCollection> {
    static constexpr auto deleter = [](CassCollection* ptr) { cass_collection_free(ptr); };
    static constexpr auto kDELETER = [](CassCollection* ptr) { cass_collection_free(ptr); };

    static void
    throwErrorIfNeeded(CassError const rc, std::string_view const label)
@@ -49,7 +49,7 @@ public:

    template <typename Type>
    explicit Collection(std::vector<Type> const& value)
        : ManagedObject{cass_collection_new(CASS_COLLECTION_TYPE_LIST, value.size()), deleter}
        : ManagedObject{cass_collection_new(CASS_COLLECTION_TYPE_LIST, value.size()), kDELETER}
    {
        bind(value);
    }
@@ -35,6 +35,7 @@
#include <boost/asio/spawn.hpp>
#include <boost/json/object.hpp>

#include <algorithm>
#include <atomic>
#include <chrono>
#include <condition_variable>
@@ -192,10 +193,24 @@ public:
    template <typename... Args>
    void
    write(PreparedStatementType const& preparedStatement, Args&&... args)
    {
        auto statement = preparedStatement.bind(std::forward<Args>(args)...);
        write(std::move(statement));
    }

    /**
     * @brief Non-blocking query execution used for writing data.
     *
     * Retries forever with retry policy specified by @ref AsyncExecutor
     *
     * @param statement Statement to execute
     * @throw DatabaseTimeout on timeout
     */
    void
    write(StatementType&& statement)
    {
        auto const startTime = std::chrono::steady_clock::now();

        auto statement = preparedStatement.bind(std::forward<Args>(args)...);
        incrementOutstandingRequestCount();

        counters_->registerWriteStarted();
@@ -251,6 +266,21 @@ public:
        });
    }

    /**
     * @brief Non-blocking query execution used for writing data. In contrast to write, this method does not execute
     * the statements in a batch.
     *
     * Retries forever with retry policy specified by @ref AsyncExecutor.
     *
     * @param statements Vector of statements to execute
     * @throw DatabaseTimeout on timeout
     */
    void
    writeEach(std::vector<StatementType>&& statements)
    {
        std::ranges::for_each(std::move(statements), [this](auto& statement) { this->write(std::move(statement)); });
    }
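A hedged usage contrast (the strategy object and statement vectors are hypothetical, and the batched vector overload is assumed from the doc comment it contrasts with): `write` groups statements into a single batch, while `writeEach` issues them as independent writes.

```
// Illustrative sketch only; both vectors are pre-bound std::vector<StatementType>.
strategy.write(std::move(batchedStatements));          // executed together, as a batch
strategy.writeEach(std::move(independentStatements));  // executed one by one, no batch
```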

    /**
     * @brief Coroutine-based query execution used for reading data.
     *
@@ -32,12 +32,12 @@
#include <utility>

namespace {
constexpr auto futureDeleter = [](CassFuture* ptr) { cass_future_free(ptr); };
constexpr auto kFUTURE_DELETER = [](CassFuture* ptr) { cass_future_free(ptr); };
} // namespace

namespace data::cassandra::impl {

/* implicit */ Future::Future(CassFuture* ptr) : ManagedObject{ptr, futureDeleter}
/* implicit */ Future::Future(CassFuture* ptr) : ManagedObject{ptr, kFUTURE_DELETER}
{
}
@@ -30,8 +30,8 @@ protected:
    std::unique_ptr<Managed, void (*)(Managed*)> ptr_;

public:
    template <typename deleterCallable>
    ManagedObject(Managed* rawPtr, deleterCallable deleter) : ptr_{rawPtr, deleter}
    template <typename DeleterCallable>
    ManagedObject(Managed* rawPtr, DeleterCallable deleter) : ptr_{rawPtr, deleter}
    {
        if (rawPtr == nullptr)
            throw std::runtime_error("Could not create DB object - got nullptr");
@@ -26,13 +26,13 @@
#include <cstddef>

namespace {
constexpr auto resultDeleter = [](CassResult const* ptr) { cass_result_free(ptr); };
constexpr auto resultIteratorDeleter = [](CassIterator* ptr) { cass_iterator_free(ptr); };
constexpr auto kRESULT_DELETER = [](CassResult const* ptr) { cass_result_free(ptr); };
constexpr auto kRESULT_ITERATOR_DELETER = [](CassIterator* ptr) { cass_iterator_free(ptr); };
} // namespace

namespace data::cassandra::impl {

/* implicit */ Result::Result(CassResult const* ptr) : ManagedObject{ptr, resultDeleter}
/* implicit */ Result::Result(CassResult const* ptr) : ManagedObject{ptr, kRESULT_DELETER}
{
}

@@ -49,7 +49,7 @@ Result::hasRows() const
}

/* implicit */ ResultIterator::ResultIterator(CassIterator* ptr)
    : ManagedObject{ptr, resultIteratorDeleter}, hasMore_{cass_iterator_next(ptr) != 0u}
    : ManagedObject{ptr, kRESULT_ITERATOR_DELETER}, hasMore_{cass_iterator_next(ptr) != 0u}
{
}
@@ -26,10 +26,10 @@
namespace data::cassandra::impl {

class Session : public ManagedObject<CassSession> {
    static constexpr auto deleter = [](CassSession* ptr) { cass_session_free(ptr); };
    static constexpr auto kDELETER = [](CassSession* ptr) { cass_session_free(ptr); };

public:
    Session() : ManagedObject{cass_session_new(), deleter}
    Session() : ManagedObject{cass_session_new(), kDELETER}
    {
    }
};
@@ -27,12 +27,12 @@
#include <string>

namespace {
constexpr auto contextDeleter = [](CassSsl* ptr) { cass_ssl_free(ptr); };
constexpr auto kCONTEXT_DELETER = [](CassSsl* ptr) { cass_ssl_free(ptr); };
} // namespace

namespace data::cassandra::impl {

SslContext::SslContext(std::string const& certificate) : ManagedObject{cass_ssl_new(), contextDeleter}
SslContext::SslContext(std::string const& certificate) : ManagedObject{cass_ssl_new(), kCONTEXT_DELETER}
{
    cass_ssl_set_verify_flags(*this, CASS_SSL_VERIFY_NONE);
    if (auto const rc = cass_ssl_add_trusted_cert(*this, certificate.c_str()); rc != CASS_OK) {
@@ -43,7 +43,7 @@
namespace data::cassandra::impl {

class Statement : public ManagedObject<CassStatement> {
    static constexpr auto deleter = [](CassStatement* ptr) { cass_statement_free(ptr); };
    static constexpr auto kDELETER = [](CassStatement* ptr) { cass_statement_free(ptr); };

public:
    /**
@@ -54,14 +54,14 @@ public:
     */
    template <typename... Args>
    explicit Statement(std::string_view query, Args&&... args)
        : ManagedObject{cass_statement_new(query.data(), sizeof...(args)), deleter}
        : ManagedObject{cass_statement_new_n(query.data(), query.size(), sizeof...(args)), kDELETER}
    {
        cass_statement_set_consistency(*this, CASS_CONSISTENCY_QUORUM);
        cass_statement_set_is_idempotent(*this, cass_true);
        bind<Args...>(std::forward<Args>(args)...);
    }

    /* implicit */ Statement(CassStatement* ptr) : ManagedObject{ptr, deleter}
    /* implicit */ Statement(CassStatement* ptr) : ManagedObject{ptr, kDELETER}
    {
        cass_statement_set_consistency(*this, CASS_CONSISTENCY_QUORUM);
        cass_statement_set_is_idempotent(*this, cass_true);
@@ -106,9 +106,9 @@ public:
        using UintByteTupleType = std::tuple<uint32_t, ripple::uint256>;
        using ByteVectorType = std::vector<ripple::uint256>;

        if constexpr (std::is_same_v<DecayedType, ripple::uint256>) {
        if constexpr (std::is_same_v<DecayedType, ripple::uint256> || std::is_same_v<DecayedType, ripple::uint192>) {
            auto const rc = bindBytes(value.data(), value.size());
            throwErrorIfNeeded(rc, "Bind ripple::uint256");
            throwErrorIfNeeded(rc, "Bind ripple::base_uint");
        } else if constexpr (std::is_same_v<DecayedType, ripple::AccountID>) {
            auto const rc = bindBytes(value.data(), value.size());
            throwErrorIfNeeded(rc, "Bind ripple::AccountID");
@@ -119,6 +119,9 @@ public:
            // reinterpret_cast is needed here :'(
            auto const rc = bindBytes(reinterpret_cast<unsigned char const*>(value.data()), value.size());
            throwErrorIfNeeded(rc, "Bind string (as bytes)");
        } else if constexpr (std::is_convertible_v<DecayedType, Text>) {
            auto const rc = cass_statement_bind_string_n(*this, idx, value.text.c_str(), value.text.size());
            throwErrorIfNeeded(rc, "Bind string (as TEXT)");
        } else if constexpr (std::is_same_v<DecayedType, UintTupleType> ||
                             std::is_same_v<DecayedType, UintByteTupleType>) {
            auto const rc = cass_statement_bind_tuple(*this, idx, Tuple{std::forward<Type>(value)});
@@ -150,10 +153,10 @@ public:
 * This is used to produce Statement objects that can be executed.
 */
class PreparedStatement : public ManagedObject<CassPrepared const> {
    static constexpr auto deleter = [](CassPrepared const* ptr) { cass_prepared_free(ptr); };
    static constexpr auto kDELETER = [](CassPrepared const* ptr) { cass_prepared_free(ptr); };

public:
    /* implicit */ PreparedStatement(CassPrepared const* ptr) : ManagedObject{ptr, deleter}
    /* implicit */ PreparedStatement(CassPrepared const* ptr) : ManagedObject{ptr, kDELETER}
    {
    }
@@ -24,17 +24,17 @@
#include <cassandra.h>

namespace {
constexpr auto tupleDeleter = [](CassTuple* ptr) { cass_tuple_free(ptr); };
constexpr auto tupleIteratorDeleter = [](CassIterator* ptr) { cass_iterator_free(ptr); };
constexpr auto kTUPLE_DELETER = [](CassTuple* ptr) { cass_tuple_free(ptr); };
constexpr auto kTUPLE_ITERATOR_DELETER = [](CassIterator* ptr) { cass_iterator_free(ptr); };
} // namespace

namespace data::cassandra::impl {

/* implicit */ Tuple::Tuple(CassTuple* ptr) : ManagedObject{ptr, tupleDeleter}
/* implicit */ Tuple::Tuple(CassTuple* ptr) : ManagedObject{ptr, kTUPLE_DELETER}
{
}

/* implicit */ TupleIterator::TupleIterator(CassIterator* ptr) : ManagedObject{ptr, tupleIteratorDeleter}
/* implicit */ TupleIterator::TupleIterator(CassIterator* ptr) : ManagedObject{ptr, kTUPLE_ITERATOR_DELETER}
{
}
@@ -37,14 +37,14 @@
namespace data::cassandra::impl {

class Tuple : public ManagedObject<CassTuple> {
    static constexpr auto deleter = [](CassTuple* ptr) { cass_tuple_free(ptr); };
    static constexpr auto kDELETER = [](CassTuple* ptr) { cass_tuple_free(ptr); };

public:
    /* implicit */ Tuple(CassTuple* ptr);

    template <typename... Types>
    explicit Tuple(std::tuple<Types...>&& value)
        : ManagedObject{cass_tuple_new(std::tuple_size<std::tuple<Types...>>{}), deleter}
        : ManagedObject{cass_tuple_new(std::tuple_size<std::tuple<Types...>>{}), kDELETER}
    {
        std::apply(std::bind_front(&Tuple::bind<Types...>, this), std::move(value));
    }
@@ -10,8 +10,8 @@ target_sources(
        NetworkValidatedLedgers.cpp
        NFTHelpers.cpp
        Source.cpp
        MPTHelpers.cpp
        impl/AmendmentBlockHandler.cpp
        impl/ForwardingCache.cpp
        impl/ForwardingSource.cpp
        impl/GrpcSource.cpp
        impl/SubscriptionSource.cpp
@@ -20,6 +20,7 @@
#pragma once

#include "data/BackendInterface.hpp"
#include "data/LedgerCacheInterface.hpp"
#include "etl/CacheLoaderSettings.hpp"
#include "etl/impl/CacheLoader.hpp"
#include "etl/impl/CursorFromAccountProvider.hpp"
@@ -44,13 +45,13 @@ namespace etl {
 * @tparam CursorProviderType The type of the cursor provider to use
 * @tparam ExecutionContextType The type of the execution context to use
 */
template <typename CacheType, typename ExecutionContextType = util::async::CoroExecutionContext>
template <typename ExecutionContextType = util::async::CoroExecutionContext>
class CacheLoader {
    using CacheLoaderType = impl::CacheLoaderImpl<CacheType>;
    using CacheLoaderType = impl::CacheLoaderImpl<data::LedgerCacheInterface>;

    util::Logger log_{"ETL"};
    std::shared_ptr<BackendInterface> backend_;
    std::reference_wrapper<CacheType> cache_;
    std::reference_wrapper<data::LedgerCacheInterface> cache_;

    CacheLoaderSettings settings_;
    ExecutionContextType ctx_;
@@ -64,8 +65,12 @@ public:
     * @param backend The backend to use
     * @param cache The cache to load into
     */
    CacheLoader(util::Config const& config, std::shared_ptr<BackendInterface> const& backend, CacheType& cache)
        : backend_{backend}, cache_{cache}, settings_{make_CacheLoaderSettings(config)}, ctx_{settings_.numThreads}
    CacheLoader(
        util::config::ClioConfigDefinition const& config,
        std::shared_ptr<BackendInterface> const& backend,
        data::LedgerCacheInterface& cache
    )
        : backend_{backend}, cache_{cache}, settings_{makeCacheLoaderSettings(config)}, ctx_{settings_.numThreads}
    {
    }

@@ -126,7 +131,8 @@ public:
    void
    stop() noexcept
    {
        loader_->stop();
        if (loader_ != nullptr)
            loader_->stop();
    }

    /**
@@ -135,7 +141,8 @@ public:
    void
    wait() noexcept
    {
        loader_->wait();
        if (loader_ != nullptr)
            loader_->wait();
    }
};
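With the cache type erased behind the interface, construction no longer names a concrete cache in the template arguments. A hedged construction sketch (variable names and the `load` entry point are assumptions):

```
// Illustrative sketch only: the loader binds to LedgerCacheInterface&.
data::LedgerCache cache;
etl::CacheLoader<> loader{config, backend, cache};
loader.load(ledgerSeq);  // start loading at the given sequence
```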
@@ -19,11 +19,12 @@

#include "etl/CacheLoaderSettings.hpp"

#include "util/config/Config.hpp"
#include "util/newconfig/ConfigDefinition.hpp"

#include <boost/algorithm/string/predicate.hpp>

#include <cstddef>
#include <cstdint>
#include <string>

namespace etl {
@@ -47,31 +48,29 @@ CacheLoaderSettings::isDisabled() const
}

[[nodiscard]] CacheLoaderSettings
make_CacheLoaderSettings(util::Config const& config)
makeCacheLoaderSettings(util::config::ClioConfigDefinition const& config)
{
    CacheLoaderSettings settings;
    settings.numThreads = config.valueOr("io_threads", settings.numThreads);
    if (config.contains("cache")) {
        auto const cache = config.section("cache");
        // Given diff number to generate cursors
        settings.numCacheDiffs = cache.valueOr<size_t>("num_diffs", settings.numCacheDiffs);
        // Given cursors number fetching from diff
        settings.numCacheCursorsFromDiff = cache.valueOr<size_t>("num_cursors_from_diff", 0);
        // Given cursors number fetching from account
        settings.numCacheCursorsFromAccount = cache.valueOr<size_t>("num_cursors_from_account", 0);
    settings.numThreads = config.get<uint16_t>("io_threads");
    auto const cache = config.getObject("cache");
    // Given diff number to generate cursors
    settings.numCacheDiffs = cache.get<std::size_t>("num_diffs");
    // Given cursors number fetching from diff
    settings.numCacheCursorsFromDiff = cache.get<std::size_t>("num_cursors_from_diff");
    // Given cursors number fetching from account
    settings.numCacheCursorsFromAccount = cache.get<std::size_t>("num_cursors_from_account");

        settings.numCacheMarkers = cache.valueOr<size_t>("num_markers", settings.numCacheMarkers);
        settings.cachePageFetchSize = cache.valueOr<size_t>("page_fetch_size", settings.cachePageFetchSize);
    settings.numCacheMarkers = cache.get<std::size_t>("num_markers");
    settings.cachePageFetchSize = cache.get<std::size_t>("page_fetch_size");

    auto const entry = cache.get<std::string>("load");
    if (boost::iequals(entry, "sync"))
        settings.loadStyle = CacheLoaderSettings::LoadStyle::SYNC;
    if (boost::iequals(entry, "async"))
        settings.loadStyle = CacheLoaderSettings::LoadStyle::ASYNC;
    if (boost::iequals(entry, "none") or boost::iequals(entry, "no"))
        settings.loadStyle = CacheLoaderSettings::LoadStyle::NONE;

        if (auto entry = cache.maybeValue<std::string>("load"); entry) {
            if (boost::iequals(*entry, "sync"))
                settings.loadStyle = CacheLoaderSettings::LoadStyle::SYNC;
            if (boost::iequals(*entry, "async"))
                settings.loadStyle = CacheLoaderSettings::LoadStyle::ASYNC;
            if (boost::iequals(*entry, "none") or boost::iequals(*entry, "no"))
                settings.loadStyle = CacheLoaderSettings::LoadStyle::NONE;
        }
    }
    return settings;
}
@@ -19,7 +19,7 @@

#pragma once

#include "util/config/Config.hpp"
#include "util/newconfig/ConfigDefinition.hpp"

#include <cstddef>

@@ -64,6 +64,6 @@ struct CacheLoaderSettings {
 * @returns The CacheLoaderSettings object
 */
[[nodiscard]] CacheLoaderSettings
make_CacheLoaderSettings(util::Config const& config);
makeCacheLoaderSettings(util::config::ClioConfigDefinition const& config);

} // namespace etl
@@ -19,6 +19,7 @@

#pragma once

#include "data/LedgerCacheInterface.hpp"
#include "etl/SystemState.hpp"
#include "util/log/Logger.hpp"

@@ -31,10 +32,9 @@ namespace etl {
 *
 * @tparam CacheType The type of the cache to disable on corruption
 */
template <typename CacheType>
class CorruptionDetector {
    std::reference_wrapper<SystemState> state_;
    std::reference_wrapper<CacheType> cache_;
    std::reference_wrapper<data::LedgerCacheInterface> cache_;

    util::Logger log_{"ETL"};

@@ -45,7 +45,8 @@ public:
     * @param state The system state
     * @param cache The cache to disable on corruption
     */
    CorruptionDetector(SystemState& state, CacheType& cache) : state_{std::ref(state)}, cache_{std::ref(cache)}
    CorruptionDetector(SystemState& state, data::LedgerCacheInterface& cache)
        : state_{std::ref(state)}, cache_{std::ref(cache)}
    {
    }
@@ -20,14 +20,14 @@
#include "etl/ETLService.hpp"

#include "data/BackendInterface.hpp"
#include "data/LedgerCache.hpp"
#include "etl/CorruptionDetector.hpp"
#include "etl/NetworkValidatedLedgersInterface.hpp"
#include "etlng/LoadBalancerInterface.hpp"
#include "feed/SubscriptionManagerInterface.hpp"
#include "util/Assert.hpp"
#include "util/Constants.hpp"
#include "util/config/Config.hpp"
#include "util/log/Logger.hpp"
#include "util/newconfig/ConfigDefinition.hpp"

#include <boost/asio/io_context.hpp>
#include <xrpl/beast/core/CurrentThreadName.h>
@@ -44,6 +44,7 @@
#include <vector>

namespace etl {

// Database must be populated when this starts
std::optional<uint32_t>
ETLService::runETLPipeline(uint32_t startSequence, uint32_t numExtractors)
@@ -88,9 +89,9 @@ ETLService::runETLPipeline(uint32_t startSequence, uint32_t numExtractors)

    auto const end = std::chrono::system_clock::now();
    auto const lastPublishedSeq = ledgerPublisher_.getLastPublishedSequence();
    static constexpr auto NANOSECONDS_PER_SECOND = 1'000'000'000.0;
    static constexpr auto kNANOSECONDS_PER_SECOND = 1'000'000'000.0;
    LOG(log_.debug()) << "Extracted and wrote " << lastPublishedSeq.value_or(startSequence) - startSequence << " in "
                      << ((end - begin).count()) / NANOSECONDS_PER_SECOND;
                      << ((end - begin).count()) / kNANOSECONDS_PER_SECOND;

    state_.isWriting = false;

@@ -134,7 +135,7 @@ ETLService::monitor()
        }
    } catch (std::runtime_error const& e) {
        LOG(log_.fatal()) << "Failed to load initial ledger: " << e.what();
        amendmentBlockHandler_.onAmendmentBlock();
        amendmentBlockHandler_.notifyAmendmentBlocked();
        return;
    }

@@ -168,7 +169,7 @@ ETLService::publishNextSequence(uint32_t nextSequence)
    if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); rng && rng->maxSequence >= nextSequence) {
        ledgerPublisher_.publish(nextSequence, {});
        ++nextSequence;
    } else if (networkValidatedLedgers_->waitUntilValidatedByNetwork(nextSequence, util::MILLISECONDS_PER_SECOND)) {
    } else if (networkValidatedLedgers_->waitUntilValidatedByNetwork(nextSequence, util::kMILLISECONDS_PER_SECOND)) {
        LOG(log_.info()) << "Ledger with sequence = " << nextSequence << " has been validated by the network. "
                         << "Attempting to find in database and publish";

@@ -178,8 +179,8 @@ ETLService::publishNextSequence(uint32_t nextSequence)
        // database after the specified number of attempts. publishLedger()
        // waits one second between each attempt to read the ledger from the
        // database
        constexpr size_t timeoutSeconds = 10;
        bool const success = ledgerPublisher_.publish(nextSequence, timeoutSeconds);
        constexpr size_t kTIMEOUT_SECONDS = 10;
        bool const success = ledgerPublisher_.publish(nextSequence, kTIMEOUT_SECONDS);

        if (!success) {
            LOG(log_.warn()) << "Failed to publish ledger with sequence = " << nextSequence << " . Beginning ETL";
@@ -233,7 +234,7 @@ ETLService::monitorReadOnly()
            // if we can't, wait until it's validated by the network, or 1 second passes, whichever occurs
            // first. Even if we don't hear from rippled, if ledgers are being written to the db, we publish
            // them.
            networkValidatedLedgers_->waitUntilValidatedByNetwork(latestSequence, util::MILLISECONDS_PER_SECOND);
            networkValidatedLedgers_->waitUntilValidatedByNetwork(latestSequence, util::kMILLISECONDS_PER_SECOND);
        }
    }
}
@@ -262,11 +263,11 @@ ETLService::doWork()
}

ETLService::ETLService(
    util::Config const& config,
    util::config::ClioConfigDefinition const& config,
    boost::asio::io_context& ioc,
    std::shared_ptr<BackendInterface> backend,
    std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions,
    std::shared_ptr<LoadBalancerType> balancer,
    std::shared_ptr<etlng::LoadBalancerInterface> balancer,
    std::shared_ptr<NetworkValidatedLedgersInterface> ledgers
)
    : backend_(backend)
@@ -280,11 +281,11 @@ ETLService::ETLService(
{
    startSequence_ = config.maybeValue<uint32_t>("start_sequence");
    finishSequence_ = config.maybeValue<uint32_t>("finish_sequence");
    state_.isReadOnly = config.valueOr("read_only", static_cast<bool>(state_.isReadOnly));
    extractorThreads_ = config.valueOr<uint32_t>("extractor_threads", extractorThreads_);
    txnThreshold_ = config.valueOr<size_t>("txn_threshold", txnThreshold_);
    state_.isReadOnly = config.get<bool>("read_only");
    extractorThreads_ = config.get<uint32_t>("extractor_threads");
    txnThreshold_ = config.get<std::size_t>("txn_threshold");

    // This should probably be done in the backend factory but we don't have state available until here
    backend_->setCorruptionDetector(CorruptionDetector<data::LedgerCache>{state_, backend->cache()});
    backend_->setCorruptionDetector(CorruptionDetector{state_, backend->cache()});
}
} // namespace etl
@@ -20,7 +20,6 @@
|
||||
#pragma once
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/LedgerCache.hpp"
|
||||
#include "etl/CacheLoader.hpp"
|
||||
#include "etl/ETLState.hpp"
|
||||
#include "etl/LoadBalancer.hpp"
|
||||
@@ -33,7 +32,12 @@
|
||||
#include "etl/impl/LedgerLoader.hpp"
|
||||
#include "etl/impl/LedgerPublisher.hpp"
|
||||
#include "etl/impl/Transformer.hpp"
|
||||
#include "etlng/ETLService.hpp"
|
||||
#include "etlng/ETLServiceInterface.hpp"
|
||||
#include "etlng/LoadBalancer.hpp"
|
||||
#include "etlng/LoadBalancerInterface.hpp"
|
||||
#include "feed/SubscriptionManagerInterface.hpp"
|
||||
#include "util/Assert.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
|
||||
#include <boost/asio/io_context.hpp>
|
||||
@@ -58,6 +62,16 @@ struct NFTsData;
|
||||
*/
|
||||
namespace etl {
|
||||
|
||||
/**
|
||||
* @brief A tag class to help identify ETLService in templated code.
|
||||
*/
|
||||
struct ETLServiceTag {
|
||||
virtual ~ETLServiceTag() = default;
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
concept SomeETLService = std::derived_from<T, ETLServiceTag>;
|
||||
|
||||
/**
|
||||
* @brief This class is responsible for continuously extracting data from a p2p node, and writing that data to the
|
||||
* databases.
|
||||
@@ -71,16 +85,14 @@ namespace etl {
  * the others will fall back to monitoring/publishing. In this sense, this class dynamically transitions from monitoring
  * to writing and from writing to monitoring, based on the activity of other processes running on different machines.
  */
-class ETLService {
+class ETLService : public etlng::ETLServiceInterface, ETLServiceTag {
     // TODO: make these template parameters in ETLService
-    using LoadBalancerType = LoadBalancer;
     using DataPipeType = etl::impl::ExtractionDataPipe<org::xrpl::rpc::v1::GetLedgerResponse>;
-    using CacheType = data::LedgerCache;
-    using CacheLoaderType = etl::CacheLoader<CacheType>;
-    using LedgerFetcherType = etl::impl::LedgerFetcher<LoadBalancerType>;
+    using CacheLoaderType = etl::CacheLoader<>;
+    using LedgerFetcherType = etl::impl::LedgerFetcher;
     using ExtractorType = etl::impl::Extractor<DataPipeType, LedgerFetcherType>;
-    using LedgerLoaderType = etl::impl::LedgerLoader<LoadBalancerType, LedgerFetcherType>;
-    using LedgerPublisherType = etl::impl::LedgerPublisher<CacheType>;
+    using LedgerLoaderType = etl::impl::LedgerLoader<LedgerFetcherType>;
+    using LedgerPublisherType = etl::impl::LedgerPublisher;
     using AmendmentBlockHandlerType = etl::impl::AmendmentBlockHandler;
     using TransformerType =
         etl::impl::Transformer<DataPipeType, LedgerLoaderType, LedgerPublisherType, AmendmentBlockHandlerType>;
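The ETLServiceTag base and SomeETLService concept introduced above exist so templated code can accept "any ETL service" without naming the concrete class. A hypothetical usage sketch (FakeService and reportLastClose are illustrative, not part of this diff):

    // Hypothetical sketch: constraining a template on the new concept.
    #include <concepts>
    #include <cstdint>
    #include <iostream>

    namespace etl {

    struct ETLServiceTag {
        virtual ~ETLServiceTag() = default;
    };

    template <typename T>
    concept SomeETLService = std::derived_from<T, ETLServiceTag>;

    }  // namespace etl

    // Any type deriving from ETLServiceTag satisfies the concept.
    struct FakeService : etl::ETLServiceTag {
        std::uint32_t
        lastCloseAgeSeconds() const
        {
            return 3;
        }
    };

    template <etl::SomeETLService ServiceType>
    void
    reportLastClose(ServiceType const& service)
    {
        std::cout << "last close " << service.lastCloseAgeSeconds() << "s ago\n";
    }

    int main()
    {
        FakeService const svc;
        reportLastClose(svc);   // OK: FakeService derives from ETLServiceTag
        // reportLastClose(42); // would not compile: int fails SomeETLService
    }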
@@ -88,7 +100,7 @@ class ETLService {
     util::Logger log_{"ETL"};

     std::shared_ptr<BackendInterface> backend_;
-    std::shared_ptr<LoadBalancerType> loadBalancer_;
+    std::shared_ptr<etlng::LoadBalancerInterface> loadBalancer_;
     std::shared_ptr<NetworkValidatedLedgersInterface> networkValidatedLedgers_;

     std::uint32_t extractorThreads_ = 1;
@@ -119,14 +131,19 @@ public:
      * @param ledgers The network validated ledgers datastructure
      */
     ETLService(
-        util::Config const& config,
+        util::config::ClioConfigDefinition const& config,
         boost::asio::io_context& ioc,
         std::shared_ptr<BackendInterface> backend,
         std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions,
-        std::shared_ptr<LoadBalancerType> balancer,
+        std::shared_ptr<etlng::LoadBalancerInterface> balancer,
         std::shared_ptr<NetworkValidatedLedgersInterface> ledgers
     );

+    /**
+     * @brief Move constructor is deleted because ETL service shares its fields by reference
+     */
+    ETLService(ETLService&&) = delete;
+
     /**
      * @brief A factory function to spawn new ETLService instances.
      *
@@ -140,29 +157,52 @@ public:
      * @param ledgers The network validated ledgers datastructure
      * @return A shared pointer to a new instance of ETLService
      */
-    static std::shared_ptr<ETLService>
-    make_ETLService(
-        util::Config const& config,
+    static std::shared_ptr<etlng::ETLServiceInterface>
+    makeETLService(
+        util::config::ClioConfigDefinition const& config,
         boost::asio::io_context& ioc,
         std::shared_ptr<BackendInterface> backend,
         std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions,
-        std::shared_ptr<LoadBalancerType> balancer,
+        std::shared_ptr<etlng::LoadBalancerInterface> balancer,
         std::shared_ptr<NetworkValidatedLedgersInterface> ledgers
     )
     {
-        auto etl = std::make_shared<ETLService>(config, ioc, backend, subscriptions, balancer, ledgers);
-        etl->run();
-
-        return etl;
+        std::shared_ptr<etlng::ETLServiceInterface> ret;
+
+        if (config.get<bool>("__ng_etl")) {
+            ASSERT(
+                std::dynamic_pointer_cast<etlng::LoadBalancer>(balancer),
+                "LoadBalancer type must be etlng::LoadBalancer"
+            );
+            ret = std::make_shared<etlng::ETLService>(config, backend, subscriptions, balancer, ledgers);
+        } else {
+            ASSERT(
+                std::dynamic_pointer_cast<etl::LoadBalancer>(balancer), "LoadBalancer type must be etl::LoadBalancer"
+            );
+            ret = std::make_shared<etl::ETLService>(config, ioc, backend, subscriptions, balancer, ledgers);
+        }
+
+        ret->run();
+        return ret;
     }

     /**
      * @brief Stops components and joins worker thread.
      */
-    ~ETLService()
+    ~ETLService() override
     {
         LOG(log_.info()) << "onStop called";
         LOG(log_.debug()) << "Stopping Reporting ETL";
         if (not state_.isStopping)
             stop();
     }

     /**
      * @brief Stop the ETL service.
      * @note This method blocks until the ETL service has stopped.
      */
     void
     stop() override
     {
         LOG(log_.info()) << "Stop called";

         state_.isStopping = true;
         cacheLoader_.stop();
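The factory above now hides two implementations behind etlng::ETLServiceInterface and picks one at runtime from the __ng_etl flag. The shape of that dispatch, reduced to a standalone sketch (all type names below are stand-ins, not clio's classes):

    // Standalone sketch of the flag-based factory dispatch.
    #include <iostream>
    #include <memory>

    struct ServiceInterface {
        virtual ~ServiceInterface() = default;
        virtual void run() = 0;
    };

    struct LegacyService : ServiceInterface {
        void run() override { std::cout << "legacy ETL pipeline\n"; }
    };

    struct NextGenService : ServiceInterface {
        void run() override { std::cout << "etlng pipeline\n"; }
    };

    std::shared_ptr<ServiceInterface>
    makeService(bool useNg)  // useNg mirrors the "__ng_etl" config flag
    {
        std::shared_ptr<ServiceInterface> ret;
        if (useNg) {
            ret = std::make_shared<NextGenService>();
        } else {
            ret = std::make_shared<LegacyService>();
        }
        ret->run();  // started before being handed back, as in makeETLService
        return ret;
    }

    int main()
    {
        auto const svc = makeService(false);  // prints "legacy ETL pipeline"
    }

Callers never learn which concrete service they got; they only hold the interface, which is what lets the legacy and etlng pipelines coexist behind one entry point.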
@@ -179,7 +219,7 @@ public:
      * @return Time passed since last ledger close
      */
     std::uint32_t
-    lastCloseAgeSeconds() const
+    lastCloseAgeSeconds() const override
     {
         return ledgerPublisher_.lastCloseAgeSeconds();
     }
@@ -190,7 +230,7 @@ public:
      * @return true if currently amendment blocked; false otherwise
      */
     bool
-    isAmendmentBlocked() const
+    isAmendmentBlocked() const override
     {
         return state_.isAmendmentBlocked;
     }
@@ -201,7 +241,7 @@ public:
      * @return true if corruption of DB was detected and cache was stopped.
      */
     bool
-    isCorruptionDetected() const
+    isCorruptionDetected() const override
     {
         return state_.isCorruptionDetected;
     }
@@ -212,7 +252,7 @@ public:
      * @return The state of ETL as a JSON object
      */
     boost::json::object
-    getInfo() const
+    getInfo() const override
     {
         boost::json::object result;

@@ -230,11 +270,17 @@ public:
      * @return The etl nodes' state, nullopt if etl nodes are not connected
      */
     std::optional<etl::ETLState>
-    getETLState() const noexcept
+    getETLState() const noexcept override
     {
         return loadBalancer_->getETLState();
     }

+    /**
+     * @brief Start all components to run ETL service.
+     */
+    void
+    run() override;
+
 private:
     /**
      * @brief Run the ETL pipeline.
@@ -301,12 +347,6 @@ private:
         return numMarkers_;
     }

-    /**
-     * @brief Start all components to run ETL service.
-     */
-    void
-    run();
-
     /**
      * @brief Spawn the worker thread and start monitoring.
      */
@@ -31,15 +31,12 @@

 namespace etl {

-std::optional<ETLState>
-tag_invoke(boost::json::value_to_tag<std::optional<ETLState>>, boost::json::value const& jv)
+ETLState
+tag_invoke(boost::json::value_to_tag<ETLState>, boost::json::value const& jv)
 {
     ETLState state;
     auto const& jsonObject = jv.as_object();

-    if (jsonObject.contains(JS(error)))
-        return std::nullopt;
-
     if (jsonObject.contains(JS(result)) && jsonObject.at(JS(result)).as_object().contains(JS(info))) {
         auto const rippledInfo = jsonObject.at(JS(result)).as_object().at(JS(info)).as_object();
         if (rippledInfo.contains(JS(network_id)))
@@ -20,12 +20,14 @@
 #pragma once

 #include "data/BackendInterface.hpp"
+#include "rpc/JS.hpp"

 #include <boost/json.hpp>
+#include <boost/json/conversion.hpp>
 #include <boost/json/object.hpp>
 #include <boost/json/value.hpp>
 #include <boost/json/value_to.hpp>
 #include <xrpl/protocol/jss.h>

 #include <cstdint>
 #include <optional>
@@ -54,8 +56,9 @@ struct ETLState {
             return std::nullopt;
         });

-        if (serverInfoRippled)
-            return boost::json::value_to<std::optional<ETLState>>(boost::json::value(*serverInfoRippled));
+        if (serverInfoRippled && not serverInfoRippled->contains(JS(error))) {
+            return boost::json::value_to<ETLState>(boost::json::value(*serverInfoRippled));
+        }

         return std::nullopt;
     }
@@ -67,7 +70,7 @@ struct ETLState {
  * @param jv The json value to convert
  * @return The ETLState
  */
-std::optional<ETLState>
-tag_invoke(boost::json::value_to_tag<std::optional<ETLState>>, boost::json::value const& jv);
+ETLState
+tag_invoke(boost::json::value_to_tag<ETLState>, boost::json::value const& jv);

 } // namespace etl
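For context on why this change works: boost::json::value_to<T> locates a converter by finding a tag_invoke overload via argument-dependent lookup in T's namespace, and narrowing the overload from std::optional<ETLState> to ETLState is safe because the error case is now rejected before conversion. A minimal demonstration with a toy type (demo::State is not clio's ETLState; build against Boost.JSON):

    // Boost.JSON customization point in miniature: value_to<State> finds
    // this tag_invoke overload through ADL.
    #include <boost/json.hpp>

    #include <cstdint>
    #include <iostream>

    namespace demo {

    struct State {
        std::uint32_t networkId = 0;
    };

    State
    tag_invoke(boost::json::value_to_tag<State>, boost::json::value const& jv)
    {
        State state;
        auto const& obj = jv.as_object();
        if (obj.contains("network_id"))
            state.networkId = obj.at("network_id").to_number<std::uint32_t>();
        return state;
    }

    }  // namespace demo

    int main()
    {
        auto const jv = boost::json::parse(R"({"network_id": 21337})");
        // The error check now lives at the call site, as in the diff above.
        if (not jv.as_object().contains("error")) {
            auto const state = boost::json::value_to<demo::State>(jv);
            std::cout << state.networkId << '\n';  // prints 21337
        }
    }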
src/etl/LedgerFetcherInterface.hpp (new file, 65 lines)
@@ -0,0 +1,65 @@
+//------------------------------------------------------------------------------
+/*
+    This file is part of clio: https://github.com/XRPLF/clio
+    Copyright (c) 2024, the clio developers.
+
+    Permission to use, copy, modify, and distribute this software for any
+    purpose with or without fee is hereby granted, provided that the above
+    copyright notice and this permission notice appear in all copies.
+
+    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+//==============================================================================
+
+/** @file */
+#pragma once
+
+#include <xrpl/proto/org/xrpl/rpc/v1/get_ledger.pb.h>
+#include <xrpl/proto/org/xrpl/rpc/v1/ledger.pb.h>
+
+#include <cstdint>
+#include <optional>
+
+namespace etl {
+
+/**
+ * @brief An interface for LedgerFetcher
+ */
+struct LedgerFetcherInterface {
+    using GetLedgerResponseType = org::xrpl::rpc::v1::GetLedgerResponse;
+    using OptionalGetLedgerResponseType = std::optional<GetLedgerResponseType>;
+
+    virtual ~LedgerFetcherInterface() = default;
+
+    /**
+     * @brief Extract data for a particular ledger from an ETL source
+     *
+     * This function continuously tries to extract the specified ledger (using all available ETL sources) until the
+     * extraction succeeds, or the server shuts down.
+     *
+     * @param seq sequence of the ledger to extract
+     * @return Ledger header and transaction+metadata blobs; empty optional if the server is shutting down
+     */
+    [[nodiscard]] virtual OptionalGetLedgerResponseType
+    fetchData(uint32_t seq) = 0;
+
+    /**
+     * @brief Extract diff data for a particular ledger from an ETL source.
+     *
+     * This function continuously tries to extract the specified ledger (using all available ETL sources) until the
+     * extraction succeeds, or the server shuts down.
+     *
+     * @param seq sequence of the ledger to extract
+     * @return Ledger data diff between the sequence and its parent; empty optional if the server is shutting down
+     */
+    [[nodiscard]] virtual OptionalGetLedgerResponseType
+    fetchDataAndDiff(uint32_t seq) = 0;
+};
+
+} // namespace etl
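Pulling the fetcher behind an interface makes components that consume it testable in isolation. A hypothetical test double follows (not from the diff; it assumes the protobuf-generated GetLedgerResponse type is on the include path, as the header above does):

    // Hypothetical test double: satisfies LedgerFetcherInterface without
    // talking to a rippled node.
    #include "etl/LedgerFetcherInterface.hpp"

    #include <cstdint>
    #include <optional>

    struct FakeLedgerFetcher : etl::LedgerFetcherInterface {
        std::optional<uint32_t> lastRequested;  // observation point for tests

        [[nodiscard]] OptionalGetLedgerResponseType
        fetchData(uint32_t seq) override
        {
            lastRequested = seq;
            return GetLedgerResponseType{};  // default-constructed canned response
        }

        [[nodiscard]] OptionalGetLedgerResponseType
        fetchDataAndDiff(uint32_t seq) override
        {
            return fetchData(seq);  // same canned payload for this sketch
        }
    };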
@@ -23,17 +23,27 @@
 #include "etl/ETLState.hpp"
 #include "etl/NetworkValidatedLedgersInterface.hpp"
 #include "etl/Source.hpp"
+#include "etlng/LoadBalancerInterface.hpp"
 #include "feed/SubscriptionManagerInterface.hpp"
 #include "rpc/Errors.hpp"
 #include "util/Assert.hpp"
+#include "util/CoroutineGroup.hpp"
+#include "util/Profiler.hpp"
 #include "util/Random.hpp"
+#include "util/ResponseExpirationCache.hpp"
 #include "util/log/Logger.hpp"
+#include "util/newconfig/ArrayView.hpp"
+#include "util/newconfig/ConfigDefinition.hpp"
+#include "util/newconfig/ObjectView.hpp"
+#include "util/prometheus/Label.hpp"
+#include "util/prometheus/Prometheus.hpp"

 #include <boost/asio/io_context.hpp>
 #include <boost/asio/spawn.hpp>
 #include <boost/json/array.hpp>
 #include <boost/json/object.hpp>
 #include <boost/json/value.hpp>
 #include <boost/json/value_to.hpp>
 #include <fmt/core.h>

 #include <algorithm>
@@ -49,13 +59,14 @@
 #include <utility>
 #include <vector>

 using namespace util;
+using namespace util::config;
+using util::prometheus::Labels;

 namespace etl {

-std::shared_ptr<LoadBalancer>
-LoadBalancer::make_LoadBalancer(
-    Config const& config,
+std::shared_ptr<etlng::LoadBalancerInterface>
+LoadBalancer::makeLoadBalancer(
+    ClioConfigDefinition const& config,
     boost::asio::io_context& ioc,
     std::shared_ptr<BackendInterface> backend,
     std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions,
@@ -69,28 +80,59 @@ LoadBalancer::make_LoadBalancer(
 }

 LoadBalancer::LoadBalancer(
-    Config const& config,
+    ClioConfigDefinition const& config,
     boost::asio::io_context& ioc,
     std::shared_ptr<BackendInterface> backend,
     std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions,
    std::shared_ptr<NetworkValidatedLedgersInterface> validatedLedgers,
     SourceFactory sourceFactory
 )
+    : forwardingCounters_{
+          .successDuration = PrometheusService::counterInt(
+              "forwarding_duration_milliseconds_counter",
+              Labels({util::prometheus::Label{"status", "success"}}),
+              "The duration of processing successful forwarded requests"
+          ),
+          .failDuration = PrometheusService::counterInt(
+              "forwarding_duration_milliseconds_counter",
+              Labels({util::prometheus::Label{"status", "fail"}}),
+              "The duration of processing failed forwarded requests"
+          ),
+          .retries = PrometheusService::counterInt(
+              "forwarding_retries_counter",
+              Labels(),
+              "The number of retries before a forwarded request was successful. Initial attempt excluded"
+          ),
+          .cacheHit = PrometheusService::counterInt(
+              "forwarding_cache_hit_counter",
+              Labels(),
+              "The number of requests that we served from the cache"
+          ),
+          .cacheMiss = PrometheusService::counterInt(
+              "forwarding_cache_miss_counter",
+              Labels(),
+              "The number of requests that were not served from the cache"
+          )
+      }
 {
-    auto const forwardingCacheTimeout = config.valueOr<float>("forwarding.cache_timeout", 0.f);
+    auto const forwardingCacheTimeout = config.get<float>("forwarding.cache_timeout");
     if (forwardingCacheTimeout > 0.f) {
-        forwardingCache_ = impl::ForwardingCache{Config::toMilliseconds(forwardingCacheTimeout)};
+        forwardingCache_ = util::ResponseExpirationCache{
+            util::config::ClioConfigDefinition::toMilliseconds(forwardingCacheTimeout),
+            {"server_info", "server_state", "server_definitions", "fee", "ledger_closed"}
+        };
     }

     static constexpr std::uint32_t MAX_DOWNLOAD = 256;
-    if (auto value = config.maybeValue<uint32_t>("num_markers"); value) {
-        ASSERT(*value > 0 and *value <= MAX_DOWNLOAD, "'num_markers' value in config must be in range 1-256");
-        downloadRanges_ = *value;
+    auto const numMarkers = config.getValueView("num_markers");
+    if (numMarkers.hasValue()) {
+        auto const value = numMarkers.asIntType<uint32_t>();
+        downloadRanges_ = value;
     } else if (backend->fetchLedgerRange()) {
         downloadRanges_ = 4;
     }

-    auto const allowNoEtl = config.valueOr("allow_no_etl", false);
+    auto const allowNoEtl = config.get<bool>("allow_no_etl");

     auto const checkOnETLFailure = [this, allowNoEtl](std::string const& log) {
         LOG(log_.warn()) << log;
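Two things land in this constructor: Prometheus counters for forwarding, and a response cache keyed by command name with a fixed TTL. The cache's observable behavior (put/get/invalidate with expiry) reduced to a single-threaded toy; the real util::ResponseExpirationCache is thread-safe and this sketch is not:

    // Toy model of the expiration cache's behavior: only whitelisted
    // commands are stored, and entries expire after a fixed TTL.
    #include <chrono>
    #include <map>
    #include <optional>
    #include <set>
    #include <string>
    #include <utility>

    class ExpirationCacheSketch {
        using Clock = std::chrono::steady_clock;

        struct Entry {
            std::string response;  // stands in for boost::json::object
            Clock::time_point storedAt;
        };

        std::chrono::milliseconds ttl_;
        std::set<std::string> cacheable_;
        std::map<std::string, Entry> entries_;

    public:
        ExpirationCacheSketch(std::chrono::milliseconds ttl, std::set<std::string> cacheable)
            : ttl_{ttl}, cacheable_{std::move(cacheable)}
        {
        }

        void
        put(std::string const& cmd, std::string response)
        {
            if (cacheable_.contains(cmd))
                entries_.insert_or_assign(cmd, Entry{std::move(response), Clock::now()});
        }

        std::optional<std::string>
        get(std::string const& cmd) const
        {
            auto const it = entries_.find(cmd);
            if (it == entries_.end() or Clock::now() - it->second.storedAt > ttl_)
                return std::nullopt;
            return it->second.response;
        }

        void
        invalidate()
        {
            entries_.clear();
        }
    };

This mirrors how the diff uses the cache: get(cmd) before forwarding, put(cmd, response) on success, and invalidate() when a source signals that cached answers may be stale.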
@@ -101,20 +143,25 @@ LoadBalancer::LoadBalancer(
         }
     };

-    auto const forwardingTimeout = Config::toMilliseconds(config.valueOr<float>("forwarding.request_timeout", 10.));
-    for (auto const& entry : config.array("etl_sources")) {
+    auto const forwardingTimeout =
+        ClioConfigDefinition::toMilliseconds(config.get<float>("forwarding.request_timeout"));
+    auto const etlArray = config.getArray("etl_sources");
+    for (auto it = etlArray.begin<ObjectView>(); it != etlArray.end<ObjectView>(); ++it) {
         auto source = sourceFactory(
-            entry,
+            *it,
             ioc,
             backend,
             subscriptions,
             validatedLedgers,
             forwardingTimeout,
             [this]() {
-                if (not hasForwardingSource_)
+                if (not hasForwardingSource_.lock().get())
                     chooseForwardingSource();
             },
             [this](bool wasForwarding) {
                 if (wasForwarding)
                     chooseForwardingSource();
             },
             [this]() { chooseForwardingSource(); },
             [this]() {
                 if (forwardingCache_.has_value())
                     forwardingCache_->invalidate();
@@ -161,12 +208,12 @@ LoadBalancer::~LoadBalancer()
 }

 std::vector<std::string>
-LoadBalancer::loadInitialLedger(uint32_t sequence, bool cacheOnly, std::chrono::steady_clock::duration retryAfter)
+LoadBalancer::loadInitialLedger(uint32_t sequence, std::chrono::steady_clock::duration retryAfter)
 {
     std::vector<std::string> response;
     execute(
-        [this, &response, &sequence, cacheOnly](auto& source) {
-            auto [data, res] = source->loadInitialLedger(sequence, downloadRanges_, cacheOnly);
+        [this, &response, &sequence](auto& source) {
+            auto [data, res] = source->loadInitialLedger(sequence, downloadRanges_);

             if (!res) {
                 LOG(log_.error()) << "Failed to download initial ledger."
@@ -221,27 +268,37 @@ LoadBalancer::forwardToRippled(
     boost::asio::yield_context yield
 )
 {
+    if (not request.contains("command"))
+        return std::unexpected{rpc::ClioError::RpcCommandIsMissing};
+
+    auto const cmd = boost::json::value_to<std::string>(request.at("command"));
     if (forwardingCache_) {
-        if (auto cachedResponse = forwardingCache_->get(request); cachedResponse) {
+        if (auto cachedResponse = forwardingCache_->get(cmd); cachedResponse) {
+            ++forwardingCounters_.cacheHit.get();
             return std::move(cachedResponse).value();
         }
     }
+    ++forwardingCounters_.cacheMiss.get();

     ASSERT(not sources_.empty(), "ETL sources must be configured to forward requests.");
     std::size_t sourceIdx = util::Random::uniform(0ul, sources_.size() - 1);

     auto numAttempts = 0u;

-    auto xUserValue = isAdmin ? ADMIN_FORWARDING_X_USER_VALUE : USER_FORWARDING_X_USER_VALUE;
+    auto xUserValue = isAdmin ? kADMIN_FORWARDING_X_USER_VALUE : kUSER_FORWARDING_X_USER_VALUE;

     std::optional<boost::json::object> response;
-    rpc::ClioError error = rpc::ClioError::etlCONNECTION_ERROR;
+    rpc::ClioError error = rpc::ClioError::EtlConnectionError;
     while (numAttempts < sources_.size()) {
-        auto res = sources_[sourceIdx]->forwardToRippled(request, clientIp, xUserValue, yield);
+        auto [res, duration] =
+            util::timed([&]() { return sources_[sourceIdx]->forwardToRippled(request, clientIp, xUserValue, yield); });
         if (res) {
+            forwardingCounters_.successDuration.get() += duration;
             response = std::move(res).value();
             break;
         }
+        forwardingCounters_.failDuration.get() += duration;
+        ++forwardingCounters_.retries.get();
         error = std::max(error, res.error()); // Choose the best result between all sources

         sourceIdx = (sourceIdx + 1) % sources_.size();
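The rewritten loop times every attempt (feeding the success/fail duration counters), rotates round-robin from a random starting source, and keeps the most meaningful error across attempts. Its control flow as a standalone sketch; the error enum and source type below are toys, where the real code returns boost::json::object and rpc::ClioError:

    // Standalone sketch of the retry loop's control flow (C++23 for std::expected).
    #include <algorithm>
    #include <chrono>
    #include <cstddef>
    #include <expected>
    #include <functional>
    #include <string>
    #include <vector>

    enum class Error { ConnectionError = 0, RequestError = 1 };  // ordered by usefulness

    using Source = std::function<std::expected<std::string, Error>()>;

    std::expected<std::string, Error>
    forwardWithRetries(std::vector<Source> const& sources, std::size_t startIdx)
    {
        auto error = Error::ConnectionError;
        auto idx = startIdx % sources.size();

        for (std::size_t attempt = 0; attempt < sources.size(); ++attempt) {
            auto const begin = std::chrono::steady_clock::now();
            auto res = sources[idx]();
            auto const duration = std::chrono::steady_clock::now() - begin;
            (void)duration;  // the real code adds this to success/fail counters

            if (res)
                return res;

            error = std::max(error, res.error());  // keep the best error seen
            idx = (idx + 1) % sources.size();      // round-robin to the next source
        }
        return std::unexpected{error};
    }

Trying each source exactly once bounds the latency of a doomed request to one pass over the configured sources, while the max over errors ensures the caller sees the most specific failure rather than whichever source happened to answer last.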
@@ -250,7 +307,7 @@ LoadBalancer::forwardToRippled(

     if (response) {
         if (forwardingCache_ and not response->contains("error"))
-            forwardingCache_->put(request, *response);
+            forwardingCache_->put(cmd, *response);
         return std::move(response).value();
     }

@@ -319,14 +376,26 @@ LoadBalancer::getETLState() noexcept
     return etlState_;
 }

+void
+LoadBalancer::stop(boost::asio::yield_context yield)
+{
+    util::CoroutineGroup group{yield};
+    std::ranges::for_each(sources_, [&group, yield](auto& source) {
+        group.spawn(yield, [&source](boost::asio::yield_context innerYield) { source->stop(innerYield); });
+    });
+    group.asyncWait(yield);
+}
+
 void
 LoadBalancer::chooseForwardingSource()
 {
-    hasForwardingSource_ = false;
     LOG(log_.info()) << "Choosing a new source to forward subscriptions";
+    auto hasForwardingSourceLock = hasForwardingSource_.lock();
+    hasForwardingSourceLock.get() = false;
     for (auto& source : sources_) {
-        if (not hasForwardingSource_ and source->isConnected()) {
+        if (not hasForwardingSourceLock.get() and source->isConnected()) {
             source->setForwarding(true);
-            hasForwardingSource_ = true;
+            hasForwardingSourceLock.get() = true;
         } else {
             source->setForwarding(false);
         }
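The hasForwardingSource_ change swaps a bare boolean for a flag reachable only through a lock handle, which is why every access above goes through .lock().get(): holding the handle for the whole loop makes the reset-and-rescan atomic. A generic sketch of that wrapper shape (the Guarded type is illustrative; clio's actual synchronized wrapper may differ):

    // Illustrative lock-guarded value: reads and writes are only possible
    // while the handle (and therefore the mutex) is held.
    #include <mutex>

    template <typename T>
    class Guarded {
        std::mutex mtx_;
        T value_{};

    public:
        class Handle {
            std::unique_lock<std::mutex> lock_;
            T* value_;

        public:
            Handle(std::mutex& mtx, T& value) : lock_{mtx}, value_{&value} {}

            T&
            get()
            {
                return *value_;
            }
        };

        Handle
        lock()
        {
            return Handle{mtx_, value_};
        }
    };

    int main()
    {
        Guarded<bool> hasForwardingSource;
        {
            auto handle = hasForwardingSource.lock();  // mutex held while handle lives
            handle.get() = true;                       // synchronized write
        }
        if (hasForwardingSource.lock().get()) {        // synchronized read, as in the diff
            // ... keep or choose the forwarding source
        }
    }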
Some files were not shown because too many files have changed in this diff.