Mirror of https://github.com/XRPLF/clio.git (synced 2025-11-27 15:15:52 +00:00)

Compare commits: 2.6.0-rc2 ... release/2.

90 Commits
| SHA1 |
|---|
| 4da4b49eda |
| e3170203de |
| 8b280e7742 |
| 7ed30bc40d |
| ac608004bc |
| 6ab92ca0a6 |
| 77387d8f9f |
| b62cfe949f |
| 56f074e6ee |
| f0becbbec3 |
| 2075171ca5 |
| 3a4249dcc3 |
| 8742dcab3d |
| 1ef7ec3464 |
| 20e7e275cf |
| addb17ae7d |
| 346c9f9bdf |
| c6308ce036 |
| d023ed2be2 |
| 6236941140 |
| 59b7b249ff |
| 893daab8f8 |
| be9f0615fa |
| 093606106c |
| 224e835e7c |
| 138a2d3440 |
| c0eedd273d |
| a5b1dcfe55 |
| c973e99f4b |
| 51dbd09ef6 |
| 1ecc6a6040 |
| 1d3e34b392 |
| 2f8a704071 |
| fcc5a5425e |
| 316126746b |
| 6d79dd6b2b |
| d6ab2cc1e4 |
| 13baa42993 |
| b485fdc18d |
| 7e4e12385f |
| c117f470f2 |
| 30e88fe72c |
| cecf082952 |
| d5b95c2e61 |
| 8375eb1766 |
| be6aaffa7a |
| 104ef6a9dc |
| eed757e0c4 |
| 3b61a85ba0 |
| 7c8152d76f |
| 0425d34b55 |
| 8c8a7ff3b8 |
| 16493abd0d |
| 3dd72d94e1 |
| 5e914abf29 |
| 9603968808 |
| 0124c06a53 |
| 1bfdd0dd89 |
| f41d574204 |
| d0ec60381b |
| 0b19a42a96 |
| 030f4f1b22 |
| 2de49b4d33 |
| 3de2bf2910 |
| 7538efb01e |
| 685f611434 |
| 2528dee6b6 |
| b2be4b51d1 |
| b4e40558c9 |
| b361e3a108 |
| a4b47da57a |
| 2ed1a45ef1 |
| dabaa5bf80 |
| b4fb3e42b8 |
| aa64bb7b6b |
| dc5f8b9c23 |
| 7300529484 |
| 33802f475f |
| 213752862c |
| a189eeb952 |
| 3c1811233a |
| 693ed2061c |
| 1e2f4b5ca2 |
| 1da8464d75 |
| d48fb168c6 |
| 92595f95a0 |
| fc9de87136 |
| 67f5ca445f |
| 897c255b8c |
| aa9eea0d99 |
@@ -49,6 +49,7 @@ IndentFunctionDeclarationAfterType: false
 IndentWidth: 4
 IndentWrappedFunctionNames: false
 IndentRequiresClause: true
 InsertNewlineAtEOF: true
+RequiresClausePosition: OwnLine
 KeepEmptyLinesAtTheStartOfBlocks: false
 MaxEmptyLinesToKeep: 1
@@ -54,7 +54,7 @@ format:
   _help_max_pargs_hwrap:
   - If a positional argument group contains more than this many
   - arguments, then force it to a vertical layout.
-  max_pargs_hwrap: 6
+  max_pargs_hwrap: 5
   _help_max_rows_cmdline:
   - If a cmdline positional group consumes more than this many
  - lines without nesting, then invalidate the layout (and nest)
31 .github/actions/build-clio/action.yml (vendored, new file)

@@ -0,0 +1,31 @@
+name: Build clio
+description: Build clio in build directory
+
+inputs:
+  targets:
+    description: Space-separated build target names
+    default: all
+  nproc_subtract:
+    description: The number of processors to subtract when calculating parallelism.
+    required: true
+    default: "0"
+
+runs:
+  using: composite
+  steps:
+    - name: Get number of processors
+      uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
+      id: nproc
+      with:
+        subtract: ${{ inputs.nproc_subtract }}
+
+    - name: Build targets
+      shell: bash
+      env:
+        CMAKE_TARGETS: ${{ inputs.targets }}
+      run: |
+        cd build
+        cmake \
+          --build . \
+          --parallel "${{ steps.nproc.outputs.nproc }}" \
+          --target ${CMAKE_TARGETS}
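This composite action is what the workflows later in this diff call (for example `reusable-build.yml` and the check-libXRPL job). A minimal sketch of a calling step, assuming conan and cmake have already configured the `build/` directory; the job scaffold and the `nproc_subtract` value here are illustrative, not taken from the diff:

```yaml
jobs:
  build:
    runs-on: heavy
    steps:
      # ... checkout, conan install, cmake configure ...

      - name: Build Clio
        uses: ./.github/actions/build-clio
        with:
          targets: clio_tests clio_integration_tests
          nproc_subtract: "2" # illustrative: keep two processors free for the runner itself
```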
@@ -34,25 +34,25 @@ runs:
   steps:
     - name: Login to DockerHub
       if: ${{ inputs.push_image == 'true' && inputs.dockerhub_repo != '' }}
-      uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
+      uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
       with:
         username: ${{ env.DOCKERHUB_USER }}
         password: ${{ env.DOCKERHUB_PW }}

     - name: Login to GitHub Container Registry
       if: ${{ inputs.push_image == 'true' }}
-      uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
+      uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
       with:
         registry: ghcr.io
         username: ${{ github.repository_owner }}
         password: ${{ env.GITHUB_TOKEN }}

-    - uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
+    - uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0
       with:
         cache-image: false
     - uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

-    - uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v5.8.0
+    - uses: docker/metadata-action@318604b99e75e41977312d83839a89be02ca4893 # v5.9.0
       id: meta
       with:
         images: ${{ inputs.images }}
29 .github/actions/build_clio/action.yml (vendored, deleted)

@@ -1,29 +0,0 @@
-name: Build clio
-description: Build clio in build directory
-
-inputs:
-  targets:
-    description: Space-separated build target names
-    default: all
-  subtract_threads:
-    description: An option for the action get_number_of_threads. See get_number_of_threads
-    required: true
-    default: "0"
-
-runs:
-  using: composite
-  steps:
-    - name: Get number of threads
-      uses: ./.github/actions/get_number_of_threads
-      id: number_of_threads
-      with:
-        subtract_threads: ${{ inputs.subtract_threads }}
-
-    - name: Build targets
-      shell: bash
-      run: |
-        cd build
-        cmake \
-          --build . \
-          --parallel "${{ steps.number_of_threads.outputs.threads_number }}" \
-          --target ${{ inputs.targets }}
41 .github/actions/cache-key/action.yml (vendored, new file)

@@ -0,0 +1,41 @@
+name: Cache key
+description: Generate cache key for ccache
+
+inputs:
+  conan_profile:
+    description: Conan profile name
+    required: true
+  build_type:
+    description: Current build type (e.g. Release, Debug)
+    required: true
+    default: Release
+  code_coverage:
+    description: Whether code coverage is on
+    required: true
+    default: "false"
+
+outputs:
+  key:
+    description: Generated cache key for ccache
+    value: ${{ steps.key_without_commit.outputs.key }}-${{ steps.git_common_ancestor.outputs.commit }}
+  restore_keys:
+    description: Cache restore keys for fallback
+    value: ${{ steps.key_without_commit.outputs.key }}
+
+runs:
+  using: composite
+  steps:
+    - name: Find common commit
+      id: git_common_ancestor
+      uses: ./.github/actions/git-common-ancestor
+
+    - name: Set cache key without commit
+      id: key_without_commit
+      shell: bash
+      env:
+        RUNNER_OS: ${{ runner.os }}
+        BUILD_TYPE: ${{ inputs.build_type }}
+        CODE_COVERAGE: ${{ inputs.code_coverage == 'true' && '-code_coverage' || '' }}
+        CONAN_PROFILE: ${{ inputs.conan_profile }}
+      run: |
+        echo "key=clio-ccache-${RUNNER_OS}-${BUILD_TYPE}${CODE_COVERAGE}-${CONAN_PROFILE}-develop" >> "${GITHUB_OUTPUT}"
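The action deliberately exposes two outputs: `key` is suffixed with the common-ancestor commit, while `restore_keys` is the same key without it, so an exact-key miss can still fall back to the newest near match. A sketch of the intended pairing with `actions/cache/restore`, mirroring the `reusable-build.yml` hunks later in this diff (the profile and build type values are illustrative):

```yaml
      - name: Generate cache key
        uses: ./.github/actions/cache-key
        id: cache_key
        with:
          conan_profile: gcc # illustrative
          build_type: Release
          code_coverage: "false"

      - name: Restore ccache cache
        uses: actions/cache/restore@v4
        with:
          path: ${{ env.CCACHE_DIR }}
          key: ${{ steps.cache_key.outputs.key }}
          restore-keys: |
            ${{ steps.cache_key.outputs.restore_keys }}
```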
3 .github/actions/cmake/action.yml (vendored)

@@ -44,6 +44,7 @@ runs:
   - name: Run cmake
     shell: bash
     env:
+      BUILD_DIR: "${{ inputs.build_dir }}"
       BUILD_TYPE: "${{ inputs.build_type }}"
       SANITIZER_OPTION: |-
         ${{ endsWith(inputs.conan_profile, '.asan') && '-Dsan=address' ||
@@ -58,7 +59,7 @@ runs:
       PACKAGE: "${{ inputs.package == 'true' && 'ON' || 'OFF' }}"
     run: |
       cmake \
-        -B ${{inputs.build_dir}} \
+        -B "${BUILD_DIR}" \
         -S . \
         -G Ninja \
         -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
@@ -24,7 +24,7 @@ runs:
           -j8 --exclude-throw-branches

   - name: Archive coverage report
-    uses: actions/upload-artifact@v4
+    uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
     with:
       name: coverage-report.xml
       path: build/coverage_report.xml
11 .github/actions/conan/action.yml (vendored)

@@ -28,11 +28,14 @@ runs:
   - name: Run conan
     shell: bash
     env:
+      BUILD_DIR: "${{ inputs.build_dir }}"
       CONAN_BUILD_OPTION: "${{ inputs.force_conan_source_build == 'true' && '*' || 'missing' }}"
+      BUILD_TYPE: "${{ inputs.build_type }}"
+      CONAN_PROFILE: "${{ inputs.conan_profile }}"
     run: |
       conan \
         install . \
-        -of build \
-        -b "$CONAN_BUILD_OPTION" \
-        -s "build_type=${{ inputs.build_type }}" \
-        --profile:all "${{ inputs.conan_profile }}"
+        -of "${BUILD_DIR}" \
+        -b "${CONAN_BUILD_OPTION}" \
+        -s "build_type=${BUILD_TYPE}" \
+        --profile:all "${CONAN_PROFILE}"
@@ -28,12 +28,17 @@ runs:
   - name: Create an issue
     id: create_issue
     shell: bash
+    env:
+      ISSUE_BODY: ${{ inputs.body }}
+      ISSUE_ASSIGNEES: ${{ inputs.assignees }}
+      ISSUE_LABELS: ${{ inputs.labels }}
+      ISSUE_TITLE: ${{ inputs.title }}
     run: |
-      echo -e '${{ inputs.body }}' > issue.md
+      echo -e "${ISSUE_BODY}" > issue.md
       gh issue create \
-        --assignee '${{ inputs.assignees }}' \
-        --label '${{ inputs.labels }}' \
-        --title '${{ inputs.title }}' \
+        --assignee "${ISSUE_ASSIGNEES}" \
+        --label "${ISSUE_LABELS}" \
+        --title "${ISSUE_TITLE}" \
         --body-file ./issue.md \
         > create_issue.log
       created_issue="$(sed 's|.*/||' create_issue.log)"
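The hunk above follows a pattern applied throughout this diff: `${{ ... }}` expressions move out of `run:` bodies and into `env:`, so user-controlled values (here, the issue body and title) are handed to the step as environment variables instead of being spliced into the shell script, closing a command-injection vector. A minimal sketch of the pattern; the step and variable names are illustrative:

```yaml
      - name: Safe interpolation
        shell: bash
        env:
          USER_INPUT: ${{ inputs.some_value }} # expanded by the runner, never parsed by the shell
        run: |
          echo "value: ${USER_INPUT}" # ordinary quoted shell expansion
```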
36 .github/actions/get_number_of_threads/action.yml (vendored, deleted)

@@ -1,36 +0,0 @@
-name: Get number of threads
-description: Determines number of threads to use on macOS and Linux
-
-inputs:
-  subtract_threads:
-    description: How many threads to subtract from the calculated number
-    required: true
-    default: "0"
-outputs:
-  threads_number:
-    description: Number of threads to use
-    value: ${{ steps.number_of_threads_export.outputs.num }}
-
-runs:
-  using: composite
-  steps:
-    - name: Get number of threads on mac
-      id: mac_threads
-      if: ${{ runner.os == 'macOS' }}
-      shell: bash
-      run: echo "num=$(($(sysctl -n hw.logicalcpu) - 2))" >> $GITHUB_OUTPUT
-
-    - name: Get number of threads on Linux
-      id: linux_threads
-      if: ${{ runner.os == 'Linux' }}
-      shell: bash
-      run: echo "num=$(($(nproc) - 2))" >> $GITHUB_OUTPUT
-
-    - name: Shift and export number of threads
-      id: number_of_threads_export
-      shell: bash
-      run: |
-        num_of_threads="${{ steps.mac_threads.outputs.num || steps.linux_threads.outputs.num }}"
-        shift_by="${{ inputs.subtract_threads }}"
-        shifted="$((num_of_threads - shift_by))"
-        echo "num=$(( shifted > 1 ? shifted : 1 ))" >> $GITHUB_OUTPUT
38 .github/actions/restore_cache/action.yml (vendored, deleted)

@@ -1,38 +0,0 @@
-name: Restore cache
-description: Find and restores ccache cache
-
-inputs:
-  conan_profile:
-    description: Conan profile name
-    required: true
-  ccache_dir:
-    description: Path to .ccache directory
-    required: true
-  build_type:
-    description: Current build type (e.g. Release, Debug)
-    required: true
-    default: Release
-  code_coverage:
-    description: Whether code coverage is on
-    required: true
-    default: "false"
-
-outputs:
-  ccache_cache_hit:
-    description: True if ccache cache has been downloaded
-    value: ${{ steps.ccache_cache.outputs.cache-hit }}
-
-runs:
-  using: composite
-  steps:
-    - name: Find common commit
-      id: git_common_ancestor
-      uses: ./.github/actions/git_common_ancestor
-
-    - name: Restore ccache cache
-      uses: actions/cache/restore@v4
-      id: ccache_cache
-      if: ${{ env.CCACHE_DISABLE != '1' }}
-      with:
-        path: ${{ inputs.ccache_dir }}
-        key: clio-ccache-${{ runner.os }}-${{ inputs.build_type }}${{ inputs.code_coverage == 'true' && '-code_coverage' || '' }}-${{ inputs.conan_profile }}-develop-${{ steps.git_common_ancestor.outputs.commit }}
38 .github/actions/save_cache/action.yml (vendored, deleted)

@@ -1,38 +0,0 @@
-name: Save cache
-description: Save ccache cache for develop branch
-
-inputs:
-  conan_profile:
-    description: Conan profile name
-    required: true
-  ccache_dir:
-    description: Path to .ccache directory
-    required: true
-  build_type:
-    description: Current build type (e.g. Release, Debug)
-    required: true
-    default: Release
-  code_coverage:
-    description: Whether code coverage is on
-    required: true
-    default: "false"
-
-  ccache_cache_hit:
-    description: Whether ccache cache has been downloaded
-    required: true
-  ccache_cache_miss_rate:
-    description: How many ccache cache misses happened
-
-runs:
-  using: composite
-  steps:
-    - name: Find common commit
-      id: git_common_ancestor
-      uses: ./.github/actions/git_common_ancestor
-
-    - name: Save ccache cache
-      if: ${{ inputs.ccache_cache_hit != 'true' || inputs.ccache_cache_miss_rate == '100.0' }}
-      uses: actions/cache/save@v4
-      with:
-        path: ${{ inputs.ccache_dir }}
-        key: clio-ccache-${{ runner.os }}-${{ inputs.build_type }}${{ inputs.code_coverage == 'true' && '-code_coverage' || '' }}-${{ inputs.conan_profile }}-develop-${{ steps.git_common_ancestor.outputs.commit }}
38 .github/dependabot.yml (vendored)

@@ -14,7 +14,7 @@ updates:
     target-branch: develop

   - package-ecosystem: github-actions
-    directory: .github/actions/build_clio/
+    directory: .github/actions/build-clio/
     schedule:
       interval: weekly
       day: monday
@@ -27,7 +27,7 @@ updates:
     target-branch: develop

   - package-ecosystem: github-actions
-    directory: .github/actions/build_docker_image/
+    directory: .github/actions/build-docker-image/
     schedule:
       interval: weekly
       day: monday
@@ -53,7 +53,7 @@ updates:
     target-branch: develop

   - package-ecosystem: github-actions
-    directory: .github/actions/code_coverage/
+    directory: .github/actions/code-coverage/
     schedule:
       interval: weekly
       day: monday
@@ -79,7 +79,7 @@ updates:
     target-branch: develop

   - package-ecosystem: github-actions
-    directory: .github/actions/create_issue/
+    directory: .github/actions/create-issue/
     schedule:
       interval: weekly
       day: monday
@@ -92,7 +92,7 @@ updates:
     target-branch: develop

   - package-ecosystem: github-actions
-    directory: .github/actions/get_number_of_threads/
+    directory: .github/actions/git-common-ancestor/
     schedule:
       interval: weekly
       day: monday
@@ -105,33 +105,7 @@ updates:
     target-branch: develop

   - package-ecosystem: github-actions
-    directory: .github/actions/git_common_ancestor/
-    schedule:
-      interval: weekly
-      day: monday
-      time: "04:00"
-      timezone: Etc/GMT
-    reviewers:
-      - XRPLF/clio-dev-team
-    commit-message:
-      prefix: "ci: [DEPENDABOT] "
-    target-branch: develop
-
-  - package-ecosystem: github-actions
-    directory: .github/actions/restore_cache/
-    schedule:
-      interval: weekly
-      day: monday
-      time: "04:00"
-      timezone: Etc/GMT
-    reviewers:
-      - XRPLF/clio-dev-team
-    commit-message:
-      prefix: "ci: [DEPENDABOT] "
-    target-branch: develop
-
-  - package-ecosystem: github-actions
-    directory: .github/actions/save_cache/
+    directory: .github/actions/cache-key/
     schedule:
       interval: weekly
       day: monday
2 .github/scripts/conan/apple-clang-17.profile (vendored)

@@ -4,7 +4,7 @@ build_type=Release
 compiler=apple-clang
 compiler.cppstd=20
 compiler.libcxx=libc++
-compiler.version=17
+compiler.version=17.0
 os=Macos

 [conf]
4 .github/scripts/conan/generate_matrix.py (vendored)

@@ -3,7 +3,9 @@ import itertools
 import json

 LINUX_OS = ["heavy", "heavy-arm64"]
-LINUX_CONTAINERS = ['{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }']
+LINUX_CONTAINERS = [
+    '{ "image": "ghcr.io/xrplf/clio-ci:77387d8f9f13aea8f23831d221ac3e7683bb69b7" }'
+]
 LINUX_COMPILERS = ["gcc", "clang"]

 MACOS_OS = ["macos15"]
@@ -31,15 +31,16 @@ TESTS=$($TEST_BINARY --gtest_list_tests | awk '/^  / {print suite $1} !/^  / {su
 OUTPUT_DIR="./.sanitizer-report"
 mkdir -p "$OUTPUT_DIR"

+export TSAN_OPTIONS="die_after_fork=0"
+export MallocNanoZone='0' # for MacOSX
+
 for TEST in $TESTS; do
-  OUTPUT_FILE="$OUTPUT_DIR/${TEST//\//_}"
-  export TSAN_OPTIONS="log_path=\"$OUTPUT_FILE\" die_after_fork=0"
-  export ASAN_OPTIONS="log_path=\"$OUTPUT_FILE\""
-  export UBSAN_OPTIONS="log_path=\"$OUTPUT_FILE\""
-  export MallocNanoZone='0' # for MacOSX
-  $TEST_BINARY --gtest_filter="$TEST" > /dev/null 2>&1
+  OUTPUT_FILE="$OUTPUT_DIR/${TEST//\//_}.log"
+  $TEST_BINARY --gtest_filter="$TEST" > "$OUTPUT_FILE" 2>&1

   if [ $? -ne 0 ]; then
     echo "'$TEST' failed a sanitizer check."
   else
     rm "$OUTPUT_FILE"
   fi
 done
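With this rewrite, each test's combined output is captured to a `.log` file under `.sanitizer-report/` and deleted again when the test passes, so only failures leave a report behind and the caller merely has to check whether the directory is non-empty. A sketch of the calling side, matching the `reusable-test.yml` hunks later in this diff (the body of the `found_report` branch is an assumption; the diff truncates it):

```yaml
      - name: Run clio_tests (sanitizer errors ignored)
        run: ./.github/scripts/execute-tests-under-sanitizer.sh ./clio_tests

      - name: Check for sanitizer report
        id: check_report
        run: |
          if ls .sanitizer-report/* 1> /dev/null 2>&1; then
            echo "found_report=true" >> $GITHUB_OUTPUT # assumed; later steps read steps.check_report.outputs.found_report
          fi
```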
@@ -38,32 +38,37 @@ on:
       description: Whether to strip clio binary
       default: true

+defaults:
+  run:
+    shell: bash
+
 jobs:
   build_and_publish_image:
     name: Build and publish image
     runs-on: ubuntu-latest

     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

       - name: Download Clio binary from artifact
         if: ${{ inputs.artifact_name != null }}
-        uses: actions/download-artifact@v5
+        uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
         with:
           name: ${{ inputs.artifact_name }}
           path: ./docker/clio/artifact/

       - name: Download Clio binary from url
         if: ${{ inputs.clio_server_binary_url != null }}
-        shell: bash
+        env:
+          BINARY_URL: ${{ inputs.clio_server_binary_url }}
+          BINARY_SHA256: ${{ inputs.binary_sha256 }}
         run: |
-          wget "${{inputs.clio_server_binary_url}}" -P ./docker/clio/artifact/
-          if [ "$(sha256sum ./docker/clio/clio_server | awk '{print $1}')" != "${{inputs.binary_sha256}}" ]; then
+          wget "${BINARY_URL}" -P ./docker/clio/artifact/
+          if [ "$(sha256sum ./docker/clio/clio_server | awk '{print $1}')" != "${BINARY_SHA256}" ]; then
             echo "Binary sha256 sum doesn't match"
             exit 1
           fi
       - name: Unpack binary
-        shell: bash
         run: |
           sudo apt update && sudo apt install -y tar unzip
           cd docker/clio/artifact
@@ -80,7 +85,6 @@ jobs:

       - name: Strip binary
         if: ${{ inputs.strip_binary }}
-        shell: bash
         run: strip ./docker/clio/clio_server

       - name: Set GHCR_REPO
@@ -89,7 +93,7 @@ jobs:
           echo "GHCR_REPO=$(echo ghcr.io/${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> ${GITHUB_OUTPUT}

       - name: Build Docker image
-        uses: ./.github/actions/build_docker_image
+        uses: ./.github/actions/build-docker-image
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
37 .github/workflows/build.yml (vendored)

@@ -8,14 +8,14 @@ on:
     paths:
       - .github/workflows/build.yml

-      - .github/workflows/build_and_test.yml
-      - .github/workflows/build_impl.yml
-      - .github/workflows/test_impl.yml
-      - .github/workflows/upload_coverage_report.yml
+      - .github/workflows/reusable-build-test.yml
+      - .github/workflows/reusable-build.yml
+      - .github/workflows/reusable-test.yml
+      - .github/workflows/reusable-upload-coverage-report.yml

       - ".github/actions/**"
-      - "!.github/actions/build_docker_image/**"
-      - "!.github/actions/create_issue/**"
+      - "!.github/actions/build-docker-image/**"
+      - "!.github/actions/create-issue/**"

       - CMakeLists.txt
       - conanfile.py
@@ -33,6 +33,10 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}-${{ github.ref == 'refs/heads/develop' && github.run_number || 'branch' }}
   cancel-in-progress: true

+defaults:
+  run:
+    shell: bash
+
 jobs:
   build-and-test:
     name: Build and Test
@@ -45,7 +49,7 @@ jobs:
         build_type: [Release, Debug]
         container:
           [
-            '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }',
+            '{ "image": "ghcr.io/xrplf/clio-ci:77387d8f9f13aea8f23831d221ac3e7683bb69b7" }',
           ]
         static: [true]

@@ -56,7 +60,7 @@ jobs:
           container: ""
           static: false

-    uses: ./.github/workflows/build_and_test.yml
+    uses: ./.github/workflows/reusable-build-test.yml
     with:
       runs_on: ${{ matrix.os }}
       container: ${{ matrix.container }}
@@ -72,14 +76,14 @@ jobs:
   code_coverage:
     name: Run Code Coverage

-    uses: ./.github/workflows/build_impl.yml
+    uses: ./.github/workflows/reusable-build.yml
     with:
       runs_on: heavy
-      container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
+      container: '{ "image": "ghcr.io/xrplf/clio-ci:77387d8f9f13aea8f23831d221ac3e7683bb69b7" }'
       conan_profile: gcc
       build_type: Debug
       download_ccache: true
-      upload_ccache: false
+      upload_ccache: true
       code_coverage: true
       static: true
       upload_clio_server: false
@@ -91,10 +95,10 @@ jobs:
   package:
     name: Build packages

-    uses: ./.github/workflows/build_impl.yml
+    uses: ./.github/workflows/reusable-build.yml
    with:
      runs_on: heavy
-      container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
+      container: '{ "image": "ghcr.io/xrplf/clio-ci:77387d8f9f13aea8f23831d221ac3e7683bb69b7" }'
       conan_profile: gcc
       build_type: Release
       download_ccache: true
@@ -111,17 +115,16 @@ jobs:
     needs: build-and-test
     runs-on: heavy
     container:
-      image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
+      image: ghcr.io/xrplf/clio-ci:77387d8f9f13aea8f23831d221ac3e7683bb69b7

     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

-      - uses: actions/download-artifact@v5
+      - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
         with:
           name: clio_server_Linux_Release_gcc

       - name: Compare Config Description
-        shell: bash
         run: |
           repoConfigFile=docs/config-description.md
           configDescriptionFile=config_description_new.md
@@ -12,31 +12,33 @@ concurrency:
 env:
   CONAN_PROFILE: gcc

+defaults:
+  run:
+    shell: bash
+
 jobs:
   build:
     name: Build Clio / `libXRPL ${{ github.event.client_payload.version }}`
     runs-on: heavy
     container:
-      image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
+      image: ghcr.io/xrplf/clio-ci:77387d8f9f13aea8f23831d221ac3e7683bb69b7

     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           fetch-depth: 0

       - name: Prepare runner
-        uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
+        uses: XRPLF/actions/.github/actions/prepare-runner@8abb0722cbff83a9a2dc7d06c473f7a4964b7382
         with:
           disable_ccache: true

       - name: Update libXRPL version requirement
-        shell: bash
         run: |
           sed -i.bak -E "s|'xrpl/[a-zA-Z0-9\\.\\-]+'|'xrpl/${{ github.event.client_payload.conan_ref }}'|g" conanfile.py
           rm -f conanfile.py.bak

       - name: Update conan lockfile
-        shell: bash
         run: |
           conan lock create . --profile:all ${{ env.CONAN_PROFILE }}

@@ -51,13 +53,13 @@ jobs:
           conan_profile: ${{ env.CONAN_PROFILE }}

       - name: Build Clio
-        uses: ./.github/actions/build_clio
+        uses: ./.github/actions/build-clio

       - name: Strip tests
         run: strip build/clio_tests

       - name: Upload clio_tests
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
         with:
           name: clio_tests_check_libxrpl
           path: build/clio_tests
@@ -67,10 +69,10 @@ jobs:
     needs: build
     runs-on: heavy
     container:
-      image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
+      image: ghcr.io/xrplf/clio-ci:77387d8f9f13aea8f23831d221ac3e7683bb69b7

     steps:
-      - uses: actions/download-artifact@v5
+      - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
         with:
           name: clio_tests_check_libxrpl

@@ -90,10 +92,10 @@ jobs:
       issues: write

     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

       - name: Create an issue
-        uses: ./.github/actions/create_issue
+        uses: ./.github/actions/create-issue
         env:
           GH_TOKEN: ${{ github.token }}
         with:
@@ -5,13 +5,26 @@ on:
     types: [opened, edited, reopened, synchronize]
     branches: [develop]

+defaults:
+  run:
+    shell: bash
+
 jobs:
   check_title:
     runs-on: ubuntu-latest

     steps:
-      - uses: ytanikin/pr-conventional-commits@b72758283dcbee706975950e96bc4bf323a8d8c0 # v1.4.2
+      - uses: ytanikin/pr-conventional-commits@b72758283dcbee706975950e96bc4bf323a8d8c0 # 1.4.2
         with:
           task_types: '["build","feat","fix","docs","test","ci","style","refactor","perf","chore"]'
           add_label: false
           custom_labels: '{"build":"build", "feat":"enhancement", "fix":"bug", "docs":"documentation", "test":"testability", "ci":"ci", "style":"refactoring", "refactor":"refactoring", "perf":"performance", "chore":"tooling"}'
+
+      - name: Check if message starts with upper-case letter
+        env:
+          PR_TITLE: ${{ github.event.pull_request.title }}
+        run: |
+          if [[ ! "${PR_TITLE}" =~ ^[a-z]+:\ [\[A-Z] ]]; then
+            echo "Error: PR title must start with an upper-case letter."
+            exit 1
+          fi
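The new check accepts titles such as `fix: Handle empty ledger range` or `ci: [CI] Update image`: a lowercase conventional-commit type, a colon and a space, then either an upper-case letter or an opening bracket. A sketch for exercising the same pattern in isolation; the sample title is illustrative:

```yaml
      - name: Try the title regex (illustrative)
        env:
          PR_TITLE: "fix: Handle empty ledger range"
        run: |
          if [[ "${PR_TITLE}" =~ ^[a-z]+:\ [\[A-Z] ]]; then
            echo "title accepted"
          else
            echo "title rejected"
          fi
```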
31 .github/workflows/clang-tidy.yml (vendored)

@@ -22,12 +22,16 @@ env:
   CONAN_PROFILE: clang
   LLVM_TOOLS_VERSION: 20

+defaults:
+  run:
+    shell: bash
+
 jobs:
   clang_tidy:
     if: github.event_name != 'push' || contains(github.event.head_commit.message, 'clang-tidy auto fixes')
     runs-on: heavy
     container:
-      image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
+      image: ghcr.io/xrplf/clio-ci:77387d8f9f13aea8f23831d221ac3e7683bb69b7

     permissions:
       contents: write
@@ -35,22 +39,15 @@ jobs:
       pull-requests: write

     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           fetch-depth: 0

       - name: Prepare runner
-        uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
+        uses: XRPLF/actions/.github/actions/prepare-runner@8abb0722cbff83a9a2dc7d06c473f7a4964b7382
         with:
           disable_ccache: true

-      - name: Restore cache
-        uses: ./.github/actions/restore_cache
-        id: restore_cache
-        with:
-          conan_profile: ${{ env.CONAN_PROFILE }}
-          ccache_dir: ${{ env.CCACHE_DIR }}
-
       - name: Run conan
         uses: ./.github/actions/conan
         with:
@@ -61,27 +58,24 @@ jobs:
         with:
           conan_profile: ${{ env.CONAN_PROFILE }}

-      - name: Get number of threads
-        uses: ./.github/actions/get_number_of_threads
-        id: number_of_threads
+      - name: Get number of processors
+        uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
+        id: nproc

       - name: Run clang-tidy
         continue-on-error: true
-        shell: bash
         id: run_clang_tidy
         run: |
-          run-clang-tidy-${{ env.LLVM_TOOLS_VERSION }} -p build -j "${{ steps.number_of_threads.outputs.threads_number }}" -fix -quiet 1>output.txt
+          run-clang-tidy-${{ env.LLVM_TOOLS_VERSION }} -p build -j "${{ steps.nproc.outputs.nproc }}" -fix -quiet 1>output.txt

       - name: Fix local includes and clang-format style
         if: ${{ steps.run_clang_tidy.outcome != 'success' }}
-        shell: bash
         run: |
           pre-commit run --all-files fix-local-includes || true
           pre-commit run --all-files clang-format || true

       - name: Print issues found
         if: ${{ steps.run_clang_tidy.outcome != 'success' }}
-        shell: bash
         run: |
           sed -i '/error\||/!d' ./output.txt
           cat output.txt
@@ -90,7 +84,7 @@ jobs:
       - name: Create an issue
         if: ${{ steps.run_clang_tidy.outcome != 'success' && github.event_name != 'pull_request' }}
         id: create_issue
-        uses: ./.github/actions/create_issue
+        uses: ./.github/actions/create-issue
         env:
           GH_TOKEN: ${{ github.token }}
         with:
@@ -126,5 +120,4 @@ jobs:

       - name: Fail the job
         if: ${{ steps.run_clang_tidy.outcome != 'success' }}
-        shell: bash
         run: exit 1
16 .github/workflows/docs.yml (vendored)

@@ -10,20 +10,24 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true

+defaults:
+  run:
+    shell: bash
+
 jobs:
   build:
     runs-on: ubuntu-latest
     container:
-      image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
+      image: ghcr.io/xrplf/clio-ci:77387d8f9f13aea8f23831d221ac3e7683bb69b7

     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           lfs: true

       - name: Prepare runner
-        uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
+        uses: XRPLF/actions/.github/actions/prepare-runner@8abb0722cbff83a9a2dc7d06c473f7a4964b7382
         with:
           disable_ccache: true

@@ -39,10 +43,10 @@ jobs:
         run: cmake --build . --target docs

       - name: Setup Pages
-        uses: actions/configure-pages@v5
+        uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b # v5.0.0

       - name: Upload artifact
-        uses: actions/upload-pages-artifact@v4
+        uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b # v4.0.0
         with:
           path: build_docs/html
           name: docs-develop
@@ -62,6 +66,6 @@ jobs:
     steps:
       - name: Deploy to GitHub Pages
         id: deployment
-        uses: actions/deploy-pages@v4
+        uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e # v4.0.5
         with:
           artifact_name: docs-develop
57 .github/workflows/nightly.yml (vendored)

@@ -8,14 +8,14 @@ on:
     paths:
       - .github/workflows/nightly.yml

-      - .github/workflows/release_impl.yml
-      - .github/workflows/build_and_test.yml
-      - .github/workflows/build_impl.yml
-      - .github/workflows/test_impl.yml
-      - .github/workflows/build_clio_docker_image.yml
+      - .github/workflows/reusable-release.yml
+      - .github/workflows/reusable-build-test.yml
+      - .github/workflows/reusable-build.yml
+      - .github/workflows/reusable-test.yml
+      - .github/workflows/build-clio-docker-image.yml

       - ".github/actions/**"
-      - "!.github/actions/code_coverage/**"
+      - "!.github/actions/code-coverage/**"
       - .github/scripts/prepare-release-artifacts.sh

 concurrency:
@@ -23,6 +23,10 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true

+defaults:
+  run:
+    shell: bash
+
 jobs:
   build-and-test:
     name: Build and Test
@@ -39,19 +43,19 @@ jobs:
           conan_profile: gcc
           build_type: Release
           static: true
-          container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
+          container: '{ "image": "ghcr.io/xrplf/clio-ci:77387d8f9f13aea8f23831d221ac3e7683bb69b7" }'
         - os: heavy
           conan_profile: gcc
           build_type: Debug
           static: true
-          container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
+          container: '{ "image": "ghcr.io/xrplf/clio-ci:77387d8f9f13aea8f23831d221ac3e7683bb69b7" }'
         - os: heavy
           conan_profile: gcc.ubsan
           build_type: Release
           static: false
-          container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
+          container: '{ "image": "ghcr.io/xrplf/clio-ci:77387d8f9f13aea8f23831d221ac3e7683bb69b7" }'

-    uses: ./.github/workflows/build_and_test.yml
+    uses: ./.github/workflows/reusable-build-test.yml
     with:
       runs_on: ${{ matrix.os }}
       container: ${{ matrix.container }}
@@ -73,13 +77,13 @@ jobs:
       include:
         - os: heavy
           conan_profile: clang
-          container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
+          container: '{ "image": "ghcr.io/xrplf/clio-ci:77387d8f9f13aea8f23831d221ac3e7683bb69b7" }'
           static: true
         - os: macos15
           conan_profile: apple-clang
           container: ""
           static: false
-    uses: ./.github/workflows/build_impl.yml
+    uses: ./.github/workflows/reusable-build.yml
     with:
       runs_on: ${{ matrix.os }}
       container: ${{ matrix.container }}
@@ -93,23 +97,34 @@ jobs:
       targets: all
       analyze_build_time: true

+  get_date:
+    name: Get Date
+    runs-on: ubuntu-latest
+    outputs:
+      date: ${{ steps.get_date.outputs.date }}
+    steps:
+      - name: Get current date
+        id: get_date
+        run: |
+          echo "date=$(date +'%Y%m%d')" >> $GITHUB_OUTPUT
+
   nightly_release:
-    needs: build-and-test
-    uses: ./.github/workflows/release_impl.yml
+    needs: [build-and-test, get_date]
+    uses: ./.github/workflows/reusable-release.yml
     with:
-      overwrite_release: true
+      delete_pattern: "nightly-*"
       prerelease: true
-      title: "Clio development (nightly) build"
-      version: nightly
+      title: "Clio development build (nightly-${{ needs.get_date.outputs.date }})"
+      version: nightly-${{ needs.get_date.outputs.date }}
       header: >
         > **Note:** Please remember that this is a development release and it is not recommended for production use.

-        Changelog (including previous releases): <https://github.com/XRPLF/clio/commits/nightly>
+        Changelog (including previous releases): <https://github.com/XRPLF/clio/commits/nightly-${{ needs.get_date.outputs.date }}>
       generate_changelog: false
       draft: false

   build_and_publish_docker_image:
-    uses: ./.github/workflows/build_clio_docker_image.yml
+    uses: ./.github/workflows/build-clio-docker-image.yml
     needs: build-and-test
     secrets: inherit
     with:
@@ -130,10 +145,10 @@ jobs:
       issues: write

     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

       - name: Create an issue
-        uses: ./.github/actions/create_issue
+        uses: ./.github/actions/create-issue
         env:
           GH_TOKEN: ${{ github.token }}
         with:
2 .github/workflows/pre-commit-autoupdate.yml (vendored)

@@ -1,8 +1,8 @@
 name: Pre-commit auto-update

 on:
-  # every first day of the month
   schedule:
+    # every first day of the month
     - cron: "0 0 1 * *"
   pull_request:
     branches: [release/*, develop]
4 .github/workflows/pre-commit.yml (vendored)

@@ -8,7 +8,7 @@ on:

 jobs:
   run-hooks:
-    uses: XRPLF/actions/.github/workflows/pre-commit.yml@afbcbdafbe0ce5439492fb87eda6441371086386
+    uses: XRPLF/actions/.github/workflows/pre-commit.yml@34790936fae4c6c751f62ec8c06696f9c1a5753a
     with:
       runs_on: heavy
-      container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
+      container: '{ "image": "ghcr.io/xrplf/clio-pre-commit:c117f470f2ef954520ab5d1c8a5ed2b9e68d6f8a" }'
10 .github/workflows/release.yml (vendored)

@@ -29,9 +29,9 @@ jobs:
           conan_profile: gcc
           build_type: Release
           static: true
-          container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
+          container: '{ "image": "ghcr.io/xrplf/clio-ci:77387d8f9f13aea8f23831d221ac3e7683bb69b7" }'

-    uses: ./.github/workflows/build_and_test.yml
+    uses: ./.github/workflows/reusable-build-test.yml
     with:
       runs_on: ${{ matrix.os }}
       container: ${{ matrix.container }}
@@ -47,13 +47,13 @@ jobs:

   release:
     needs: build-and-test
-    uses: ./.github/workflows/release_impl.yml
+    uses: ./.github/workflows/reusable-release.yml
     with:
-      overwrite_release: false
+      delete_pattern: ""
       prerelease: ${{ contains(github.ref_name, '-') }}
       title: "${{ github.ref_name }}"
       version: "${{ github.ref_name }}"
       header: >
         ${{ contains(github.ref_name, '-') && '> **Note:** Please remember that this is a release candidate and it is not recommended for production use.' || '' }}
       generate_changelog: ${{ !contains(github.ref_name, '-') }}
-      draft: true
+      draft: ${{ !contains(github.ref_name, '-') }}
@@ -77,7 +77,7 @@ on:

 jobs:
   build:
-    uses: ./.github/workflows/build_impl.yml
+    uses: ./.github/workflows/reusable-build.yml
     with:
       runs_on: ${{ inputs.runs_on }}
       container: ${{ inputs.container }}
@@ -95,7 +95,7 @@ jobs:

   test:
     needs: build
-    uses: ./.github/workflows/test_impl.yml
+    uses: ./.github/workflows/reusable-test.yml
     with:
       runs_on: ${{ inputs.runs_on }}
       container: ${{ inputs.container }}
@@ -75,6 +75,10 @@ on:
       CODECOV_TOKEN:
         required: false

+defaults:
+  run:
+    shell: bash
+
 jobs:
   build:
     name: Build
@@ -86,7 +90,7 @@ jobs:
         if: ${{ runner.os == 'macOS' }}
         uses: XRPLF/actions/.github/actions/cleanup-workspace@ea9970b7c211b18f4c8bcdb28c29f5711752029f

-      - uses: actions/checkout@v4
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           fetch-depth: 0
           # We need to fetch tags to have correct version in the release
@@ -95,25 +99,31 @@ jobs:
           ref: ${{ github.ref }}

       - name: Prepare runner
-        uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
+        uses: XRPLF/actions/.github/actions/prepare-runner@8abb0722cbff83a9a2dc7d06c473f7a4964b7382
         with:
           disable_ccache: ${{ !inputs.download_ccache }}

       - name: Setup conan on macOS
         if: ${{ runner.os == 'macOS' }}
-        shell: bash
         run: ./.github/scripts/conan/init.sh

-      - name: Restore cache
-        if: ${{ inputs.download_ccache }}
-        uses: ./.github/actions/restore_cache
-        id: restore_cache
+      - name: Generate cache key
+        uses: ./.github/actions/cache-key
+        id: cache_key
         with:
           conan_profile: ${{ inputs.conan_profile }}
-          ccache_dir: ${{ env.CCACHE_DIR }}
           build_type: ${{ inputs.build_type }}
           code_coverage: ${{ inputs.code_coverage }}

+      - name: Restore ccache cache
+        if: ${{ inputs.download_ccache && github.ref != 'refs/heads/develop' }}
+        uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
+        with:
+          path: ${{ env.CCACHE_DIR }}
+          key: ${{ steps.cache_key.outputs.key }}
+          restore-keys: |
+            ${{ steps.cache_key.outputs.restore_keys }}
+
       - name: Run conan
         uses: ./.github/actions/conan
         with:
@@ -131,7 +141,7 @@ jobs:
           package: ${{ inputs.package }}

       - name: Build Clio
-        uses: ./.github/actions/build_clio
+        uses: ./.github/actions/build-clio
         with:
           targets: ${{ inputs.targets }}

@@ -141,24 +151,26 @@ jobs:
           ClangBuildAnalyzer --all build/ build_time_report.bin
           ClangBuildAnalyzer --analyze build_time_report.bin > build_time_report.txt
           cat build_time_report.txt
-        shell: bash

       - name: Upload build time analyze report
         if: ${{ inputs.analyze_build_time }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
         with:
           name: build_time_report_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
           path: build_time_report.txt

-      - name: Show ccache's statistics
+      - name: Show ccache's statistics and zero it
         if: ${{ inputs.download_ccache }}
-        shell: bash
-        id: ccache_stats
         run: |
-          ccache -s > /tmp/ccache.stats
-          miss_rate=$(cat /tmp/ccache.stats | grep 'Misses' | head -n1 | sed 's/.*(\(.*\)%).*/\1/')
-          echo "miss_rate=${miss_rate}" >> $GITHUB_OUTPUT
-          cat /tmp/ccache.stats
+          ccache --show-stats
+          ccache --zero-stats
+
+      - name: Save ccache cache
+        if: ${{ inputs.upload_ccache && github.ref == 'refs/heads/develop' }}
+        uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
+        with:
+          path: ${{ env.CCACHE_DIR }}
+          key: ${{ steps.cache_key.outputs.key }}

       - name: Strip unit_tests
         if: ${{ !endsWith(inputs.conan_profile, 'san') && !inputs.code_coverage && !inputs.analyze_build_time }}
@@ -170,44 +182,32 @@ jobs:

       - name: Upload clio_server
         if: ${{ inputs.upload_clio_server && !inputs.code_coverage && !inputs.analyze_build_time }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
         with:
           name: clio_server_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
           path: build/clio_server

       - name: Upload clio_tests
         if: ${{ !inputs.code_coverage && !inputs.analyze_build_time && !inputs.package }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
         with:
           name: clio_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
           path: build/clio_tests

       - name: Upload clio_integration_tests
         if: ${{ !inputs.code_coverage && !inputs.analyze_build_time && !inputs.package }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
         with:
           name: clio_integration_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
           path: build/clio_integration_tests

       - name: Upload Clio Linux package
         if: ${{ inputs.package }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
         with:
           name: clio_deb_package_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
           path: build/*.deb

-      - name: Save cache
-        if: ${{ inputs.upload_ccache && github.ref == 'refs/heads/develop' }}
-        uses: ./.github/actions/save_cache
-        with:
-          conan_profile: ${{ inputs.conan_profile }}
-          ccache_dir: ${{ env.CCACHE_DIR }}
-          build_type: ${{ inputs.build_type }}
-          code_coverage: ${{ inputs.code_coverage }}
-
-          ccache_cache_hit: ${{ steps.restore_cache.outputs.ccache_cache_hit }}
-          ccache_cache_miss_rate: ${{ steps.ccache_stats.outputs.miss_rate }}
-
       # This is run as part of the build job, because it requires the following:
       # - source code
       # - conan packages
@@ -216,17 +216,18 @@ jobs:
       # It's all available in the build job, but not in the test job
       - name: Run code coverage
         if: ${{ inputs.code_coverage }}
-        uses: ./.github/actions/code_coverage
+        uses: ./.github/actions/code-coverage

       - name: Verify expected version
         if: ${{ inputs.expected_version != '' }}
-        shell: bash
+        env:
+          INPUT_EXPECTED_VERSION: ${{ inputs.expected_version }}
         run: |
           set -e
-          EXPECTED_VERSION="clio-${{ inputs.expected_version }}"
+          EXPECTED_VERSION="clio-${INPUT_EXPECTED_VERSION}"
           actual_version=$(./build/clio_server --version)
           if [[ "$actual_version" != "$EXPECTED_VERSION" ]]; then
-            echo "Expected version '$EXPECTED_VERSION', but got '$actual_version'"
+            echo "Expected version '${EXPECTED_VERSION}', but got '${actual_version}'"
             exit 1
           fi

@@ -238,6 +239,6 @@ jobs:
     if: ${{ inputs.code_coverage }}
     name: Codecov
     needs: build
-    uses: ./.github/workflows/upload_coverage_report.yml
+    uses: ./.github/workflows/reusable-upload-coverage-report.yml
     secrets:
       CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
||||
@@ -3,10 +3,10 @@ name: Make release
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
overwrite_release:
|
||||
description: "Overwrite the current release and tag"
|
||||
delete_pattern:
|
||||
description: "Pattern to delete previous releases"
|
||||
required: true
|
||||
type: boolean
|
||||
type: string
|
||||
|
||||
prerelease:
|
||||
description: "Create a prerelease"
|
||||
@@ -38,11 +38,15 @@ on:
|
||||
required: true
|
||||
type: boolean
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
||||
jobs:
|
||||
release:
|
||||
runs-on: heavy
|
||||
container:
|
||||
image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
|
||||
image: ghcr.io/xrplf/clio-ci:77387d8f9f13aea8f23831d221ac3e7683bb69b7
|
||||
env:
|
||||
GH_REPO: ${{ github.repository }}
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
@@ -51,29 +55,29 @@ jobs:
|
||||
contents: write
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Prepare runner
|
||||
uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
|
||||
uses: XRPLF/actions/.github/actions/prepare-runner@8abb0722cbff83a9a2dc7d06c473f7a4964b7382
|
||||
with:
|
||||
disable_ccache: true
|
||||
|
||||
- uses: actions/download-artifact@v5
|
||||
- uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
|
||||
with:
|
||||
path: release_artifacts
|
||||
pattern: clio_server_*
|
||||
|
||||
- name: Create release notes
|
||||
shell: bash
|
||||
env:
|
||||
RELEASE_HEADER: ${{ inputs.header }}
|
||||
run: |
|
||||
echo "# Release notes" > "${RUNNER_TEMP}/release_notes.md"
|
||||
echo "" >> "${RUNNER_TEMP}/release_notes.md"
|
||||
printf '%s\n' "${{ inputs.header }}" >> "${RUNNER_TEMP}/release_notes.md"
|
||||
printf '%s\n' "${RELEASE_HEADER}" >> "${RUNNER_TEMP}/release_notes.md"
|
||||
|
||||
- name: Generate changelog
|
||||
shell: bash
|
||||
if: ${{ inputs.generate_changelog }}
|
||||
run: |
|
||||
LAST_TAG="$(gh release view --json tagName -q .tagName --repo XRPLF/clio)"
|
||||
@@ -83,30 +87,39 @@ jobs:
|
||||
cat CHANGELOG.md >> "${RUNNER_TEMP}/release_notes.md"
|
||||
|
||||
- name: Prepare release artifacts
|
||||
shell: bash
|
||||
run: .github/scripts/prepare-release-artifacts.sh release_artifacts
|
||||
|
||||
- name: Upload release notes
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
with:
|
||||
name: release_notes_${{ inputs.version }}
|
||||
path: "${RUNNER_TEMP}/release_notes.md"
|
||||
|
||||
- name: Remove current release and tag
|
||||
if: ${{ github.event_name != 'pull_request' && inputs.overwrite_release }}
|
||||
shell: bash
|
||||
- name: Remove previous release with a pattern
|
||||
if: ${{ github.event_name != 'pull_request' && inputs.delete_pattern != '' }}
|
||||
env:
|
||||
DELETE_PATTERN: ${{ inputs.delete_pattern }}
|
||||
run: |
|
||||
gh release delete ${{ inputs.version }} --yes || true
|
||||
git push origin :${{ inputs.version }} || true
|
||||
RELEASES_TO_DELETE=$(gh release list --limit 50 --repo "${GH_REPO}" | grep -E "${DELETE_PATTERN}" | awk -F'\t' '{print $3}' || true)
|
||||
if [ -n "$RELEASES_TO_DELETE" ]; then
|
||||
for RELEASE in $RELEASES_TO_DELETE; do
|
||||
echo "Deleting release: $RELEASE"
|
||||
gh release delete "$RELEASE" --repo "${GH_REPO}" --yes --cleanup-tag
|
||||
done
|
||||
fi
|
||||
|
||||
- name: Publish release
|
||||
if: ${{ github.event_name != 'pull_request' }}
|
||||
shell: bash
|
||||
env:
|
||||
RELEASE_VERSION: ${{ inputs.version }}
|
||||
PRERELEASE_OPTION: ${{ inputs.prerelease && '--prerelease' || '' }}
|
||||
RELEASE_TITLE: ${{ inputs.title }}
|
||||
DRAFT_OPTION: ${{ inputs.draft && '--draft' || '' }}
|
||||
run: |
|
||||
gh release create "${{ inputs.version }}" \
|
||||
${{ inputs.prerelease && '--prerelease' || '' }} \
|
||||
--title "${{ inputs.title }}" \
|
||||
gh release create "${RELEASE_VERSION}" \
|
||||
${PRERELEASE_OPTION} \
|
||||
--title "${RELEASE_TITLE}" \
|
||||
--target "${GITHUB_SHA}" \
|
||||
${{ inputs.draft && '--draft' || '' }} \
|
||||
${DRAFT_OPTION} \
|
||||
--notes-file "${RUNNER_TEMP}/release_notes.md" \
|
||||
./release_artifacts/clio_server*
|
||||
@@ -33,6 +33,10 @@ on:
       required: true
       type: boolean

+defaults:
+  run:
+    shell: bash
+
 jobs:
   unit_tests:
     name: Unit testing
@@ -43,23 +47,22 @@ jobs:

     env:
       # TODO: remove completely when we have fixed all currently existing issues with sanitizers
-      SANITIZER_IGNORE_ERRORS: ${{ endsWith(inputs.conan_profile, '.tsan') || inputs.conan_profile == 'clang.asan' || (inputs.conan_profile == 'gcc.asan' && inputs.build_type == 'Release') }}
+      SANITIZER_IGNORE_ERRORS: ${{ endsWith(inputs.conan_profile, '.tsan') }}

     steps:
       - name: Cleanup workspace
         if: ${{ runner.os == 'macOS' }}
         uses: XRPLF/actions/.github/actions/cleanup-workspace@ea9970b7c211b18f4c8bcdb28c29f5711752029f

-      - uses: actions/checkout@v4
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           fetch-depth: 0

-      - uses: actions/download-artifact@v5
+      - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
         with:
           name: clio_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}

       - name: Make clio_tests executable
-        shell: bash
         run: chmod +x ./clio_tests

       - name: Run clio_tests (regular)
@@ -68,11 +71,10 @@ jobs:

       - name: Run clio_tests (sanitizer errors ignored)
         if: ${{ env.SANITIZER_IGNORE_ERRORS == 'true' }}
-        run: ./.github/scripts/execute-tests-under-sanitizer ./clio_tests
+        run: ./.github/scripts/execute-tests-under-sanitizer.sh ./clio_tests

       - name: Check for sanitizer report
         if: ${{ env.SANITIZER_IGNORE_ERRORS == 'true' }}
-        shell: bash
         id: check_report
         run: |
           if ls .sanitizer-report/* 1> /dev/null 2>&1; then
@@ -83,7 +85,7 @@ jobs:

       - name: Upload sanitizer report
         if: ${{ env.SANITIZER_IGNORE_ERRORS == 'true' && steps.check_report.outputs.found_report == 'true' }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
         with:
           name: sanitizer_report_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
           path: .sanitizer-report/*
@@ -91,7 +93,7 @@ jobs:

       - name: Create an issue
         if: ${{ false && env.SANITIZER_IGNORE_ERRORS == 'true' && steps.check_report.outputs.found_report == 'true' }}
-        uses: ./.github/actions/create_issue
+        uses: ./.github/actions/create-issue
         env:
           GH_TOKEN: ${{ github.token }}
         with:
@@ -144,7 +146,7 @@ jobs:
           sleep 5
         done

-      - uses: actions/download-artifact@v5
+      - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
         with:
           name: clio_integration_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
@@ -1,24 +1,27 @@
 name: Upload report

 on:
   workflow_dispatch:
   workflow_call:
     secrets:
       CODECOV_TOKEN:
         required: true

+defaults:
+  run:
+    shell: bash
+
 jobs:
   upload_report:
     name: Upload report
     runs-on: ubuntu-latest

     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           fetch-depth: 0

       - name: Download report artifact
-        uses: actions/download-artifact@v5
+        uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
         with:
           name: coverage-report.xml
           path: build
19 .github/workflows/sanitizers.yml (vendored)

@@ -8,14 +8,14 @@ on:
     paths:
       - .github/workflows/sanitizers.yml

-      - .github/workflows/build_and_test.yml
-      - .github/workflows/build_impl.yml
-      - .github/workflows/test_impl.yml
+      - .github/workflows/reusable-build-test.yml
+      - .github/workflows/reusable-build.yml
+      - .github/workflows/reusable-test.yml

       - ".github/actions/**"
-      - "!.github/actions/build_docker_image/**"
-      - "!.github/actions/create_issue/**"
-      - .github/scripts/execute-tests-under-sanitizer
+      - "!.github/actions/build-docker-image/**"
+      - "!.github/actions/create-issue/**"
+      - .github/scripts/execute-tests-under-sanitizer.sh

       - CMakeLists.txt
       - conanfile.py
@@ -41,17 +41,16 @@ jobs:
         sanitizer_ext: [.asan, .tsan, .ubsan]
         build_type: [Release, Debug]

-    uses: ./.github/workflows/build_and_test.yml
+    uses: ./.github/workflows/reusable-build-test.yml
     with:
       runs_on: heavy
-      container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
+      container: '{ "image": "ghcr.io/xrplf/clio-ci:77387d8f9f13aea8f23831d221ac3e7683bb69b7" }'
       download_ccache: false
       upload_ccache: false
       conan_profile: ${{ matrix.compiler }}${{ matrix.sanitizer_ext }}
       build_type: ${{ matrix.build_type }}
       static: false
-      # Currently, both gcc.tsan and clang.tsan unit tests hang
-      run_unit_tests: ${{ matrix.sanitizer_ext != '.tsan' }}
+      run_unit_tests: true
       run_integration_tests: false
       upload_clio_server: false
       targets: clio_tests clio_integration_tests
@@ -3,23 +3,23 @@ name: Update CI docker image
|
||||
on:
|
||||
pull_request:
|
||||
paths:
|
||||
- .github/workflows/update_docker_ci.yml
|
||||
- .github/workflows/update-docker-ci.yml
|
||||
|
||||
- ".github/actions/build_docker_image/**"
|
||||
- ".github/actions/build-docker-image/**"
|
||||
|
||||
- "docker/ci/**"
|
||||
- "docker/compilers/**"
|
||||
- "docker/tools/**"
|
||||
- "docker/**"
|
||||
- "!docker/clio/**"
|
||||
- "!docker/develop/**"
|
||||
push:
|
||||
branches: [develop]
|
||||
paths:
|
||||
- .github/workflows/update_docker_ci.yml
|
||||
- .github/workflows/update-docker-ci.yml
|
||||
|
||||
- ".github/actions/build_docker_image/**"
|
||||
- ".github/actions/build-docker-image/**"
|
||||
|
||||
- "docker/ci/**"
|
||||
- "docker/compilers/**"
|
||||
- "docker/tools/**"
|
||||
- "docker/**"
|
||||
- "!docker/clio/**"
|
||||
- "!docker/develop/**"
|
||||
workflow_dispatch:
|
||||
|
||||
concurrency:
|
||||
@@ -33,6 +33,10 @@ env:
|
||||
GCC_MAJOR_VERSION: 15
|
||||
GCC_VERSION: 15.2.0
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
||||
jobs:
|
||||
repo:
|
||||
name: Calculate repo name
|
||||
@@ -52,7 +56,7 @@ jobs:
|
||||
needs: repo
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
@@ -60,7 +64,7 @@ jobs:
|
||||
with:
|
||||
files: "docker/compilers/gcc/**"
|
||||
|
||||
- uses: ./.github/actions/build_docker_image
|
||||
- uses: ./.github/actions/build-docker-image
|
||||
if: ${{ steps.changed-files.outputs.any_changed == 'true' }}
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
@@ -90,15 +94,15 @@ jobs:
|
||||
needs: repo
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
|
||||
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
|
||||
with:
|
||||
files: "docker/compilers/gcc/**"
|
||||
|
||||
- uses: ./.github/actions/build_docker_image
|
||||
- uses: ./.github/actions/build-docker-image
|
||||
if: ${{ steps.changed-files.outputs.any_changed == 'true' }}
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
@@ -128,7 +132,7 @@ jobs:
|
||||
needs: [repo, gcc-amd64, gcc-arm64]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
@@ -137,11 +141,11 @@ jobs:
|
||||
files: "docker/compilers/gcc/**"
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
if: ${{ github.event_name != 'pull_request' }}
|
||||
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.repository_owner }}
|
||||
@@ -149,7 +153,7 @@ jobs:
|
||||
|
||||
- name: Login to DockerHub
|
||||
if: ${{ github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' }}
|
||||
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USER }}
|
||||
password: ${{ secrets.DOCKERHUB_PW }}
|
||||
@@ -179,7 +183,7 @@ jobs:
|
||||
needs: repo
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
@@ -187,7 +191,7 @@ jobs:
|
||||
with:
|
||||
files: "docker/compilers/clang/**"
|
||||
|
||||
- uses: ./.github/actions/build_docker_image
|
||||
- uses: ./.github/actions/build-docker-image
|
||||
if: ${{ steps.changed-files.outputs.any_changed == 'true' }}
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
@@ -215,7 +219,7 @@ jobs:
|
||||
needs: [repo, gcc-merge]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
@@ -223,7 +227,7 @@ jobs:
|
||||
with:
|
||||
files: "docker/tools/**"
|
||||
|
||||
- uses: ./.github/actions/build_docker_image
|
||||
- uses: ./.github/actions/build-docker-image
|
||||
if: ${{ steps.changed-files.outputs.any_changed == 'true' }}
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
@@ -246,15 +250,15 @@ jobs:
|
||||
needs: [repo, gcc-merge]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
|
||||
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
|
||||
with:
|
||||
files: "docker/tools/**"
|
||||
|
||||
- uses: ./.github/actions/build_docker_image
|
||||
- uses: ./.github/actions/build-docker-image
|
||||
if: ${{ steps.changed-files.outputs.any_changed == 'true' }}
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
@@ -277,7 +281,7 @@ jobs:
|
||||
needs: [repo, tools-amd64, tools-arm64]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
@@ -286,11 +290,11 @@ jobs:
|
||||
files: "docker/tools/**"
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
if: ${{ github.event_name != 'pull_request' }}
|
||||
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.repository_owner }}
|
||||
@@ -306,14 +310,36 @@ jobs:
|
||||
$image:arm64-latest \
|
||||
$image:amd64-latest
|
||||
|
||||
pre-commit:
|
||||
name: Build and push pre-commit docker image
|
||||
runs-on: heavy
|
||||
needs: [repo, tools-merge]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: ./.github/actions/build-docker-image
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
images: |
|
||||
${{ needs.repo.outputs.GHCR_REPO }}/clio-pre-commit
|
||||
push_image: ${{ github.event_name != 'pull_request' }}
|
||||
directory: docker/pre-commit
|
||||
tags: |
|
||||
type=raw,value=latest
|
||||
type=raw,value=${{ github.sha }}
|
||||
platforms: linux/amd64,linux/arm64
|
||||
build_args: |
|
||||
GHCR_REPO=${{ needs.repo.outputs.GHCR_REPO }}
|
||||
|
||||
ci:
|
||||
name: Build and push CI docker image
|
||||
runs-on: heavy
|
||||
needs: [repo, gcc-merge, clang, tools-merge]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: ./.github/actions/build_docker_image
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- uses: ./.github/actions/build-docker-image
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
|
||||
@@ -18,7 +18,7 @@ on:
|
||||
pull_request:
|
||||
branches: [develop]
|
||||
paths:
|
||||
- .github/workflows/upload_conan_deps.yml
|
||||
- .github/workflows/upload-conan-deps.yml
|
||||
|
||||
- .github/actions/conan/action.yml
|
||||
- ".github/scripts/conan/**"
|
||||
@@ -28,7 +28,7 @@ on:
|
||||
push:
|
||||
branches: [develop]
|
||||
paths:
|
||||
- .github/workflows/upload_conan_deps.yml
|
||||
- .github/workflows/upload-conan-deps.yml
|
||||
|
||||
- .github/actions/conan/action.yml
|
||||
- ".github/scripts/conan/**"
|
||||
@@ -40,13 +40,17 @@ concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
||||
jobs:
|
||||
generate-matrix:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
matrix: ${{ steps.set-matrix.outputs.matrix }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Calculate conan matrix
|
||||
id: set-matrix
|
||||
@@ -69,16 +73,15 @@ jobs:
|
||||
CONAN_PROFILE: ${{ matrix.compiler }}${{ matrix.sanitizer_ext }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Prepare runner
|
||||
uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
|
||||
uses: XRPLF/actions/.github/actions/prepare-runner@8abb0722cbff83a9a2dc7d06c473f7a4964b7382
|
||||
with:
|
||||
disable_ccache: true
|
||||
|
||||
- name: Setup conan on macOS
|
||||
if: ${{ runner.os == 'macOS' }}
|
||||
shell: bash
|
||||
run: ./.github/scripts/conan/init.sh
|
||||
|
||||
- name: Show conan profile
|
||||
@@ -99,4 +102,6 @@ jobs:
|
||||
|
||||
- name: Upload Conan packages
|
||||
if: ${{ github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' && github.event_name != 'schedule' }}
|
||||
run: conan upload "*" -r=xrplf --confirm ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }}
|
||||
env:
|
||||
FORCE_OPTION: ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }}
|
||||
run: conan upload "*" -r=xrplf --confirm ${FORCE_OPTION}
|
||||
@@ -11,7 +11,10 @@
|
||||
#
|
||||
# See https://pre-commit.com for more information
|
||||
# See https://pre-commit.com/hooks.html for more hooks
|
||||
exclude: ^(docs/doxygen-awesome-theme/|conan\.lock$)
|
||||
exclude: |
|
||||
(?x)^(
|
||||
docs/doxygen-awesome-theme/.*
|
||||
)$
|
||||
|
||||
repos:
|
||||
# `pre-commit sample-config` default hooks
|
||||
@@ -43,7 +46,7 @@ repos:
|
||||
# hadolint-docker is a special hook that runs hadolint in a Docker container
|
||||
# Docker is not installed in the environment where pre-commit is run
|
||||
stages: [manual]
|
||||
entry: hadolint/hadolint:v2.14 hadolint
|
||||
entry: hadolint/hadolint:v2.14.0 hadolint
|
||||
|
||||
- repo: https://github.com/codespell-project/codespell
|
||||
rev: 63c8f8312b7559622c0d82815639671ae42132ac # frozen: v2.4.1
|
||||
|
||||
@@ -75,10 +75,6 @@ if (san)
|
||||
endif ()
|
||||
target_compile_options(clio_options INTERFACE ${SAN_OPTIMIZATION_FLAG} ${SAN_FLAG} -fno-omit-frame-pointer)
|
||||
|
||||
target_compile_definitions(
|
||||
clio_options INTERFACE $<$<STREQUAL:${san},address>:SANITIZER=ASAN> $<$<STREQUAL:${san},thread>:SANITIZER=TSAN>
|
||||
$<$<STREQUAL:${san},memory>:SANITIZER=MSAN> $<$<STREQUAL:${san},undefined>:SANITIZER=UBSAN>
|
||||
)
|
||||
target_link_libraries(clio_options INTERFACE ${SAN_FLAG} ${SAN_LIB})
|
||||
endif ()
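The hunk above removes the `SANITIZER=ASAN`/`TSAN`/`MSAN`/`UBSAN` compile definitions from `clio_options`. For context, a definition like this is consumed at compile time; the following is a minimal, hypothetical C++ sketch of a consumer (not taken from the Clio sources):

```cpp
#include <iostream>

// Hypothetical consumer of a SANITIZER compile definition. With
// -DSANITIZER=ASAN on the compiler command line, the token expands to ASAN;
// without it, the #ifdef branch is skipped entirely.
#define STRINGIFY_IMPL(x) #x
#define STRINGIFY(x) STRINGIFY_IMPL(x)

int main() {
#ifdef SANITIZER
    std::cout << "Built with sanitizer: " << STRINGIFY(SANITIZER) << '\n';
#else
    std::cout << "Built without a sanitizer\n";
#endif
}
```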

@@ -34,7 +34,6 @@ Below are some useful docs to learn more about Clio.

- [How to configure Clio and rippled](./docs/configure-clio.md)
- [How to run Clio](./docs/run-clio.md)
- [Logging](./docs/logging.md)
- [Troubleshooting guide](./docs/trouble_shooting.md)

**General reference material:**

@@ -3,7 +3,7 @@
"requires": [
"zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1756234269.497",
"xxhash/0.8.3#681d36a0a6111fc56e5e45ea182c19cc%1756234289.683",
"xrpl/2.6.1#973af2bf9631f239941dd9f5a100bb84%1759275059.342",
"xrpl/3.0.0-rc1#f5c8ecd42bdf511ad36f57bc702dacd2%1762975621.294",
"sqlite3/3.49.1#8631739a4c9b93bd3d6b753bac548a63%1756234266.869",
"spdlog/1.15.3#3ca0e9e6b83af4d0151e26541d140c86%1754401846.61",
"soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1756234262.318",
@@ -11,7 +11,7 @@
"rapidjson/cci.20220822#1b9d8c2256876a154172dc5cfbe447c6%1754325007.656",
"protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1756234251.614",
"openssl/1.1.1w#a8f0792d7c5121b954578a7149d23e03%1756223730.729",
"nudb/2.0.9#c62cfd501e57055a7e0d8ee3d5e5427d%1756234237.107",
"nudb/2.0.9#fb8dfd1a5557f5e0528114c2da17721e%1763150366.909",
"minizip/1.2.13#9e87d57804bd372d6d1e32b1871517a3%1754325004.374",
"lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1756234228.999",
"libuv/1.46.0#dc28c1f653fa197f00db5b577a6f6011%1754325003.592",
@@ -45,7 +45,7 @@
],
"protobuf/3.21.12": [
null,
"protobuf/3.21.12"
"protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1"
],
"lz4/1.9.4": [
"lz4/1.10.0"

@@ -18,7 +18,7 @@ class ClioConan(ConanFile):
'protobuf/3.21.12',
'grpc/1.50.1',
'openssl/1.1.1w',
'xrpl/2.6.1',
'xrpl/3.0.0-rc1',
'zlib/1.3.1',
'libbacktrace/cci.20210118',
'spdlog/1.15.3',

@@ -43,25 +43,22 @@ RUN apt-get update \
&& rm -rf /var/lib/apt/lists/*

# Install Python tools
ARG PYTHON_VERSION=3.13

RUN add-apt-repository ppa:deadsnakes/ppa \
&& apt-get update \
RUN apt-get update \
&& apt-get install -y --no-install-recommends --no-install-suggests \
python${PYTHON_VERSION} \
python${PYTHON_VERSION}-venv \
python3 \
python3-pip \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* \
&& curl -sS https://bootstrap.pypa.io/get-pip.py | python${PYTHON_VERSION}

# Create a virtual environment for python tools
RUN python${PYTHON_VERSION} -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"
&& rm -rf /var/lib/apt/lists/*

RUN pip install -q --no-cache-dir \
# TODO: Remove this once we switch to newer Ubuntu base image
# lxml 6.0.0 is not compatible with our image
'lxml<6.0.0' \
cmake \
conan==2.20.1 \
conan==2.22.1 \
gcovr \
# We're adding pre-commit to this image as well,
# because clang-tidy workflow requires it
pre-commit

# Install LLVM tools

@@ -5,17 +5,17 @@ It is used in [Clio Github Actions](https://github.com/XRPLF/clio/actions) but c

The image is based on Ubuntu 20.04 and contains:

- ccache 4.11.3
- ccache 4.12.1
- Clang 19
- ClangBuildAnalyzer 1.6.0
- Conan 2.20.1
- Doxygen 1.12
- Conan 2.22.1
- Doxygen 1.15.0
- GCC 15.2.0
- GDB 16.3
- gh 2.74
- git-cliff 2.9.1
- mold 2.40.1
- Python 3.13
- gh 2.82.1
- git-cliff 2.10.1
- mold 2.40.4
- Python 3.8
- and some other useful tools

Conan is set up to build Clio without any additional steps.

@@ -3,6 +3,13 @@
{% set sanitizer_opt_map = {"asan": "address", "tsan": "thread", "ubsan": "undefined"} %}
{% set sanitizer = sanitizer_opt_map[sani] %}

{% set sanitizer_b2_flags_map = {
"address": "context-impl=ucontext address-sanitizer=norecover",
"thread": "context-impl=ucontext thread-sanitizer=norecover",
"undefined": "undefined-sanitizer=norecover"
} %}
{% set sanitizer_b2_flags_str = sanitizer_b2_flags_map[sanitizer] %}

{% set sanitizer_build_flags_str = "-fsanitize=" ~ sanitizer ~ " -g -O1 -fno-omit-frame-pointer" %}
{% set sanitizer_build_flags = sanitizer_build_flags_str.split(' ') %}
{% set sanitizer_link_flags_str = "-fsanitize=" ~ sanitizer %}
@@ -11,7 +18,8 @@
include({{ compiler }})

[options]
boost/*:extra_b2_flags="cxxflags=\"{{ sanitizer_build_flags_str }}\" linkflags=\"{{ sanitizer_link_flags_str }}\""
boost/*:extra_b2_flags="{{ sanitizer_b2_flags_str }}"
boost/*:without_context=False
boost/*:without_stacktrace=True

[conf]
@@ -20,4 +28,10 @@ tools.build:cxxflags+={{ sanitizer_build_flags }}
tools.build:exelinkflags+={{ sanitizer_link_flags }}
tools.build:sharedlinkflags+={{ sanitizer_link_flags }}

tools.info.package_id:confs+=["tools.build:cflags", "tools.build:cxxflags", "tools.build:exelinkflags", "tools.build:sharedlinkflags"]
{% if sanitizer == "address" %}
tools.build:defines+=["BOOST_USE_ASAN", "BOOST_USE_UCONTEXT"]
{% elif sanitizer == "thread" %}
tools.build:defines+=["BOOST_USE_TSAN", "BOOST_USE_UCONTEXT"]
{% endif %}

tools.info.package_id:confs+=["tools.build:cflags", "tools.build:cxxflags", "tools.build:exelinkflags", "tools.build:sharedlinkflags", "tools.build:defines"]

@@ -8,7 +8,7 @@ ARG UBUNTU_VERSION

ARG GCC_MAJOR_VERSION

ARG BUILD_VERSION=1
ARG BUILD_VERSION=0

ARG DEBIAN_FRONTEND=noninteractive
ARG TARGETARCH
@@ -34,6 +34,7 @@ RUN wget --progress=dot:giga https://gcc.gnu.org/pub/gcc/releases/gcc-$GCC_VERSI
WORKDIR /gcc-$GCC_VERSION
RUN ./contrib/download_prerequisites

# hadolint ignore=DL3059
RUN mkdir /gcc-build
WORKDIR /gcc-build
RUN /gcc-$GCC_VERSION/configure \

@@ -1,6 +1,6 @@
services:
clio_develop:
image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
image: ghcr.io/xrplf/clio-ci:77387d8f9f13aea8f23831d221ac3e7683bb69b7
volumes:
- clio_develop_conan_data:/root/.conan2/p
- clio_develop_ccache:/root/.ccache
38 docker/pre-commit/Dockerfile Normal file
@@ -0,0 +1,38 @@
ARG GHCR_REPO=invalid
FROM ${GHCR_REPO}/clio-tools:latest AS clio-tools

# We're using Ubuntu 24.04 to have a more recent version of Python
FROM ubuntu:24.04

ARG DEBIAN_FRONTEND=noninteractive

SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# hadolint ignore=DL3002
USER root
WORKDIR /root

# Install common tools and dependencies
RUN apt-get update \
&& apt-get install -y --no-install-recommends --no-install-suggests \
curl \
git \
libatomic1 \
software-properties-common \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*

# Install Python tools
RUN apt-get update \
&& apt-get install -y --no-install-recommends --no-install-suggests \
python3 \
python3-pip \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*

RUN pip install -q --no-cache-dir --break-system-packages \
pre-commit

COPY --from=clio-tools \
/usr/local/bin/doxygen \
/usr/local/bin/
@@ -8,7 +8,7 @@ ARG TARGETARCH

SHELL ["/bin/bash", "-o", "pipefail", "-c"]

ARG BUILD_VERSION=2
ARG BUILD_VERSION=0

RUN apt-get update \
&& apt-get install -y --no-install-recommends --no-install-suggests \
@@ -24,7 +24,7 @@ RUN apt-get update \

WORKDIR /tmp

ARG MOLD_VERSION=2.40.1
ARG MOLD_VERSION=2.40.4
RUN wget --progress=dot:giga "https://github.com/rui314/mold/archive/refs/tags/v${MOLD_VERSION}.tar.gz" \
&& tar xf "v${MOLD_VERSION}.tar.gz" \
&& cd "mold-${MOLD_VERSION}" \
@@ -34,7 +34,7 @@ RUN wget --progress=dot:giga "https://github.com/rui314/mold/archive/refs/tags/v
&& ninja install \
&& rm -rf /tmp/* /var/tmp/*

ARG CCACHE_VERSION=4.11.3
ARG CCACHE_VERSION=4.12.1
RUN wget --progress=dot:giga "https://github.com/ccache/ccache/releases/download/v${CCACHE_VERSION}/ccache-${CCACHE_VERSION}.tar.gz" \
&& tar xf "ccache-${CCACHE_VERSION}.tar.gz" \
&& cd "ccache-${CCACHE_VERSION}" \
@@ -51,7 +51,7 @@ RUN apt-get update \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*

ARG DOXYGEN_VERSION=1.12.0
ARG DOXYGEN_VERSION=1.15.0
RUN wget --progress=dot:giga "https://github.com/doxygen/doxygen/releases/download/Release_${DOXYGEN_VERSION//./_}/doxygen-${DOXYGEN_VERSION}.src.tar.gz" \
&& tar xf "doxygen-${DOXYGEN_VERSION}.src.tar.gz" \
&& cd "doxygen-${DOXYGEN_VERSION}" \
@@ -71,13 +71,13 @@ RUN wget --progress=dot:giga "https://github.com/aras-p/ClangBuildAnalyzer/archi
&& ninja install \
&& rm -rf /tmp/* /var/tmp/*

ARG GIT_CLIFF_VERSION=2.9.1
ARG GIT_CLIFF_VERSION=2.10.1
RUN wget --progress=dot:giga "https://github.com/orhun/git-cliff/releases/download/v${GIT_CLIFF_VERSION}/git-cliff-${GIT_CLIFF_VERSION}-x86_64-unknown-linux-musl.tar.gz" \
&& tar xf git-cliff-${GIT_CLIFF_VERSION}-x86_64-unknown-linux-musl.tar.gz \
&& mv git-cliff-${GIT_CLIFF_VERSION}/git-cliff /usr/local/bin/git-cliff \
&& rm -rf /tmp/* /var/tmp/*

ARG GH_VERSION=2.74.0
ARG GH_VERSION=2.82.1
RUN wget --progress=dot:giga "https://github.com/cli/cli/releases/download/v${GH_VERSION}/gh_${GH_VERSION}_linux_${TARGETARCH}.tar.gz" \
&& tar xf gh_${GH_VERSION}_linux_${TARGETARCH}.tar.gz \
&& mv gh_${GH_VERSION}_linux_${TARGETARCH}/bin/gh /usr/local/bin/gh \

@@ -15,6 +15,7 @@ EXTRACT_ANON_NSPACES = NO
SORT_MEMBERS_CTORS_1ST = YES

INPUT = ${SOURCE}/src
USE_MDFILE_AS_MAINPAGE = ${SOURCE}/src/README.md
EXCLUDE_SYMBOLS = ${EXCLUDES}
RECURSIVE = YES
HAVE_DOT = ${USE_DOT}

@@ -177,7 +177,7 @@ There are several CMake options you can use to customize the build:

### Generating API docs for Clio

The API documentation for Clio is generated by [Doxygen](https://www.doxygen.nl/index.html). If you want to generate the API documentation when building Clio, make sure to install Doxygen 1.12.0 on your system.
The API documentation for Clio is generated by [Doxygen](https://www.doxygen.nl/index.html). If you want to generate the API documentation when building Clio, make sure to install Doxygen 1.14.0 on your system.

To generate the API docs, please use CMake option `-Ddocs=ON` as described above and build the `docs` target.

@@ -191,7 +191,7 @@ Open the `index.html` file in your browser to see the documentation pages.

It is also possible to build Clio using [Docker](https://www.docker.com/) if you don't want to install all the dependencies on your machine.

```sh
docker run -it ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
docker run -it ghcr.io/xrplf/clio-ci:77387d8f9f13aea8f23831d221ac3e7683bb69b7
git clone https://github.com/XRPLF/clio
cd clio
```

@@ -293,7 +293,7 @@ This document provides a list of all available Clio configuration properties in

- **Required**: True
- **Type**: int
- **Default value**: `1`
- **Default value**: `1000`
- **Constraints**: The minimum value is `1`. The maximum value is `4294967295`.
- **Description**: The maximum size of the server's request queue. If set to `0`, this means there is no queue size limit.

@@ -391,7 +391,7 @@ This document provides a list of all available Clio configuration properties in
- **Type**: double
- **Default value**: `10`
- **Constraints**: The value must be a positive double number.
- **Description**: The number of milliseconds the server waits to shutdown gracefully. If Clio does not shutdown gracefully after the specified value, it will be killed instead.
- **Description**: The number of seconds the server waits to shutdown gracefully. If Clio does not shutdown gracefully after the specified value, it will be killed instead.

### cache.num_diffs

@@ -441,6 +441,22 @@ This document provides a list of all available Clio configuration properties in
- **Constraints**: The value must be one of the following: `sync`, `async`, `none`.
- **Description**: The strategy used for Cache loading.

### cache.file.path

- **Required**: False
- **Type**: string
- **Default value**: None
- **Constraints**: None
- **Description**: The path to a file where the cache will be saved on shutdown and loaded from on startup. If the file cannot be read, Clio will load the cache as usual (from the DB or from rippled).

### cache.file.max_sequence_age

- **Required**: True
- **Type**: int
- **Default value**: `5000`
- **Constraints**: None
- **Description**: The maximum allowed difference between the latest sequence in the DB and in the cache file. If the cache file is too old (its latest sequence is too low), Clio will refuse to use it.
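To make the semantics concrete (an illustrative example, not from the docs): with `max_sequence_age` at its default of `5000`, if the latest sequence in the DB is 95,000,000, then a cache file whose stored latest sequence is below 94,995,000 is considered stale and ignored, and Clio falls back to the normal cache load path.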

### log.channels.[].channel

- **Required**: False

@@ -951,7 +951,7 @@ span.arrowhead {
border-color: var(--primary-color);
}

#nav-tree ul li:first-child > div > a {
#nav-tree-contents > ul > li:first-child > div > a {
opacity: 0;
pointer-events: none;
}

@@ -61,7 +61,7 @@
"ip": "0.0.0.0",
"port": 51233,
// Max number of requests to queue up before rejecting further requests.
// Defaults to 0, which disables the limit.
// Defaults to 1000 (use 0 to make the queue unbounded).
"max_queue_size": 500,
// If request contains header with authorization, Clio will check if it matches the prefix 'Password ' + this value's sha256 hash
// If matches, the request will be considered as admin request
@@ -137,7 +137,11 @@
// "num_cursors_from_account": 3200, // Read the cursors from the account table until we have enough cursors to partition the ledger to load concurrently.
"num_markers": 48, // The number of markers is the number of coroutines to load the cache concurrently.
"page_fetch_size": 512, // The number of rows to load for each page.
"load": "async" // "sync" to load cache synchronously or "async" to load cache asynchronously or "none"/"no" to turn off the cache.
"load": "async", // "sync" to load cache synchronously or "async" to load cache asynchronously or "none"/"no" to turn off the cache.
"file": {
"path": "./cache.bin",
"max_sequence_age": 5000
}
},
"prometheus": {
"enabled": true,

@@ -77,7 +77,7 @@ It's possible to configure `minimum`, `maximum` and `default` version like so:

All of the above are optional.

Clio will fallback to hardcoded defaults when these values are not specified in the config file, or if the configured values are outside of the minimum and maximum supported versions hardcoded in [src/rpc/common/APIVersion.h](../src/rpc/common/APIVersion.hpp).
Clio will fallback to hardcoded defaults when these values are not specified in the config file, or if the configured values are outside of the minimum and maximum supported versions hardcoded in [src/rpc/common/APIVersion.hpp](../src/rpc/common/APIVersion.hpp).

> [!TIP]
> See the [example-config.json](../docs/examples/config/example-config.json) for more details.

@@ -36,19 +36,19 @@ EOF
exit 0
fi

# Check version of doxygen is at least 1.12
# Check version of doxygen is at least 1.14
version=$($DOXYGEN --version | grep -o '[0-9\.]*')

if [[ "1.12.0" > "$version" ]]; then
if [[ "1.14.0" > "$version" ]]; then
# No hard error if doxygen version is not the one we want - let CI deal with it
cat <<EOF

ERROR
-----------------------------------------------------------------------------
A minimum of version 1.12 of `which doxygen` is required.
Your version is $version. Please upgrade it for next time.
A minimum of version 1.14 of `which doxygen` is required.
Your version is $version. Please upgrade it.

Your changes may fail to pass CI once pushed.
Your changes may fail CI checks.
-----------------------------------------------------------------------------

EOF

@@ -44,8 +44,13 @@ def fix_colon_spacing(cpp_content: str) -> str:


def fix_indentation(cpp_content: str) -> str:
if "JSON(" not in cpp_content:
return cpp_content

lines = cpp_content.splitlines()

ends_with_newline = cpp_content.endswith('\n')

def find_indentation(line: str) -> int:
return len(line) - len(line.lstrip())

@@ -66,7 +71,11 @@ def fix_indentation(cpp_content: str) -> str:
break
lines[i] = lines[i][by_how_much:] if by_how_much > 0 else " " * (-by_how_much) + lines[i]

return "\n".join(lines) + "\n"
result = "\n".join(lines)

if ends_with_newline:
result += "\n"
return result


def process_file(file_path: Path, dry_run: bool) -> bool:

@@ -2,7 +2,6 @@ add_subdirectory(util)
add_subdirectory(data)
add_subdirectory(cluster)
add_subdirectory(etl)
add_subdirectory(etlng)
add_subdirectory(feed)
add_subdirectory(rpc)
add_subdirectory(web)

20 src/README.md Normal file
@@ -0,0 +1,20 @@
# Clio API server

## Introduction

Clio is an XRP Ledger API server optimized for RPC calls over WebSocket or JSON-RPC.
It stores validated historical ledger and transaction data in a more space-efficient format, using up to four times less space than [rippled](https://github.com/XRPLF/rippled).

Clio can be configured to store data in [Apache Cassandra](https://cassandra.apache.org/_/index.html) or
[ScyllaDB](https://www.scylladb.com/), enabling scalable read throughput. Multiple Clio nodes can share
access to the same dataset, which allows for a highly available cluster of Clio nodes without the need for redundant
data storage or computation.

## Develop

As you prepare to develop code for Clio, please be sure you are aware of our current
[Contribution guidelines](https://github.com/XRPLF/clio/blob/develop/CONTRIBUTING.md).

Read about @ref "rpc" carefully to know more about writing your own handlers for Clio.
@@ -5,10 +5,9 @@ target_link_libraries(
clio_app
PUBLIC clio_cluster
clio_etl
clio_etlng
clio_feed
clio_web
clio_rpc
clio_migration
clio_rpc
clio_web
PRIVATE Boost::program_options
)

@@ -25,11 +25,10 @@
#include "data/AmendmentCenter.hpp"
#include "data/BackendFactory.hpp"
#include "data/LedgerCache.hpp"
#include "data/LedgerCacheSaver.hpp"
#include "etl/ETLService.hpp"
#include "etl/LoadBalancer.hpp"
#include "etl/NetworkValidatedLedgers.hpp"
#include "etlng/LoadBalancer.hpp"
#include "etlng/LoadBalancerInterface.hpp"
#include "feed/SubscriptionManager.hpp"
#include "migration/MigrationInspectorFactory.hpp"
#include "rpc/Counters.hpp"
@@ -57,6 +56,7 @@
#include <cstdlib>
#include <memory>
#include <optional>
#include <string>
#include <thread>
#include <utility>
#include <vector>
@@ -99,20 +99,23 @@ ClioApplication::run(bool const useNgWebServer)
auto const threads = config_.get<uint16_t>("io_threads");
LOG(util::LogService::info()) << "Number of io threads = " << threads;

// Similarly we need a context to run ETL on
// In the future we can remove the raw ioc and use ctx instead
// This context should be above ioc because its reference is getting into tasks inside ioc
util::async::CoroExecutionContext ctx{threads};

// IO context to handle all incoming requests, as well as other things.
// This is not the only io context in the application.
boost::asio::io_context ioc{threads};

// Similarly we need a context to run ETLng on
// In the future we can remove the raw ioc and use ctx instead
util::async::CoroExecutionContext ctx{threads};

// Rate limiter, to prevent abuse
auto whitelistHandler = web::dosguard::WhitelistHandler{config_};
auto const dosguardWeights = web::dosguard::Weights::make(config_);
auto dosGuard = web::dosguard::DOSGuard{config_, whitelistHandler, dosguardWeights};
auto sweepHandler = web::dosguard::IntervalSweepHandler{config_, ioc, dosGuard};

auto cache = data::LedgerCache{};
auto cacheSaver = data::LedgerCacheSaver{config_, cache};

// Interface to the database
auto backend = data::makeBackend(config_, cache);
@@ -142,20 +145,12 @@ ClioApplication::run(bool const useNgWebServer)
// ETL uses the balancer to extract data.
// The server uses the balancer to forward RPCs to a rippled node.
// The balancer itself publishes to streams (transactions_proposed and accounts_proposed)
auto balancer = [&] -> std::shared_ptr<etlng::LoadBalancerInterface> {
if (config_.get<bool>("__ng_etl")) {
return etlng::LoadBalancer::makeLoadBalancer(
auto balancer = etl::LoadBalancer::makeLoadBalancer(
config_, ioc, backend, subscriptions, std::make_unique<util::MTRandomGenerator>(), ledgers
);
}

return etl::LoadBalancer::makeLoadBalancer(
config_, ioc, backend, subscriptions, std::make_unique<util::MTRandomGenerator>(), ledgers
);
}();

// ETL is responsible for writing and publishing to streams. In read-only mode, ETL only publishes
auto etl = etl::ETLService::makeETLService(config_, ioc, ctx, backend, subscriptions, balancer, ledgers);
auto etl = etl::ETLService::makeETLService(config_, ctx, backend, subscriptions, balancer, ledgers);

auto workQueue = rpc::WorkQueue::makeWorkQueue(config_);
auto counters = rpc::Counters::makeCounters(workQueue);
@@ -201,7 +196,7 @@ ClioApplication::run(bool const useNgWebServer)
}

appStopper_.setOnStop(
Stopper::makeOnStopCallback(httpServer.value(), *balancer, *etl, *subscriptions, *backend, ioc)
Stopper::makeOnStopCallback(httpServer.value(), *balancer, *etl, *subscriptions, *backend, cacheSaver, ioc)
);

// Blocks until stopped.
@@ -216,6 +211,9 @@ ClioApplication::run(bool const useNgWebServer)
auto handler = std::make_shared<web::RPCServerHandler<RPCEngineType>>(config_, backend, rpcEngine, etl, dosGuard);

auto const httpServer = web::makeHttpServer(config_, ioc, dosGuard, handler, cache);
appStopper_.setOnStop(
Stopper::makeOnStopCallback(*httpServer, *balancer, *etl, *subscriptions, *backend, cacheSaver, ioc)
);

// Blocks until stopped.
// When stopped, shared_ptrs fall out of scope

@@ -20,12 +20,13 @@
#pragma once

#include "data/BackendInterface.hpp"
#include "etlng/ETLServiceInterface.hpp"
#include "etlng/LoadBalancerInterface.hpp"
#include "data/LedgerCacheSaver.hpp"
#include "etl/ETLServiceInterface.hpp"
#include "etl/LoadBalancerInterface.hpp"
#include "feed/SubscriptionManagerInterface.hpp"
#include "util/CoroutineGroup.hpp"
#include "util/log/Logger.hpp"
#include "web/ng/Server.hpp"
#include "web/interface/Concepts.hpp"

#include <boost/asio/executor_work_guard.hpp>
#include <boost/asio/io_context.hpp>
@@ -71,21 +72,25 @@ public:
* @param etl The ETL service to stop.
* @param subscriptions The subscription manager to stop.
* @param backend The backend to stop.
* @param cacheSaver The ledger cache saver
* @param ioc The io_context to stop.
* @return The callback to be called on application stop.
*/
template <web::ng::SomeServer ServerType>
template <web::SomeServer ServerType, data::SomeLedgerCacheSaver LedgerCacheSaverType>
static std::function<void(boost::asio::yield_context)>
makeOnStopCallback(
ServerType& server,
etlng::LoadBalancerInterface& balancer,
etlng::ETLServiceInterface& etl,
etl::LoadBalancerInterface& balancer,
etl::ETLServiceInterface& etl,
feed::SubscriptionManagerInterface& subscriptions,
data::BackendInterface& backend,
LedgerCacheSaverType& cacheSaver,
boost::asio::io_context& ioc
)
{
return [&](boost::asio::yield_context yield) {
cacheSaver.save();

util::CoroutineGroup coroutineGroup{yield};
coroutineGroup.spawn(yield, [&server](auto innerYield) {
server.stop(innerYield);
@@ -106,6 +111,8 @@ public:
backend.waitForWritesToFinish();
LOG(util::LogService::info()) << "Backend writes finished";

cacheSaver.waitToFinish();

ioc.stop();
LOG(util::LogService::info()) << "io_context stopped";

@@ -147,6 +147,11 @@ struct Amendments {
REGISTER(fixAMMClawbackRounding);
REGISTER(fixMPTDeliveredAmount);
REGISTER(fixPriceOracleOrder);
REGISTER(DynamicMPT);
REGISTER(fixDelegateV1_1);
REGISTER(fixDirectoryLimit);
REGISTER(fixIncludeKeyletFields);
REGISTER(fixTokenEscrowV1);

// Obsolete but supported by libxrpl
REGISTER(CryptoConditionsSuite);

@@ -46,6 +46,7 @@ namespace data {
inline std::shared_ptr<BackendInterface>
makeBackend(util::config::ClioConfigDefinition const& config, data::LedgerCacheInterface& cache)
{
using namespace cassandra::impl;
static util::Logger const log{"Backend"}; // NOLINT(readability-identifier-naming)
LOG(log.info()) << "Constructing BackendInterface";

@@ -56,7 +57,7 @@ makeBackend(util::config::ClioConfigDefinition const& config, data::LedgerCacheI

if (boost::iequals(type, "cassandra")) {
auto const cfg = config.getObject("database." + type);
if (cfg.getValueView("provider").asString() == toString(cassandra::impl::Provider::Keyspace)) {
if (providerFromString(cfg.getValueView("provider").asString()) == Provider::Keyspace) {
backend = std::make_shared<data::cassandra::KeyspaceBackend>(
data::cassandra::SettingsProvider{cfg}, cache, readOnly
);

@@ -270,7 +270,7 @@ BackendInterface::updateRange(uint32_t newMax)
{
std::scoped_lock const lck(rngMtx_);

if (range_.has_value() && newMax < range_->maxSequence) {
if (range_.has_value() and newMax < range_->maxSequence) {
ASSERT(
false,
"Range shouldn't exist yet or newMax should be at least range->maxSequence. newMax = {}, "
@@ -280,11 +280,14 @@ BackendInterface::updateRange(uint32_t newMax)
);
}

if (!range_.has_value()) {
range_ = {.minSequence = newMax, .maxSequence = newMax};
} else {
range_->maxSequence = newMax;
updateRangeImpl(newMax);
}

void
BackendInterface::forceUpdateRange(uint32_t newMax)
{
std::scoped_lock const lck(rngMtx_);
updateRangeImpl(newMax);
}

void
@@ -410,4 +413,14 @@ BackendInterface::fetchFees(std::uint32_t const seq, boost::asio::yield_context
return fees;
}

void
BackendInterface::updateRangeImpl(uint32_t newMax)
{
if (!range_.has_value()) {
range_ = {.minSequence = newMax, .maxSequence = newMax};
} else {
range_->maxSequence = newMax;
}
}

} // namespace data
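To summarize the refactoring in the hunks above: both entry points now funnel into a private `updateRangeImpl`, with `updateRange` keeping its monotonicity assertion and `forceUpdateRange` skipping it. A simplified, self-contained sketch of that pattern (not the actual Clio class):

```cpp
#include <cassert>
#include <cstdint>
#include <mutex>
#include <optional>
#include <utility>

// Simplified sketch of the checked/unchecked range-update split.
class RangeKeeper {
public:
    // Checked update: asserts the new maximum never moves backwards.
    void updateRange(uint32_t newMax) {
        std::scoped_lock const lck(mtx_);
        assert(!range_.has_value() || newMax >= range_->second);
        updateRangeImpl(newMax);
    }

    // Unchecked update: same bookkeeping, no monotonicity assertion.
    void forceUpdateRange(uint32_t newMax) {
        std::scoped_lock const lck(mtx_);
        updateRangeImpl(newMax);
    }

private:
    // Shared bookkeeping extracted so both entry points stay in sync.
    void updateRangeImpl(uint32_t newMax) {
        if (!range_.has_value()) {
            range_.emplace(newMax, newMax);  // first update sets min and max
        } else {
            range_->second = newMax;
        }
    }

    std::mutex mtx_;
    std::optional<std::pair<uint32_t, uint32_t>> range_;  // {min, max}
};
```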

@@ -249,6 +249,15 @@ public:
void
updateRange(uint32_t newMax);

/**
* @brief Updates the range of sequences that are stored in the DB without any checks
* @note In most cases you should use updateRange() instead
*
* @param newMax The new maximum sequence available
*/
void
forceUpdateRange(uint32_t newMax);

/**
* @brief Sets the range of sequences that are stored in the DB.
*
@@ -776,6 +785,9 @@ private:
*/
virtual bool
doFinishWrites() = 0;

void
updateRangeImpl(uint32_t newMax);
};

} // namespace data

@@ -5,6 +5,7 @@ target_sources(
BackendCounters.cpp
BackendInterface.cpp
LedgerCache.cpp
LedgerCacheSaver.cpp
LedgerHeaderCache.cpp
cassandra/impl/Future.cpp
cassandra/impl/Cluster.cpp
@@ -14,6 +15,9 @@ target_sources(
cassandra/impl/SslContext.cpp
cassandra/Handle.cpp
cassandra/SettingsProvider.cpp
impl/InputFile.cpp
impl/LedgerCacheFile.cpp
impl/OutputFile.cpp
)

target_link_libraries(clio_data PUBLIC cassandra-cpp-driver::cassandra-cpp-driver clio_util)

@@ -189,10 +189,11 @@ public:
auto const nftUris = executor_.readEach(yield, selectNFTURIStatements);

for (auto i = 0u; i < nftIDs.size(); i++) {
if (auto const maybeRow = nftInfos[i].template get<uint32_t, ripple::AccountID, bool>(); maybeRow) {
if (auto const maybeRow = nftInfos[i].template get<uint32_t, ripple::AccountID, bool>();
maybeRow.has_value()) {
auto [seq, owner, isBurned] = *maybeRow;
NFT nft(nftIDs[i], seq, owner, isBurned);
if (auto const maybeUri = nftUris[i].template get<ripple::Blob>(); maybeUri)
if (auto const maybeUri = nftUris[i].template get<ripple::Blob>(); maybeUri.has_value())
nft.uri = *maybeUri;
ret.nfts.push_back(nft);
}

@@ -57,9 +57,9 @@ namespace data::cassandra {
/**
* @brief Implements @ref CassandraBackendFamily for Keyspace
*
* @tparam SettingsProviderType The settings provider type to use
* @tparam ExecutionStrategyType The execution strategy type to use
* @tparam FetchLedgerCacheType The ledger header cache type to use
* @tparam SettingsProviderType The settings provider type
* @tparam ExecutionStrategyType The execution strategy type
* @tparam FetchLedgerCacheType The ledger header cache type
*/
template <
SomeSettingsProvider SettingsProviderType,
@@ -101,9 +101,9 @@ public:
// !range_.has_value() means the table 'ledger_range' is not populated;
// This would be the first write to the table.
// In this case, insert both min_sequence/max_sequence range into the table.
if (not(range_.has_value())) {
executor_.writeSync(schema_->insertLedgerRange, false, ledgerSequence_);
executor_.writeSync(schema_->insertLedgerRange, true, ledgerSequence_);
if (not range_.has_value()) {
executor_.writeSync(schema_->insertLedgerRange, /* isLatestLedger =*/false, ledgerSequence_);
executor_.writeSync(schema_->insertLedgerRange, /* isLatestLedger =*/true, ledgerSequence_);
}

if (not this->executeSyncUpdate(schema_->updateLedgerRange.bind(ledgerSequence_, true, ledgerSequence_ - 1))) {
@@ -130,30 +130,30 @@ public:
// Keyspace and ScyllaDB uses the same logic for taxon-filtered queries
nftIDs = fetchNFTIDsByTaxon(issuer, *taxon, limit, cursorIn, yield);
} else {
// --- Amazon Keyspaces Workflow for non-taxon queries ---
// Amazon Keyspaces Workflow for non-taxon queries
auto const startTaxon = cursorIn.has_value() ? ripple::nft::toUInt32(ripple::nft::getTaxon(*cursorIn)) : 0;
auto const startTokenID = cursorIn.value_or(ripple::uint256(0));

Statement firstQuery = schema_->selectNFTIDsByIssuerTaxon.bind(issuer);
Statement const firstQuery = schema_->selectNFTIDsByIssuerTaxon.bind(issuer);
firstQuery.bindAt(1, startTaxon);
firstQuery.bindAt(2, startTokenID);
firstQuery.bindAt(3, Limit{limit});

auto const firstRes = executor_.read(yield, firstQuery);
if (firstRes) {
for (auto const [nftID] : extract<ripple::uint256>(firstRes.value()))
if (firstRes.has_value()) {
for (auto const [nftID] : extract<ripple::uint256>(*firstRes))
nftIDs.push_back(nftID);
}

if (nftIDs.size() < limit) {
auto const remainingLimit = limit - nftIDs.size();
Statement secondQuery = schema_->selectNFTsAfterTaxonKeyspaces.bind(issuer);
Statement const secondQuery = schema_->selectNFTsAfterTaxonKeyspaces.bind(issuer);
secondQuery.bindAt(1, startTaxon);
secondQuery.bindAt(2, Limit{remainingLimit});

auto const secondRes = executor_.read(yield, secondQuery);
if (secondRes) {
for (auto const [nftID] : extract<ripple::uint256>(secondRes.value()))
if (secondRes.has_value()) {
for (auto const [nftID] : extract<ripple::uint256>(*secondRes))
nftIDs.push_back(nftID);
}
}
@@ -163,7 +163,7 @@ public:

/**
* @brief (Unsupported in Keyspaces) Fetches account root object indexes by page.
* * @note Loading the cache by enumerating all accounts is currently unsupported by the AWS Keyspaces backend.
* @note Loading the cache by enumerating all accounts is currently unsupported by the AWS Keyspaces backend.
* This function's logic relies on "PER PARTITION LIMIT 1", which Keyspaces does not support, and there is
* no efficient alternative. This is acceptable as the cache is primarily loaded via diffs. Calling this
* function will throw an exception.
@@ -197,14 +197,14 @@ private:
) const
{
std::vector<ripple::uint256> nftIDs;
Statement statement = schema_->selectNFTIDsByIssuerTaxon.bind(issuer);
Statement const statement = schema_->selectNFTIDsByIssuerTaxon.bind(issuer);
statement.bindAt(1, taxon);
statement.bindAt(2, cursorIn.value_or(ripple::uint256(0)));
statement.bindAt(3, Limit{limit});

auto const res = executor_.read(yield, statement);
if (res && res.value().hasRows()) {
for (auto const [nftID] : extract<ripple::uint256>(res.value()))
if (res.has_value() && res->hasRows()) {
for (auto const [nftID] : extract<ripple::uint256>(*res))
nftIDs.push_back(nftID);
}
return nftIDs;
@@ -229,8 +229,8 @@ private:
firstQuery.bindAt(3, Limit{limit});

auto const firstRes = executor_.read(yield, firstQuery);
if (firstRes) {
for (auto const [nftID] : extract<ripple::uint256>(firstRes.value()))
if (firstRes.has_value()) {
for (auto const [nftID] : extract<ripple::uint256>(*firstRes))
nftIDs.push_back(nftID);
}

@@ -241,8 +241,8 @@ private:
secondQuery.bindAt(2, Limit{remainingLimit});

auto const secondRes = executor_.read(yield, secondQuery);
if (secondRes) {
for (auto const [nftID] : extract<ripple::uint256>(secondRes.value()))
if (secondRes.has_value()) {
for (auto const [nftID] : extract<ripple::uint256>(*secondRes))
nftIDs.push_back(nftID);
}
}
@@ -291,10 +291,11 @@ private:

// Combine the results into final NFT objects.
for (auto i = 0u; i < nftIDs.size(); ++i) {
if (auto const maybeRow = nftInfos[i].template get<uint32_t, ripple::AccountID, bool>(); maybeRow) {
if (auto const maybeRow = nftInfos[i].template get<uint32_t, ripple::AccountID, bool>();
maybeRow.has_value()) {
auto [seq, owner, isBurned] = *maybeRow;
NFT nft(nftIDs[i], seq, owner, isBurned);
if (auto const maybeUri = nftUris[i].template get<ripple::Blob>(); maybeUri)
if (auto const maybeUri = nftUris[i].template get<ripple::Blob>(); maybeUri.has_value())
nft.uri = *maybeUri;
ret.nfts.push_back(nft);
}
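The recurring change in these hunks replaces implicit truthiness checks on optional-like results with explicit `has_value()` calls and `*` dereference. A minimal standalone illustration of the style difference, using `std::optional`:

```cpp
#include <iostream>
#include <optional>

// Illustrative only: the same check written implicitly and explicitly.
std::optional<int> findAnswer(bool found) {
    return found ? std::optional<int>{42} : std::nullopt;
}

int main() {
    auto const maybeValue = findAnswer(true);

    // Implicit conversion to bool (the style the diff moves away from):
    if (maybeValue)
        std::cout << "implicit: " << maybeValue.value() << '\n';

    // Explicit check and dereference (the style the diff adopts):
    if (maybeValue.has_value())
        std::cout << "explicit: " << *maybeValue << '\n';
}
```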

@@ -20,16 +20,22 @@
#include "data/LedgerCache.hpp"

#include "data/Types.hpp"
#include "etlng/Models.hpp"
#include "data/impl/LedgerCacheFile.hpp"
#include "etl/Models.hpp"
#include "util/Assert.hpp"

#include <xrpl/basics/base_uint.h>

#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <map>
#include <mutex>
#include <optional>
#include <shared_mutex>
#include <string>
#include <utility>
#include <vector>

namespace data {
@@ -89,7 +95,7 @@ LedgerCache::update(std::vector<LedgerObject> const& objs, uint32_t seq, bool is
}

void
LedgerCache::update(std::vector<etlng::model::Object> const& objs, uint32_t seq)
LedgerCache::update(std::vector<etl::model::Object> const& objs, uint32_t seq)
{
if (disabled_)
return;
@@ -251,4 +257,34 @@ LedgerCache::getSuccessorHitRate() const
return static_cast<float>(successorHitCounter_.get().value()) / successorReqCounter_.get().value();
}

std::expected<void, std::string>
LedgerCache::saveToFile(std::string const& path) const
{
if (not isFull()) {
return std::unexpected{"Ledger cache is not full"};
}

impl::LedgerCacheFile file{path};
std::shared_lock const lock{mtx_};
impl::LedgerCacheFile::DataView const data{.latestSeq = latestSeq_, .map = map_, .deleted = deleted_};
return file.write(data);
}

std::expected<void, std::string>
LedgerCache::loadFromFile(std::string const& path, uint32_t minLatestSequence)
{
impl::LedgerCacheFile file{path};
auto data = file.read(minLatestSequence);
if (not data.has_value()) {
return std::unexpected(std::move(data).error());
}
auto [latestSeq, map, deleted] = std::move(data).value();
std::unique_lock const lock{mtx_};
latestSeq_ = latestSeq;
map_ = std::move(map);
deleted_ = std::move(deleted);
full_ = true;
return {};
}

} // namespace data

@@ -21,7 +21,7 @@

#include "data/LedgerCacheInterface.hpp"
#include "data/Types.hpp"
#include "etlng/Models.hpp"
#include "etl/Models.hpp"
#include "util/prometheus/Bool.hpp"
#include "util/prometheus/Counter.hpp"
#include "util/prometheus/Label.hpp"
@@ -37,6 +37,7 @@
#include <map>
#include <optional>
#include <shared_mutex>
#include <string>
#include <unordered_set>
#include <vector>

@@ -46,11 +47,16 @@ namespace data {
* @brief Cache for an entire ledger.
*/
class LedgerCache : public LedgerCacheInterface {
public:
/** @brief An entry of the cache */
struct CacheEntry {
uint32_t seq = 0;
Blob blob;
};

using CacheMap = std::map<ripple::uint256, CacheEntry>;

private:
// counters for fetchLedgerObject(s) hit rate
std::reference_wrapper<util::prometheus::CounterInt> objectReqCounter_{PrometheusService::counterInt(
"ledger_cache_counter_total_number",
@@ -73,8 +79,8 @@ class LedgerCache : public LedgerCacheInterface {
util::prometheus::Labels({{"type", "cache_hit"}, {"fetch", "successor_key"}})
)};

std::map<ripple::uint256, CacheEntry> map_;
std::map<ripple::uint256, CacheEntry> deleted_;
CacheMap map_;
CacheMap deleted_;

mutable std::shared_mutex mtx_;
std::condition_variable_any cv_;
@@ -98,7 +104,7 @@ public:
update(std::vector<LedgerObject> const& objs, uint32_t seq, bool isBackground) override;

void
update(std::vector<etlng::model::Object> const& objs, uint32_t seq) override;
update(std::vector<etl::model::Object> const& objs, uint32_t seq) override;

std::optional<Blob>
get(ripple::uint256 const& key, uint32_t seq) const override;
@@ -138,6 +144,12 @@ public:

void
waitUntilCacheContainsSeq(uint32_t seq) override;

std::expected<void, std::string>
saveToFile(std::string const& path) const override;

std::expected<void, std::string>
loadFromFile(std::string const& path, uint32_t minLatestSequence) override;
};

} // namespace data

@@ -20,14 +20,16 @@
#pragma once

#include "data/Types.hpp"
#include "etlng/Models.hpp"
#include "etl/Models.hpp"

#include <xrpl/basics/base_uint.h>
#include <xrpl/basics/hardened_hash.h>

#include <cstddef>
#include <cstdint>
#include <expected>
#include <optional>
#include <string>
#include <vector>

namespace data {
@@ -63,7 +65,7 @@ public:
* @param seq The sequence to update cache for
*/
virtual void
update(std::vector<etlng::model::Object> const& objs, uint32_t seq) = 0;
update(std::vector<etl::model::Object> const& objs, uint32_t seq) = 0;

/**
* @brief Fetch a cached object by its key and sequence number.
@@ -168,6 +170,27 @@ public:
*/
virtual void
waitUntilCacheContainsSeq(uint32_t seq) = 0;

/**
* @brief Save the cache to file
* @note This operation takes about 7 seconds and holds a shared lock on mtx_
*
* @param path The file path to save the cache to
* @return An error as a string if any
*/
[[nodiscard]] virtual std::expected<void, std::string>
saveToFile(std::string const& path) const = 0;

/**
* @brief Load the cache from file
* @note This operation takes about 7 seconds and holds mtx_ exclusively locked
*
* @param path The file path to load data from
* @param minLatestSequence The minimum allowed value of the latestLedgerSequence in cache file
* @return An error as a string if any
*/
[[nodiscard]] virtual std::expected<void, std::string>
loadFromFile(std::string const& path, uint32_t minLatestSequence) = 0;
};

} // namespace data
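For orientation, here is how a caller might consume this `std::expected`-returning interface (C++23). This is an illustrative sketch against the declarations above, not code from the repository; `CacheT`, `saveOrLog`, and `loadOrFallBack` are hypothetical names:

```cpp
#include <cstdint>
#include <iostream>
#include <string>

// CacheT stands in for any LedgerCacheInterface implementation.
// std::expected<void, std::string> carries success (empty) or an error message.
template <typename CacheT>
void saveOrLog(CacheT const& cache, std::string const& path)
{
    if (auto const result = cache.saveToFile(path); not result.has_value())
        std::cerr << "Cache not saved: " << result.error() << '\n';
}

template <typename CacheT>
void loadOrFallBack(CacheT& cache, std::string const& path, uint32_t dbLatestSeq, uint32_t maxAge)
{
    // Mirror the cache.file.max_sequence_age semantics described earlier:
    // only accept files whose latest sequence is close enough to the DB's.
    uint32_t const minLatestSequence = dbLatestSeq > maxAge ? dbLatestSeq - maxAge : 0;
    if (auto const result = cache.loadFromFile(path, minLatestSequence); not result.has_value())
        std::cerr << "Loading cache from DB/rippled instead: " << result.error() << '\n';
}
```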

70 src/data/LedgerCacheSaver.cpp Normal file
@@ -0,0 +1,70 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include "data/LedgerCacheSaver.hpp"

#include "data/LedgerCacheInterface.hpp"
#include "util/Assert.hpp"
#include "util/Profiler.hpp"
#include "util/log/Logger.hpp"

#include <string>
#include <thread>

namespace data {

LedgerCacheSaver::LedgerCacheSaver(util::config::ClioConfigDefinition const& config, LedgerCacheInterface const& cache)
: cacheFilePath_(config.maybeValue<std::string>("cache.file.path")), cache_(cache)
{
}

LedgerCacheSaver::~LedgerCacheSaver()
{
waitToFinish();
}

void
LedgerCacheSaver::save()
{
ASSERT(not savingThread_.has_value(), "Multiple save() calls are not allowed");
savingThread_ = std::thread([this]() {
if (not cacheFilePath_.has_value()) {
return;
}

LOG(util::LogService::info()) << "Saving ledger cache to " << *cacheFilePath_;
if (auto const [success, durationMs] = util::timed([&]() { return cache_.get().saveToFile(*cacheFilePath_); });
success.has_value()) {
LOG(util::LogService::info()) << "Successfully saved ledger cache in " << durationMs << " ms";
} else {
LOG(util::LogService::error()) << "Error saving LedgerCache to file: " << success.error();
}
});
}

void
LedgerCacheSaver::waitToFinish()
{
if (savingThread_.has_value() and savingThread_->joinable()) {
savingThread_->join();
}
savingThread_.reset();
}

} // namespace data
|
||||
src/data/LedgerCacheSaver.hpp (normal file, 93 lines)
@@ -0,0 +1,93 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2025, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include "data/LedgerCacheInterface.hpp"
#include "util/config/ConfigDefinition.hpp"

#include <concepts>
#include <functional>
#include <optional>
#include <string>
#include <thread>

namespace data {

/**
 * @brief A concept for a class that can save ledger cache asynchronously.
 *
 * This concept defines the interface requirements for any type that manages
 * asynchronous saving of ledger cache to persistent storage.
 */
template <typename T>
concept SomeLedgerCacheSaver = requires(T a) {
    { a.save() } -> std::same_as<void>;
    { a.waitToFinish() } -> std::same_as<void>;
};

/**
 * @brief Manages asynchronous saving of ledger cache to a file.
 *
 * This class provides functionality to save the ledger cache to a file in a separate thread,
 * allowing the main application to continue without blocking. The file path is configured
 * through the application's configuration system.
 */
class LedgerCacheSaver {
    std::optional<std::string> cacheFilePath_;
    std::reference_wrapper<LedgerCacheInterface const> cache_;
    std::optional<std::thread> savingThread_;

public:
    /**
     * @brief Constructs a LedgerCacheSaver instance.
     *
     * @param config The configuration object containing the cache file path setting
     * @param cache Reference to the ledger cache interface to be saved
     */
    LedgerCacheSaver(util::config::ClioConfigDefinition const& config, LedgerCacheInterface const& cache);

    /**
     * @brief Destructor that ensures the saving thread is properly joined.
     *
     * Waits for any ongoing save operation to complete before destruction.
     */
    ~LedgerCacheSaver();

    /**
     * @brief Initiates an asynchronous save operation of the ledger cache.
     *
     * Spawns a new thread that saves the ledger cache to the configured file path.
     * If no file path is configured, the operation is skipped. Logs the progress
     * and result of the save operation.
     */
    void
    save();

    /**
     * @brief Waits for the saving thread to complete.
     *
     * Blocks until the saving operation finishes if a thread is currently active.
     * Safe to call multiple times or when no save operation is in progress.
     */
    void
    waitToFinish();
};

} // namespace data
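A minimal usage sketch of the new class above (illustrative only; the wrapper function and its wiring are assumptions, not code from this diff):

// Sketch: construct the saver with the app config and cache, kick off the
// background save, and let RAII join the thread on scope exit.
#include "data/LedgerCacheSaver.hpp"

static_assert(data::SomeLedgerCacheSaver<data::LedgerCacheSaver>);  // the class models its own concept

void
saveCacheOnShutdown(util::config::ClioConfigDefinition const& config, data::LedgerCacheInterface const& cache)
{
    data::LedgerCacheSaver saver{config, cache};  // picks up "cache.file.path" if configured
    saver.save();           // spawns the saving thread; returns immediately (no-op without a path)
    saver.waitToFinish();   // optional: ~LedgerCacheSaver() would join the thread anyway
}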
@@ -1,8 +1,10 @@
# Backend

@page "backend" Backend

The backend of Clio is responsible for handling the proper reading and writing of past ledger data from and to a given database. Currently, Cassandra and ScyllaDB are the only supported databases that are production-ready.

To support additional database types, you can create new classes that implement the virtual methods in [BackendInterface.h](https://github.com/XRPLF/clio/blob/develop/src/data/BackendInterface.hpp). Then, leveraging the Factory Object Design Pattern, modify [BackendFactory.h](https://github.com/XRPLF/clio/blob/develop/src/data/BackendFactory.hpp) with logic that returns the new database interface if the relevant `type` is provided in Clio's configuration file.
To support additional database types, you can create new classes that implement the virtual methods in [BackendInterface.hpp](https://github.com/XRPLF/clio/blob/develop/src/data/BackendInterface.hpp). Then, leveraging the Factory Object Design Pattern, modify [BackendFactory.hpp](https://github.com/XRPLF/clio/blob/develop/src/data/BackendFactory.hpp) with logic that returns the new database interface if the relevant `type` is provided in Clio's configuration file.

## Data Model
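A hedged sketch of the factory extension described in the hunk above. All specifics here are assumptions for illustration: `MyDbBackend` and the `"mydb"` type are hypothetical, and the real BackendFactory signature differs.

// Illustrative only: dispatch on the configured database `type`.
#include <boost/algorithm/string/predicate.hpp>

std::shared_ptr<BackendInterface>
makeBackend(util::config::ClioConfigDefinition const& config)
{
    auto const type = config.get<std::string>("database.type");  // assumed config key
    if (boost::iequals(type, "cassandra"))
        return std::make_shared<data::cassandra::CassandraBackend>(/* settings... */);
    if (boost::iequals(type, "mydb"))  // hypothetical new backend type
        return std::make_shared<MyDbBackend>(/* settings... */);
    throw std::runtime_error("Unknown database type in config: " + type);
}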
@@ -247,6 +247,9 @@ struct MPTHoldersAndCursor {
struct LedgerRange {
    std::uint32_t minSequence = 0;
    std::uint32_t maxSequence = 0;

    bool
    operator==(LedgerRange const&) const = default;
};

/**
@@ -70,10 +70,10 @@ namespace data::cassandra {
 *
 * Note: This is a safer and more correct rewrite of the original implementation of the backend.
 *
 * @tparam SettingsProviderType The settings provider type to use
 * @tparam ExecutionStrategyType The execution strategy type to use
 * @tparam SchemaType The Schema type to use
 * @tparam FetchLedgerCacheType The ledger header cache type to use
 * @tparam SettingsProviderType The settings provider type
 * @tparam ExecutionStrategyType The execution strategy type
 * @tparam SchemaType The Schema type
 * @tparam FetchLedgerCacheType The ledger header cache type
 */
template <
    SomeSettingsProvider SettingsProviderType,
@@ -100,8 +100,8 @@ public:
    /**
     * @brief Create a new cassandra/scylla backend instance.
     *
     * @param settingsProvider The settings provider to use
     * @param cache The ledger cache to use
     * @param settingsProvider The settings provider
     * @param cache The ledger cache
     * @param readOnly Whether the database should be in readonly mode
     */
    CassandraBackendFamily(SettingsProviderType settingsProvider, data::LedgerCacheInterface& cache, bool readOnly)
@@ -111,18 +111,18 @@ public:
        , handle_{settingsProvider_.getSettings()}
        , executor_{settingsProvider_.getSettings(), handle_}
    {
        if (auto const res = handle_.connect(); not res)
        if (auto const res = handle_.connect(); not res.has_value())
            throw std::runtime_error("Could not connect to database: " + res.error());

        if (not readOnly) {
            if (auto const res = handle_.execute(schema_.createKeyspace); not res) {
            if (auto const res = handle_.execute(schema_.createKeyspace); not res.has_value()) {
                // on datastax, creation of keyspaces can be configured to only be done thru the admin
                // interface. this does not mean that the keyspace does not already exist tho.
                if (res.error().code() != CASS_ERROR_SERVER_UNAUTHORIZED)
                    throw std::runtime_error("Could not create keyspace: " + res.error());
            }

            if (auto const res = handle_.executeEach(schema_.createSchema); not res)
            if (auto const res = handle_.executeEach(schema_.createSchema); not res.has_value())
                throw std::runtime_error("Could not create schema: " + res.error());
        }

@@ -146,9 +146,6 @@ public:
     */
    CassandraBackendFamily(CassandraBackendFamily&&) = delete;

    /**
     * @copydoc BackendInterface::fetchAccountTransactions
     */
    TransactionsAndCursor
    fetchAccountTransactions(
        ripple::AccountID const& account,
@@ -217,18 +214,12 @@ public:
        return {txns, {}};
    }

    /**
     * @copydoc BackendInterface::waitForWritesToFinish
     */
    void
    waitForWritesToFinish() override
    {
        executor_.sync();
    }

    /**
     * @copydoc BackendInterface::writeLedger
     */
    void
    writeLedger(ripple::LedgerHeader const& ledgerHeader, std::string&& blob) override
    {
@@ -239,16 +230,13 @@ public:
        ledgerSequence_ = ledgerHeader.seq;
    }

    /**
     * @copydoc BackendInterface::fetchLatestLedgerSequence
     */
    std::optional<std::uint32_t>
    fetchLatestLedgerSequence(boost::asio::yield_context yield) const override
    {
        if (auto const res = executor_.read(yield, schema_->selectLatestLedger); res) {
            if (auto const& result = res.value(); result) {
                if (auto const maybeValue = result.template get<uint32_t>(); maybeValue)
                    return maybeValue;
        if (auto const res = executor_.read(yield, schema_->selectLatestLedger); res.has_value()) {
            if (auto const& rows = *res; rows) {
                if (auto const maybeRow = rows.template get<uint32_t>(); maybeRow.has_value())
                    return maybeRow;

                LOG(log_.error()) << "Could not fetch latest ledger - no rows";
                return std::nullopt;
@@ -262,9 +250,6 @@ public:
        return std::nullopt;
    }

    /**
     * @copydoc BackendInterface::fetchLedgerBySequence
     */
    std::optional<ripple::LedgerHeader>
    fetchLedgerBySequence(std::uint32_t const sequence, boost::asio::yield_context yield) const override
    {
@@ -292,9 +277,6 @@ public:
        return std::nullopt;
    }

    /**
     * @copydoc BackendInterface::fetchLedgerByHash
     */
    std::optional<ripple::LedgerHeader>
    fetchLedgerByHash(ripple::uint256 const& hash, boost::asio::yield_context yield) const override
    {
@@ -315,9 +297,6 @@ public:
        return std::nullopt;
    }

    /**
     * @copydoc BackendInterface::hardFetchLedgerRange(boost::asio::yield_context) const
     */
    std::optional<LedgerRange>
    hardFetchLedgerRange(boost::asio::yield_context yield) const override
    {
@@ -356,9 +335,6 @@ public:
        return std::nullopt;
    }

    /**
     * @copydoc BackendInterface::fetchAllTransactionsInLedger
     */
    std::vector<TransactionAndMetadata>
    fetchAllTransactionsInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
    {
@@ -366,9 +342,6 @@ public:
        return fetchTransactions(hashes, yield);
    }

    /**
     * @copydoc BackendInterface::fetchAllTransactionHashesInLedger
     */
    std::vector<ripple::uint256>
    fetchAllTransactionHashesInLedger(
        std::uint32_t const ledgerSequence,
@@ -402,9 +375,6 @@ public:
        return hashes;
    }

    /**
     * @copydoc BackendInterface::fetchNFT
     */
    std::optional<NFT>
    fetchNFT(
        ripple::uint256 const& tokenID,
@@ -444,9 +414,6 @@ public:
        return std::nullopt;
    }

    /**
     * @copydoc BackendInterface::fetchNFTTransactions
     */
    TransactionsAndCursor
    fetchNFTTransactions(
        ripple::uint256 const& tokenID,
@@ -518,9 +485,6 @@ public:
        return {txns, {}};
    }

    /**
     * @copydoc BackendInterface::fetchMPTHolders
     */
    MPTHoldersAndCursor
    fetchMPTHolders(
        ripple::uint192 const& mptID,
@@ -560,9 +524,6 @@ public:
        return {mptObjects, {}};
    }

    /**
     * @copydoc BackendInterface::doFetchLedgerObject
     */
    std::optional<Blob>
    doFetchLedgerObject(
        ripple::uint256 const& key,
@@ -585,9 +546,6 @@ public:
        return std::nullopt;
    }

    /**
     * @copydoc BackendInterface::doFetchLedgerObjectSeq
     */
    std::optional<std::uint32_t>
    doFetchLedgerObjectSeq(
        ripple::uint256 const& key,
@@ -609,9 +567,6 @@ public:
        return std::nullopt;
    }

    /**
     * @copydoc BackendInterface::fetchTransaction
     */
    std::optional<TransactionAndMetadata>
    fetchTransaction(ripple::uint256 const& hash, boost::asio::yield_context yield) const override
    {
@@ -629,9 +584,6 @@ public:
        return std::nullopt;
    }

    /**
     * @copydoc BackendInterface::doFetchSuccessorKey
     */
    std::optional<ripple::uint256>
    doFetchSuccessorKey(
        ripple::uint256 key,
@@ -654,9 +606,6 @@ public:
        return std::nullopt;
    }

    /**
     * @copydoc BackendInterface::fetchTransactions
     */
    std::vector<TransactionAndMetadata>
    fetchTransactions(std::vector<ripple::uint256> const& hashes, boost::asio::yield_context yield) const override
    {
@@ -698,9 +647,6 @@ public:
        return results;
    }

    /**
     * @copydoc BackendInterface::doFetchLedgerObjects
     */
    std::vector<Blob>
    doFetchLedgerObjects(
        std::vector<ripple::uint256> const& keys,
@@ -741,9 +687,6 @@ public:
        return results;
    }

    /**
     * @copydoc BackendInterface::fetchLedgerDiff
     */
    std::vector<LedgerObject>
    fetchLedgerDiff(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
    {
@@ -789,9 +732,6 @@ public:
        return results;
    }

    /**
     * @copydoc BackendInterface::fetchMigratorStatus
     */
    std::optional<std::string>
    fetchMigratorStatus(std::string const& migratorName, boost::asio::yield_context yield) const override
    {
@@ -812,9 +752,6 @@ public:
        return {};
    }

    /**
     * @copydoc BackendInterface::fetchClioNodesData
     */
    std::expected<std::vector<std::pair<boost::uuids::uuid, std::string>>, std::string>
    fetchClioNodesData(boost::asio::yield_context yield) const override
    {
@@ -831,9 +768,6 @@ public:
        return result;
    }

    /**
     * @copydoc BackendInterface::doWriteLedgerObject
     */
    void
    doWriteLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob) override
    {
@@ -845,9 +779,6 @@ public:
        executor_.write(schema_->insertObject, std::move(key), seq, std::move(blob));
    }

    /**
     * @copydoc BackendInterface::writeSuccessor
     */
    void
    writeSuccessor(std::string&& key, std::uint32_t const seq, std::string&& successor) override
    {
@@ -859,9 +790,6 @@ public:
        executor_.write(schema_->insertSuccessor, std::move(key), seq, std::move(successor));
    }

    /**
     * @copydoc BackendInterface::writeAccountTransactions
     */
    void
    writeAccountTransactions(std::vector<AccountTransactionsData> data) override
    {
@@ -881,9 +809,6 @@ public:
        executor_.write(std::move(statements));
    }

    /**
     * @copydoc BackendInterface::writeAccountTransaction
     */
    void
    writeAccountTransaction(AccountTransactionsData record) override
    {
@@ -901,9 +826,6 @@ public:
        executor_.write(std::move(statements));
    }

    /**
     * @copydoc BackendInterface::writeNFTTransactions
     */
    void
    writeNFTTransactions(std::vector<NFTTransactionsData> const& data) override
    {
@@ -919,9 +841,6 @@ public:
        executor_.write(std::move(statements));
    }

    /**
     * @copydoc BackendInterface::writeTransaction
     */
    void
    writeTransaction(
        std::string&& hash,
@@ -939,9 +858,6 @@ public:
        );
    }

    /**
     * @copydoc BackendInterface::writeNFTs
     */
    void
    writeNFTs(std::vector<NFTsData> const& data) override
    {
@@ -980,9 +896,6 @@ public:
        executor_.writeEach(std::move(statements));
    }

    /**
     * @copydoc BackendInterface::writeNFTs
     */
    void
    writeMPTHolders(std::vector<MPTHolderData> const& data) override
    {
@@ -994,9 +907,6 @@ public:
        executor_.write(std::move(statements));
    }

    /**
     * @copydoc BackendInterface::startWrites
     */
    void
    startWrites() const override
    {
@@ -1004,9 +914,6 @@ public:
        // probably was used in PG to start a transaction or smth.
    }

    /**
     * @copydoc BackendInterface::writeMigratorStatus
     */
    void
    writeMigratorStatus(std::string const& migratorName, std::string const& status) override
    {
@@ -1015,27 +922,18 @@ public:
        );
    }

    /**
     * @copydoc BackendInterface::writeNodeMessage
     */
    void
    writeNodeMessage(boost::uuids::uuid const& uuid, std::string message) override
    {
        executor_.writeSync(schema_->updateClioNodeMessage, data::cassandra::Text{std::move(message)}, uuid);
    }

    /**
     * @copydoc BackendInterface::isTooBusy
     */
    bool
    isTooBusy() const override
    {
        return executor_.isTooBusy();
    }

    /**
     * @copydoc BackendInterface::stats
     */
    boost::json::object
    stats() const override
    {
@@ -97,7 +97,7 @@ SettingsProvider::parseSettings() const
    settings.coreConnectionsPerHost = config_.get<uint32_t>("core_connections_per_host");
    settings.queueSizeIO = config_.maybeValue<uint32_t>("queue_size_io");
    settings.writeBatchSize = config_.get<std::size_t>("write_batch_size");
    settings.provider = config_.get<std::string>("provider");
    settings.provider = impl::providerFromString(config_.get<std::string>("provider"));

    if (config_.getValueView("connect_timeout").hasValue()) {
        auto const connectTimeoutSecond = config_.get<uint32_t>("connect_timeout");

@@ -61,7 +61,7 @@ Cluster::Cluster(Settings const& settings) : ManagedObject{cass_cluster_new(), k
    cass_cluster_set_request_timeout(*this, settings.requestTimeout.count());

    // TODO: AWS keyspace reads should be local_one to save cost
    if (settings.provider == toString(cassandra::impl::Provider::Keyspace)) {
    if (settings.provider == cassandra::impl::Provider::Keyspace) {
        if (auto const rc = cass_cluster_set_consistency(*this, CASS_CONSISTENCY_LOCAL_QUORUM); rc != CASS_OK) {
            throw std::runtime_error(fmt::format("Error setting keyspace consistency: {}", cass_error_desc(rc)));
        }

@@ -20,6 +20,7 @@
#pragma once

#include "data/cassandra/impl/ManagedObject.hpp"
#include "util/Assert.hpp"
#include "util/log/Logger.hpp"

#include <cassandra.h>
@@ -31,28 +32,21 @@
#include <string>
#include <string_view>
#include <thread>
#include <utility>
#include <variant>

namespace data::cassandra::impl {

namespace {

enum class Provider { Cassandra, Keyspace };

inline std::string
toString(Provider provider)
inline Provider
providerFromString(std::string const& provider)
{
    switch (provider) {
        case Provider::Cassandra:
            return "cassandra";
        case Provider::Keyspace:
            return "aws_keyspace";
    ASSERT(
        provider == "cassandra" || provider == "aws_keyspace",
        "Provider type must be one of 'cassandra' or 'aws_keyspace'"
    );
    return provider == "cassandra" ? Provider::Cassandra : Provider::Keyspace;
    }
    std::unreachable();
}

} // namespace

// TODO: move Settings to public interface, not impl

@@ -109,7 +103,7 @@ struct Settings {
    std::size_t writeBatchSize = kDEFAULT_BATCH_SIZE;

    /** @brief Provider to know if we are using scylladb or keyspace */
    std::string provider = toString(kDEFAULT_PROVIDER);
    Provider provider = kDEFAULT_PROVIDER;

    /** @brief Size of the IO queue */
    std::optional<uint32_t> queueSizeIO = std::nullopt; // NOLINT(readability-redundant-member-init)
src/data/impl/InputFile.cpp (normal file, 58 lines)
@@ -0,0 +1,58 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2025, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include "data/impl/InputFile.hpp"

#include <xrpl/basics/base_uint.h>

#include <cstddef>
#include <cstring>
#include <ios>
#include <iosfwd>
#include <string>
#include <utility>

namespace data::impl {

InputFile::InputFile(std::string const& path) : file_(path, std::ios::binary | std::ios::in)
{
}

bool
InputFile::isOpen() const
{
    return file_.is_open();
}

bool
InputFile::readRaw(char* data, size_t size)
{
    file_.read(data, size);
    shasum_.update(data, size);
    return not file_.fail();
}

ripple::uint256
InputFile::hash() const
{
    auto sum = shasum_;
    return std::move(sum).finalize();
}

} // namespace data::impl
@@ -1,7 +1,7 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.
    Copyright (c) 2025, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
@@ -19,24 +19,39 @@

#pragma once

#include "etl/impl/LedgerLoader.hpp"
#include "util/FakeFetchResponse.hpp"
#include "util/Shasum.hpp"

#include <gmock/gmock.h>
#include <xrpl/protocol/LedgerHeader.h>
#include <xrpl/basics/base_uint.h>

#include <cstdint>
#include <optional>
#include <cstddef>
#include <cstring>
#include <fstream>
#include <iosfwd>
#include <string>

struct MockLedgerLoader {
    using GetLedgerResponseType = FakeFetchResponse;
    using RawLedgerObjectType = FakeLedgerObject;
namespace data::impl {

    MOCK_METHOD(
        FormattedTransactionsData,
        insertTransactions,
        (ripple::LedgerHeader const&, GetLedgerResponseType& data),
        ()
    );
    MOCK_METHOD(std::optional<ripple::LedgerHeader>, loadInitialLedger, (uint32_t sequence), ());
class InputFile {
    std::ifstream file_;
    util::Sha256sum shasum_;

public:
    InputFile(std::string const& path);

    bool
    isOpen() const;

    template <typename T>
    bool
    read(T& t)
    {
        return readRaw(reinterpret_cast<char*>(&t), sizeof(T));
    }

    bool
    readRaw(char* data, size_t size);

    ripple::uint256
    hash() const;
};
} // namespace data::impl
src/data/impl/LedgerCacheFile.cpp (normal file, 210 lines)
@@ -0,0 +1,210 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2025, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include "data/impl/LedgerCacheFile.hpp"

#include "data/LedgerCache.hpp"
#include "data/Types.hpp"

#include <fmt/format.h>
#include <xrpl/basics/base_uint.h>

#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <exception>
#include <filesystem>
#include <string>
#include <utility>

namespace data::impl {

using Hash = ripple::uint256;
using Separator = std::array<char, 16>;
static constexpr Separator kSEPARATOR = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

namespace {

std::expected<std::pair<ripple::uint256, LedgerCache::CacheEntry>, std::string>
readCacheEntry(InputFile& file, size_t i)
{
    ripple::uint256 key;
    if (not file.readRaw(reinterpret_cast<char*>(key.data()), ripple::base_uint<256>::bytes)) {
        return std::unexpected(fmt::format("Failed to read key at index {}", i));
    }

    uint32_t seq{};
    if (not file.read(seq)) {
        return std::unexpected(fmt::format("Failed to read sequence at index {}", i));
    }

    size_t blobSize{};
    if (not file.read(blobSize)) {
        return std::unexpected(fmt::format("Failed to read blob size at index {}", i));
    }

    Blob blob(blobSize);
    if (not file.readRaw(reinterpret_cast<char*>(blob.data()), blobSize)) {
        return std::unexpected(fmt::format("Failed to read blob data at index {}", i));
    }
    return std::make_pair(key, LedgerCache::CacheEntry{.seq = seq, .blob = std::move(blob)});
}

std::expected<void, std::string>
verifySeparator(Separator const& s)
{
    if (not std::ranges::all_of(s, [](char c) { return c == 0; })) {
        return std::unexpected{"Separator verification failed - data corruption detected"};
    }
    return {};
}

} // anonymous namespace

LedgerCacheFile::LedgerCacheFile(std::string path) : path_(std::move(path))
{
}

std::expected<void, std::string>
LedgerCacheFile::write(DataView dataView)
{
    auto const newFilePath = fmt::format("{}.new", path_);
    auto file = OutputFile{newFilePath};
    if (not file.isOpen()) {
        return std::unexpected{fmt::format("Couldn't open file: {}", newFilePath)};
    }

    Header const header{
        .latestSeq = dataView.latestSeq, .mapSize = dataView.map.size(), .deletedSize = dataView.deleted.size()
    };
    file.write(header);
    file.write(kSEPARATOR);

    for (auto const& [k, v] : dataView.map) {
        file.write(k.data(), decltype(k)::bytes);
        file.write(v.seq);
        file.write(v.blob.size());
        file.writeRaw(reinterpret_cast<char const*>(v.blob.data()), v.blob.size());
    }
    file.write(kSEPARATOR);

    for (auto const& [k, v] : dataView.deleted) {
        file.write(k.data(), decltype(k)::bytes);
        file.write(v.seq);
        file.write(v.blob.size());
        file.writeRaw(reinterpret_cast<char const*>(v.blob.data()), v.blob.size());
    }
    file.write(kSEPARATOR);
    auto const hash = file.hash();
    file.write(hash.data(), decltype(hash)::bytes);

    try {
        std::filesystem::rename(newFilePath, path_);
    } catch (std::exception const& e) {
        return std::unexpected{fmt::format("Error moving cache file from {} to {}: {}", newFilePath, path_, e.what())};
    }

    return {};
}

std::expected<LedgerCacheFile::Data, std::string>
LedgerCacheFile::read(uint32_t minLatestSequence)
{
    try {
        auto file = InputFile{path_};
        if (not file.isOpen()) {
            return std::unexpected{fmt::format("Couldn't open file: {}", path_)};
        }

        Data result;

        Header header{};
        if (not file.read(header)) {
            return std::unexpected{"Error reading cache header"};
        }
        if (header.version != kVERSION) {
            return std::unexpected{
                fmt::format("Cache has wrong version: expected {} found {}", kVERSION, header.version)
            };
        }
        if (header.latestSeq < minLatestSequence) {
            return std::unexpected{fmt::format("Latest sequence ({}) in the cache file is too low.", header.latestSeq)};
        }
        result.latestSeq = header.latestSeq;

        Separator separator{};
        if (not file.readRaw(separator.data(), separator.size())) {
            return std::unexpected{"Error reading cache header"};
        }
        if (auto verificationResult = verifySeparator(separator); not verificationResult.has_value()) {
            return std::unexpected{std::move(verificationResult).error()};
        }

        for (size_t i = 0; i < header.mapSize; ++i) {
            auto cacheEntryExpected = readCacheEntry(file, i);
            if (not cacheEntryExpected.has_value()) {
                return std::unexpected{std::move(cacheEntryExpected).error()};
            }
            // Using insert with hint here to decrease insert operation complexity to the amortized constant instead of
            // logN
            result.map.insert(result.map.end(), std::move(cacheEntryExpected).value());
        }

        if (not file.readRaw(separator.data(), separator.size())) {
            return std::unexpected{"Error reading separator"};
        }
        if (auto verificationResult = verifySeparator(separator); not verificationResult.has_value()) {
            return std::unexpected{std::move(verificationResult).error()};
        }

        for (size_t i = 0; i < header.deletedSize; ++i) {
            auto cacheEntryExpected = readCacheEntry(file, i);
            if (not cacheEntryExpected.has_value()) {
                return std::unexpected{std::move(cacheEntryExpected).error()};
            }
            result.deleted.insert(result.deleted.end(), std::move(cacheEntryExpected).value());
        }

        if (not file.readRaw(separator.data(), separator.size())) {
            return std::unexpected{"Error reading separator"};
        }
        if (auto verificationResult = verifySeparator(separator); not verificationResult.has_value()) {
            return std::unexpected{std::move(verificationResult).error()};
        }

        auto const dataHash = file.hash();
        ripple::uint256 hashFromFile{};
        if (not file.readRaw(reinterpret_cast<char*>(hashFromFile.data()), decltype(hashFromFile)::bytes)) {
            return std::unexpected{"Error reading hash"};
        }

        if (dataHash != hashFromFile) {
            return std::unexpected{"Hash file corruption detected"};
        }

        return result;
    } catch (std::exception const& e) {
        return std::unexpected{fmt::format(" Error reading cache file: {}", e.what())};
    } catch (...) {
        return std::unexpected{fmt::format(" Error reading cache file")};
    }
}

} // namespace data::impl
src/data/impl/LedgerCacheFile.hpp (normal file, 70 lines)
@@ -0,0 +1,70 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2025, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include "data/LedgerCache.hpp"
#include "data/impl/InputFile.hpp"
#include "data/impl/OutputFile.hpp"

#include <fmt/format.h>
#include <xrpl/basics/base_uint.h>

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <string>

namespace data::impl {

class LedgerCacheFile {
public:
    struct Header {
        uint32_t version = kVERSION;
        uint32_t latestSeq{};
        uint64_t mapSize{};
        uint64_t deletedSize{};
    };

private:
    static constexpr uint32_t kVERSION = 1;

    std::string path_;

public:
    template <typename T>
    struct DataBase {
        uint32_t latestSeq{0};
        T map;
        T deleted;
    };

    using DataView = DataBase<LedgerCache::CacheMap const&>;
    using Data = DataBase<LedgerCache::CacheMap>;

    LedgerCacheFile(std::string path);

    std::expected<void, std::string>
    write(DataView dataView);

    std::expected<Data, std::string>
    read(uint32_t minLatestSequence);
};

} // namespace data::impl
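For orientation, an inference from the write() implementation and the Header definition above (the diff does not document this explicitly): the on-disk layout is Header, a 16-byte zero separator, the map entries, another separator, the deleted entries, a final separator, and a 32-byte SHA-256 of all preceding bytes. Each entry is a 32-byte key followed by a uint32 sequence, a size_t blob length (8 bytes on 64-bit platforms), and the blob bytes; read() recomputes the running hash and compares it against the stored one to detect corruption.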
src/data/impl/OutputFile.cpp (normal file, 62 lines)
@@ -0,0 +1,62 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2025, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include "data/impl/OutputFile.hpp"

#include <xrpl/basics/base_uint.h>

#include <cstddef>
#include <cstring>
#include <ios>
#include <string>
#include <utility>

namespace data::impl {

OutputFile::OutputFile(std::string const& path) : file_(path, std::ios::binary | std::ios::out)
{
}

bool
OutputFile::isOpen() const
{
    return file_.is_open();
}

void
OutputFile::writeRaw(char const* data, size_t size)
{
    writeToFile(data, size);
}

void
OutputFile::writeToFile(char const* data, size_t size)
{
    file_.write(data, size);
    shasum_.update(data, size);
}

ripple::uint256
OutputFile::hash() const
{
    auto sum = shasum_;
    return std::move(sum).finalize();
}

} // namespace data::impl
src/data/impl/OutputFile.hpp (normal file, 68 lines)
@@ -0,0 +1,68 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2025, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include "util/Shasum.hpp"

#include <xrpl/basics/base_uint.h>

#include <cstddef>
#include <cstring>
#include <fstream>
#include <string>

namespace data::impl {

class OutputFile {
    std::ofstream file_;
    util::Sha256sum shasum_;

public:
    OutputFile(std::string const& path);

    bool
    isOpen() const;

    template <typename T>
    void
    write(T&& data)
    {
        writeRaw(reinterpret_cast<char const*>(&data), sizeof(T));
    }

    template <typename T>
    void
    write(T const* data, size_t const size)
    {
        writeRaw(reinterpret_cast<char const*>(data), size);
    }

    void
    writeRaw(char const* data, size_t size);

    ripple::uint256
    hash() const;

private:
    void
    writeToFile(char const* data, size_t size);
};

} // namespace data::impl
@@ -19,7 +19,7 @@

#pragma once

namespace etlng {
namespace etl {

/**
 * @brief The interface of a handler for amendment blocking
@@ -32,6 +32,12 @@ struct AmendmentBlockHandlerInterface {
     */
    virtual void
    notifyAmendmentBlocked() = 0;

    /**
     * @brief Stop the block handler from repeatedly executing
     */
    virtual void
    stop() = 0;
};

} // namespace etlng
} // namespace etl
@@ -7,14 +7,24 @@ target_sources(
  ETLService.cpp
  ETLState.cpp
  LoadBalancer.cpp
  MPTHelpers.cpp
  NetworkValidatedLedgers.cpp
  NFTHelpers.cpp
  Source.cpp
  MPTHelpers.cpp
  impl/AmendmentBlockHandler.cpp
  impl/AsyncGrpcCall.cpp
  impl/Extraction.cpp
  impl/ForwardingSource.cpp
  impl/GrpcSource.cpp
  impl/Loading.cpp
  impl/Monitor.cpp
  impl/SubscriptionSource.cpp
  impl/TaskManager.cpp
  impl/ext/Cache.cpp
  impl/ext/Core.cpp
  impl/ext/MPT.cpp
  impl/ext/NFT.cpp
  impl/ext/Successor.cpp
)

target_link_libraries(clio_etl PUBLIC clio_data)
@@ -21,16 +21,20 @@

#include "data/BackendInterface.hpp"
#include "data/LedgerCacheInterface.hpp"
#include "data/Types.hpp"
#include "etl/CacheLoaderInterface.hpp"
#include "etl/CacheLoaderSettings.hpp"
#include "etl/impl/CacheLoader.hpp"
#include "etl/impl/CursorFromAccountProvider.hpp"
#include "etl/impl/CursorFromDiffProvider.hpp"
#include "etl/impl/CursorFromFixDiffNumProvider.hpp"
#include "etlng/CacheLoaderInterface.hpp"
#include "util/Assert.hpp"
#include "util/Profiler.hpp"
#include "util/async/context/BasicExecutionContext.hpp"
#include "util/config/ConfigDefinition.hpp"
#include "util/log/Logger.hpp"

#include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
@@ -48,7 +52,7 @@ namespace etl {
 * @tparam ExecutionContextType The type of the execution context to use
 */
template <typename ExecutionContextType = util::async::CoroExecutionContext>
class CacheLoader : public etlng::CacheLoaderInterface {
class CacheLoader : public CacheLoaderInterface {
    using CacheLoaderType = impl::CacheLoaderImpl<data::LedgerCacheInterface>;

    util::Logger log_{"ETL"};
@@ -98,6 +102,10 @@ public:
            return;
        }

        if (loadCacheFromFile()) {
            return;
        }

        std::shared_ptr<impl::BaseCursorProvider> provider;
        if (settings_.numCacheCursorsFromDiff != 0) {
            LOG(log_.info()) << "Loading cache with cursor from num_cursors_from_diff="
@@ -149,6 +157,36 @@ public:
        if (loader_ != nullptr)
            loader_->wait();
    }

private:
    bool
    loadCacheFromFile()
    {
        if (not settings_.cacheFileSettings.has_value()) {
            return false;
        }
        LOG(log_.info()) << "Loading ledger cache from " << settings_.cacheFileSettings->path;
        auto const minLatestSequence =
            backend_->fetchLedgerRange()
                .transform([this](data::LedgerRange const& range) {
                    return std::max(range.maxSequence - settings_.cacheFileSettings->maxAge, range.minSequence);
                })
                .value_or(0);

        auto const [success, duration_ms] = util::timed([&]() {
            return cache_.get().loadFromFile(settings_.cacheFileSettings->path, minLatestSequence);
        });

        if (not success.has_value()) {
            LOG(log_.warn()) << "Error loading cache from file: " << success.error();
            return false;
        }

        LOG(log_.info()) << "Loaded cache from file in " << duration_ms
                         << " ms. Latest sequence: " << cache_.get().latestLedgerSequence();
        backend_->forceUpdateRange(cache_.get().latestLedgerSequence());
        return true;
    }
};

} // namespace etl
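To make the minLatestSequence computation above concrete: with a DB ledger range of [1000, 20000] and the default file.max_sequence_age of 5000, the cache file is accepted only if its latest sequence is at least max(20000 - 5000, 1000) = 15000. When the backend has no range yet, value_or(0) makes any version-matching cache file acceptable.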
@@ -21,7 +21,7 @@

#include <cstdint>

namespace etlng {
namespace etl {

/**
 * @brief An interface for the Cache Loader
@@ -50,4 +50,4 @@ struct CacheLoaderInterface {
    wait() noexcept = 0;
};

} // namespace etlng
} // namespace etl
@@ -26,6 +26,7 @@
#include <cstddef>
#include <cstdint>
#include <string>
#include <utility>

namespace etl {

@@ -63,6 +64,12 @@ makeCacheLoaderSettings(util::config::ClioConfigDefinition const& config)
    settings.numCacheMarkers = cache.get<std::size_t>("num_markers");
    settings.cachePageFetchSize = cache.get<std::size_t>("page_fetch_size");

    if (auto filePath = cache.maybeValue<std::string>("file.path"); filePath.has_value()) {
        settings.cacheFileSettings = CacheLoaderSettings::CacheFileSettings{
            .path = std::move(filePath).value(), .maxAge = cache.get<uint32_t>("file.max_sequence_age")
        };
    }

    auto const entry = cache.get<std::string>("load");
    if (boost::iequals(entry, "sync"))
        settings.loadStyle = CacheLoaderSettings::LoadStyle::SYNC;

@@ -22,6 +22,9 @@
#include "util/config/ConfigDefinition.hpp"

#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>

namespace etl {

@@ -32,6 +35,15 @@ struct CacheLoaderSettings {
    /** @brief Ways to load the cache */
    enum class LoadStyle { ASYNC, SYNC, NONE };

    /** @brief Settings for cache file operations */
    struct CacheFileSettings {
        std::string path; /**< path to the file to load cache from on start and save cache to on shutdown */
        uint32_t maxAge = 5000; /**< max difference between latest sequence in cache file and DB */

        auto
        operator<=>(CacheFileSettings const&) const = default;
    };

    size_t numCacheDiffs = 32; /**< number of diffs to use to generate cursors */
    size_t numCacheMarkers = 48; /**< number of markers to use at one time to traverse the ledger */
    size_t cachePageFetchSize = 512; /**< number of ledger objects to fetch concurrently per marker */
@@ -40,6 +52,7 @@ struct CacheLoaderSettings {
    size_t numCacheCursorsFromAccount = 0; /**< number of cursors to fetch from account_tx */

    LoadStyle loadStyle = LoadStyle::ASYNC; /**< how to load the cache */
    std::optional<CacheFileSettings> cacheFileSettings; /**< optional settings for cache file operations */

    auto
    operator<=>(CacheLoaderSettings const&) const = default;
@@ -20,12 +20,12 @@
#pragma once

#include "data/Types.hpp"
#include "etlng/Models.hpp"
#include "etl/Models.hpp"

#include <cstdint>
#include <vector>

namespace etlng {
namespace etl {

/**
 * @brief An interface for the Cache Updater
@@ -63,4 +63,4 @@ struct CacheUpdaterInterface {
    setFull() = 0;
};

} // namespace etlng
} // namespace etl
@@ -1,7 +1,7 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2023, the clio developers.
|
||||
Copyright (c) 2025, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
@@ -20,101 +20,103 @@
|
||||
#include "etl/ETLService.hpp"
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/LedgerCacheInterface.hpp"
|
||||
#include "data/Types.hpp"
|
||||
#include "etl/CacheLoader.hpp"
|
||||
#include "etl/CacheLoaderInterface.hpp"
|
||||
#include "etl/CacheUpdaterInterface.hpp"
|
||||
#include "etl/CorruptionDetector.hpp"
|
||||
#include "etl/LoadBalancer.hpp"
|
||||
#include "etl/ETLServiceInterface.hpp"
|
||||
#include "etl/ETLState.hpp"
|
||||
#include "etl/ExtractorInterface.hpp"
|
||||
#include "etl/InitialLoadObserverInterface.hpp"
|
||||
#include "etl/LedgerPublisherInterface.hpp"
|
||||
#include "etl/LoadBalancerInterface.hpp"
|
||||
#include "etl/LoaderInterface.hpp"
|
||||
#include "etl/MonitorInterface.hpp"
|
||||
#include "etl/MonitorProviderInterface.hpp"
|
||||
#include "etl/NetworkValidatedLedgersInterface.hpp"
|
||||
#include "etl/SystemState.hpp"
|
||||
#include "etl/TaskManagerProviderInterface.hpp"
|
||||
#include "etl/impl/AmendmentBlockHandler.hpp"
|
||||
#include "etl/impl/ExtractionDataPipe.hpp"
|
||||
#include "etl/impl/Extractor.hpp"
|
||||
#include "etl/impl/CacheUpdater.hpp"
|
||||
#include "etl/impl/Extraction.hpp"
|
||||
#include "etl/impl/LedgerFetcher.hpp"
|
||||
#include "etl/impl/LedgerLoader.hpp"
|
||||
#include "etl/impl/LedgerPublisher.hpp"
|
||||
#include "etl/impl/Transformer.hpp"
|
||||
#include "etlng/ETLService.hpp"
|
||||
#include "etlng/ETLServiceInterface.hpp"
|
||||
#include "etlng/LoadBalancer.hpp"
|
||||
#include "etlng/LoadBalancerInterface.hpp"
|
||||
#include "etlng/impl/LedgerPublisher.hpp"
|
||||
#include "etlng/impl/MonitorProvider.hpp"
|
||||
#include "etlng/impl/TaskManagerProvider.hpp"
|
||||
#include "etlng/impl/ext/Cache.hpp"
|
||||
#include "etlng/impl/ext/Core.hpp"
|
||||
#include "etlng/impl/ext/MPT.hpp"
|
||||
#include "etlng/impl/ext/NFT.hpp"
|
||||
#include "etlng/impl/ext/Successor.hpp"
|
||||
#include "etl/impl/Loading.hpp"
|
||||
#include "etl/impl/MonitorProvider.hpp"
|
||||
#include "etl/impl/Registry.hpp"
|
||||
#include "etl/impl/Scheduling.hpp"
|
||||
#include "etl/impl/TaskManager.hpp"
|
||||
#include "etl/impl/TaskManagerProvider.hpp"
|
||||
#include "etl/impl/ext/Cache.hpp"
|
||||
#include "etl/impl/ext/Core.hpp"
|
||||
#include "etl/impl/ext/MPT.hpp"
|
||||
#include "etl/impl/ext/NFT.hpp"
|
||||
#include "etl/impl/ext/Successor.hpp"
|
||||
#include "feed/SubscriptionManagerInterface.hpp"
|
||||
#include "util/Assert.hpp"
|
||||
#include "util/Constants.hpp"
|
||||
#include "util/Profiler.hpp"
|
||||
#include "util/async/AnyExecutionContext.hpp"
|
||||
#include "util/config/ConfigDefinition.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
|
||||
#include <boost/asio/io_context.hpp>
|
||||
#include <xrpl/beast/core/CurrentThreadName.h>
|
||||
#include <boost/json/object.hpp>
|
||||
#include <boost/signals2/connection.hpp>
|
||||
#include <xrpl/protocol/LedgerHeader.h>
|
||||
|
||||
#include <chrono>
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <stdexcept>
|
||||
#include <thread>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
namespace etl {
|
||||
|
||||
std::shared_ptr<etlng::ETLServiceInterface>
|
||||
std::shared_ptr<ETLServiceInterface>
|
||||
ETLService::makeETLService(
|
||||
util::config::ClioConfigDefinition const& config,
|
||||
boost::asio::io_context& ioc,
|
||||
    util::async::AnyExecutionContext ctx,
    std::shared_ptr<BackendInterface> backend,
    std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions,
    std::shared_ptr<etlng::LoadBalancerInterface> balancer,
    std::shared_ptr<LoadBalancerInterface> balancer,
    std::shared_ptr<NetworkValidatedLedgersInterface> ledgers
)
{
    std::shared_ptr<etlng::ETLServiceInterface> ret;
    std::shared_ptr<ETLServiceInterface> ret;

    if (config.get<bool>("__ng_etl")) {
        ASSERT(
            std::dynamic_pointer_cast<etlng::LoadBalancer>(balancer), "LoadBalancer type must be etlng::LoadBalancer"
        );

        auto state = std::make_shared<etl::SystemState>();
        auto state = std::make_shared<SystemState>();
        state->isStrictReadonly = config.get<bool>("read_only");

        auto fetcher = std::make_shared<etl::impl::LedgerFetcher>(backend, balancer);
        auto extractor = std::make_shared<etlng::impl::Extractor>(fetcher);
        auto publisher = std::make_shared<etlng::impl::LedgerPublisher>(ioc, backend, subscriptions, *state);
        auto cacheLoader = std::make_shared<etl::CacheLoader<>>(config, backend, backend->cache());
        auto cacheUpdater = std::make_shared<etlng::impl::CacheUpdater>(backend->cache());
        auto amendmentBlockHandler = std::make_shared<etlng::impl::AmendmentBlockHandler>(ctx, *state);
        auto monitorProvider = std::make_shared<etlng::impl::MonitorProvider>();
        auto fetcher = std::make_shared<impl::LedgerFetcher>(backend, balancer);
        auto extractor = std::make_shared<impl::Extractor>(fetcher);
        auto publisher = std::make_shared<impl::LedgerPublisher>(ctx, backend, subscriptions, *state);
        auto cacheLoader = std::make_shared<CacheLoader<>>(config, backend, backend->cache());
        auto cacheUpdater = std::make_shared<impl::CacheUpdater>(backend->cache());
        auto amendmentBlockHandler = std::make_shared<impl::AmendmentBlockHandler>(ctx, *state);
        auto monitorProvider = std::make_shared<impl::MonitorProvider>();

        backend->setCorruptionDetector(CorruptionDetector{*state, backend->cache()});

        auto loader = std::make_shared<etlng::impl::Loader>(
        auto loader = std::make_shared<impl::Loader>(
            backend,
            etlng::impl::makeRegistry(
            impl::makeRegistry(
                *state,
                etlng::impl::CacheExt{cacheUpdater},
                etlng::impl::CoreExt{backend},
                etlng::impl::SuccessorExt{backend, backend->cache()},
                etlng::impl::NFTExt{backend},
                etlng::impl::MPTExt{backend}
                impl::CacheExt{cacheUpdater},
                impl::CoreExt{backend},
                impl::SuccessorExt{backend, backend->cache()},
                impl::NFTExt{backend},
                impl::MPTExt{backend}
            ),
            amendmentBlockHandler,
            state
        );

        auto taskManagerProvider = std::make_shared<etlng::impl::TaskManagerProvider>(*ledgers, extractor, loader);
        auto taskManagerProvider = std::make_shared<impl::TaskManagerProvider>(*ledgers, extractor, loader);

        ret = std::make_shared<etlng::ETLService>(
        ret = std::make_shared<ETLService>(
            ctx,
            config,
            backend,
@@ -130,261 +132,280 @@ ETLService::makeETLService(
            monitorProvider,
            state
        );
    } else {
        ASSERT(std::dynamic_pointer_cast<etl::LoadBalancer>(balancer), "LoadBalancer type must be etl::LoadBalancer");
        ret = std::make_shared<etl::ETLService>(config, ioc, backend, subscriptions, balancer, ledgers);
    }

    // inject networkID into subscriptions, as the transaction feed requires it to inject CTID into responses
    if (auto const state = ret->getETLState(); state)
        subscriptions->setNetworkID(state->networkID);
    if (auto const etlState = ret->getETLState(); etlState)
        subscriptions->setNetworkID(etlState->networkID);

    ret->run();
    return ret;
}
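
// Illustrative caller sketch (editor addition, not part of the diff): how the
// factory above might be wired up. The names `ioc`, `ctx` and the concrete
// CoroExecutionContext type are assumptions for illustration only.
//
//     boost::asio::io_context ioc;
//     util::async::CoroExecutionContext ctx{4};
//     auto etlService = etl::ETLService::makeETLService(
//         config, ioc, ctx, backend, subscriptions, balancer, ledgers
//     );  // the factory also calls run() before returning the service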

// Database must be populated when this starts
std::optional<uint32_t>
ETLService::runETLPipeline(uint32_t startSequence, uint32_t numExtractors)
{
    if (finishSequence_ && startSequence > *finishSequence_)
        return {};

    LOG(log_.debug()) << "Wait for cache containing seq " << startSequence - 1
                      << " current cache last seq = " << backend_->cache().latestLedgerSequence();
    backend_->cache().waitUntilCacheContainsSeq(startSequence - 1);

    LOG(log_.debug()) << "Starting etl pipeline";
    state_.isWriting = true;

    auto const rng = backend_->hardFetchLedgerRangeNoThrow();
    ASSERT(rng.has_value(), "Parent ledger range can't be null");
    ASSERT(
        rng->maxSequence >= startSequence - 1,
        "Could not get parent ledger. rng->maxSequence = {}, startSequence = {}",
        rng->maxSequence,
        startSequence
    );

    auto const begin = std::chrono::system_clock::now();
    auto extractors = std::vector<std::unique_ptr<ExtractorType>>{};
    auto pipe = DataPipeType{numExtractors, startSequence};

    for (auto i = 0u; i < numExtractors; ++i) {
        extractors.push_back(
            std::make_unique<ExtractorType>(
                pipe, networkValidatedLedgers_, ledgerFetcher_, startSequence + i, finishSequence_, state_
ETLService::ETLService(
    util::async::AnyExecutionContext ctx,
    std::reference_wrapper<util::config::ClioConfigDefinition const> config,
    std::shared_ptr<data::BackendInterface> backend,
    std::shared_ptr<LoadBalancerInterface> balancer,
    std::shared_ptr<NetworkValidatedLedgersInterface> ledgers,
    std::shared_ptr<LedgerPublisherInterface> publisher,
    std::shared_ptr<CacheLoaderInterface> cacheLoader,
    std::shared_ptr<CacheUpdaterInterface> cacheUpdater,
    std::shared_ptr<ExtractorInterface> extractor,
    std::shared_ptr<LoaderInterface> loader,
    std::shared_ptr<InitialLoadObserverInterface> initialLoadObserver,
    std::shared_ptr<TaskManagerProviderInterface> taskManagerProvider,
    std::shared_ptr<MonitorProviderInterface> monitorProvider,
    std::shared_ptr<SystemState> state
)
            )
        );
    }

    auto transformer =
        TransformerType{pipe, backend_, ledgerLoader_, ledgerPublisher_, amendmentBlockHandler_, startSequence, state_};
    transformer.waitTillFinished();  // suspend current thread until exit condition is met
    pipe.cleanup();  // TODO: this should probably happen automatically using destructor

    // wait for all of the extractors to stop
    for (auto& t : extractors)
        t->waitTillFinished();

    auto const end = std::chrono::system_clock::now();
    auto const lastPublishedSeq = ledgerPublisher_.getLastPublishedSequence();
    static constexpr auto kNANOSECONDS_PER_SECOND = 1'000'000'000.0;
    LOG(log_.debug()) << "Extracted and wrote " << lastPublishedSeq.value_or(startSequence) - startSequence << " in "
                      << ((end - begin).count()) / kNANOSECONDS_PER_SECOND;

    state_.isWriting = false;

    LOG(log_.debug()) << "Stopping etl pipeline";
    return lastPublishedSeq;
}
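
// Editor sketch (assumption about impl::Extractor's internals): the extractors
// created above start at startSequence + i and advance with a stride equal to
// numExtractors, so ledger sequences are partitioned round-robin, e.g. for
// startSequence = 100 and numExtractors = 4:
//   extractor 0: 100, 104, 108, ...
//   extractor 1: 101, 105, 109, ...
//   extractor 2: 102, 106, 110, ...
//   extractor 3: 103, 107, 111, ...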

// Main loop of ETL.
// The software begins monitoring the ledgers that are validated by the network.
// The member networkValidatedLedgers_ keeps track of the sequences of ledgers validated by the network.
// Whenever a ledger is validated by the network, the software looks for that ledger in the database. Once the ledger is
// found in the database, the software publishes that ledger to the ledgers stream. If a network validated ledger is not
// found in the database after a certain amount of time, then the software attempts to take over responsibility of the
// ETL process, where it writes new ledgers to the database. The software will relinquish control of the ETL process if
// it detects that another process has taken over ETL.
void
ETLService::monitor()
    : ctx_(std::move(ctx))
    , config_(config)
    , backend_(std::move(backend))
    , balancer_(std::move(balancer))
    , ledgers_(std::move(ledgers))
    , publisher_(std::move(publisher))
    , cacheLoader_(std::move(cacheLoader))
    , cacheUpdater_(std::move(cacheUpdater))
    , extractor_(std::move(extractor))
    , loader_(std::move(loader))
    , initialLoadObserver_(std::move(initialLoadObserver))
    , taskManagerProvider_(std::move(taskManagerProvider))
    , monitorProvider_(std::move(monitorProvider))
    , state_(std::move(state))
    , startSequence_(config.get().maybeValue<uint32_t>("start_sequence"))
    , finishSequence_(config.get().maybeValue<uint32_t>("finish_sequence"))
{
    auto rng = backend_->hardFetchLedgerRangeNoThrow();
    if (!rng) {
        LOG(log_.info()) << "Database is empty. Will download a ledger from the network.";
        std::optional<ripple::LedgerHeader> ledger;
    ASSERT(not state_->isWriting, "ETL should never start in writer mode");

        try {
            if (startSequence_) {
                LOG(log_.info()) << "ledger sequence specified in config. "
                                 << "Will begin ETL process starting with ledger " << *startSequence_;
                ledger = ledgerLoader_.loadInitialLedger(*startSequence_);
            } else {
                LOG(log_.info()) << "Waiting for next ledger to be validated by network...";
                std::optional<uint32_t> mostRecentValidated = networkValidatedLedgers_->getMostRecent();
    if (startSequence_.has_value())
        LOG(log_.info()) << "Start sequence: " << *startSequence_;

                if (mostRecentValidated) {
                    LOG(log_.info()) << "Ledger " << *mostRecentValidated << " has been validated. Downloading...";
                    ledger = ledgerLoader_.loadInitialLedger(*mostRecentValidated);
                } else {
                    LOG(log_.info()) << "The wait for the next validated ledger has been aborted. "
                                        "Exiting monitor loop";
                    return;
                }
            }
        } catch (std::runtime_error const& e) {
            LOG(log_.fatal()) << "Failed to load initial ledger: " << e.what();
            amendmentBlockHandler_.notifyAmendmentBlocked();
            return;
    if (finishSequence_.has_value())
        LOG(log_.info()) << "Finish sequence: " << *finishSequence_;

    LOG(log_.info()) << "Starting in " << (state_->isStrictReadonly ? "STRICT READONLY MODE" : "WRITE MODE");
        }

        if (ledger) {
            rng = backend_->hardFetchLedgerRangeNoThrow();
        } else {
            LOG(log_.error()) << "Failed to load initial ledger. Exiting monitor loop";
            return;
        }
    } else {
        if (startSequence_)
            LOG(log_.warn()) << "start sequence specified but db is already populated";

        LOG(log_.info()) << "Database already populated. Picking up from the tip of history";
        cacheLoader_.load(rng->maxSequence);
    }

    ASSERT(rng.has_value(), "Ledger range can't be null");
    uint32_t nextSequence = rng->maxSequence + 1;

    LOG(log_.debug()) << "Database is populated. Starting monitor loop. sequence = " << nextSequence;

    while (not isStopping()) {
        nextSequence = publishNextSequence(nextSequence);
    }
}

uint32_t
ETLService::publishNextSequence(uint32_t nextSequence)
ETLService::~ETLService()
{
    if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); rng && rng->maxSequence >= nextSequence) {
        ledgerPublisher_.publish(nextSequence, {});
        ++nextSequence;
    } else if (networkValidatedLedgers_->waitUntilValidatedByNetwork(nextSequence, util::kMILLISECONDS_PER_SECOND)) {
        LOG(log_.info()) << "Ledger with sequence = " << nextSequence << " has been validated by the network. "
                         << "Attempting to find in database and publish";

        // Attempt to take over responsibility of ETL writer after 10 failed
        // attempts to publish the ledger. publishLedger() fails if the
        // ledger that has been validated by the network is not found in the
        // database after the specified number of attempts. publishLedger()
        // waits one second between each attempt to read the ledger from the
        // database
        constexpr size_t kTIMEOUT_SECONDS = 10;
        bool const success = ledgerPublisher_.publish(nextSequence, kTIMEOUT_SECONDS);

        if (!success) {
            LOG(log_.warn()) << "Failed to publish ledger with sequence = " << nextSequence << ". Beginning ETL";

            // returns the most recent sequence published. empty optional if no sequence was published
            std::optional<uint32_t> lastPublished = runETLPipeline(nextSequence, extractorThreads_);
            LOG(log_.info()) << "Aborting ETL. Falling back to publishing";

            // if no ledger was published, don't increment nextSequence
            if (lastPublished)
                nextSequence = *lastPublished + 1;
        } else {
            ++nextSequence;
        }
    }
    return nextSequence;
}
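
// Illustrative timeline (editor addition, assuming the 10-second publish
// timeout above):
//   t = 0      ledger N is validated by the network
//   t = 0..10  publish(N, kTIMEOUT_SECONDS) polls the database for ledger N
//              once per second
//   t = 10     publish gives up -> this node starts writing via
//              runETLPipeline(N, extractorThreads_) until a write conflict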

void
ETLService::monitorReadOnly()
{
    LOG(log_.debug()) << "Starting reporting in strict read only mode";

    auto const latestSequenceOpt = [this]() -> std::optional<uint32_t> {
        auto rng = backend_->hardFetchLedgerRangeNoThrow();

        if (!rng) {
            if (auto net = networkValidatedLedgers_->getMostRecent()) {
                return net;
            }
            return std::nullopt;
        }

        return rng->maxSequence;
    }();

    if (!latestSequenceOpt.has_value()) {
        return;
    }

    uint32_t latestSequence = *latestSequenceOpt;

    cacheLoader_.load(latestSequence);
    latestSequence++;

    while (not isStopping()) {
        if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); rng && rng->maxSequence >= latestSequence) {
            ledgerPublisher_.publish(latestSequence, {});
            latestSequence = latestSequence + 1;
        } else {
            // if we can't, wait until it's validated by the network, or 1 second passes, whichever occurs
            // first. Even if we don't hear from rippled, if ledgers are being written to the db, we publish
            // them.
            networkValidatedLedgers_->waitUntilValidatedByNetwork(latestSequence, util::kMILLISECONDS_PER_SECOND);
        }
    }
    stop();
    LOG(log_.debug()) << "Destroying ETL";
}

void
ETLService::run()
{
    LOG(log_.info()) << "Starting reporting etl";
    state_.isStopping = false;
    LOG(log_.info()) << "Running ETL...";

    doWork();
    mainLoop_.emplace(ctx_.execute([this] {
        auto const rng = loadInitialLedgerIfNeeded();

        LOG(log_.info()) << "Waiting for next ledger to be validated by network...";
        std::optional<uint32_t> const mostRecentValidated = ledgers_->getMostRecent();

        if (not mostRecentValidated) {
            LOG(log_.info()) << "The wait for the next validated ledger has been aborted. "
                                "Exiting monitor loop";
            return;
        }

        if (not rng.has_value()) {
            LOG(log_.warn()) << "Initial ledger download got cancelled - stopping ETL service";
            return;
        }

        auto nextSequence = rng->maxSequence + 1;
        if (backend_->cache().latestLedgerSequence() != 0) {
            nextSequence = backend_->cache().latestLedgerSequence();
        }

        LOG(log_.debug()) << "Database is populated. Starting monitor loop. sequence = " << nextSequence;
        startMonitor(nextSequence);

        // If we are a writer as the result of loading the initial ledger - start loading
        if (state_->isWriting)
            startLoading(nextSequence);
    }));
}
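
// Editor note (sketch, assuming ctx_.execute returns util::async::AnyOperation<void>):
// run() is non-blocking - the lambda above constitutes the service's main loop,
// and stop() later wait()s on mainLoop_ before shutting down the task manager
// and the monitor.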

void
ETLService::doWork()
ETLService::stop()
{
    worker_ = std::thread([this]() {
        beast::setCurrentThreadName("ETLService worker");
    LOG(log_.info()) << "Stop called";

        if (state_.isStrictReadonly) {
            monitorReadOnly();
        } else {
            monitor();
    if (mainLoop_)
        mainLoop_->wait();
    if (taskMan_)
        taskMan_->stop();
    if (monitor_)
        monitor_->stop();
}

boost::json::object
ETLService::getInfo() const
{
    boost::json::object result;

    result["etl_sources"] = balancer_->toJson();
    result["is_writer"] = static_cast<int>(state_->isWriting);
    result["read_only"] = static_cast<int>(state_->isStrictReadonly);
    auto last = publisher_->getLastPublish();
    if (last.time_since_epoch().count() != 0)
        result["last_publish_age_seconds"] = std::to_string(publisher_->lastPublishAgeSeconds());
    return result;
}

bool
ETLService::isAmendmentBlocked() const
{
    return state_->isAmendmentBlocked;
}

bool
ETLService::isCorruptionDetected() const
{
    return state_->isCorruptionDetected;
}

std::optional<ETLState>
ETLService::getETLState() const
{
    return balancer_->getETLState();
}

std::uint32_t
ETLService::lastCloseAgeSeconds() const
{
    return publisher_->lastCloseAgeSeconds();
}

std::optional<data::LedgerRange>
ETLService::loadInitialLedgerIfNeeded()
{
    auto rng = backend_->hardFetchLedgerRangeNoThrow();
    if (not rng.has_value()) {
        ASSERT(
            not state_->isStrictReadonly,
            "Database is empty but this node is in strict readonly mode. Can't write initial ledger."
        );

        LOG(log_.info()) << "Database is empty. Will download a ledger from the network.";
        state_->isWriting = true;  // immediately become writer as the db is empty

        auto const getMostRecent = [this]() {
            LOG(log_.info()) << "Waiting for next ledger to be validated by network...";
            return ledgers_->getMostRecent();
        };

        if (auto const maybeSeq = startSequence_.or_else(getMostRecent); maybeSeq.has_value()) {
            auto const seq = *maybeSeq;
            LOG(log_.info()) << "Starting from sequence " << seq
                             << ". Initial ledger download and extraction can take a while...";

            auto [ledger, timeDiff] = ::util::timed<std::chrono::duration<double>>([this, seq]() {
                return extractor_->extractLedgerOnly(seq).and_then(
                    [this, seq](auto&& data) -> std::optional<ripple::LedgerHeader> {
                        // TODO: loadInitialLedger in balancer should be called fetchEdgeKeys or similar
                        auto res = balancer_->loadInitialLedger(seq, *initialLoadObserver_);
                        if (not res.has_value() and res.error() == InitialLedgerLoadError::Cancelled) {
                            LOG(log_.debug()) << "Initial ledger load got cancelled";
                            return std::nullopt;
                        }

                        ASSERT(res.has_value(), "Initial ledger retry logic failed");
                        data.edgeKeys = std::move(res).value();

                        return loader_->loadInitialLedger(data);
                    }
                );
            });

            if (not ledger.has_value()) {
                LOG(log_.error()) << "Failed to load initial ledger. Exiting monitor loop";
                return std::nullopt;
            }

ETLService::ETLService(
    util::config::ClioConfigDefinition const& config,
    boost::asio::io_context& ioc,
    std::shared_ptr<BackendInterface> backend,
    std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions,
    std::shared_ptr<etlng::LoadBalancerInterface> balancer,
    std::shared_ptr<NetworkValidatedLedgersInterface> ledgers
)
    : backend_(backend)
    , loadBalancer_(balancer)
    , networkValidatedLedgers_(std::move(ledgers))
    , cacheLoader_(config, backend, backend->cache())
    , ledgerFetcher_(backend, balancer)
    , ledgerLoader_(backend, balancer, ledgerFetcher_, state_)
    , ledgerPublisher_(ioc, backend, backend->cache(), subscriptions, state_)
    , amendmentBlockHandler_(ioc, state_)
            LOG(log_.debug()) << "Time to download and store ledger = " << timeDiff;
            LOG(log_.info()) << "Finished loadInitialLedger. cache size = " << backend_->cache().size();

            return backend_->hardFetchLedgerRangeNoThrow();
        }

        LOG(log_.info()) << "The wait for the next validated ledger has been aborted. "
                            "Exiting monitor loop";
        return std::nullopt;
    }

    LOG(log_.info()) << "Database already populated. Picking up from the tip of history";
    if (not backend_->cache().isFull()) {
        cacheLoader_->load(rng->maxSequence);
    }

    return rng;
}
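
// Editor note: startSequence_.or_else(getMostRecent) above uses the C++23
// std::optional monadic API - it yields startSequence_ when set, and otherwise
// the result of the callable. A minimal standalone example:
//
//     std::optional<uint32_t> configured;  // nullopt
//     auto seq = configured.or_else([] { return std::optional<uint32_t>{42}; });
//     // *seq == 42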

void
ETLService::startMonitor(uint32_t seq)
{
    startSequence_ = config.maybeValue<uint32_t>("start_sequence");
    finishSequence_ = config.maybeValue<uint32_t>("finish_sequence");
    state_.isStrictReadonly = config.get<bool>("read_only");
    extractorThreads_ = config.get<uint32_t>("extractor_threads");
    monitor_ = monitorProvider_->make(ctx_, backend_, ledgers_, seq);

    // This should probably be done in the backend factory but we don't have state available until here
    backend_->setCorruptionDetector(CorruptionDetector{state_, backend->cache()});
    monitorNewSeqSubscription_ = monitor_->subscribeToNewSequence([this](uint32_t seq) {
        LOG(log_.info()) << "ETLService (via Monitor) got new seq from db: " << seq;

        if (state_->writeConflict) {
            LOG(log_.info()) << "Got a write conflict; Giving up writer seat immediately";
            giveUpWriter();
        }

        if (not state_->isWriting) {
            auto const diff = data::synchronousAndRetryOnTimeout([this, seq](auto yield) {
                return backend_->fetchLedgerDiff(seq, yield);
            });

            cacheUpdater_->update(seq, diff);
            backend_->updateRange(seq);
        }

        publisher_->publish(seq, {});
    });

    monitorDbStalledSubscription_ = monitor_->subscribeToDbStalled([this]() {
        LOG(log_.warn()) << "ETLService received DbStalled signal from Monitor";
        if (not state_->isStrictReadonly and not state_->isWriting)
            attemptTakeoverWriter();
    });

    monitor_->run();
}

void
ETLService::startLoading(uint32_t seq)
{
    ASSERT(not state_->isStrictReadonly, "This should only happen on writer nodes");
    taskMan_ = taskManagerProvider_->make(ctx_, *monitor_, seq, finishSequence_);

    // FIXME: this legacy name "extractor_threads" is no longer accurate (we have coroutines now)
    taskMan_->run(config_.get().get<std::size_t>("extractor_threads"));
}

void
ETLService::attemptTakeoverWriter()
{
    ASSERT(not state_->isStrictReadonly, "This should only happen on writer nodes");
    auto rng = backend_->hardFetchLedgerRangeNoThrow();
    ASSERT(rng.has_value(), "Ledger range can't be null");

    state_->isWriting = true;  // switch to writer
    LOG(log_.info()) << "Taking over the ETL writer seat";
    startLoading(rng->maxSequence + 1);
}

void
ETLService::giveUpWriter()
{
    ASSERT(not state_->isStrictReadonly, "This should only happen on writer nodes");
    state_->isWriting = false;
    state_->writeConflict = false;
    taskMan_ = nullptr;
}

} // namespace etl
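
// Editor summary (sketch) of the writer-seat transitions implemented above:
//
//   [reader] --DbStalled signal, not readonly--> attemptTakeoverWriter() --> [writer]
//   [writer] --state_->writeConflict observed--> giveUpWriter()          --> [reader]
//
// Strict read-only nodes never leave the reader role; both transitions ASSERT that.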

@@ -1,7 +1,7 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.
    Copyright (c) 2025, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
@@ -20,57 +20,64 @@
#pragma once

#include "data/BackendInterface.hpp"
#include "etl/CacheLoader.hpp"
#include "data/Types.hpp"
#include "etl/CacheLoaderInterface.hpp"
#include "etl/CacheUpdaterInterface.hpp"
#include "etl/ETLServiceInterface.hpp"
#include "etl/ETLState.hpp"
#include "etl/ExtractorInterface.hpp"
#include "etl/InitialLoadObserverInterface.hpp"
#include "etl/LedgerPublisherInterface.hpp"
#include "etl/LoadBalancerInterface.hpp"
#include "etl/LoaderInterface.hpp"
#include "etl/MonitorInterface.hpp"
#include "etl/MonitorProviderInterface.hpp"
#include "etl/NetworkValidatedLedgersInterface.hpp"
#include "etl/SystemState.hpp"
#include "etl/TaskManagerInterface.hpp"
#include "etl/TaskManagerProviderInterface.hpp"
#include "etl/impl/AmendmentBlockHandler.hpp"
#include "etl/impl/ExtractionDataPipe.hpp"
#include "etl/impl/Extractor.hpp"
#include "etl/impl/CacheUpdater.hpp"
#include "etl/impl/Extraction.hpp"
#include "etl/impl/LedgerFetcher.hpp"
#include "etl/impl/LedgerLoader.hpp"
#include "etl/impl/LedgerPublisher.hpp"
#include "etl/impl/Transformer.hpp"
#include "etlng/ETLServiceInterface.hpp"
#include "etlng/LoadBalancerInterface.hpp"
#include "etlng/impl/LedgerPublisher.hpp"
#include "etlng/impl/TaskManagerProvider.hpp"
#include "etl/impl/Loading.hpp"
#include "etl/impl/Registry.hpp"
#include "etl/impl/Scheduling.hpp"
#include "etl/impl/TaskManager.hpp"
#include "etl/impl/ext/Cache.hpp"
#include "etl/impl/ext/Core.hpp"
#include "etl/impl/ext/NFT.hpp"
#include "etl/impl/ext/Successor.hpp"
#include "feed/SubscriptionManagerInterface.hpp"
#include "util/async/AnyExecutionContext.hpp"
#include "util/async/AnyOperation.hpp"
#include "util/config/ConfigDefinition.hpp"
#include "util/log/Logger.hpp"

#include <boost/asio/io_context.hpp>
#include <boost/json/object.hpp>
#include <grpcpp/grpcpp.h>
#include <org/xrpl/rpc/v1/get_ledger.pb.h>
#include <xrpl/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>
#include <boost/signals2/connection.hpp>
#include <fmt/format.h>
#include <xrpl/basics/Blob.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/basics/strHex.h>
#include <xrpl/proto/org/xrpl/rpc/v1/get_ledger.pb.h>
#include <xrpl/proto/org/xrpl/rpc/v1/ledger.pb.h>
#include <xrpl/protocol/LedgerHeader.h>
#include <xrpl/protocol/STTx.h>
#include <xrpl/protocol/TxFormats.h>
#include <xrpl/protocol/TxMeta.h>

#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <thread>

struct AccountTransactionsData;
struct NFTTransactionsData;
struct NFTsData;

/**
 * @brief This namespace contains everything to do with the ETL and ETL sources.
 */
namespace etl {

/**
 * @brief A tag class to help identify ETLService in templated code.
 */
struct ETLServiceTag {
    virtual ~ETLServiceTag() = default;
};

template <typename T>
concept SomeETLService = std::derived_from<T, ETLServiceTag>;
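
// Editor sketch: the tag class / concept pair above enables constrained
// templates without naming a concrete service type. A hypothetical caller:
//
//     template <etl::SomeETLService ServiceType>
//     void logState(ServiceType const& svc) { /* ... svc.getInfo() ... */ }
//
// Any type deriving from ETLServiceTag satisfies SomeETLService.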

/**
 * @brief This class is responsible for continuously extracting data from a p2p node, and writing that data to the
 * databases.
@@ -84,71 +91,42 @@ concept SomeETLService = std::derived_from<T, ETLServiceTag>;
 * the others will fall back to monitoring/publishing. In this sense, this class dynamically transitions from monitoring
 * to writing and from writing to monitoring, based on the activity of other processes running on different machines.
 */
class ETLService : public etlng::ETLServiceInterface, ETLServiceTag {
    // TODO: make these template parameters in ETLService
    using DataPipeType = etl::impl::ExtractionDataPipe<org::xrpl::rpc::v1::GetLedgerResponse>;
    using CacheLoaderType = etl::CacheLoader<>;
    using LedgerFetcherType = etl::impl::LedgerFetcher;
    using ExtractorType = etl::impl::Extractor<DataPipeType, LedgerFetcherType>;
    using LedgerLoaderType = etl::impl::LedgerLoader<LedgerFetcherType>;
    using LedgerPublisherType = etl::impl::LedgerPublisher;
    using AmendmentBlockHandlerType = etl::impl::AmendmentBlockHandler;
    using TransformerType =
        etl::impl::Transformer<DataPipeType, LedgerLoaderType, LedgerPublisherType, AmendmentBlockHandlerType>;

class ETLService : public ETLServiceInterface {
    util::Logger log_{"ETL"};

    util::async::AnyExecutionContext ctx_;
    std::reference_wrapper<util::config::ClioConfigDefinition const> config_;
    std::shared_ptr<BackendInterface> backend_;
    std::shared_ptr<etlng::LoadBalancerInterface> loadBalancer_;
    std::shared_ptr<NetworkValidatedLedgersInterface> networkValidatedLedgers_;
    std::shared_ptr<LoadBalancerInterface> balancer_;
    std::shared_ptr<NetworkValidatedLedgersInterface> ledgers_;
    std::shared_ptr<LedgerPublisherInterface> publisher_;
    std::shared_ptr<CacheLoaderInterface> cacheLoader_;
    std::shared_ptr<CacheUpdaterInterface> cacheUpdater_;
    std::shared_ptr<ExtractorInterface> extractor_;
    std::shared_ptr<LoaderInterface> loader_;
    std::shared_ptr<InitialLoadObserverInterface> initialLoadObserver_;
    std::shared_ptr<TaskManagerProviderInterface> taskManagerProvider_;
    std::shared_ptr<MonitorProviderInterface> monitorProvider_;
    std::shared_ptr<SystemState> state_;

    std::uint32_t extractorThreads_ = 1;
    std::thread worker_;

    CacheLoaderType cacheLoader_;
    LedgerFetcherType ledgerFetcher_;
    LedgerLoaderType ledgerLoader_;
    LedgerPublisherType ledgerPublisher_;
    AmendmentBlockHandlerType amendmentBlockHandler_;

    SystemState state_;

    size_t numMarkers_ = 2;
    std::optional<uint32_t> startSequence_;
    std::optional<uint32_t> finishSequence_;

    std::unique_ptr<MonitorInterface> monitor_;
    std::unique_ptr<TaskManagerInterface> taskMan_;

    boost::signals2::scoped_connection monitorNewSeqSubscription_;
    boost::signals2::scoped_connection monitorDbStalledSubscription_;

    std::optional<util::async::AnyOperation<void>> mainLoop_;

public:
    /**
     * @brief Create an instance of ETLService.
     *
     * @param config The configuration to use
     * @param ioc io context to run on
     * @param backend BackendInterface implementation
     * @param subscriptions Subscription manager
     * @param balancer Load balancer to use
     * @param ledgers The network validated ledgers datastructure
     */
    ETLService(
        util::config::ClioConfigDefinition const& config,
        boost::asio::io_context& ioc,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions,
        std::shared_ptr<etlng::LoadBalancerInterface> balancer,
        std::shared_ptr<NetworkValidatedLedgersInterface> ledgers
    );

    /**
     * @brief Move constructor is deleted because ETL service shares its fields by reference
     */
    ETLService(ETLService&&) = delete;

    /**
     * @brief A factory function to spawn new ETLService instances.
     *
     * Creates and runs the ETL service.
     *
     * @param config The configuration to use
     * @param ioc io context to run on
     * @param ctx Execution context for asynchronous operations
     * @param backend BackendInterface implementation
     * @param subscriptions Subscription manager
@@ -156,182 +134,89 @@ public:
     * @param ledgers The network validated ledgers datastructure
     * @return A shared pointer to a new instance of ETLService
     */
    static std::shared_ptr<etlng::ETLServiceInterface>
    static std::shared_ptr<ETLServiceInterface>
    makeETLService(
        util::config::ClioConfigDefinition const& config,
        boost::asio::io_context& ioc,
        util::async::AnyExecutionContext ctx,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions,
        std::shared_ptr<etlng::LoadBalancerInterface> balancer,
        std::shared_ptr<LoadBalancerInterface> balancer,
        std::shared_ptr<NetworkValidatedLedgersInterface> ledgers
    );

    /**
     * @brief Stops components and joins worker thread.
     */
    ~ETLService() override
    {
        if (not state_.isStopping)
            stop();
    }

    /**
     * @brief Stop the ETL service.
     * @note This method blocks until the ETL service has stopped.
     */
    void
    stop() override
    {
        LOG(log_.info()) << "Stop called";

        state_.isStopping = true;
        cacheLoader_.stop();

        if (worker_.joinable())
            worker_.join();

        LOG(log_.debug()) << "Joined ETLService worker thread";
    }

    /**
     * @brief Get time passed since last ledger close, in seconds.
     * @brief Create an instance of ETLService.
     *
     * @return Time passed since last ledger close
     * @param ctx The execution context for asynchronous operations
     * @param config The Clio configuration definition
     * @param backend Interface to the backend database
     * @param balancer Load balancer for distributing work
     * @param ledgers Interface for accessing network validated ledgers
     * @param publisher Interface for publishing ledger data
     * @param cacheLoader Interface for loading cache data
     * @param cacheUpdater Interface for updating cache data
     * @param extractor The extractor to use
     * @param loader Interface for loading data
     * @param initialLoadObserver The observer for initial data loading
     * @param taskManagerProvider The provider of the task manager instance
     * @param monitorProvider The provider of the monitor instance
     * @param state System state tracking object
     */
    std::uint32_t
    lastCloseAgeSeconds() const override
    {
        return ledgerPublisher_.lastCloseAgeSeconds();
    }
    ETLService(
        util::async::AnyExecutionContext ctx,
        std::reference_wrapper<util::config::ClioConfigDefinition const> config,
        std::shared_ptr<data::BackendInterface> backend,
        std::shared_ptr<LoadBalancerInterface> balancer,
        std::shared_ptr<NetworkValidatedLedgersInterface> ledgers,
        std::shared_ptr<LedgerPublisherInterface> publisher,
        std::shared_ptr<CacheLoaderInterface> cacheLoader,
        std::shared_ptr<CacheUpdaterInterface> cacheUpdater,
        std::shared_ptr<ExtractorInterface> extractor,
        std::shared_ptr<LoaderInterface> loader,
        std::shared_ptr<InitialLoadObserverInterface> initialLoadObserver,
        std::shared_ptr<TaskManagerProviderInterface> taskManagerProvider,
        std::shared_ptr<MonitorProviderInterface> monitorProvider,
        std::shared_ptr<SystemState> state
    );

    /**
     * @brief Check for the amendment blocked state.
     *
     * @return true if currently amendment blocked; false otherwise
     */
    bool
    isAmendmentBlocked() const override
    {
        return state_.isAmendmentBlocked;
    }
    ~ETLService() override;

    /**
     * @brief Check whether Clio detected DB corruptions.
     *
     * @return true if corruption of DB was detected and cache was stopped.
     */
    bool
    isCorruptionDetected() const override
    {
        return state_.isCorruptionDetected;
    }

    /**
     * @brief Get state of ETL as a JSON object
     *
     * @return The state of ETL as a JSON object
     */
    boost::json::object
    getInfo() const override
    {
        boost::json::object result;

        result["etl_sources"] = loadBalancer_->toJson();
        result["is_writer"] = static_cast<int>(state_.isWriting);
        result["read_only"] = static_cast<int>(state_.isStrictReadonly);
        auto last = ledgerPublisher_.getLastPublish();
        if (last.time_since_epoch().count() != 0)
            result["last_publish_age_seconds"] = std::to_string(ledgerPublisher_.lastPublishAgeSeconds());
        return result;
    }

    /**
     * @brief Get the etl nodes' state
     * @return The etl nodes' state, nullopt if etl nodes are not connected
     */
    std::optional<etl::ETLState>
    getETLState() const noexcept override
    {
        return loadBalancer_->getETLState();
    }

    /**
     * @brief Start all components to run ETL service.
     */
    void
    run() override;

private:
    /**
     * @brief Run the ETL pipeline.
     *
     * Extracts ledgers and writes them to the database, until a write conflict occurs (or the server shuts down).
     * @note database must already be populated when this function is called
     *
     * @param startSequence the first ledger to extract
     * @param numExtractors number of extractors to use
     * @return The last ledger written to the database, if any
     */
    std::optional<uint32_t>
    runETLPipeline(uint32_t startSequence, uint32_t numExtractors);

    /**
     * @brief Monitor the network for newly validated ledgers.
     *
     * Also monitor the database to see if any process is writing those ledgers.
     * This function is called when the application starts, and will only return when the application is shutting down.
     * If the software detects the database is empty, this function will call loadInitialLedger(). If the software
     * detects ledgers are not being written, this function calls runETLPipeline(). Otherwise, this function publishes
     * ledgers as they are written to the database.
     */
    void
    monitor();
    stop() override;

    /**
     * @brief Monitor the network for newly validated ledgers and publish them to the ledgers stream
     *
     * @param nextSequence the ledger sequence to publish
     * @return The next ledger sequence to publish
     */
    uint32_t
    publishNextSequence(uint32_t nextSequence);
    boost::json::object
    getInfo() const override;

    /**
     * @brief Monitor the database for newly written ledgers.
     *
     * Similar to the monitor(), except this function will never call runETLPipeline() or loadInitialLedger().
     * This function only publishes ledgers as they are written to the database.
     */
    void
    monitorReadOnly();

    /**
     * @return true if stopping; false otherwise
     */
    bool
    isStopping() const
    {
        return state_.isStopping;
    }
    isAmendmentBlocked() const override;

    bool
    isCorruptionDetected() const override;

    std::optional<ETLState>
    getETLState() const override;

    /**
     * @brief Get the number of markers to use during the initial ledger download.
     *
     * This is equivalent to the degree of parallelism during the initial ledger download.
     *
     * @return The number of markers
     */
    std::uint32_t
    getNumMarkers() const
    {
        return numMarkers_;
    }
    lastCloseAgeSeconds() const override;

private:
    std::optional<data::LedgerRange>
    loadInitialLedgerIfNeeded();

    /**
     * @brief Spawn the worker thread and start monitoring.
     */
    void
    doWork();
    startMonitor(uint32_t seq);

    void
    startLoading(uint32_t seq);

    void
    attemptTakeoverWriter();

    void
    giveUpWriter();
};

} // namespace etl
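
// Editor sketch (illustration only): after this refactor the service is
// assembled purely from interfaces, so tests can construct it with fakes:
//
//     auto service = std::make_shared<etl::ETLService>(
//         ctx, std::cref(config), backend, balancer, ledgers, publisher,
//         cacheLoader, cacheUpdater, extractor, loader, initialLoadObserver,
//         taskManagerProvider, monitorProvider, state
//     );
//
// where each argument may be a mock implementing the matching interface.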

@@ -26,7 +26,7 @@
#include <cstdint>
#include <optional>

namespace etlng {
namespace etl {

/**
 * @brief This is a base class for any ETL service implementations.
@@ -77,7 +77,7 @@ struct ETLServiceInterface {
     * @brief Get the etl nodes' state
     * @return The etl nodes' state, nullopt if etl nodes are not connected
     */
    [[nodiscard]] virtual std::optional<etl::ETLState>
    [[nodiscard]] virtual std::optional<ETLState>
    getETLState() const = 0;

    /**
@@ -89,4 +89,4 @@ struct ETLServiceInterface {
    lastCloseAgeSeconds() const = 0;
};

} // namespace etlng
} // namespace etl
@@ -19,12 +19,12 @@

#pragma once

#include "etlng/Models.hpp"
#include "etl/Models.hpp"

#include <cstdint>
#include <optional>

namespace etlng {
namespace etl {

/**
 * @brief An interface for the Extractor
@@ -51,4 +51,4 @@ struct ExtractorInterface {
    extractLedgerOnly(uint32_t seq) = 0;
};

} // namespace etlng
} // namespace etl
@@ -19,7 +19,7 @@

#pragma once

#include "etlng/Models.hpp"
#include "etl/Models.hpp"

#include <xrpl/protocol/LedgerHeader.h>

@@ -28,7 +28,7 @@
#include <string>
#include <vector>

namespace etlng {
namespace etl {

/**
 * @brief The interface for observing the initial ledger load
@@ -51,4 +51,4 @@ struct InitialLoadObserverInterface {
    ) = 0;
};

} // namespace etlng
} // namespace etl
@@ -23,7 +23,7 @@
#include <cstdint>
#include <optional>

namespace etlng {
namespace etl {

/**
 * @brief The interface of a scheduler for the extraction process
@@ -71,4 +71,4 @@ struct LedgerPublisherInterface {
    lastPublishAgeSeconds() const = 0;
};

} // namespace etlng
} // namespace etl
@@ -21,9 +21,10 @@

#include "data/BackendInterface.hpp"
#include "etl/ETLState.hpp"
#include "etl/InitialLoadObserverInterface.hpp"
#include "etl/LoadBalancerInterface.hpp"
#include "etl/NetworkValidatedLedgersInterface.hpp"
#include "etl/Source.hpp"
#include "etlng/LoadBalancerInterface.hpp"
#include "feed/SubscriptionManagerInterface.hpp"
#include "rpc/Errors.hpp"
#include "util/Assert.hpp"
@@ -64,7 +65,7 @@ using util::prometheus::Labels;

namespace etl {

std::shared_ptr<etlng::LoadBalancerInterface>
std::shared_ptr<LoadBalancerInterface>
LoadBalancer::makeLoadBalancer(
    ClioConfigDefinition const& config,
    boost::asio::io_context& ioc,
@@ -158,7 +159,6 @@ LoadBalancer::LoadBalancer(
        auto source = sourceFactory(
            *it,
            ioc,
            backend,
            subscriptions,
            validatedLedgers,
            forwardingTimeout,
@@ -212,26 +212,32 @@ LoadBalancer::LoadBalancer(
    }
}

std::vector<std::string>
LoadBalancer::loadInitialLedger(uint32_t sequence, std::chrono::steady_clock::duration retryAfter)
InitialLedgerLoadResult
LoadBalancer::loadInitialLedger(
    uint32_t sequence,
    InitialLoadObserverInterface& loadObserver,
    std::chrono::steady_clock::duration retryAfter
)
{
    std::vector<std::string> response;
    execute(
        [this, &response, &sequence](auto& source) {
            auto [data, res] = source->loadInitialLedger(sequence, downloadRanges_);
    InitialLedgerLoadResult response;

            if (!res) {
    execute(
        [this, &response, &sequence, &loadObserver](auto& source) {
            auto res = source->loadInitialLedger(sequence, downloadRanges_, loadObserver);

            if (not res.has_value() and res.error() == InitialLedgerLoadError::Errored) {
                LOG(log_.error()) << "Failed to download initial ledger."
                                  << " Sequence = " << sequence << " source = " << source->toString();
            } else {
                response = std::move(data);
                return false;  // should retry on error
            }

            return res;
            response = std::move(res);  // cancelled or data received
            return true;
        },
        sequence,
        retryAfter
    );

    return response;
}
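
// Editor note (sketch, inferred from the callback contract visible above):
// execute() invokes the lambda against a source and keeps retrying - moving to
// another source and waiting retryAfter between rounds - while the lambda
// returns false; returning true ends the loop. That is why the error branch
// returns false ("should retry on error") while both the cancelled and success
// paths return true.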

@@ -21,13 +21,12 @@

#include "data/BackendInterface.hpp"
#include "etl/ETLState.hpp"
#include "etl/InitialLoadObserverInterface.hpp"
#include "etl/LoadBalancerInterface.hpp"
#include "etl/NetworkValidatedLedgersInterface.hpp"
#include "etl/Source.hpp"
#include "etlng/InitialLoadObserverInterface.hpp"
#include "etlng/LoadBalancerInterface.hpp"
#include "feed/SubscriptionManagerInterface.hpp"
#include "rpc/Errors.hpp"
#include "util/Assert.hpp"
#include "util/Mutex.hpp"
#include "util/Random.hpp"
#include "util/ResponseExpirationCache.hpp"
@@ -54,7 +53,6 @@
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>

namespace etl {
@@ -76,7 +74,7 @@ concept SomeLoadBalancer = std::derived_from<T, LoadBalancerTag>;
 * which ledgers have been validated by the network, and the range of ledgers each etl source has). This class also
 * allows requests for ledger data to be load balanced across all possible ETL sources.
 */
class LoadBalancer : public etlng::LoadBalancerInterface, LoadBalancerTag {
class LoadBalancer : public LoadBalancerInterface, LoadBalancerTag {
public:
    using RawLedgerObjectType = org::xrpl::rpc::v1::RawLedgerObject;
    using GetLedgerResponseType = org::xrpl::rpc::v1::GetLedgerResponse;
@@ -164,20 +162,6 @@ public:
        SourceFactory sourceFactory = makeSource
    );

    /**
     * @brief Load the initial ledger, writing data to the queue.
     * @note This function will retry indefinitely until the ledger is downloaded.
     *
     * @param sequence Sequence of ledger to download
     * @param retryAfter Time to wait between retries (2 seconds by default)
     * @return A std::vector<std::string> The ledger data
     */
    std::vector<std::string>
    loadInitialLedger(
        uint32_t sequence,
        std::chrono::steady_clock::duration retryAfter = std::chrono::seconds{2}
    ) override;

    /**
     * @brief Load the initial ledger, writing data to the queue.
     * @note This function will retry indefinitely until the ledger is downloaded or the download is cancelled.
@@ -187,16 +171,12 @@ public:
     * @param retryAfter Time to wait between retries (2 seconds by default)
     * @return A std::expected with ledger edge keys on success, or InitialLedgerLoadError on failure
     */
    etlng::InitialLedgerLoadResult
    InitialLedgerLoadResult
    loadInitialLedger(
        [[maybe_unused]] uint32_t sequence,
        [[maybe_unused]] etlng::InitialLoadObserverInterface& observer,
        [[maybe_unused]] std::chrono::steady_clock::duration retryAfter
    ) override
    {
        ASSERT(false, "Not available for old ETL");
        std::unreachable();
    }
        uint32_t sequence,
        InitialLoadObserverInterface& observer,
        std::chrono::steady_clock::duration retryAfter
    ) override;

    /**
     * @brief Fetch data for a specific ledger.
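
// Editor note: per the doc comment above, InitialLedgerLoadResult is a
// std::expected carrying ledger edge keys on success or an
// InitialLedgerLoadError on failure, so callers branch explicitly:
//
//     auto res = balancer->loadInitialLedger(seq, observer);
//     if (not res.has_value() and res.error() == InitialLedgerLoadError::Cancelled)
//         return;  // download was cancelled (e.g. shutdown)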
Some files were not shown because too many files have changed in this diff.