Mirror of https://github.com/XRPLF/clio.git (synced 2025-11-04 11:55:51 +00:00)

Compare commits: 2.2.0-b2 ... release/2. (107 commits)
| SHA1 |
|---|
| 854749a05e |
| f57706be3d |
| bb0d912f2b |
| d02d6affdb |
| 0054e4b64c |
| 9fe9e7c9d2 |
| 2e2740d4c5 |
| 5004dc4e15 |
| 665fab183a |
| b65ac67d17 |
| 7b18e28c47 |
| 4940d463dc |
| c795cf371a |
| e135aa49d5 |
| 5ba08b1d26 |
| 37cd79ceb0 |
| 1334bd05d9 |
| 437ea7bf98 |
| f9f3bc928e |
| aa1f3efda2 |
| a6d21c1a02 |
| 49b80c7ad8 |
| 56ab943be5 |
| 9d3b4f0313 |
| 42c970a2a3 |
| 1125b09611 |
| ce94f0f513 |
| d39fb20022 |
| 967b85ca33 |
| 55b8134e6d |
| 66e8a65732 |
| 067dd72aed |
| da5bf5c441 |
| ff4bc5b0aa |
| c56998477c |
| df17b429c5 |
| 36c6caa7c0 |
| 642aaf8902 |
| 99400d74ba |
| da10535bc0 |
| f74b89cc8d |
| d229ff1811 |
| 3a6390caf5 |
| a8c90a31d9 |
| 531b647b1e |
| cbc856b190 |
| 98ef83d470 |
| d5ed0cff77 |
| b18d73eef0 |
| 8f47128424 |
| b8cb60b7db |
| 0dcbbf9afa |
| c00342c792 |
| 82b8316978 |
| 890e5bb4c2 |
| adadd70a05 |
| e66cc7759e |
| e931f27d3b |
| 1fe42c88c3 |
| 6b9c8a12d0 |
| 3fa1df9117 |
| 25876aef9b |
| e744a5a8a9 |
| 230212213b |
| 7fcd3e48bd |
| 470585461d |
| ec05b06370 |
| 1b7d35b16c |
| 6ff6956a53 |
| dade122c6e |
| 8095e6893d |
| 48b0a7690c |
| 7372442f3a |
| 36a790d666 |
| 285dd008de |
| 332b66dc4f |
| 231556d850 |
| d2439cc8a9 |
| 03d6b1a3b6 |
| ebdcca51a6 |
| 7e5f94c3fd |
| 1be4d5186d |
| 8240508d19 |
| 03a01e55f9 |
| 828fea6e30 |
| 27a422369d |
| b7d0fc0200 |
| 6e8de0b64e |
| 8e75818b4f |
| 27db183aff |
| d9362311ca |
| 9d91fddce5 |
| 6d9446bf87 |
| 94706bfff9 |
| a1243da956 |
| 8004a0e0ff |
| 6978431c6a |
| 8211440711 |
| 041608b243 |
| e83dfcbcc3 |
| 010538d6fe |
| c17cc37c1c |
| 9fa1740146 |
| 7312b4af80 |
| c3125b4b1c |
| 43ced3bf9d |
| ecb7cdae88 |
.clang-tidy (26 changed lines)

@@ -4,6 +4,9 @@ Checks: '-*,
bugprone-assert-side-effect,
bugprone-bad-signal-to-kill-thread,
bugprone-bool-pointer-implicit-conversion,
bugprone-casting-through-void,
bugprone-chained-comparison,
bugprone-compare-pointer-to-member-virtual-function,
bugprone-copy-constructor-init,
bugprone-dangling-handle,
bugprone-dynamic-static-initializers,
@@ -11,6 +14,8 @@ Checks: '-*,
bugprone-fold-init-type,
bugprone-forward-declaration-namespace,
bugprone-inaccurate-erase,
bugprone-inc-dec-in-conditions,
bugprone-incorrect-enable-if,
bugprone-incorrect-roundings,
bugprone-infinite-loop,
bugprone-integer-division,
@@ -21,15 +26,16 @@ Checks: '-*,
bugprone-misplaced-pointer-arithmetic-in-alloc,
bugprone-misplaced-widening-cast,
bugprone-move-forwarding-reference,
bugprone-multi-level-implicit-pointer-conversion,
bugprone-multiple-new-in-one-expression,
bugprone-multiple-statement-macro,
bugprone-no-escape,
bugprone-non-zero-enum-to-bool-conversion,
bugprone-optional-value-conversion,
bugprone-parent-virtual-call,
bugprone-posix-return,
bugprone-redundant-branch-condition,
bugprone-reserved-identifier,
bugprone-unused-return-value,
bugprone-shared-ptr-array-mismatch,
bugprone-signal-handler,
bugprone-signed-char-misuse,
@@ -60,16 +66,20 @@ Checks: '-*,
bugprone-unhandled-self-assignment,
bugprone-unique-ptr-array-mismatch,
bugprone-unsafe-functions,
bugprone-unused-local-non-trivial-variable,
bugprone-unused-raii,
bugprone-unused-return-value,
bugprone-use-after-move,
bugprone-virtual-near-miss,
cppcoreguidelines-init-variables,
cppcoreguidelines-misleading-capture-default-by-value,
cppcoreguidelines-no-suspend-with-lock,
cppcoreguidelines-pro-type-member-init,
cppcoreguidelines-pro-type-static-cast-downcast,
cppcoreguidelines-rvalue-reference-param-not-moved,
cppcoreguidelines-use-default-member-init,
cppcoreguidelines-virtual-class-destructor,
hicpp-ignored-remove-result,
llvm-namespace-comment,
misc-const-correctness,
misc-definitions-in-headers,
@@ -91,6 +101,8 @@ Checks: '-*,
modernize-use-equals-default,
modernize-use-equals-delete,
modernize-use-override,
modernize-use-starts-ends-with,
modernize-use-std-numbers,
modernize-use-using,
performance-faster-string-find,
performance-for-range-copy,
@@ -100,6 +112,8 @@ Checks: '-*,
performance-move-constructor-init,
performance-no-automatic-move,
performance-trivially-destructible,
readability-avoid-nested-conditional-operator,
readability-avoid-return-with-void-value,
readability-braces-around-statements,
readability-const-return-type,
readability-container-contains,
@@ -112,9 +126,12 @@ Checks: '-*,
readability-make-member-function-const,
readability-misleading-indentation,
readability-non-const-parameter,
readability-redundant-casting,
readability-redundant-declaration,
readability-redundant-inline-specifier,
readability-redundant-member-init,
readability-redundant-string-init,
readability-reference-to-constructed-temporary,
readability-simplify-boolean-expr,
readability-static-accessed-through-instance,
readability-static-definition-in-anonymous-namespace,
@@ -124,9 +141,8 @@ Checks: '-*,
CheckOptions:
readability-braces-around-statements.ShortStatementLines: 2
bugprone-unsafe-functions.ReportMoreUnsafeFunctions: true
bugprone-unused-return-value.CheckedReturnTypes: ::std::error_code;::std::error_condition;::std::errc;::std::expected
misc-include-cleaner.IgnoreHeaders: '.*/(detail|impl)/.*'
bugprone-unused-return-value.CheckedReturnTypes: ::std::error_code;::std::error_condition;::std::errc
misc-include-cleaner.IgnoreHeaders: '.*/(detail|impl)/.*;.*(expected|unexpected).*'

HeaderFilterRegex: '^.*/(src|unittests)/.*\.(h|hpp)$'
HeaderFilterRegex: '^.*/(src|tests)/.*\.(h|hpp)$'
WarningsAsErrors: '*'
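The expanded check list goes together with the move to clang-tidy 18 elsewhere in this change set. A minimal local sketch of applying it, assuming a configured `build/` directory that already contains `compile_commands.json`:

```bash
# Sketch: run the repository's clang-tidy config locally
# (assumes clang-tidy 18 and an existing build/compile_commands.json).
run-clang-tidy-18 -p build -j "$(nproc)" -quiet
```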
.clangd (7 changed lines)

@@ -1,5 +1,10 @@
CompileFlags:
Add: [-D__cpp_concepts=202002]

Diagnostics:
UnusedIncludes: Strict
MissingIncludes: Strict
Includes:
IgnoreHeader: ".*/(detail|impl)/.*"
IgnoreHeader:
- ".*/(detail|impl)/.*"
- ".*expected.*"
.gitattributes (vendored, new file, 2 lines)

@@ -0,0 +1,2 @@
*.png filter=lfs diff=lfs merge=lfs -text
*.svg filter=lfs diff=lfs merge=lfs -text
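These are the standard filter lines that `git lfs track` writes; a sketch of producing the same entries from a shell:

```bash
# Sketch: git-lfs generates the two .gitattributes entries above.
git lfs install
git lfs track "*.png"
git lfs track "*.svg"
```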
.githooks/check-format

@@ -8,16 +8,16 @@
echo "+ Checking code format..."

# paths to check and re-format
sources="src unittests"
sources="src tests"
formatter="clang-format -i"
version=$($formatter --version | grep -o '[0-9\.]*')

if [[ "17.0.0" > "$version" ]]; then
if [[ "18.0.0" > "$version" ]]; then
cat <<EOF

ERROR
-----------------------------------------------------------------------------
A minimum of version 17 of `which clang-format` is required.
A minimum of version 18 of `which clang-format` is required.
Your version is $version.
Please fix paths and run again.
-----------------------------------------------------------------------------
@@ -59,7 +59,9 @@ function grep_code {
grep -l "${1}" ${sources} -r --include \*.hpp --include \*.cpp
}

if [[ "$OSTYPE" == "darwin"* ]]; then
GNU_SED=$(sed --version 2>&1 | grep -q 'GNU' && echo true || echo false)

if [[ "$GNU_SED" == "false" ]]; then # macOS sed
# make all includes to be <...> style
grep_code '#include ".*"' | xargs sed -i '' -E 's|#include "(.*)"|#include <\1>|g'

@@ -71,7 +73,7 @@ else
grep_code '#include ".*"' | xargs sed -i -E 's|#include "(.*)"|#include <\1>|g'

# make local includes to be "..." style
main_src_dirs=$(find ./src -type d -maxdepth 1 -exec basename {} \; | paste -sd '|' | sed 's/|/\\|/g')
main_src_dirs=$(find ./src -maxdepth 1 -type d -exec basename {} \; | paste -sd '|' | sed 's/|/\\|/g')
grep_code "#include <\($main_src_dirs\)/.*>" | xargs sed -i -E "s|#include <(($main_src_dirs)/.*)>|#include \"\1\"|g"
fi

@@ -83,9 +85,10 @@ first=$(git diff $sources $cmake_files)
find $sources -type f \( -name '*.cpp' -o -name '*.hpp' -o -name '*.ipp' \) -print0 | xargs -0 $formatter
cmake-format -i $cmake_files
second=$(git diff $sources $cmake_files)
changes=$(diff <(echo "$first") <(echo "$second") | wc -l | sed -e 's/^[[:space:]]*//')
changes=$(diff <(echo "$first") <(echo "$second"))
changes_number=$(echo -n "$changes" | wc -l | sed -e 's/^[[:space:]]*//')

if [ "$changes" != "0" ]; then
if [ "$changes_number" != "0" ]; then
cat <<\EOF

WARNING
@@ -95,5 +98,8 @@ if [ "$changes" != "0" ]; then
-----------------------------------------------------------------------------

EOF
if [[ "$1" == "--diff" ]]; then
echo "$changes"
fi
exit 1
fi
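The new `--diff` flag is what CI passes (see build.yml further down) so that a failed check prints the exact formatting changes; the script can also be run by hand from the repository root:

```bash
# Check formatting and, on mismatch, print the diff of proposed fixes.
./.githooks/check-format --diff
```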
.githooks/post-checkout (new executable file, 3 lines)

@@ -0,0 +1,3 @@
#!/bin/sh
command -v git-lfs >/dev/null 2>&1 || { echo >&2 "\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-checkout' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\n"; exit 2; }
git lfs post-checkout "$@"
.githooks/post-commit (new executable file, 3 lines)

@@ -0,0 +1,3 @@
#!/bin/sh
command -v git-lfs >/dev/null 2>&1 || { echo >&2 "\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-commit' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\n"; exit 2; }
git lfs post-commit "$@"
.githooks/post-merge (new executable file, 3 lines)

@@ -0,0 +1,3 @@
#!/bin/sh
command -v git-lfs >/dev/null 2>&1 || { echo >&2 "\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-merge' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\n"; exit 2; }
git lfs post-merge "$@"
.githooks/pre-commit

@@ -3,4 +3,6 @@
# This script is intended to be run from the root of the repository.

source .githooks/check-format
source .githooks/check-docs
#source .githooks/check-docs

# TODO: Fix Doxygen issue with reference links. See https://github.com/XRPLF/clio/issues/1431
.githooks/pre-push (new executable file, 3 lines)

@@ -0,0 +1,3 @@
#!/bin/sh
command -v git-lfs >/dev/null 2>&1 || { echo >&2 "\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'pre-push' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\n"; exit 2; }
git lfs pre-push "$@"
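All of these hook files only take effect once git is pointed at the `.githooks` directory, which is the same one-time setup CONTRIBUTING.md documents further down:

```bash
# One-time setup so the repository's hooks (including the git-lfs ones above)
# are used instead of .git/hooks.
git config --local core.hooksPath .githooks
```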
.github/ISSUE_TEMPLATE/bug_report.md (vendored, 1 changed line)

@@ -8,6 +8,7 @@ assignees: ''
---

<!-- Please search existing issues to avoid creating duplicates. -->
<!-- Kindly refrain from posting any credentials or sensitive information in this issue -->

## Issue Description
<!-- Provide a summary for your issue/bug. -->
.github/ISSUE_TEMPLATE/feature_request.md (vendored, 1 changed line)

@@ -8,6 +8,7 @@ assignees: ''
---

<!-- Please search existing issues to avoid creating duplicates. -->
<!-- Kindly refrain from posting any credentials or sensitive information in this issue -->

## Summary
<!-- Provide a summary to the feature request -->
.github/ISSUE_TEMPLATE/question.md (vendored, 1 changed line)

@@ -9,6 +9,7 @@ assignees: ''

<!-- Please search existing issues to avoid creating duplicates. -->
<!-- Consider starting a [discussion](https://github.com/XRPLF/clio/discussions) instead. -->
<!-- Kindly refrain from posting any credentials or sensitive information in this issue -->

## Question
<!-- Your question -->
.github/actions/code_coverage/action.yml (vendored, 10 changed lines)

@@ -6,15 +6,19 @@ runs:
- name: Run tests
shell: bash
run: |
build/clio_tests --backend_host=scylladb
build/clio_tests

- name: Run gcovr
shell: bash
run: |
gcovr -e unittests --xml build/coverage_report.xml -j8 --exclude-throw-branches
gcovr -e tests \
-e src/data/cassandra \
-e src/data/CassandraBackend.hpp \
-e 'src/data/BackendFactory.*' \
--xml build/coverage_report.xml -j8 --exclude-throw-branches

- name: Archive coverage report
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: coverage-report.xml
path: build/coverage_report.xml
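A local equivalent of the updated coverage steps, assuming a coverage-enabled build and `gcovr` installed via pip:

```bash
# Sketch: run the unit tests, then collect coverage with the same
# exclusions the CI action uses.
build/clio_tests
gcovr -e tests \
    -e src/data/cassandra \
    -e src/data/CassandraBackend.hpp \
    -e 'src/data/BackendFactory.*' \
    --xml build/coverage_report.xml -j8 --exclude-throw-branches
```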
.github/actions/create_issue/action.yml (vendored, new file, 35 lines)

@@ -0,0 +1,35 @@
name: Create an issue
description: Create an issue
inputs:
title:
description: Issue title
required: true
body:
description: Issue body
required: true
labels:
description: Comma-separated list of labels
required: true
default: 'bug'
assignees:
description: Comma-separated list of assignees
required: true
default: 'cindyyan317,godexsoft,kuznetsss'
outputs:
created_issue_id:
description: Created issue id
value: ${{ steps.create_issue.outputs.created_issue }}
runs:
using: composite
steps:
- name: Create an issue
id: create_issue
shell: bash
run: |
echo -e '${{ inputs.body }}' > issue.md
gh issue create --assignee '${{ inputs.assignees }}' --label '${{ inputs.labels }}' --title '${{ inputs.title }}' --body-file ./issue.md > create_issue.log
created_issue=$(cat create_issue.log | sed 's|.*/||')
echo "created_issue=$created_issue" >> $GITHUB_OUTPUT
rm create_issue.log issue.md
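The composite action reduces to a single `gh` invocation; a sketch with a placeholder title and body:

```bash
# Sketch: what the action runs under the hood (title and body here are
# placeholders, not values from the repository).
echo 'Example body' > issue.md
gh issue create --assignee 'cindyyan317,godexsoft,kuznetsss' --label 'bug' \
    --title 'Example title' --body-file ./issue.md
```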
.github/actions/generate/action.yml (vendored, 7 changed lines)

@@ -16,6 +16,10 @@ inputs:
description: Whether conan's coverage option should be on or not
required: true
default: 'false'
static:
description: Whether Clio is to be statically linked
required: true
default: 'false'
runs:
using: composite
steps:
@@ -28,9 +32,10 @@ runs:
env:
BUILD_OPTION: "${{ inputs.conan_cache_hit == 'true' && 'missing' || '' }}"
CODE_COVERAGE: "${{ inputs.code_coverage == 'true' && 'True' || 'False' }}"
STATIC_OPTION: "${{ inputs.static == 'true' && 'True' || 'False' }}"
run: |
cd build
conan install .. -of . -b $BUILD_OPTION -s build_type=${{ inputs.build_type }} -o clio:tests=True -o clio:lint=False -o clio:coverage="${CODE_COVERAGE}" --profile ${{ inputs.conan_profile }}
conan install .. -of . -b $BUILD_OPTION -s build_type=${{ inputs.build_type }} -o clio:static="${STATIC_OPTION}" -o clio:tests=True -o clio:integration_tests=True -o clio:lint=False -o clio:coverage="${CODE_COVERAGE}" --profile ${{ inputs.conan_profile }}

- name: Run cmake
shell: bash
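For reference, a local sketch of the updated `conan install` line with the new static and integration-test options spelled out; the profile name `default` is an assumption:

```bash
# Sketch: local equivalent of the CI conan step for a static Release build.
cd build
conan install .. -of . -b missing -s build_type=Release \
    -o clio:static=True -o clio:tests=True -o clio:integration_tests=True \
    -o clio:lint=False --profile default
```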
.github/actions/prepare_runner/action.yml (vendored, 4 changed lines)

@@ -12,9 +12,7 @@ runs:
shell: bash
run: |
brew install llvm@14 pkg-config ninja bison cmake ccache jq gh conan@1
if ! command -v conan &> /dev/null; then
echo "/opt/homebrew/opt/conan@1/bin" >> $GITHUB_PATH
fi
echo "/opt/homebrew/opt/conan@1/bin" >> $GITHUB_PATH

- name: Fix git permissions on Linux
if: ${{ runner.os == 'Linux' }}
.github/actions/restore_cache/action.yml (vendored, 11 changed lines)

@@ -4,6 +4,9 @@ inputs:
conan_dir:
description: Path to .conan directory
required: true
conan_profile:
description: Conan profile name
required: true
ccache_dir:
description: Path to .ccache directory
required: true
@@ -44,16 +47,16 @@ runs:
echo "hash=$hash" >> $GITHUB_OUTPUT

- name: Restore conan cache
uses: actions/cache/restore@v3
uses: actions/cache/restore@v4
id: conan_cache
with:
path: ${{ inputs.conan_dir }}/data
key: clio-conan_data-${{ runner.os }}-${{ inputs.build_type }}-develop-${{ steps.conan_hash.outputs.hash }}
key: clio-conan_data-${{ runner.os }}-${{ inputs.build_type }}-${{ inputs.conan_profile }}-develop-${{ steps.conan_hash.outputs.hash }}

- name: Restore ccache cache
uses: actions/cache/restore@v3
uses: actions/cache/restore@v4
id: ccache_cache
if: ${{ env.CCACHE_DISABLE != '1' }}
with:
path: ${{ inputs.ccache_dir }}
key: clio-ccache-${{ runner.os }}-${{ inputs.build_type }}${{ inputs.code_coverage == 'true' && '-code_coverage' || '' }}-develop-${{ steps.git_common_ancestor.outputs.commit }}
key: clio-ccache-${{ runner.os }}-${{ inputs.build_type }}${{ inputs.code_coverage == 'true' && '-code_coverage' || '' }}-${{ inputs.conan_profile }}-develop-${{ steps.git_common_ancestor.outputs.commit }}
.github/actions/save_cache/action.yml (vendored, 11 changed lines)

@@ -4,6 +4,9 @@ inputs:
conan_dir:
description: Path to .conan directory
required: true
conan_profile:
description: Conan profile name
required: true
conan_hash:
description: Hash to use as a part of conan cache key
required: true
@@ -41,16 +44,16 @@ runs:

- name: Save conan cache
if: ${{ inputs.conan_cache_hit != 'true' }}
uses: actions/cache/save@v3
uses: actions/cache/save@v4
with:
path: ${{ inputs.conan_dir }}/data
key: clio-conan_data-${{ runner.os }}-${{ inputs.build_type }}-develop-${{ inputs.conan_hash }}
key: clio-conan_data-${{ runner.os }}-${{ inputs.build_type }}-${{ inputs.conan_profile }}-develop-${{ inputs.conan_hash }}

- name: Save ccache cache
if: ${{ inputs.ccache_cache_hit != 'true' || inputs.ccache_cache_miss_rate == '100.0' }}
uses: actions/cache/save@v3
uses: actions/cache/save@v4
with:
path: ${{ inputs.ccache_dir }}
key: clio-ccache-${{ runner.os }}-${{ inputs.build_type }}${{ inputs.code_coverage == 'true' && '-code_coverage' || '' }}-develop-${{ steps.git_common_ancestor.outputs.commit }}
key: clio-ccache-${{ runner.os }}-${{ inputs.build_type }}${{ inputs.code_coverage == 'true' && '-code_coverage' || '' }}-${{ inputs.conan_profile }}-develop-${{ steps.git_common_ancestor.outputs.commit }}
.github/actions/setup_conan/action.yml (vendored, 8 changed lines)

@@ -1,5 +1,9 @@
name: Setup conan
description: Setup conan profile and artifactory
inputs:
conan_profile:
description: Conan profile name
required: true
outputs:
conan_profile:
description: Created conan profile name
@@ -11,7 +15,7 @@ runs:
if: ${{ runner.os == 'macOS' }}
shell: bash
env:
CONAN_PROFILE: clio_apple_clang_15
CONAN_PROFILE: apple_clang_15
id: conan_setup_mac
run: |
echo "Creating $CONAN_PROFILE conan profile";
@@ -27,7 +31,7 @@ runs:
shell: bash
id: conan_setup_linux
run: |
echo "created_conan_profile=default" >> $GITHUB_OUTPUT
echo "created_conan_profile=${{ inputs.conan_profile }}" >> $GITHUB_OUTPUT

- name: Export output variable
shell: bash
.github/scripts/update-libxrpl-version (vendored, new executable file, 28 lines)

@@ -0,0 +1,28 @@
#!/bin/bash

# Note: This script is intended to be run from the root of the repository.
#
# This script modifies conanfile.py such that the specified version of libXRPL is used.

if [[ -z "$1" ]]; then
cat <<EOF

ERROR
-----------------------------------------------------------------------------
Version should be passed as first argument to the script.
-----------------------------------------------------------------------------

EOF
exit 1
fi

VERSION=$1
GNU_SED=$(sed --version 2>&1 | grep -q 'GNU' && echo true || echo false)

echo "+ Updating required libXRPL version to $VERSION"

if [[ "$GNU_SED" == "false" ]]; then
sed -i '' -E "s|'xrpl/[a-zA-Z0-9\\.\\-]+'|'xrpl/$VERSION'|g" conanfile.py
else
sed -i -E "s|'xrpl/[a-zA-Z0-9\\.\\-]+'|'xrpl/$VERSION'|g" conanfile.py
fi
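Usage sketch, run from the repository root (the version argument is illustrative):

```bash
# Rewrites the xrpl requirement in conanfile.py to the given version.
./.github/scripts/update-libxrpl-version 2.2.0
```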
.github/workflows/build.yml (vendored, 75 changed lines)

@@ -13,11 +13,15 @@ jobs:
container:
image: rippleci/clio_ci:latest
steps:
- name: Fix git permissions on Linux
shell: bash
run: git config --global --add safe.directory $PWD

- uses: actions/checkout@v4
- name: Run formatters
id: run_formatters
run: |
./.githooks/check-format
./.githooks/check-format --diff
shell: bash

check_docs:
@@ -35,8 +39,8 @@ jobs:

build:
name: Build
needs:
- check_format
needs:
- check_format
- check_docs
strategy:
fail-fast: false
@@ -46,27 +50,37 @@ jobs:
container:
image: rippleci/clio_ci:latest
build_type: Release
conan_profile: gcc
code_coverage: false
static: true
- os: heavy
container:
image: rippleci/clio_ci:latest
build_type: Debug
conan_profile: gcc
code_coverage: true
static: true
- os: heavy
container:
image: rippleci/clio_ci:latest
build_type: Release
conan_profile: clang
code_coverage: false
static: true
- os: heavy
container:
image: rippleci/clio_ci:latest
build_type: Debug
conan_profile: clang
code_coverage: false
static: true
- os: macos14
build_type: Release
code_coverage: false
static: false
runs-on: [self-hosted, "${{ matrix.os }}"]
container: ${{ matrix.container }}

services:
scylladb:
image: ${{ (matrix.code_coverage) && 'scylladb/scylla' || '' }}
options: >-
--health-cmd "cqlsh -e 'describe cluster'"
--health-interval 10s
--health-timeout 5s
--health-retries 5

steps:
- name: Clean workdir
if: ${{ runner.os == 'macOS' }}
@@ -84,12 +98,15 @@ jobs:
- name: Setup conan
uses: ./.github/actions/setup_conan
id: conan
with:
conan_profile: ${{ matrix.conan_profile }}

- name: Restore cache
uses: ./.github/actions/restore_cache
id: restore_cache
with:
conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
conan_profile: ${{ steps.conan.outputs.conan_profile }}
ccache_dir: ${{ env.CCACHE_DIR }}
build_type: ${{ matrix.build_type }}
code_coverage: ${{ matrix.code_coverage }}
@@ -101,6 +118,7 @@ jobs:
conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
build_type: ${{ matrix.build_type }}
code_coverage: ${{ matrix.code_coverage }}
static: ${{ matrix.static }}

- name: Build Clio
uses: ./.github/actions/build_clio
@@ -116,20 +134,20 @@ jobs:

- name: Strip tests
if: ${{ !matrix.code_coverage }}
run: strip build/clio_tests
run: strip build/clio_tests && strip build/clio_integration_tests

- name: Upload clio_server
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: clio_server_${{ runner.os }}_${{ matrix.build_type }}
name: clio_server_${{ runner.os }}_${{ matrix.build_type }}_${{ steps.conan.outputs.conan_profile }}
path: build/clio_server

- name: Upload clio_tests
if: ${{ !matrix.code_coverage }}
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: clio_tests_${{ runner.os }}
path: build/clio_tests
name: clio_tests_${{ runner.os }}_${{ matrix.build_type }}_${{ steps.conan.outputs.conan_profile }}
path: build/clio_*tests

- name: Save cache
uses: ./.github/actions/save_cache
@@ -142,6 +160,7 @@ jobs:
ccache_cache_miss_rate: ${{ steps.ccache_stats.outputs.miss_rate }}
build_type: ${{ matrix.build_type }}
code_coverage: ${{ matrix.code_coverage }}
conan_profile: ${{ steps.conan.outputs.conan_profile }}

# TODO: This is not a part of build process but it is the easiest way to do it here.
# It will be refactored in https://github.com/XRPLF/clio/issues/1075
@@ -166,7 +185,21 @@ jobs:
- os: heavy
container:
image: rippleci/clio_ci:latest
conan_profile: gcc
build_type: Release
- os: heavy
container:
image: rippleci/clio_ci:latest
conan_profile: clang
build_type: Release
- os: heavy
container:
image: rippleci/clio_ci:latest
conan_profile: clang
build_type: Debug
- os: macos14
conan_profile: apple_clang_15
build_type: Release
runs-on: [self-hosted, "${{ matrix.os }}"]
container: ${{ matrix.container }}

@@ -175,10 +208,10 @@ jobs:
if: ${{ runner.os == 'macOS' }}
uses: kuznetsss/workspace-cleanup@1.0

- uses: actions/download-artifact@v3
- uses: actions/download-artifact@v4
with:
name: clio_tests_${{ runner.os }}
name: clio_tests_${{ runner.os }}_${{ matrix.build_type }}_${{ matrix.conan_profile }}
- name: Run clio_tests
run: |
chmod +x ./clio_tests
./clio_tests --gtest_filter="-BackendCassandraBaseTest*:BackendCassandraTest*:BackendCassandraFactoryTestWithDB*"
./clio_tests
.github/workflows/check_libxrpl.yml (vendored, new file, 91 lines)

@@ -0,0 +1,91 @@
name: Check new libXRPL
on:
repository_dispatch:
types: [check_libxrpl]

jobs:
build:
name: Build Clio / `libXRPL ${{ github.event.client_payload.version }}`
runs-on: [self-hosted, heavy]
container:
image: rippleci/clio_ci:latest

steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0

- name: Update libXRPL version requirement
shell: bash
run: |
./.github/scripts/update-libxrpl-version ${{ github.event.client_payload.version }}

- name: Prepare runner
uses: ./.github/actions/prepare_runner
with:
disable_ccache: true

- name: Setup conan
uses: ./.github/actions/setup_conan
id: conan
with:
conan_profile: gcc

- name: Run conan and cmake
uses: ./.github/actions/generate
with:
conan_profile: ${{ steps.conan.outputs.conan_profile }}
conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
build_type: Release

- name: Build Clio
uses: ./.github/actions/build_clio

- name: Strip tests
run: strip build/clio_tests

- name: Upload clio_tests
uses: actions/upload-artifact@v4
with:
name: clio_tests_libxrpl-${{ github.event.client_payload.version }}
path: build/clio_tests

run_tests:
name: Run tests
needs: build
runs-on: [self-hosted, heavy]
container:
image: rippleci/clio_ci:latest

steps:
- uses: actions/download-artifact@v4
with:
name: clio_tests_libxrpl-${{ github.event.client_payload.version }}

- name: Run clio_tests
run: |
chmod +x ./clio_tests
./clio_tests

create_issue_on_failure:
name: Create an issue on failure
needs: [build, run_tests]
if: ${{ always() && contains(needs.*.result, 'failure') }}
runs-on: ubuntu-20.04
permissions:
contents: write
issues: write
steps:
- uses: actions/checkout@v4

- name: Create an issue
uses: ./.github/actions/create_issue
env:
GH_TOKEN: ${{ github.token }}
with:
labels: 'compatibility,bug'
title: 'Proposed libXRPL check failed'
body: >
Clio build or tests failed against `libXRPL ${{ github.event.client_payload.version }}`.

Workflow: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}/
.github/workflows/clang-tidy.yml (vendored, 22 changed lines)

@@ -33,6 +33,8 @@ jobs:
- name: Setup conan
uses: ./.github/actions/setup_conan
id: conan
with:
conan_profile: clang

- name: Restore cache
uses: ./.github/actions/restore_cache
@@ -40,6 +42,7 @@ jobs:
with:
conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
ccache_dir: ${{ env.CCACHE_DIR }}
conan_profile: ${{ steps.conan.outputs.conan_profile }}

- name: Run conan and cmake
uses: ./.github/actions/generate
@@ -57,7 +60,7 @@ jobs:
shell: bash
id: run_clang_tidy
run: |
run-clang-tidy-17 -p build -j ${{ steps.number_of_threads.outputs.threads_number }} -fix -quiet 1>output.txt
run-clang-tidy-18 -p build -j ${{ steps.number_of_threads.outputs.threads_number }} -fix -quiet 1>output.txt

- name: Check format
if: ${{ steps.run_clang_tidy.outcome != 'success' }}
@@ -76,16 +79,15 @@ jobs:
- name: Create an issue
if: ${{ steps.run_clang_tidy.outcome != 'success' }}
id: create_issue
shell: bash
uses: ./.github/actions/create_issue
env:
GH_TOKEN: ${{ github.token }}
run: |
echo -e 'Clang-tidy found issues in the code:\n' > issue.md
echo -e "List of the issues found: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}/" >> issue.md
gh issue create --assignee 'cindyyan317,godexsoft,kuznetsss' --label bug --title 'Clang-tidy found bugs in code🐛' --body-file ./issue.md > create_issue.log
created_issue=$(cat create_issue.log | sed 's|.*/||')
echo "created_issue=$created_issue" >> $GITHUB_OUTPUT
rm create_issue.log issue.md
with:
title: 'Clang-tidy found bugs in code 🐛'
body: >
Clang-tidy found issues in the code:

List of the issues found: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}/

- uses: crazy-max/ghaction-import-gpg@v5
if: ${{ steps.run_clang_tidy.outcome != 'success' }}
@@ -108,7 +110,7 @@ jobs:
branch-suffix: timestamp
delete-branch: true
title: "[CI] clang-tidy auto fixes"
body: "Fixes #${{ steps.create_issue.outputs.created_issue }}. Please review and commit clang-tidy fixes."
body: "Fixes #${{ steps.create_issue.outputs.created_issue_id }}. Please review and commit clang-tidy fixes."
reviewers: "cindyyan317,godexsoft,kuznetsss"

- name: Fail the job
.github/workflows/docs.yml (vendored, 10 changed lines)

@@ -1,7 +1,7 @@
name: Documentation
on:
push:
branches: [release/*, develop]
branches: [develop]
workflow_dispatch:

permissions:
@@ -24,7 +24,9 @@ jobs:
image: rippleci/clio_ci:latest
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
lfs: true

- name: Build docs
run: |
@@ -32,13 +34,13 @@ jobs:
cmake ../docs && cmake --build . --target docs

- name: Setup Pages
uses: actions/configure-pages@v3
uses: actions/configure-pages@v4

- name: Upload artifact
uses: actions/upload-pages-artifact@v3
with:
path: build_docs/html
name: docs-develop # TODO: use x.y.z for `release/x.y.z` branches and `develop` for latest dev docs
name: docs-develop

- name: Deploy to GitHub Pages
id: deployment
.github/workflows/nightly.yml (vendored, 64 changed lines)

@@ -41,6 +41,8 @@ jobs:
- name: Setup conan
uses: ./.github/actions/setup_conan
id: conan
with:
conan_profile: gcc

- name: Run conan and cmake
uses: ./.github/actions/generate
@@ -53,13 +55,13 @@ jobs:
uses: ./.github/actions/build_clio

- name: Strip tests
run: strip build/clio_tests
run: strip build/clio_tests && strip build/clio_integration_tests

- name: Upload clio_tests
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: clio_tests_${{ runner.os }}_${{ matrix.build_type }}
path: build/clio_tests
path: build/clio_*tests

- name: Compress clio_server
shell: bash
@@ -68,7 +70,7 @@ jobs:
tar czf ./clio_server_${{ runner.os }}_${{ matrix.build_type }}.tar.gz ./clio_server

- name: Upload clio_server
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: clio_server_${{ runner.os }}_${{ matrix.build_type }}
path: build/clio_server_${{ runner.os }}_${{ matrix.build_type }}.tar.gz
@@ -82,25 +84,50 @@ jobs:
include:
- os: macos14
build_type: Release
integration_tests: false
- os: heavy
build_type: Release
container:
image: rippleci/clio_ci:latest
integration_tests: true
- os: heavy
build_type: Debug
container:
image: rippleci/clio_ci:latest
integration_tests: true
runs-on: [self-hosted, "${{ matrix.os }}"]
container: ${{ matrix.container }}

services:
scylladb:
image: ${{ (matrix.integration_tests) && 'scylladb/scylla' || '' }}
options: >-
--health-cmd "cqlsh -e 'describe cluster'"
--health-interval 10s
--health-timeout 5s
--health-retries 5

steps:
- name: Clean workdir
if: ${{ runner.os == 'macOS' }}
uses: kuznetsss/workspace-cleanup@1.0

- uses: actions/download-artifact@v3
- uses: actions/download-artifact@v4
with:
name: clio_tests_${{ runner.os }}_${{ matrix.build_type }}

- name: Run clio_tests
run: |
chmod +x ./clio_tests
./clio_tests --gtest_filter="-BackendCassandraBaseTest*:BackendCassandraTest*:BackendCassandraFactoryTestWithDB*"
./clio_tests

# To be enabled back once docker in mac runner arrives
# https://github.com/XRPLF/clio/issues/1400
- name: Run clio_integration_tests
if: matrix.integration_tests
run: |
chmod +x ./clio_integration_tests
./clio_integration_tests --backend_host=scylladb

nightly_release:
needs: run_tests
@@ -113,7 +140,7 @@ jobs:
steps:
- uses: actions/checkout@v4

- uses: actions/download-artifact@v3
- uses: actions/download-artifact@v4
with:
path: nightly_release

@@ -122,7 +149,7 @@ jobs:
run: |
cp ${{ github.workspace }}/.github/workflows/nightly_notes.md "${RUNNER_TEMP}/nightly_notes.md"
cd nightly_release
rm -r clio_tests*
rm -r clio_*tests*
for d in $(ls); do
archive_name=$(ls $d)
mv ${d}/${archive_name} ./
@@ -144,3 +171,24 @@ jobs:
gh release create nightly --prerelease --title "Clio development (nightly) build" \
--target $GITHUB_SHA --notes-file "${RUNNER_TEMP}/nightly_notes.md" \
./nightly_release/clio_server*

create_issue_on_failure:
needs: [build, run_tests, nightly_release]
if: ${{ always() && contains(needs.*.result, 'failure') }}
runs-on: ubuntu-20.04
permissions:
contents: write
issues: write
steps:
- uses: actions/checkout@v4

- name: Create an issue
uses: ./.github/actions/create_issue
env:
GH_TOKEN: ${{ github.token }}
with:
title: 'Nightly release failed 🌙'
body: >
Nightly release failed:

Workflow: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}/
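The new integration-test step can be reproduced locally, assuming a reachable ScyllaDB instance (for example one started with `docker run -p 9042:9042 scylladb/scylla`):

```bash
# Sketch: run integration tests against a database. In CI the service alias
# is `scylladb`; locally the host would be e.g. 127.0.0.1.
chmod +x ./clio_integration_tests
./clio_integration_tests --backend_host=127.0.0.1
```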
.github/workflows/update_docker_ci.yml (vendored, 15 changed lines)

@@ -1,18 +1,25 @@
name: Update CI docker image
on:
pull_request:
paths:
- 'docker/ci/**'
- 'docker/compilers/**'
- .github/workflows/update_docker_ci.yml
push:
branches: [develop]
paths:
- 'docker/ci/**'
- 'docker/ci/**' # CI image must update when either its dockerfile changes
- 'docker/compilers/**' # or any compilers changed and were pushed by hand
- .github/workflows/update_docker_ci.yml
workflow_dispatch:

jobs:
build_and_push:
name: Build and push docker image
runs-on: ubuntu-20.04
runs-on: [self-hosted, heavy]
steps:
- name: Login to DockerHub
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USER }}
@@ -27,7 +34,7 @@ jobs:
images: rippleci/clio_ci
tags: |
type=raw,value=latest
type=raw,value=gcc_11
type=raw,value=gcc_12_clang_16
type=raw,value=${{ env.GITHUB_SHA }}

- name: Build and push
@@ -35,6 +42,6 @@ jobs:
with:
context: ${{ github.workspace }}/docker/ci
platforms: linux/amd64,linux/arm64
push: true
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.meta.outputs.tags }}
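A sketch of the equivalent manual multi-arch build of the CI image; the tag is chosen for illustration:

```bash
# Sketch: multi-arch build and push of the CI image, mirroring the workflow.
docker buildx build --platform linux/amd64,linux/arm64 \
    -t rippleci/clio_ci:latest --push docker/ci
```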
.github/workflows/upload_coverage_report.yml (vendored, 6 changed lines)

@@ -16,16 +16,16 @@ jobs:
fetch-depth: 0

- name: Download report artifact
uses: actions/download-artifact@v3
uses: actions/download-artifact@v4
with:
name: coverage-report.xml
path: build

- name: Upload coverage report
if: ${{ hashFiles('build/coverage_report.xml') != '' }}
uses: wandalen/wretry.action@v1.3.0
uses: wandalen/wretry.action@v1.4.10
with:
action: codecov/codecov-action@v3
action: codecov/codecov-action@v4
with: |
files: build/coverage_report.xml
fail_ci_if_error: false
CMakeLists.txt

@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 3.16.3)
cmake_minimum_required(VERSION 3.20)

set(CMAKE_PROJECT_INCLUDE_BEFORE ${CMAKE_CURRENT_SOURCE_DIR}/cmake/ClioVersion.cmake)

@@ -8,12 +8,14 @@ project(clio VERSION ${CLIO_VERSION} HOMEPAGE_URL "https://github.com/XRPLF/clio

# =========================== Options ====================================== #
option(verbose "Verbose build" FALSE)
option(tests "Build tests" FALSE)
option(tests "Build unit tests" FALSE)
option(integration_tests "Build integration tests" FALSE)
option(benchmark "Build benchmarks" FALSE)
option(docs "Generate doxygen docs" FALSE)
option(coverage "Build test coverage report" FALSE)
option(packaging "Create distribution packages" FALSE)
option(lint "Run clang-tidy checks during compilation" FALSE)
option(static "Statically linked Clio" FALSE)
# ========================================================================== #
set(san "" CACHE STRING "Add sanitizer instrumentation")
set(CMAKE_EXPORT_COMPILE_COMMANDS TRUE)
@@ -28,17 +30,9 @@ include(CheckCXXCompilerFlag)
include(ClangTidy)

add_library(clio_options INTERFACE)
target_compile_features(clio_options INTERFACE cxx_std_23) # Clio needs c++23 but deps can remain c++20 for now
target_include_directories(clio_options INTERFACE ${CMAKE_SOURCE_DIR}/src)

# Set coverage build options
if (coverage)
if (NOT tests)
message(FATAL_ERROR "Coverage requires tests to be enabled")
endif ()
include(CodeCoverage)
append_coverage_compiler_flags_to_target(clio_options INTERFACE)
endif ()

if (verbose)
set(CMAKE_VERBOSE_MAKEFILE TRUE)
endif ()
@@ -63,10 +57,7 @@ include(deps/cassandra)
include(deps/libbacktrace)

add_subdirectory(src)

if (tests)
add_subdirectory(unittests)
endif ()
add_subdirectory(tests)

if (benchmark)
add_subdirectory(benchmarks)
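The renamed and added options can also be toggled directly when configuring, although the Conan recipe normally sets them through the generated toolchain; a sketch:

```bash
# Sketch: configure with unit tests, integration tests and static linkage.
cmake -B build -Dtests=ON -Dintegration_tests=ON -Dstatic=ON
```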
||||
@@ -6,7 +6,7 @@ To contribute, please:
|
||||
2. Create a new branch on which to commit/push your changes.
|
||||
3. Write and test your code.
|
||||
4. Ensure that your code compiles with the provided build engine and update the provided build engine as part of your PR where needed and where appropriate.
|
||||
5. Where applicable, write test cases for your code and include those in `unittests`.
|
||||
5. Where applicable, write test cases for your code and include those in the relevant subfolder under `tests`.
|
||||
6. Ensure your code passes automated checks (e.g. clang-format)
|
||||
7. Squash your commits (i.e. rebase) into as few commits as is reasonable to describe your changes at a high level (typically a single commit for a small change). See below for more details.
|
||||
8. Open a PR to the main repository onto the _develop_ branch, and follow the provided template.
|
||||
@@ -21,7 +21,7 @@ git config --local core.hooksPath .githooks
|
||||
```
|
||||
|
||||
## Git hooks dependencies
|
||||
The pre-commit hook requires `clang-format >= 17.0.0` and `cmake-format` to be installed on your machine.
|
||||
The pre-commit hook requires `clang-format >= 18.0.0` and `cmake-format` to be installed on your machine.
|
||||
`clang-format` can be installed using `brew` on macOS and default package manager on Linux.
|
||||
`cmake-format` can be installed using `pip`.
|
||||
The hook will also attempt to automatically use `doxygen` to verify that everything public in the codebase is covered by doc comments. If `doxygen` is not installed, the hook will raise a warning suggesting to install `doxygen` for future commits.
|
||||
@@ -102,7 +102,7 @@ The button for that is near the bottom of the PR's page on GitHub.
|
||||
This is a non-exhaustive list of recommended style guidelines. These are not always strictly enforced and serve as a way to keep the codebase coherent.
|
||||
|
||||
## Formatting
|
||||
Code must conform to `clang-format` version 17, unless the result would be unreasonably difficult to read or maintain.
|
||||
Code must conform to `clang-format` version 18, unless the result would be unreasonably difficult to read or maintain.
|
||||
In most cases the pre-commit hook will take care of formatting and will fix any issues automatically.
|
||||
To manually format your code, use `clang-format -i <your changed files>` for C++ files and `cmake-format -i <your changed files>` for CMake files.
|
||||
|
||||
|
||||
README.md

@@ -36,10 +36,12 @@ Below are some useful docs to learn more about Clio.
- [How to configure Clio and rippled](./docs/configure-clio.md)
- [How to run Clio](./docs/run-clio.md)
- [Logging](./docs/logging.md)
- [Troubleshooting guide](./docs/trouble_shooting.md)

**General reference material:**

- [API reference](https://xrpl.org/http-websocket-apis.html)
- [Developer docs](https://xrplf.github.io/clio)
- [Clio documentation](https://xrpl.org/the-clio-server.html#the-clio-server)

## 🆘 Help
@@ -1,20 +1,20 @@
if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 14)
message(FATAL_ERROR "Clang 14+ required for building clio")
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 16)
message(FATAL_ERROR "Clang 16+ required for building clio")
endif ()
set(is_clang TRUE)
elseif (CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 14)
message(FATAL_ERROR "AppleClang 14+ required for building clio")
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 15)
message(FATAL_ERROR "AppleClang 15+ required for building clio")
endif ()
set(is_appleclang TRUE)
elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 11)
message(FATAL_ERROR "GCC 11+ required for building clio")
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 12)
message(FATAL_ERROR "GCC 12+ required for building clio")
endif ()
set(is_gcc TRUE)
else ()
message(FATAL_ERROR "Supported compilers: AppleClang 14+, Clang 14+, GCC 11+")
message(FATAL_ERROR "Supported compilers: AppleClang 15+, Clang 16+, GCC 12+")
endif ()

if (san)
@@ -8,7 +8,7 @@ if (lint)
endif ()
message(STATUS "Using clang-tidy from CLIO_CLANG_TIDY_BIN")
else ()
find_program(_CLANG_TIDY_BIN NAMES "clang-tidy-17" "clang-tidy" REQUIRED)
find_program(_CLANG_TIDY_BIN NAMES "clang-tidy-18" "clang-tidy" REQUIRED)
endif ()

if (NOT _CLANG_TIDY_BIN)
cmake/ClioVersion.cmake

@@ -22,7 +22,11 @@ endif ()

if (NOT (BRANCH MATCHES master OR BRANCH MATCHES release/*)) # for develop and any other branch name
# YYYYMMDDHMS-<branch>-<git-rev>
execute_process(COMMAND date +%Y%m%d%H%M%S OUTPUT_VARIABLE DATE OUTPUT_STRIP_TRAILING_WHITESPACE)
set(GIT_COMMAND show -s --date=format:%Y%m%d%H%M%S --format=%cd)
execute_process(
COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} OUTPUT_VARIABLE DATE
OUTPUT_STRIP_TRAILING_WHITESPACE
)
set(CLIO_VERSION "${DATE}-${BRANCH}-${REV}")
set(DOC_CLIO_VERSION "develop")
else ()
@@ -31,7 +35,7 @@ else ()
COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE CLIO_TAG_VERSION
OUTPUT_STRIP_TRAILING_WHITESPACE
)
set(CLIO_VERSION "${CLIO_TAG_VERSION}-${REV}")
set(CLIO_VERSION "${CLIO_TAG_VERSION}")
set(DOC_CLIO_VERSION "${CLIO_TAG_VERSION}")
endif ()
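The version stamp now comes from the commit date rather than the wall clock, which makes rebuilding the same commit reproducible; the underlying git command can be run in any clone:

```bash
# Prints the commit date of HEAD in the YYYYMMDDHHMMSS form used in the version string.
git show -s --date=format:%Y%m%d%H%M%S --format=%cd
```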
@@ -17,6 +17,13 @@ set(COMPILER_FLAGS
-pedantic
-Wpedantic
-Wunused
# FIXME: The following bunch are needed for gcc12 atm.
-Wno-missing-requires
-Wno-restrict
-Wno-null-dereference
-Wno-maybe-uninitialized
-Wno-unknown-warning-option # and this to work with clang
# TODO: Address these and others in https://github.com/XRPLF/clio/issues/1273
)

# TODO: reenable when we change CI #884 if (is_gcc AND NOT lint) list(APPEND COMPILER_FLAGS -Wduplicated-branches
conanfile.py (28 changed lines)

@@ -1,7 +1,6 @@
from conan import ConanFile
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout


class Clio(ConanFile):
name = 'clio'
license = 'ISC'
@@ -10,31 +9,35 @@ class Clio(ConanFile):
description = 'Clio RPC server'
settings = 'os', 'compiler', 'build_type', 'arch'
options = {
'fPIC': [True, False],
'static': [True, False],  # static linkage
'fPIC': [True, False],  # unused?
'verbose': [True, False],
'tests': [True, False],  # build unit tests; create `clio_tests` binary
'benchmark': [True, False],  # build benchmarks; create `clio_benchmarks` binary
'docs': [True, False],  # doxygen API docs; create custom target 'docs'
'packaging': [True, False],  # create distribution packages
'coverage': [True, False],  # build for test coverage report; create custom target `clio_tests-ccov`
'lint': [True, False],  # run clang-tidy checks during compilation
'tests': [True, False],  # build unit tests; create `clio_tests` binary
'integration_tests': [True, False],  # build integration tests; create `clio_integration_tests` binary
'benchmark': [True, False],  # build benchmarks; create `clio_benchmarks` binary
'docs': [True, False],  # doxygen API docs; create custom target 'docs'
'packaging': [True, False],  # create distribution packages
'coverage': [True, False],  # build for test coverage report; create custom target `clio_tests-ccov`
'lint': [True, False],  # run clang-tidy checks during compilation
}

requires = [
'boost/1.82.0',
'cassandra-cpp-driver/2.17.0',
'fmt/10.1.1',
'protobuf/3.21.12',
'protobuf/3.21.9',
'grpc/1.50.1',
'openssl/1.1.1u',
'xrpl/2.2.0-b1',
'xrpl/2.2.0',
'libbacktrace/cci.20210118'
]

default_options = {
'static': False,
'fPIC': True,
'verbose': False,
'tests': False,
'integration_tests': False,
'benchmark': False,
'packaging': False,
'coverage': False,
@@ -42,6 +45,7 @@ class Clio(ConanFile):
'docs': False,

'xrpl/*:tests': False,
'xrpl/*:rocksdb': False,
'cassandra-cpp-driver/*:shared': False,
'date/*:header_only': True,
'grpc/*:shared': False,
@@ -60,7 +64,7 @@ class Clio(ConanFile):
)

def requirements(self):
if self.options.tests:
if self.options.tests or self.options.integration_tests:
self.requires('gtest/1.14.0')
if self.options.benchmark:
self.requires('benchmark/1.8.3')
@@ -79,7 +83,9 @@ class Clio(ConanFile):
def generate(self):
tc = CMakeToolchain(self)
tc.variables['verbose'] = self.options.verbose
tc.variables['static'] = self.options.static
tc.variables['tests'] = self.options.tests
tc.variables['integration_tests'] = self.options.integration_tests
tc.variables['coverage'] = self.options.coverage
tc.variables['lint'] = self.options.lint
tc.variables['docs'] = self.options.docs
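With the new option wired through the recipe, a one-line sketch for building the integration-test binary from a fresh checkout; the profile name is an assumption:

```bash
# Sketch: enable the new integration_tests option from the command line.
conan install . -of build -b missing -s build_type=Release \
    -o clio:integration_tests=True --profile default
```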
(deleted file, 49 lines)

@@ -1,49 +0,0 @@
# FROM centos:7 as deps
FROM centos:7 as build

ENV CLIO_DIR=/opt/clio/
# ENV OPENSSL_DIR=/opt/openssl

RUN yum -y install git epel-release centos-release-scl perl-IPC-Cmd openssl
RUN yum install -y devtoolset-11
ENV version=3.16
ENV build=3
# RUN curl -OJL https://cmake.org/files/v$version/cmake-$version.$build.tar.gz
COPY docker/shared/install_cmake.sh /install_cmake.sh
RUN /install_cmake.sh 3.16.3 /usr/local
RUN source /opt/rh/devtoolset-11/enable
WORKDIR /tmp
# RUN mkdir $OPENSSL_DIR && cd $OPENSSL_DIR
COPY docker/centos/build_git_centos7.sh build_git_centos7.sh

RUN ./build_git_centos7.sh
RUN git clone https://github.com/openssl/openssl
WORKDIR /tmp/openssl
RUN git checkout OpenSSL_1_1_1q
#--prefix=/usr --openssldir=/etc/ssl --libdir=lib no-shared zlib-dynamic
RUN SSLDIR=$(openssl version -d | cut -d: -f2 | tr -d [:space:]\") && ./config -fPIC --prefix=/usr --openssldir=${SSLDIR} zlib shared && \
make -j $(nproc) && \
make install_sw
WORKDIR /tmp
# FROM centos:7 as build

RUN git clone https://github.com/xrplf/clio.git
COPY docker/shared/build_boost.sh build_boost.sh
ENV OPENSSL_ROOT=/opt/local/openssl
ENV BOOST_ROOT=/boost
RUN source scl_source enable devtoolset-11 && /tmp/build_boost.sh 1.75.0
RUN yum install -y bison flex
RUN yum install -y rpmdevtools rpmlint
RUN source /opt/rh/devtoolset-11/enable && cd /tmp/clio && \
cmake -B build -DBUILD_TESTS=1 && \
cmake --build build --parallel $(nproc)
RUN mkdir output
RUN strip clio/build/clio_server && strip clio/build/clio_tests
RUN cp clio/build/clio_tests output/ && cp clio/build/clio_server output/
RUN cp clio/docs/examples/config/example-config.json output/example-config.json

FROM centos:7
COPY --from=build /tmp/output /clio
RUN mkdir -p /opt/clio/etc && mv /clio/docs/examples/config/example-config.json /opt/clio/etc/config.json

CMD ["/clio/clio_server", "/opt/clio/etc/config.json"]
(deleted file, 18 lines)

@@ -1,18 +0,0 @@
#!/usr/bin/env bash

set -ex
GIT_VERSION="2.37.1"
curl -OJL https://github.com/git/git/archive/refs/tags/v${GIT_VERSION}.tar.gz
tar zxvf git-${GIT_VERSION}.tar.gz
cd git-${GIT_VERSION}

yum install -y centos-release-scl epel-release
yum update -y
yum install -y devtoolset-11 autoconf gnu-getopt gettext zlib-devel libcurl-devel

source /opt/rh/devtoolset-11/enable
make configure
./configure
make git -j$(nproc)
make install git
git --version | cut -d ' ' -f3
(deleted file, 11 lines)

@@ -1,11 +0,0 @@
#!/bin/bash

set -eo pipefail

CMAKE_VERSION=${1:-"3.16.3"}
cd /tmp
URL="https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-x86_64.tar.gz"
curl -OJLs $URL
tar xzvf cmake-${CMAKE_VERSION}-Linux-x86_64.tar.gz
mv cmake-${CMAKE_VERSION}-Linux-x86_64 /opt/
ln -s /opt/cmake-${CMAKE_VERSION}-Linux-x86_64/bin/cmake /usr/local/bin/cmake
@@ -1,42 +1,46 @@
FROM ubuntu:focal
FROM rippleci/clio_clang:16
ARG DEBIAN_FRONTEND=noninteractive
ARG TARGETARCH

SHELL ["/bin/bash", "-c"]
USER root
WORKDIR /root/
WORKDIR /root

ENV GCC_VERSION=11 \
CCACHE_VERSION=4.8.3 \
LLVM_TOOLS_VERSION=17 \
ENV CCACHE_VERSION=4.8.3 \
LLVM_TOOLS_VERSION=18 \
GH_VERSION=2.40.0 \
DOXYGEN_VERSION=1.10.0

# Add repositories
RUN apt-get -qq update \
&& apt-get -qq install -y --no-install-recommends --no-install-suggests gnupg wget curl software-properties-common \
&& add-apt-repository -y ppa:ubuntu-toolchain-r/test \
&& wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null | apt-key add - \
&& apt-add-repository 'deb https://apt.kitware.com/ubuntu/ focal main' \
&& echo "deb http://apt.llvm.org/focal/ llvm-toolchain-focal-${LLVM_TOOLS_VERSION} main" >> /etc/apt/sources.list \
&& wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add -

# Install packages
RUN apt update -qq \
&& apt install -y --no-install-recommends --no-install-suggests cmake python3 python3-pip sudo git \
ninja-build make pkg-config libzstd-dev libzstd1 g++-${GCC_VERSION} flex bison jq graphviz \
&& apt install -y --no-install-recommends --no-install-suggests python3 python3-pip git git-lfs make ninja-build flex bison jq graphviz \
clang-format-${LLVM_TOOLS_VERSION} clang-tidy-${LLVM_TOOLS_VERSION} clang-tools-${LLVM_TOOLS_VERSION} \
&& update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-${GCC_VERSION} 100 \
&& update-alternatives --install /usr/bin/c++ c++ /usr/bin/g++-${GCC_VERSION} 100 \
&& update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-${GCC_VERSION} 100 \
&& update-alternatives --install /usr/bin/cc cc /usr/bin/gcc-${GCC_VERSION} 100 \
&& update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-${GCC_VERSION} 100 \
&& update-alternatives --install /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-${GCC_VERSION} 100 \
&& update-alternatives --install /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-${GCC_VERSION} 100 \
&& update-alternatives --install /usr/bin/clang-format clang-format /usr/bin/clang-format-${LLVM_TOOLS_VERSION} 100 \
&& apt-get clean && apt remove -y software-properties-common \
&& pip3 install -q --upgrade --no-cache-dir pip \
&& pip3 install -q --no-cache-dir conan==1.62 gcovr cmake-format
&& pip3 install -q --upgrade --no-cache-dir pip && pip3 install -q --no-cache-dir conan==1.62 gcovr cmake cmake-format \
&& apt-get clean && apt remove -y software-properties-common

# Install gcc-12 and make ldconfig aware of the new libstdc++ location (for gcc)
# Note: Clang is using libc++ instead
COPY --from=rippleci/clio_gcc:12.3.0 /gcc12.deb /
RUN apt update && apt-get install -y binutils libc6-dev \
&& dpkg -i /gcc12.deb \
&& rm -rf /gcc12.deb \
&& ldconfig

# Rewire to use gcc-12 as default compiler
RUN update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-12 100 \
&& update-alternatives --install /usr/bin/c++ c++ /usr/bin/g++-12 100 \
&& update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 100 \
&& update-alternatives --install /usr/bin/cc cc /usr/bin/gcc-12 100 \
&& update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-12 100 \
&& update-alternatives --install /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-12 100 \
&& update-alternatives --install /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-12 100

WORKDIR /tmp

@@ -46,34 +50,51 @@ RUN wget "https://github.com/ccache/ccache/releases/download/v${CCACHE_VERSION}/
&& cd "ccache-${CCACHE_VERSION}" \
&& mkdir build && cd build \
&& cmake -GNinja -DCMAKE_BUILD_TYPE=Release .. \
&& cmake --build . --target install
&& cmake --build . --target install \
&& rm -rf /tmp/* /var/tmp/*

# Install doxygen from sounce
# Install doxygen from source
RUN wget "https://github.com/doxygen/doxygen/releases/download/Release_${DOXYGEN_VERSION//./_}/doxygen-${DOXYGEN_VERSION}.src.tar.gz" \
&& tar xf "doxygen-${DOXYGEN_VERSION}.src.tar.gz" \
&& cd "doxygen-${DOXYGEN_VERSION}" \
&& mkdir build && cd build \
&& cmake -GNinja -DCMAKE_BUILD_TYPE=Release .. \
&& cmake --build . --target install
&& cmake --build . --target install \
&& rm -rf /tmp/* /var/tmp/*

# Install gh
RUN wget https://github.com/cli/cli/releases/download/v${GH_VERSION}/gh_${GH_VERSION}_linux_${TARGETARCH}.tar.gz \
&& tar xf gh_${GH_VERSION}_linux_${TARGETARCH}.tar.gz \
&& mv gh_${GH_VERSION}_linux_${TARGETARCH}/bin/gh /usr/bin/gh
&& mv gh_${GH_VERSION}_linux_${TARGETARCH}/bin/gh /usr/bin/gh \
&& rm -rf /tmp/* /var/tmp/*

# Clean up
RUN rm -rf /tmp/* /var/tmp/*

WORKDIR /root/
WORKDIR /root
# Using root by default is not very secure but github checkout action doesn't work with any other user
# https://github.com/actions/checkout/issues/956
# And Github Actions doc recommends using root
# https://docs.github.com/en/actions/creating-actions/dockerfile-support-for-github-actions#user

# Setup conan
RUN conan profile new default --detect \
&& conan profile update settings.compiler.cppstd=20 default \
&& conan profile update settings.compiler.libcxx=libstdc++11 default \
&& conan remote add --insert 0 conan-non-prod http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
RUN conan remote add --insert 0 conan-non-prod http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod

# Note: intentionally leaving cppstd=20
RUN conan profile new gcc --detect \
&& conan profile update settings.compiler=gcc gcc \
&& conan profile update settings.compiler.version=12 gcc \
&& conan profile update settings.compiler.cppstd=20 gcc \
&& conan profile update settings.compiler.libcxx=libstdc++11 gcc \
&& conan profile update env.CC=/usr/bin/gcc-12 gcc \
&& conan profile update env.CXX=/usr/bin/g++-12 gcc \
&& conan profile update "conf.tools.build:compiler_executables={\"c\": \"/usr/bin/gcc-12\", \"cpp\": \"/usr/bin/g++-12\"}" gcc

RUN conan profile new clang --detect \
&& conan profile update settings.compiler=clang clang \
&& conan profile update settings.compiler.version=16 clang \
&& conan profile update settings.compiler.cppstd=20 clang \
&& conan profile update settings.compiler.libcxx=libc++ clang \
&& conan profile update env.CC=/usr/bin/clang-16 clang \
&& conan profile update env.CXX=/usr/bin/clang++-16 clang \
&& conan profile update env.CXXFLAGS="-DBOOST_ASIO_DISABLE_CONCEPTS" clang \
&& conan profile update "conf.tools.build:compiler_executables={\"c\": \"/usr/bin/clang-16\", \"cpp\": \"/usr/bin/clang++-16\"}" clang

RUN echo "include(gcc)" >> .conan/profiles/default

@@ -1,13 +0,0 @@
#!/usr/bin/env bash
set -exu

#yum install wget lz4 lz4-devel git llvm13-static.x86_64 llvm13-devel.x86_64 devtoolset-11-binutils zlib-static
# it's either those or link=static that halves the failures. probably link=static
BOOST_VERSION=$1
BOOST_VERSION_=$(echo ${BOOST_VERSION} | tr . _)
echo "BOOST_VERSION: ${BOOST_VERSION}"
echo "BOOST_VERSION_: ${BOOST_VERSION_}"
curl -OJLs "https://boostorg.jfrog.io/artifactory/main/release/${BOOST_VERSION}/source/boost_${BOOST_VERSION_}.tar.gz"
tar zxf "boost_${BOOST_VERSION_}.tar.gz"
cd boost_${BOOST_VERSION_} && ./bootstrap.sh && ./b2 --without-python link=static -j$(nproc)
mkdir -p /boost && mv boost /boost && mv stage /boost
@@ -1,18 +0,0 @@
#!/usr/bin/env bash

set -ex
GIT_VERSION="2.37.1"
curl -OJL https://github.com/git/git/archive/refs/tags/v${GIT_VERSION}.tar.gz
tar zxvf git-${GIT_VERSION}.tar.gz
cd git-${GIT_VERSION}

yum install -y centos-release-scl epel-release
yum update -y
yum install -y devtoolset-11 autoconf gnu-getopt gettext zlib-devel libcurl-devel

source /opt/rh/devtoolset-11/enable
make configure
./configure
make git -j$(nproc)
make install git
git --version | cut -d ' ' -f3
@@ -1,34 +0,0 @@
FROM centos:7

ENV CLIO_DIR=/opt/clio/
# ENV OPENSSL_DIR=/opt/openssl

RUN yum -y install git epel-release centos-release-scl perl-IPC-Cmd openssl
RUN yum install -y devtoolset-11
ENV version=3.16
ENV build=3
# RUN curl -OJL https://cmake.org/files/v$version/cmake-$version.$build.tar.gz
COPY install_cmake.sh /install_cmake.sh
RUN /install_cmake.sh 3.16.3 /usr/local
RUN source /opt/rh/devtoolset-11/enable
WORKDIR /tmp
# RUN mkdir $OPENSSL_DIR && cd $OPENSSL_DIR
COPY build_git_centos7.sh build_git_centos7.sh

RUN ./build_git_centos7.sh
RUN git clone https://github.com/openssl/openssl
WORKDIR /tmp/openssl
RUN git checkout OpenSSL_1_1_1q
#--prefix=/usr --openssldir=/etc/ssl --libdir=lib no-shared zlib-dynamic
RUN SSLDIR=$(openssl version -d | cut -d: -f2 | tr -d [:space:]\") && ./config -fPIC --prefix=/usr --openssldir=${SSLDIR} zlib shared && \
make -j $(nproc) && \
make install_sw
WORKDIR /tmp
RUN git clone https://github.com/xrplf/clio.git
COPY build_boost.sh build_boost.sh
ENV OPENSSL_ROOT=/opt/local/openssl
ENV BOOST_ROOT=/boost
RUN source scl_source enable devtoolset-11 && /tmp/build_boost.sh 1.75.0
RUN yum install -y bison flex
RUN source /opt/rh/devtoolset-11/enable && \
cd /tmp/clio && cmake -B build -Dtests=0 -Dlocal_libarchive=1 -Dunity=0 -DBUILD_TESTS=0 && cmake --build build --parallel $(nproc)
@@ -1,11 +0,0 @@
#!/bin/bash

set -eo pipefail

CMAKE_VERSION=${1:-"3.16.3"}
cd /tmp
URL="https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-x86_64.tar.gz"
curl -OJLs $URL
tar xzvf cmake-${CMAKE_VERSION}-Linux-x86_64.tar.gz
mv cmake-${CMAKE_VERSION}-Linux-x86_64 /opt/
ln -s /opt/cmake-${CMAKE_VERSION}-Linux-x86_64/bin/cmake /usr/local/bin/cmake
19
docker/compilers/clang-16/dockerfile
Normal file
@@ -0,0 +1,19 @@
FROM ubuntu:focal
ARG DEBIAN_FRONTEND=noninteractive
ARG TARGETARCH

SHELL ["/bin/bash", "-c"]
USER root
WORKDIR /root

ENV CLANG_VERSION=16

RUN apt update -qq \
&& apt install -qq -y --no-install-recommends --no-install-suggests \
wget software-properties-common gnupg

RUN wget https://apt.llvm.org/llvm.sh \
&& chmod +x llvm.sh \
&& ./llvm.sh ${CLANG_VERSION} \
&& rm -rf llvm.sh \
&& apt-get install -y libc++-16-dev libc++abi-16-dev
6
docker/compilers/gcc-12/control.m4
Normal file
@@ -0,0 +1,6 @@
Package: gcc-12-ubuntu-UBUNTUVERSION
Version: VERSION
Architecture: TARGETARCH
Maintainer: Alex Kremer <akremer@ripple.com>
Description: Gcc VERSION build for ubuntu UBUNTUVERSION
Depends: binutils, libc6-dev
74
docker/compilers/gcc-12/dockerfile
Normal file
@@ -0,0 +1,74 @@
FROM ubuntu:focal as build

ARG DEBIAN_FRONTEND=noninteractive
ARG TARGETARCH
ARG UBUNTU_VERSION=20.04
ARG GCC_VERSION=12.3.0
ARG BUILD_VERSION=1

RUN apt update && apt install -y wget build-essential file flex libz-dev libzstd-dev
RUN wget https://gcc.gnu.org/pub/gcc/releases/gcc-$GCC_VERSION/gcc-$GCC_VERSION.tar.gz \
&& tar xf gcc-$GCC_VERSION.tar.gz \
&& cd /gcc-$GCC_VERSION && ./contrib/download_prerequisites

RUN mkdir /${TARGETARCH}-gcc-12
WORKDIR /${TARGETARCH}-gcc-12
RUN /gcc-$GCC_VERSION/configure \
--with-pkgversion="clio-build-$BUILD_VERSION https://github.com/XRPLF/clio" \
--enable-languages=c,c++ \
--prefix=/usr \
--with-gcc-major-version-only \
--program-suffix=-12 \
--enable-shared \
--enable-linker-build-id \
--libexecdir=/usr/lib \
--without-included-gettext \
--enable-threads=posix \
--libdir=/usr/lib \
--disable-nls \
--enable-clocale=gnu \
--enable-libstdcxx-backtrace=yes \
--enable-libstdcxx-debug \
--enable-libstdcxx-time=yes \
--with-default-libstdcxx-abi=new \
--enable-gnu-unique-object \
--disable-vtable-verify \
--enable-plugin \
--enable-default-pie \
--with-system-zlib \
--enable-libphobos-checking=release \
--with-target-system-zlib=auto \
--disable-werror \
--enable-cet \
--disable-multilib \
--without-cuda-driver \
--enable-checking=release \
&& make -j`nproc` \
&& make install-strip DESTDIR=/gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION \
&& mkdir -p /gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION/usr/share/gdb/auto-load/usr/lib64 \
&& mv /gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION/usr/lib64/libstdc++.so.6.0.30-gdb.py /gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION/usr/share/gdb/auto-load/usr/lib64/libstdc++.so.6.0.30-gdb.py

# Generate deb
WORKDIR /
COPY control.m4 /
COPY ld.so.conf /gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION/etc/ld.so.conf.d/1-gcc-12.conf

RUN mkdir /gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION/DEBIAN \
&& m4 -P -DUBUNTU_VERSION=$UBUNTU_VERSION -DVERSION=$GCC_VERSION-$BUILD_VERSION -DTARGETARCH=$TARGETARCH control.m4 > /gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION/DEBIAN/control \
&& dpkg-deb --build --root-owner-group /gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION /gcc12.deb

# Create final image
FROM ubuntu:focal as gcc
COPY --from=build /gcc12.deb /

# Make gcc-12 available but also leave gcc12.deb for others to copy if needed
RUN apt update && apt-get install -y binutils libc6-dev \
&& dpkg -i /gcc12.deb

RUN update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-12 100 \
&& update-alternatives --install /usr/bin/c++ c++ /usr/bin/g++-12 100 \
&& update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 100 \
&& update-alternatives --install /usr/bin/cc cc /usr/bin/gcc-12 100 \
&& update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-12 100 \
&& update-alternatives --install /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-12 100 \
&& update-alternatives --install /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-12 100
2
docker/compilers/gcc-12/ld.so.conf
Normal file
@@ -0,0 +1,2 @@
# Path to the directory containing libstdc++.so.6
/usr/lib64
@@ -1,13 +0,0 @@
#!/usr/bin/env bash
set -exu

#yum install wget lz4 lz4-devel git llvm13-static.x86_64 llvm13-devel.x86_64 devtoolset-11-binutils zlib-static
# it's either those or link=static that halves the failures. probably link=static
BOOST_VERSION=$1
BOOST_VERSION_=$(echo ${BOOST_VERSION} | tr . _)
echo "BOOST_VERSION: ${BOOST_VERSION}"
echo "BOOST_VERSION_: ${BOOST_VERSION_}"
curl -OJLs "https://boostorg.jfrog.io/artifactory/main/release/${BOOST_VERSION}/source/boost_${BOOST_VERSION_}.tar.gz"
tar zxf "boost_${BOOST_VERSION_}.tar.gz"
cd boost_${BOOST_VERSION_} && ./bootstrap.sh && ./b2 --without-python link=static -j$(nproc)
mkdir -p /boost && mv boost /boost && mv stage /boost
@@ -1,11 +0,0 @@
#!/bin/bash

set -eo pipefail

CMAKE_VERSION=${1:-"3.16.3"}
cd /tmp
URL="https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-x86_64.tar.gz"
curl -OJLs $URL
tar xzvf cmake-${CMAKE_VERSION}-Linux-x86_64.tar.gz
mv cmake-${CMAKE_VERSION}-Linux-x86_64 /opt/
ln -s /opt/cmake-${CMAKE_VERSION}-Linux-x86_64/bin/cmake /usr/local/bin/cmake
@@ -1,3 +0,0 @@
#!/bin/bash

set -e
@@ -1,24 +0,0 @@
FROM ubuntu:20.04 AS boost

RUN apt-get update && apt-get install -y build-essential
ARG BOOST_VERSION_=1_75_0
ARG BOOST_VERSION=1.75.0
COPY docker/shared/build_boost.sh .
RUN apt install -y curl
RUN ./build_boost.sh ${BOOST_VERSION}
ENV BOOST_ROOT=/boost

FROM ubuntu:20.04 AS build
ENV BOOST_ROOT=/boost
COPY --from=boost /boost /boost
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install --no-install-recommends -y build-essential software-properties-common pkg-config libssl-dev wget curl gpg git zlib1g-dev bison flex autoconf lsb-release
RUN apt install -y gpg-agent
RUN wget https://apt.llvm.org/llvm.sh
RUN chmod +x llvm.sh && ./llvm.sh 14 && ./llvm.sh 15
# COPY . /clio
## Install cmake
ARG CMAKE_VERSION=3.16.3
COPY docker/shared/install_cmake.sh .
RUN ./install_cmake.sh ${CMAKE_VERSION}
ENV PATH="/opt/local/cmake/bin:$PATH"
@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 3.16.3)
cmake_minimum_required(VERSION 3.20)
project(docs)

include(${CMAKE_CURRENT_SOURCE_DIR}/../cmake/ClioVersion.cmake)

@@ -6,19 +6,22 @@ Clio is built with [CMake](https://cmake.org/) and uses [Conan](https://conan.io

- [Python 3.7](https://www.python.org/downloads/)
- [Conan 1.55](https://conan.io/downloads.html)
- [CMake 3.16](https://cmake.org/download/)
- [CMake 3.20](https://cmake.org/download/)
- [**Optional**] [GCovr](https://gcc.gnu.org/onlinedocs/gcc/Gcov.html): needed for code coverage generation
- [**Optional**] [CCache](https://ccache.dev/): speeds up compilation if you are going to compile Clio often

| Compiler    | Version |
|-------------|---------|
| GCC         | 11      |
| Clang       | 14      |
| Apple Clang | 14.0.3  |
| GCC         | 12.3    |
| Clang       | 16      |
| Apple Clang | 15      |

### Conan Configuration

Clio does not require anything but default settings in your (`~/.conan/profiles/default`) Conan profile. It's best to have no extra flags specified.
Clio does not require anything other than `compiler.cppstd=20` in your (`~/.conan/profiles/default`) Conan profile.

> [!NOTE]
> Although Clio is built using C++23, it's required to set `compiler.cppstd=20` for the time being as some of Clio's dependencies are not yet capable of building under C++23.

> Mac example:

@@ -29,10 +32,12 @@ os_build=Macos
arch=armv8
arch_build=armv8
compiler=apple-clang
compiler.version=14
compiler.version=15
compiler.libcxx=libc++
build_type=Release
compiler.cppstd=20
[conf]
tools.build:cxxflags+=["-DBOOST_ASIO_DISABLE_CONCEPTS"]
```

> Linux example:
@@ -44,7 +49,7 @@ os_build=Linux
arch=x86_64
arch_build=x86_64
compiler=gcc
compiler.version=11
compiler.version=12
compiler.libcxx=libstdc++11
build_type=Release
compiler.cppstd=20
@@ -88,6 +93,35 @@ If successful, `conan install` will find the required packages and `cmake` will
> [!TIP]
> To generate a Code Coverage report, include `-o coverage=True` in the `conan install` command above, along with `-o tests=True` to enable tests. After running the `cmake` commands, execute `make clio_tests-ccov`. The coverage report will be found at `clio_tests-llvm-cov/index.html`.

> [!NOTE]
> If you've built Clio before and the build is now failing, it's likely due to updated dependencies. Try deleting the build folder and then rerunning the Conan and CMake commands mentioned above.

### Generating API docs for Clio

The API documentation for Clio is generated by [Doxygen](https://www.doxygen.nl/index.html). If you want to generate the API documentation when building Clio, make sure to install Doxygen on your system.

To generate the API docs:

1. First, include `-o docs=True` in the conan install command. For example:

```sh
mkdir build && cd build
conan install .. --output-folder . --build missing --settings build_type=Release -o tests=True -o lint=False -o docs=True
```

2. Once that has completed successfully, run the `cmake` command and add the `--target docs` option:

```sh
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release ..
cmake --build . --parallel 8 --target docs
```

3. Go to `build/docs/html` to view the generated files.

Open the `index.html` file in your browser to see the documentation pages.

![Doxygen docs output](img/doxygen-docs-output.png)

## Building Clio with Docker

It is also possible to build Clio using [Docker](https://www.docker.com/) if you don't want to install all the dependencies on your machine.
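
For example, a possible workflow looks like the sketch below. The image tag and dockerfile path are illustrative assumptions, not the project's documented commands; check the `docker/` directory of your checkout for the actual file locations.

```sh
# Illustrative only: build an image containing Clio's build dependencies,
# then run a shell inside it with the source tree mounted.
git clone https://github.com/XRPLF/clio.git && cd clio
docker build -t clio-builder -f docker/ci/dockerfile .
docker run --rm -it -v "$PWD":/clio -w /clio clio-builder /bin/bash
```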

@@ -27,7 +27,7 @@ If you're running Clio and `rippled` on separate machines, in addition to uncomm

2. Open a public, unencrypted WebSocket port on your `rippled` server.

3. In the `rippled` config, change the IP specified for `secure_gateway`, under the `port_grpc` section, to the IP of your Clio server. This entry can take the form of a comma-separated list if you are running multiple Clio nodes.
3. In the `rippled` config, change the IP specified for `secure_gateway`, under the `port_grpc` and websocket server sections, to the IP of your Clio server. This entry can take the form of a comma-separated list if you are running multiple Clio nodes.
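
For reference, a sketch of the relevant `rippled` config stanzas is shown below. The stanza names follow the stock `rippled` example config and are assumptions about your setup; adjust IPs and ports to your deployment.

```
[port_grpc]
ip = 0.0.0.0
port = 50051
secure_gateway = <IP of your Clio server>

[port_ws_public]
ip = 0.0.0.0
port = 6005
protocol = ws
secure_gateway = <IP of your Clio server>
```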

## Ledger sequence

@@ -97,3 +97,11 @@ To enable the caching for a source, `forwarding_cache_timeout` value should be a

`forwarding_cache_timeout` defines how long (in seconds) a cache entry remains valid after being placed into the cache.
A value of zero turns the cache feature off.
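
For example, a minimal sketch of the setting in the config file (the value is illustrative):

```
"forwarding_cache_timeout": 0.250 // cache forwarded responses for 250 ms; 0 disables caching
```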

## Graceful shutdown (not fully implemented yet)

Clio can be gracefully shut down by sending a `SIGINT` (Ctrl+C) or `SIGTERM` signal.
The process will stop accepting new connections and will wait for the time specified in the `graceful_period` config value (or 10 seconds by default).
If Clio finishes all the scheduled operations before the end of the period, it will stop immediately.
Otherwise, it will wait for the period to finish and then exit without finishing operations.
If Clio receives a second signal during the period, it will stop immediately.
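
For example (a sketch; `graceful_period` sits at the top level of Clio's config file, as in the shipped example config):

```sh
# Ask Clio to shut down gracefully; it waits up to graceful_period (10s by default).
kill -TERM "$(pidof clio_server)"
# Sending a second SIGTERM/SIGINT during the period makes it stop immediately.
```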

@@ -27,15 +27,6 @@ In case of a spurious failure of unit tests, it is possible to re-run the `cover

The default coverage report format is `html-details`, but developers can override it to any of the formats listed in `cmake/CodeCoverage.cmake` by setting the `CODE_COVERAGE_REPORT_FORMAT` variable in `cmake`. For example, CI sets this parameter to `xml` for the [codecov](https://codecov.io) integration.

If some unit tests predictably fail (e.g., due to the absence of a Cassandra database), it is possible to set unit test options in the `CODE_COVERAGE_TESTS_ARGS` cmake variable, as demonstrated below:

```sh
cd .build
conan install .. --output-folder . --build missing --settings build_type=Debug -o tests=True -o coverage=True
cmake -DCODE_COVERAGE_REPORT_FORMAT=json-details -DCMAKE_BUILD_TYPE=Debug -DCODE_COVERAGE_TESTS_ARGS="--gtest_filter=-BackendCassandra*" -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake ..
cmake --build . --target coverage_report
```

After the `coverage_report` target is completed, the generated coverage report will be stored inside the build directory as either:

- A file named `coverage_report.*`, with a suitable extension for the report format.

@@ -35,7 +35,10 @@
"grpc_port": "50051"
}
],
"forwarding_cache_timeout": 0.250, // in seconds, could be 0, which means no cache
"forwarding": {
"cache_timeout": 0.250, // in seconds, could be 0, which means no cache
"request_timeout": 10.0 // time for Clio to wait for rippled to reply on a forwarded request (default is 10 seconds)
},
"dos_guard": {
// Comma-separated list of IPs to exclude from rate limiting
"whitelist": [
@@ -64,8 +67,10 @@
"admin_password": "xrp",
// If local_admin is true, Clio will consider requests coming from 127.0.0.1 as admin requests
// It's true by default unless admin_password is set; 'local_admin': true and 'admin_password' cannot be set at the same time
"local_amdin": false
"local_admin": false
},
// Time in seconds for graceful shutdown. Defaults to 10 seconds. Not fully implemented yet.
"graceful_period": 10.0,
// Overrides log level on a per logging channel.
// Defaults to global "log_level" for each unspecified channel.
"log_channels": [
@@ -94,6 +99,15 @@
"log_level": "trace"
}
],
"cache": {
// Configure this to use either "num_diffs", "num_cursors_from_diff", or "num_cursors_from_account". By default, Clio uses "num_diffs".
"num_diffs": 32, // Generate the cursors from the latest ledger diff, then use the cursors to partition the ledger to load concurrently. The number of cursors is affected by how busy the network is.
// "num_cursors_from_diff": 3200, // Read the cursors from the diff table until we have enough cursors to partition the ledger to load concurrently.
// "num_cursors_from_account": 3200, // Read the cursors from the account table until we have enough cursors to partition the ledger to load concurrently.
"num_markers": 48, // The number of markers is the number of coroutines used to load the cache concurrently.
"page_fetch_size": 512, // The number of rows to load for each page.
"load": "async" // "sync" to load the cache synchronously, "async" to load it asynchronously, or "none"/"no" to turn the cache off.
},
"prometheus": {
"enabled": true,
"compress_reply": true

@@ -18,6 +18,7 @@
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"id": 1,
"links": [],
"liveNow": false,
"panels": [
@@ -85,9 +86,11 @@
"fields": "",
"values": false
},
"textMode": "auto"
"showPercentChange": false,
"textMode": "auto",
"wideLayout": true
},
"pluginVersion": "10.2.0",
"pluginVersion": "10.4.0",
"targets": [
{
"datasource": {
@@ -105,6 +108,372 @@
"title": "Service state",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [
{
"options": {
"0": {
"color": "blue",
"index": 0,
"text": "No"
},
"1": {
"color": "green",
"index": 1,
"text": "Yes"
}
},
"type": "value"
}
],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 3,
"x": 3,
"y": 0
},
"id": 14,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showPercentChange": false,
"textMode": "auto",
"wideLayout": true
},
"pluginVersion": "10.4.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"editorMode": "code",
"expr": "read_only",
"instant": false,
"legendFormat": "__auto",
"range": true,
"refId": "A"
}
],
"title": "Read only",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [
{
"options": {
"0": {
"color": "blue",
"index": 0,
"text": "No"
},
"1": {
"color": "green",
"index": 1,
"text": "Yes"
}
},
"type": "value"
}
],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 3,
"x": 6,
"y": 0
},
"id": 15,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showPercentChange": false,
"textMode": "auto",
"wideLayout": true
},
"pluginVersion": "10.4.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"editorMode": "code",
"expr": "etl_writing",
"instant": false,
"legendFormat": "__auto",
"range": true,
"refId": "A"
}
],
"title": "Writing data to DB",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [
{
"options": {
"0": {
"color": "green",
"index": 0,
"text": "No"
},
"1": {
"color": "red",
"index": 1,
"text": "Yes"
}
},
"type": "value"
}
],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 3,
"x": 9,
"y": 0
},
"id": 16,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showPercentChange": false,
"textMode": "auto",
"wideLayout": true
},
"pluginVersion": "10.4.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"editorMode": "code",
"expr": "etl_amendment_blocked",
"instant": false,
"legendFormat": "__auto",
"range": true,
"refId": "A"
}
],
"title": "Amendment blocked",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "s"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 0
},
"id": 13,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"editorMode": "code",
"exemplar": false,
"expr": "timestamp(etl_last_publish_seconds) - etl_last_publish_seconds",
"format": "time_series",
"instant": false,
"legendFormat": "ledger age",
"range": true,
"refId": "A"
}
],
"title": "Ledger Age",
"transformations": [
{
"id": "filterByValue",
"options": {
"filters": [
{
"config": {
"id": "lower",
"options": {
"value": 31500000
}
},
"fieldName": "ledger age"
}
],
"match": "all",
"type": "include"
}
}
],
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
@@ -166,9 +535,9 @@
},
"gridPos": {
"h": 8,
"w": 9,
"x": 3,
"y": 0
"w": 12,
"x": 0,
"y": 8
},
"id": 2,
"options": {
@@ -263,7 +632,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 0
"y": 8
},
"id": 9,
"options": {
@@ -358,7 +727,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 8
"y": 16
},
"id": 11,
"options": {
@@ -453,7 +822,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 8
"y": 16
},
"id": 6,
"options": {
@@ -550,7 +919,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 16
"y": 24
},
"id": 10,
"options": {
@@ -645,7 +1014,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 16
"y": 24
},
"id": 8,
"options": {
@@ -740,7 +1109,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 24
"y": 32
},
"id": 4,
"options": {
@@ -839,7 +1208,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 24
"y": 32
},
"id": 12,
"options": {
@@ -973,7 +1342,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 32
"y": 40
},
"id": 5,
"options": {
@@ -1081,7 +1450,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 32
"y": 40
},
"id": 3,
"options": {
@@ -1186,9 +1555,9 @@
},
"gridPos": {
"h": 8,
"w": 10,
"w": 12,
"x": 0,
"y": 40
"y": 48
},
"id": 7,
"options": {
@@ -1222,7 +1591,7 @@
}
],
"refresh": "5s",
"schemaVersion": 38,
"schemaVersion": 39,
"tags": [],
"templating": {
"list": []
@@ -1235,6 +1604,6 @@
"timezone": "",
"title": "Clio",
"uid": "aeaae84e-c194-47b2-ad65-86e45eebb815",
"version": 3,
"version": 1,
"weekStart": ""
}

3
docs/img/doxygen-docs-output.png
Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:16e6f0a44b2dc7462f813f07ba754575e93033bc8bbfbe1d28f8bf1927915028
size 394711
47
docs/trouble_shooting.md
Normal file
@@ -0,0 +1,47 @@
# Troubleshooting Guide

This guide will help you troubleshoot common issues with Clio.

## Can't connect to DB

If you see the error log message `Could not connect to Cassandra: No hosts available`, this means that Clio can't connect to the database. Check the following:

- Make sure the database is running at the specified address and port.
- Make sure the database is accessible from the machine where Clio is running.

You can use [cqlsh](https://pypi.org/project/cqlsh/) to check the connection to the database.
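
For example, a quick connectivity check (a sketch assuming the default host and port; substitute the values from your `database` config):

```sh
cqlsh 127.0.0.1 9042 -e 'DESCRIBE KEYSPACES'
```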

If you would like to run a local ScyllaDB, you can call:

```sh
docker run --rm -p 9042:9042 --name clio-scylla -d scylladb/scylla
```

## Check the server status of Clio

To check if Clio is syncing with rippled:

```sh
curl -v -d '{"method":"server_info", "params":[{}]}' 127.0.0.1:51233|python3 -m json.tool|grep seq
```

If Clio is syncing with rippled, the `seq` value will be increasing.

## Clio fails to start

If you see the error log message `Failed to fetch ETL state from...`, this means the configured rippled node is not reachable. Check the following:

- Make sure the rippled node is running at the specified address and port.
- Make sure the rippled node is accessible from the machine where Clio is running.

If you would like to run Clio without an available rippled node, you can add the following setting to Clio's configuration file:

```
"allow_no_etl": true
```

## Clio is not added to secure_gateway in rippled's config

If you see the warning message `AsyncCallData is_unlimited is false.`, this means that Clio is not added to the `secure_gateway` of the `port_grpc` section in the rippled configuration file, which slows down the sync process. Please add Clio's IP to the `secure_gateway` in the rippled configuration file for both the gRPC and WebSocket ports.

## Clio is slow

Clio has an internal cache to speed up response times, but the cache can take time to warm up. If you see slow responses, first check whether the cache is still loading.
You can check the cache status by calling:

```sh
curl -v -d '{"method":"server_info", "params":[{}]}' 127.0.0.1:51233|python3 -m json.tool|grep is_full
curl -v -d '{"method":"server_info", "params":[{}]}' 127.0.0.1:51233|python3 -m json.tool|grep is_enabled
```

If `is_full` is false, the cache is still loading; Clio normally responds more quickly once the cache finishes loading. If `is_enabled` is false, the cache is disabled in the configuration file or there is data corruption in the database.

## Receive error message `Too many requests`

If a client sees the error message `Too many requests`, the client is being blocked by Clio's DosGuard protection. You may want to add the client's IP to the whitelist in the configuration file, or update your other DosGuard settings.
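
For example, a sketch of the relevant `dos_guard` section of the config file (the IPs shown are illustrative):

```
"dos_guard": {
    // Comma-separated list of IPs to exclude from rate limiting
    "whitelist": ["127.0.0.1", "203.0.113.5"]
}
```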

@@ -39,39 +39,17 @@ namespace data {
template <typename T>
concept SomeBackendCounters = requires(T a) {
    typename T::PtrType;
    {
        a.registerTooBusy()
    } -> std::same_as<void>;
    {
        a.registerWriteSync(std::chrono::steady_clock::time_point{})
    } -> std::same_as<void>;
    {
        a.registerWriteSyncRetry()
    } -> std::same_as<void>;
    {
        a.registerWriteStarted()
    } -> std::same_as<void>;
    {
        a.registerWriteFinished(std::chrono::steady_clock::time_point{})
    } -> std::same_as<void>;
    {
        a.registerWriteRetry()
    } -> std::same_as<void>;
    {
        a.registerReadStarted(std::uint64_t{})
    } -> std::same_as<void>;
    {
        a.registerReadFinished(std::chrono::steady_clock::time_point{}, std::uint64_t{})
    } -> std::same_as<void>;
    {
        a.registerReadRetry(std::uint64_t{})
    } -> std::same_as<void>;
    {
        a.registerReadError(std::uint64_t{})
    } -> std::same_as<void>;
    {
        a.report()
    } -> std::same_as<boost::json::object>;
    { a.registerTooBusy() } -> std::same_as<void>;
    { a.registerWriteSync(std::chrono::steady_clock::time_point{}) } -> std::same_as<void>;
    { a.registerWriteSyncRetry() } -> std::same_as<void>;
    { a.registerWriteStarted() } -> std::same_as<void>;
    { a.registerWriteFinished(std::chrono::steady_clock::time_point{}) } -> std::same_as<void>;
    { a.registerWriteRetry() } -> std::same_as<void>;
    { a.registerReadStarted(std::uint64_t{}) } -> std::same_as<void>;
    { a.registerReadFinished(std::chrono::steady_clock::time_point{}, std::uint64_t{}) } -> std::same_as<void>;
    { a.registerReadRetry(std::uint64_t{}) } -> std::same_as<void>;
    { a.registerReadError(std::uint64_t{}) } -> std::same_as<void>;
    { a.report() } -> std::same_as<boost::json::object>;
};

/**

@@ -90,7 +90,7 @@ BackendInterface::fetchLedgerObject(
    auto obj = cache_.get(key, sequence);
    if (obj) {
        LOG(gLog.trace()) << "Cache hit - " << ripple::strHex(key);
        return *obj;
        return obj;
    }

    LOG(gLog.trace()) << "Cache miss - " << ripple::strHex(key);
@@ -297,16 +297,23 @@ BackendInterface::fetchLedgerPage(
    std::uint32_t const limit,
    bool outOfOrder,
    boost::asio::yield_context yield
) const
)
{
    LedgerPage page;

    std::vector<ripple::uint256> keys;
    bool reachedEnd = false;

    while (keys.size() < limit && !reachedEnd) {
        ripple::uint256 const& curCursor = !keys.empty() ? keys.back() : (cursor ? *cursor : firstKey);
        ripple::uint256 const& curCursor = [&]() {
            if (!keys.empty())
                return keys.back();
            return (cursor ? *cursor : firstKey);
        }();

        std::uint32_t const seq = outOfOrder ? range->maxSequence : ledgerSequence;
        auto succ = fetchSuccessorKey(curCursor, seq, yield);

        if (!succ) {
            reachedEnd = true;
        } else {
@@ -326,6 +333,9 @@ BackendInterface::fetchLedgerPage(
                msg << " - " << ripple::strHex(keys[j]);
            }
            LOG(gLog.error()) << msg.str();

            if (corruptionDetector_.has_value())
                corruptionDetector_->onCorruptionDetected();
        }
    }
    if (!keys.empty() && !reachedEnd)
@@ -350,14 +360,42 @@ BackendInterface::fetchFees(std::uint32_t const seq, boost::asio::yield_context
    ripple::SerialIter it(bytes->data(), bytes->size());
    ripple::SLE const sle{it, key};

    if (sle.getFieldIndex(ripple::sfBaseFee) != -1)
        fees.base = sle.getFieldU64(ripple::sfBaseFee);
    // XRPFees amendment introduced new fields for fees calculations.
    // New fields are set and the old fields are removed via `set_fees` tx.
    // Fallback to old fields if `set_fees` was not yet used to update the fields on this tx.
    auto hasNewFields = false;
    {
        auto const baseFeeXRP = sle.at(~ripple::sfBaseFeeDrops);
        auto const reserveBaseXRP = sle.at(~ripple::sfReserveBaseDrops);
        auto const reserveIncrementXRP = sle.at(~ripple::sfReserveIncrementDrops);

    if (sle.getFieldIndex(ripple::sfReserveBase) != -1)
        fees.reserve = sle.getFieldU32(ripple::sfReserveBase);
        if (baseFeeXRP)
            fees.base = baseFeeXRP->xrp();

    if (sle.getFieldIndex(ripple::sfReserveIncrement) != -1)
        fees.increment = sle.getFieldU32(ripple::sfReserveIncrement);
        if (reserveBaseXRP)
            fees.reserve = reserveBaseXRP->xrp();

        if (reserveIncrementXRP)
            fees.increment = reserveIncrementXRP->xrp();

        hasNewFields = baseFeeXRP || reserveBaseXRP || reserveIncrementXRP;
    }

    if (not hasNewFields) {
        // Fallback to old fields
        auto const baseFee = sle.at(~ripple::sfBaseFee);
        auto const reserveBase = sle.at(~ripple::sfReserveBase);
        auto const reserveIncrement = sle.at(~ripple::sfReserveIncrement);

        if (baseFee)
            fees.base = baseFee.value();

        if (reserveBase)
            fees.reserve = reserveBase.value();

        if (reserveIncrement)
            fees.increment = reserveIncrement.value();
    }

    return fees;
}

@@ -22,6 +22,7 @@
#include "data/DBHelpers.hpp"
#include "data/LedgerCache.hpp"
#include "data/Types.hpp"
#include "etl/CorruptionDetector.hpp"
#include "util/log/Logger.hpp"

#include <boost/asio/executor_work_guard.hpp>
@@ -44,6 +45,7 @@
#include <string>
#include <thread>
#include <type_traits>
#include <utility>
#include <vector>

namespace data {
@@ -138,6 +140,7 @@ protected:
    mutable std::shared_mutex rngMtx_;
    std::optional<LedgerRange> range;
    LedgerCache cache_;
    std::optional<etl::CorruptionDetector<LedgerCache>> corruptionDetector_;

public:
    BackendInterface() = default;
@@ -162,6 +165,17 @@ public:
        return cache_;
    }

    /**
     * @brief Sets the corruption detector.
     *
     * @param detector The corruption detector to set
     */
    void
    setCorruptionDetector(etl::CorruptionDetector<LedgerCache> detector)
    {
        corruptionDetector_ = std::move(detector);
    }

    /**
     * @brief Fetches a specific ledger by sequence number.
     *
@@ -199,6 +213,19 @@ public:
    std::optional<LedgerRange>
    fetchLedgerRange() const;

    /**
     * @brief Fetch the specified number of account root object indexes by page; the accounts need to exist for seq.
     *
     * @param number The number of accounts to fetch
     * @param pageSize The maximum number of accounts per page
     * @param seq The accounts need to exist for this sequence
     * @param yield The coroutine context
     * @return A vector of ripple::uint256 representing the account roots
     */
    virtual std::vector<ripple::uint256>
    fetchAccountRoots(std::uint32_t number, std::uint32_t pageSize, std::uint32_t seq, boost::asio::yield_context yield)
        const = 0;

    /**
     * @brief Updates the range of sequences that are stored in the DB.
     *
@@ -422,7 +449,7 @@ public:
        std::uint32_t limit,
        bool outOfOrder,
        boost::asio::yield_context yield
    ) const;
    );

    /**
     * @brief Fetches the successor object.

@@ -40,6 +40,7 @@
#include <ripple/basics/base_uint.h>
#include <ripple/basics/strHex.h>
#include <ripple/protocol/AccountID.h>
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/LedgerHeader.h>
#include <ripple/protocol/nft.h>

@@ -171,11 +172,6 @@ public:
        if (--numRows == 0) {
            LOG(log_.debug()) << "Setting cursor";
            cursor = data;

            // forward queries by ledger/tx sequence `>=`
            // so we have to advance the index by one
            if (forward)
                ++cursor->transactionIndex;
        }
    }

@@ -210,13 +206,13 @@ public:
    }

    void
    writeLedger(ripple::LedgerHeader const& ledgerInfo, std::string&& blob) override
    writeLedger(ripple::LedgerHeader const& ledgerHeader, std::string&& blob) override
    {
        executor_.write(schema_->insertLedgerHeader, ledgerInfo.seq, std::move(blob));
        executor_.write(schema_->insertLedgerHeader, ledgerHeader.seq, std::move(blob));

        executor_.write(schema_->insertLedgerHash, ledgerInfo.hash, ledgerInfo.seq);
        executor_.write(schema_->insertLedgerHash, ledgerHeader.hash, ledgerHeader.seq);

        ledgerSequence_ = ledgerInfo.seq;
        ledgerSequence_ = ledgerHeader.seq;
    }

    std::optional<std::uint32_t>
@@ -560,7 +556,7 @@ public:
        if (auto const res = executor_.read(yield, schema_->selectObject, key, sequence); res) {
            if (auto const result = res->template get<Blob>(); result) {
                if (result->size())
                    return *result;
                    return result;
            } else {
                LOG(log_.debug()) << "Could not fetch ledger object - no rows";
            }
@@ -596,7 +592,7 @@ public:
        if (auto const result = res->template get<ripple::uint256>(); result) {
            if (*result == lastKey)
                return std::nullopt;
            return *result;
            return result;
        }

        LOG(log_.debug()) << "Could not fetch successor - no rows";
@@ -693,6 +689,50 @@ public:
        return results;
    }

    std::vector<ripple::uint256>
    fetchAccountRoots(std::uint32_t number, std::uint32_t pageSize, std::uint32_t seq, boost::asio::yield_context yield)
        const override
    {
        std::vector<ripple::uint256> liveAccounts;
        std::optional<ripple::AccountID> lastItem;

        while (liveAccounts.size() < number) {
            Statement const statement = lastItem ? schema_->selectAccountFromToken.bind(*lastItem, Limit{pageSize})
                                                 : schema_->selectAccountFromBegining.bind(Limit{pageSize});

            auto const res = executor_.read(yield, statement);
            if (res) {
                auto const& results = res.value();
                if (not results.hasRows()) {
                    LOG(log_.debug()) << "No rows returned";
                    break;
                }
                // The results should not contain duplicates; we just filter out deleted accounts
                std::vector<ripple::uint256> fullAccounts;
                for (auto [account] : extract<ripple::AccountID>(results)) {
                    fullAccounts.push_back(ripple::keylet::account(account).key);
                    lastItem = account;
                }
                auto const objs = doFetchLedgerObjects(fullAccounts, seq, yield);

                for (auto i = 0u; i < fullAccounts.size(); i++) {
                    if (not objs[i].empty()) {
                        if (liveAccounts.size() < number) {
                            liveAccounts.push_back(fullAccounts[i]);
                        } else {
                            break;
                        }
                    }
                }
            } else {
                LOG(log_.error()) << "Could not fetch account from account_tx: " << res.error();
                break;
            }
        }

        return liveAccounts;
    }

    std::vector<LedgerObject>
    fetchLedgerDiff(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
    {
@@ -732,9 +772,7 @@ public:
            std::cend(keys),
            std::cbegin(objs),
            std::back_inserter(results),
            [](auto const& key, auto const& obj) {
                return LedgerObject{key, obj};
            }
            [](auto const& key, auto const& obj) { return LedgerObject{key, obj}; }
        );

        return results;

@@ -45,6 +45,7 @@ LedgerCache::waitUntilCacheContainsSeq(uint32_t seq)
{
    if (disabled_)
        return;

    std::unique_lock lock(mtx_);
    cv_.wait(lock, [this, seq] { return latestSeq_ >= seq; });
    return;
@@ -89,8 +90,9 @@ LedgerCache::update(std::vector<LedgerObject> const& objs, uint32_t seq, bool is
std::optional<LedgerObject>
LedgerCache::getSuccessor(ripple::uint256 const& key, uint32_t seq) const
{
    if (!full_)
    if (disabled_ or not full_)
        return {};

    std::shared_lock const lck{mtx_};
    ++successorReqCounter_.get();
    if (seq != latestSeq_)
@@ -105,8 +107,9 @@ LedgerCache::getSuccessor(ripple::uint256 const& key, uint32_t seq) const
std::optional<LedgerObject>
LedgerCache::getPredecessor(ripple::uint256 const& key, uint32_t seq) const
{
    if (!full_)
    if (disabled_ or not full_)
        return {};

    std::shared_lock const lck{mtx_};
    if (seq != latestSeq_)
        return {};
@@ -120,6 +123,9 @@ LedgerCache::getPredecessor(ripple::uint256 const& key, uint32_t seq) const
std::optional<Blob>
LedgerCache::get(ripple::uint256 const& key, uint32_t seq) const
{
    if (disabled_)
        return {};

    std::shared_lock const lck{mtx_};
    if (seq > latestSeq_)
        return {};
@@ -139,6 +145,12 @@ LedgerCache::setDisabled()
    disabled_ = true;
}

bool
LedgerCache::isDisabled() const
{
    return disabled_;
}

void
LedgerCache::setFull()
{

@@ -133,6 +133,12 @@ public:
    void
    setDisabled();

    /**
     * @return true if the cache is disabled; false otherwise
     */
    bool
    isDisabled() const;

    /**
     * @brief Sets the full flag to true.
     *

@@ -1,42 +1,75 @@
# Backend

## Background
The backend of Clio is responsible for handling the proper reading and writing of past ledger data from and to a given database. As of right now, Cassandra and ScyllaDB are the only supported databases that are production-ready. Support for database types can be easily extended by creating new implementations which implements the virtual methods of `BackendInterface.h`. Then, use the Factory Object Design Pattern to simply add logic statements to `BackendFactory.h` that return the new database interface for a specific `type` in Clio's configuration file.
The backend of Clio is responsible for handling the proper reading and writing of past ledger data from and to a given database. Currently, Cassandra and ScyllaDB are the only supported databases that are production-ready.

To support additional database types, you can create new classes that implement the virtual methods in [BackendInterface.h](https://github.com/XRPLF/clio/blob/develop/src/data/BackendInterface.hpp). Then, leveraging the Factory Object Design Pattern, modify [BackendFactory.h](https://github.com/XRPLF/clio/blob/develop/src/data/BackendFactory.hpp) with logic that returns the new database interface if the relevant `type` is provided in Clio's configuration file.
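
As a sketch of the idea (the names below are illustrative rather than Clio's actual factory code), the change amounts to one more branch keyed on the configured `type`:

```cpp
// Hypothetical sketch: wiring a new backend into a factory function.
// MyDbBackend is an illustrative class implementing BackendInterface's virtual methods.
std::shared_ptr<BackendInterface>
make_Backend(util::Config const& config)
{
    auto const type = config.value<std::string>("database.type");

    if (type == "cassandra")
        return std::make_shared<data::cassandra::CassandraBackend>(/* settings from config */);

    if (type == "mydb")  // the new "type" value recognised from Clio's config file
        return std::make_shared<MyDbBackend>(/* settings from config */);

    throw std::runtime_error("Invalid database type: " + type);
}
```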

## Data Model
The data model used by Clio to read and write ledger data is different from what Rippled uses. Rippled uses a novel data structure named [*SHAMap*](https://github.com/ripple/rippled/blob/master/src/ripple/shamap/README.md), which is a combination of a Merkle Tree and a Radix Trie. In a SHAMap, ledger objects are stored in the root vertices of the tree. Thus, looking up a record located at the leaf node of the SHAMap executes a tree search, where the path from the root node to the leaf node is the key of the record. Rippled nodes can also generate a proof-tree by forming a subtree with all the path nodes and their neighbors, which can then be used to prove the existnce of the leaf node data to other Rippled nodes. In short, the main purpose of the SHAMap data structure is to facilitate the fast validation of data integrity between different decentralized Rippled nodes.

Since Clio only extracts past validated ledger data from a group of trusted Rippled nodes, it can be safely assumed that these ledger data are correct without the need to validate with other nodes in the XRP peer-to-peer network. Because of this, Clio is able to use a flattened data model to store the past validated ledger data, which allows for direct record lookup with much faster constant time operations.
The data model used by Clio to read and write ledger data is different from what `rippled` uses. `rippled` uses a novel data structure named [*SHAMap*](https://github.com/ripple/rippled/blob/master/src/ripple/shamap/README.md), which is a combination of a Merkle Tree and a Radix Trie. In a SHAMap, ledger objects are stored in the root vertices of the tree. Thus, looking up a record located at the leaf node of the SHAMap executes a tree search, where the path from the root node to the leaf node is the key of the record.

There are three main types of data in each XRP ledger version, they are [Ledger Header](https://xrpl.org/ledger-header.html), [Transaction Set](https://xrpl.org/transaction-formats.html) and [State Data](https://xrpl.org/ledger-object-types.html). Due to the structural differences of the different types of databases, Clio may choose to represent these data using a different schema for each unique database type.
`rippled` nodes can also generate a proof-tree by forming a subtree with all the path nodes and their neighbors, which can then be used to prove the existence of the leaf node data to other `rippled` nodes. In short, the main purpose of the SHAMap data structure is to facilitate the fast validation of data integrity between different decentralized `rippled` nodes.

**Keywords**
*Sequence*: A unique incrementing identification number used to label the different ledger versions.
*Hash*: The SHA512-half (calculate SHA512 and take the first 256 bits) hash of various ledger data like the entire ledger or specific ledger objects.
*Ledger Object*: The [binary-encoded](https://xrpl.org/serialization.html) STObject containing specific data (i.e. metadata, transaction data).
*Metadata*: The data containing [detailed information](https://xrpl.org/transaction-metadata.html#transaction-metadata) of the outcome of a specific transaction, regardless of whether the transaction was successful.
*Transaction data*: The data containing the [full details](https://xrpl.org/transaction-common-fields.html) of a specific transaction.
*Object Index*: The pseudo-random unique identifier of a ledger object, created by hashing the data of the object.
Since Clio only extracts past validated ledger data from a group of trusted `rippled` nodes, it can be safely assumed that the ledger data is correct without the need to validate with other nodes in the XRP peer-to-peer network. Because of this, Clio is able to use a flattened data model to store the past validated ledger data, which allows for direct record lookup with much faster constant time operations.

There are three main types of data in each XRP Ledger version:

- [Ledger Header](https://xrpl.org/ledger-header.html)

- [Transaction Set](https://xrpl.org/transaction-formats.html)

- [State Data](https://xrpl.org/ledger-object-types.html)

Due to the structural differences of the different types of databases, Clio may choose to represent these data types using a different schema for each unique database type.

### Keywords

**Sequence**: A unique incrementing identification number used to label the different ledger versions.

**Hash**: The SHA512-half (calculate SHA512 and take the first 256 bits) hash of various ledger data like the entire ledger or specific ledger objects.

**Ledger Object**: The [binary-encoded](https://xrpl.org/serialization.html) STObject containing specific data (i.e. metadata, transaction data).

**Metadata**: The data containing [detailed information](https://xrpl.org/transaction-metadata.html#transaction-metadata) of the outcome of a specific transaction, regardless of whether the transaction was successful.

**Transaction data**: The data containing the [full details](https://xrpl.org/transaction-common-fields.html) of a specific transaction.

**Object Index**: The pseudo-random unique identifier of a ledger object, created by hashing the data of the object.

## Cassandra Implementation
Cassandra is a distributed wide-column NoSQL database designed to handle large data throughput with high availability and no single point of failure. By leveraging Cassandra, Clio will be able to quickly and reliably scale up when needed simply by adding more Cassandra nodes to the Cassandra cluster configuration.

In Cassandra, Clio will be creating 9 tables to store the ledger data, they are `ledger_transactions`, `transactions`, `ledger_hashes`, `ledger_range`, `objects`, `ledgers`, `diff`, `account_tx`, and `successor`. Their schemas and how they work are detailed below.
Cassandra is a distributed wide-column NoSQL database designed to handle large data throughput with high availability and no single point of failure. By leveraging Cassandra, Clio is able to quickly and reliably scale up when needed simply by adding more Cassandra nodes to the Cassandra cluster configuration.

*Note, if you would like visually explore the data structure of the Cassandra database, you can first run Clio server with database `type` configured as `cassandra` to fill ledger data from Rippled nodes into Cassandra, then use a GUI database management tool like [Datastax's Opcenter](https://docs.datastax.com/en/install/6.0/install/opscInstallOpsc.html) to interactively view it.*
In Cassandra, Clio creates 9 tables to store the ledger data:

- `ledger_transactions`
- `transactions`
- `ledger_hashes`
- `ledger_range`
- `objects`
- `ledgers`
- `diff`
- `account_tx`
- `successor`

Their schemas and how they work are detailed in the following sections.

> **Note**: If you would like to visually explore the data structure of the Cassandra database, run the Clio server with the database `type` configured as `cassandra` to fill ledger data from the `rippled` nodes into Cassandra. Then, use a GUI database management tool like [Datastax's Opcenter](https://docs.datastax.com/en/install/6.0/install/opscInstallOpsc.html) to interactively view it.

### ledger_transactions

```
CREATE TABLE clio.ledger_transactions (
ledger_sequence bigint, # The sequence number of the ledger version
hash blob, # Hash of all the transactions on this ledger version
PRIMARY KEY (ledger_sequence, hash)
) WITH CLUSTERING ORDER BY (hash ASC) ...
```
This table stores the hashes of all transactions in a given ledger sequence ordered by the hash value in ascending order.
```

This table stores the hashes of all transactions in a given ledger sequence and is sorted by the hash value in ascending order.

### transactions

```
CREATE TABLE clio.transactions (
hash blob PRIMARY KEY, # The transaction hash
@@ -45,29 +78,36 @@ CREATE TABLE clio.transactions (
metadata blob, # Metadata of the transaction
transaction blob # Data of the transaction
) ...
```
```

This table stores the full transaction and metadata of each ledger version with the transaction hash as the primary key.

To look up all the transactions that were validated in a ledger version with sequence `n`, one can first get the all the transaction hashes in that ledger version by querying `SELECT * FROM ledger_transactions WHERE ledger_sequence = n;`. Then, iterate through the list of hashes and query `SELECT * FROM transactions WHERE hash = one_of_the_hash_from_the_list;` to get the detailed transaction data.
To look up all the transactions that were validated in a ledger version with sequence `n`, first get all the transaction hashes in that ledger version by querying `SELECT * FROM ledger_transactions WHERE ledger_sequence = n;`. Then, iterate through the list of hashes and query `SELECT * FROM transactions WHERE hash = one_of_the_hash_from_the_list;` to get the detailed transaction data.
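To make that two-step flow concrete, here is a small self-contained sketch with in-memory maps standing in for the two Cassandra tables (illustration only, not Clio code; the CQL from the text is quoted in the comments):

```cpp
// Illustration only: std::multimap/std::map stand in for Cassandra tables.
#include <cstdint>
#include <map>
#include <string>
#include <vector>

using Hash = std::string;  // stand-in for a 256-bit transaction hash

std::multimap<uint64_t, Hash> ledgerTransactions;  // ledger_sequence -> hash
std::map<Hash, std::string> transactions;          // hash -> tx + metadata blob

std::vector<std::string>
fetchTransactionsInLedger(uint64_t n)
{
    std::vector<std::string> result;
    // Step 1: SELECT * FROM ledger_transactions WHERE ledger_sequence = n;
    auto const [first, last] = ledgerTransactions.equal_range(n);
    for (auto it = first; it != last; ++it) {
        // Step 2: SELECT * FROM transactions WHERE hash = <hash from step 1>;
        result.push_back(transactions.at(it->second));
    }
    return result;
}
```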

### ledger_hashes

```
CREATE TABLE clio.ledger_hashes (
hash blob PRIMARY KEY, # Hash of entire ledger version's data
sequence bigint # The sequence of the ledger version
) ...
```
```

This table stores the hash of all ledger versions by their sequences.

### ledger_range

```
CREATE TABLE clio.ledger_range (
is_latest boolean PRIMARY KEY, # Whether this sequence is the stopping range
sequence bigint # The sequence number of the starting/stopping range
) ...
```
This table marks the range of ledger versions that is stored on this specific Cassandra node. Because of its nature, there are only two records in this table with `false` and `true` values for `is_latest`, marking the starting and ending sequence of the ledger range.
```

This table marks the range of ledger versions that is stored on this specific Cassandra node. Because of its nature, there are only two records in this table with `false` and `true` values for `is_latest`, marking the starting and ending sequence of the ledger range.

### objects

```
CREATE TABLE clio.objects (
key blob, # Object index of the object
@@ -75,31 +115,37 @@ CREATE TABLE clio.objects (
object blob, # Data of the object
PRIMARY KEY (key, sequence)
) WITH CLUSTERING ORDER BY (sequence DESC) ...
```
This table stores the specific data of all objects that ever existed on the XRP network, even if they are deleted (which is represented with a special `0x` value). The records are ordered by descending sequence, where the newest validated ledger objects are at the top.
```

This table is updated when all data for a given ledger sequence has been written to the various tables in the database. For each ledger, many associated records are written to different tables. This table is used as a synchronization mechanism, to prevent the application from reading data from a ledger for which all data has not yet been fully written.
The `objects` table stores the specific data of all objects that ever existed on the XRP network, even if they are deleted (which is represented with a special `0x` value). The records are ordered by descending sequence, where the newest validated ledger objects are at the top.

The table is updated when all data for a given ledger sequence has been written to the various tables in the database. For each ledger, many associated records are written to different tables. This table is used as a synchronization mechanism, to prevent the application from reading data from a ledger for which all data has not yet been fully written.
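The read pattern this layout enables is "latest version of an object at or before sequence `n`". A minimal in-memory analogue of that lookup (illustration only, not Clio code):

```cpp
#include <cstdint>
#include <map>
#include <optional>
#include <string>
#include <utility>

// (key, sequence) -> object blob; an empty blob models the special value
// that marks a deleted object, as described above.
std::map<std::pair<std::string, uint64_t>, std::string> objects;

// Equivalent in spirit to:
//   SELECT object FROM objects WHERE key = ? AND sequence <= n
//   ORDER BY sequence DESC LIMIT 1;
std::optional<std::string>
getObject(std::string const& key, uint64_t n)
{
    auto it = objects.upper_bound({key, n});
    if (it == objects.begin())
        return std::nullopt;
    --it;  // last record with (key, sequence) <= (key, n)
    if (it->first.first != key || it->second.empty())
        return std::nullopt;  // no version at/before n, or object was deleted
    return it->second;
}
```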

### ledgers

```
CREATE TABLE clio.ledgers (
sequence bigint PRIMARY KEY, # Sequence of the ledger version
header blob # Data of the header
) ...
```
```

This table stores the ledger header data of specific ledger versions by their sequence.

### diff

```
CREATE TABLE clio.diff (
seq bigint, # Sequence of the ledger version
key blob, # Hash of changes in the ledger version
PRIMARY KEY (seq, key)
) WITH CLUSTERING ORDER BY (key ASC) ...
```
```

This table stores the object index of all the changes in each ledger version.

### account_tx

```
CREATE TABLE clio.account_tx (
account blob,
@@ -107,10 +153,12 @@ CREATE TABLE clio.account_tx (
hash blob, # Hash of the transaction
PRIMARY KEY (account, seq_idx)
) WITH CLUSTERING ORDER BY (seq_idx DESC) ...
```
```

This table stores the list of transactions affecting a given account. This includes transactions made by the account, as well as transactions received.

### successor

```
CREATE TABLE clio.successor (
key blob, # Object index
@@ -118,30 +166,35 @@ CREATE TABLE clio.successor (
next blob, # Index of the next object that existed in this sequence
PRIMARY KEY (key, seq)
) WITH CLUSTERING ORDER BY (seq ASC) ...
```
This table is the important backbone of how histories of ledger objects are stored in Cassandra. The successor table stores the object index of all ledger objects that were validated on the XRP network along with the ledger sequence that the object was upated on. Due to the unique nature of the table with each key being ordered by the sequence, by tracing through the table with a specific sequence number, Clio can recreate a Linked List data structure that represents all the existing ledger object at that ledger sequence. The special value of `0x00...00` and `0xFF...FF` are used to label the head and tail of the Linked List in the successor table. The diagram below showcases how tracing through the same table but with different sequence parameter filtering can result in different Linked List data representing the corresponding past state of the ledger objects. A query like `SELECT * FROM successor WHERE key = ? AND seq <= n ORDER BY seq DESC LIMIT 1;` can effectively trace through the successor table and get the Linked List of a specific sequence `n`.
```

This table is the important backbone of how histories of ledger objects are stored in Cassandra. The `successor` table stores the object index of all ledger objects that were validated on the XRP network along with the ledger sequence that the object was updated on.

As each key is ordered by sequence, Clio can trace through the table with a specific sequence number to recreate a Linked List data structure that represents all the existing ledger objects at that ledger sequence. The special values of `0x00...00` and `0xFF...FF` are used to label the *head* and *tail* of the Linked List in the successor table.

The diagram below showcases how tracing through the same table, but with different sequence parameter filtering, can result in different Linked List data representing the corresponding past state of the ledger objects. A query like `SELECT * FROM successor WHERE key = ? AND seq <= n ORDER BY seq DESC LIMIT 1;` can effectively trace through the successor table and get the Linked List of a specific sequence `n`.


*P.S.: The `diff` is `(DELETE 0x00...02, CREATE 0x00...03)` for `seq=1001` and `(CREATE 0x00...04)` for `seq=1002`, which is both accurately reflected with the Linked List trace*

In each new ledger version with sequence `n`, a ledger object `v` can either be **created**, **modified**, or **deleted**. For all three of these operations, the procedure to update the successor table can be broken down in to two steps:
1. Trace through the Linked List of the previous sequence to to find the ledger object `e` with the greatest object index smaller or equal than the `v`'s index. Save `e`'s `next` value (the index of the next ledger object) as `w`.
> **Note**: The `diff` is `(DELETE 0x00...02, CREATE 0x00...03)` for `seq=1001` and `(CREATE 0x00...04)` for `seq=1002`, which is both accurately reflected with the Linked List trace.

In each new ledger version with sequence `n`, a ledger object `v` can either be **created**, **modified**, or **deleted**.

For all three of these operations, the procedure to update the successor table can be broken down into two steps (a code sketch follows the list):

1. Trace through the Linked List of the previous sequence to find the ledger object `e` with the greatest object index smaller than or equal to `v`'s index. Save `e`'s `next` value (the index of the next ledger object) as `w`.

2. If `v` is...
1. Being **created**, add two new records of `seq=n` with one being `e` pointing to `v`, and `v` pointing to `w` (Linked List insertion operation).
2. Being **modified**, do nothing.
3. Being **deleted**, add a record of `seq=n` with `e` pointing to `v`'s `next` value (Linked List deletion operation).
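Here is the promised sketch of those two steps, with an ordered map standing in for the successor table (illustration only, not Clio code; sentinel indexes are shortened to `"00"` and `"FF"`, and `e` is assumed to already be on the list, which the head sentinel guarantees):

```cpp
#include <cstdint>
#include <map>
#include <string>
#include <utility>

// One record per update: (key, seq) -> next. Tracing at sequence n means
// taking, for each key, the latest record with seq <= n.
std::map<std::pair<std::string, uint64_t>, std::string> successor;

// Step 1: in the view at `seq`, look up the `next` value of a key that is
// assumed to already be on the list (the "00" head sentinel always is).
std::string
nextAt(std::string const& key, uint64_t seq)
{
    auto it = successor.upper_bound({key, seq});
    --it;  // latest record for `key` at or before `seq`
    return it->second;
}

// Step 2, creation: e -> v and v -> w (Linked List insertion operation).
void
onCreated(std::string const& e, std::string const& v, uint64_t n)
{
    auto const w = nextAt(e, n - 1);
    successor[{e, n}] = v;
    successor[{v, n}] = w;
}

// Step 2, modification: do nothing.

// Step 2, deletion: e skips over v (Linked List deletion operation).
void
onDeleted(std::string const& e, std::string const& v, uint64_t n)
{
    successor[{e, n}] = nextAt(v, n - 1);
}
```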

### NFT data model
In `rippled` NFTs are stored in NFTokenPage ledger objects. This object is
implemented to save ledger space and has the property that it gives us O(1)
lookup time for an NFT, assuming we know who owns the NFT at a particular
ledger. However, if we do not know who owns the NFT at a specific ledger
height we have no alternative in rippled other than scanning the entire
ledger. Because of this tradeoff, clio implements a special NFT indexing data
structure that allows clio users to query NFTs quickly, while keeping
rippled's space-saving optimizations.
## NFT data model

In `rippled` NFTs are stored in `NFTokenPage` ledger objects. This object is implemented to save ledger space and has the property that it gives us O(1) lookup time for an NFT, assuming we know who owns the NFT at a particular ledger. However, if we do not know who owns the NFT at a specific ledger height we have no alternative but to scan the entire ledger in `rippled`. Because of this tradeoff, Clio implements a special NFT indexing data structure that allows Clio users to query NFTs quickly, while keeping rippled's space-saving optimizations.

### nf_tokens

#### nf_tokens
```
CREATE TABLE clio.nf_tokens (
token_id blob, # The NFT's ID
@@ -151,21 +204,21 @@ CREATE TABLE clio.nf_tokens (
PRIMARY KEY (token_id, sequence)
) WITH CLUSTERING ORDER BY (sequence DESC) ...
```
This table indexes NFT IDs with their owner at a given ledger. So

This table indexes NFT IDs with their owner at a given ledger.

The example query below shows how you could search for the owner of token `N` at ledger `Y` and see whether the token was burned.

```
SELECT * FROM nf_tokens
WHERE token_id = N AND seq <= Y
ORDER BY seq DESC LIMIT 1;
```
will give you the owner of token N at ledger Y and whether it was burned. If
the token is burned, the owner field indicates the account that owned the
token at the time it was burned; it does not indicate the person who burned
the token, necessarily. If you need to determine who burned the token you can
use the `nft_history` API, which will give you the NFTokenBurn transaction
that burned this token, along with the account that submitted that
transaction.

#### issuer_nf_tokens_v2
If the token is burned, the owner field indicates the account that owned the token at the time it was burned; it does not necessarily indicate the person who burned the token. If you need to determine who burned the token you can use the `nft_history` API, which will give you the `NFTokenBurn` transaction that burned this token, along with the account that submitted that transaction.

### issuer_nf_tokens_v2

```
CREATE TABLE clio.issuer_nf_tokens_v2 (
issuer blob, # The NFT issuer's account ID
@@ -174,13 +227,12 @@ CREATE TABLE clio.issuer_nf_tokens_v2 (
PRIMARY KEY (issuer, taxon, token_id)
) WITH CLUSTERING ORDER BY (taxon ASC, token_id ASC) ...
```
This table indexes token IDs against their issuer and issuer/taxon
combination. This is useful for determining all the NFTs a specific account
issued, or all the NFTs a specific account issued with a specific taxon. It is
not useful to know all the NFTs with a given taxon while excluding issuer, since the
meaning of a taxon is left to an issuer.

#### nf_token_uris
This table indexes token IDs against their issuer and issuer/taxon
combination. This is useful for determining all the NFTs a specific account issued, or all the NFTs a specific account issued with a specific taxon. It is not useful to know all the NFTs with a given taxon while excluding issuer, since the meaning of a taxon is left to an issuer.

### nf_token_uris

```
CREATE TABLE clio.nf_token_uris (
token_id blob, # The NFT's ID
@@ -189,23 +241,17 @@ CREATE TABLE clio.nf_token_uris (
PRIMARY KEY (token_id, sequence)
) WITH CLUSTERING ORDER BY (sequence DESC) ...
```
This table is used to store an NFT's URI. Without storing this here, we would
need to traverse the NFT owner's entire set of NFTs to find the URI, again due
to the way that NFTs are stored in rippled. Furthermore, instead of storing
this in the `nf_tokens` table, we store it here to save space. A given NFT
will have only one entry in this table (see caveat below), written to this
table as soon as clio sees the NFTokenMint transaction, or when clio loads an
NFTokenPage from the initial ledger it downloaded. However, the `nf_tokens`
table is written to every time an NFT changes ownership, or if it is burned.

Given this, why do we have to store the sequence? Unfortunately there is an
extreme edge case where a given NFT ID can be burned, and then re-minted with
a different URI. This is extremely unlikely, and might be fixed in a future
version to rippled, but just in case we can handle that edge case by allowing
a given NFT ID to have a new URI assigned in this case, without removing the
prior URI.
This table is used to store an NFT's URI. Without storing this here, we would need to traverse the NFT owner's entire set of NFTs to find the URI, again due to the way that NFTs are stored in `rippled`. Furthermore, instead of storing this in the `nf_tokens` table, we store it here to save space.

A given NFT will have only one entry in this table (see caveat below), and will be written to this table as soon as Clio sees the `NFTokenMint` transaction, or when Clio loads an `NFTokenPage` from the initial ledger it downloaded. However, the `nf_tokens` table is written to every time an NFT changes ownership, or if it is burned.

> **Why do we have to store the sequence?**
>
> Unfortunately there is an extreme edge case where a given NFT ID can be burned, and then re-minted with a different URI. This is extremely unlikely, and might be fixed in a future version of `rippled`. Currently, Clio handles this edge case by allowing the NFT ID to have a new URI assigned, without removing the prior URI.

### nf_token_transactions

#### nf_token_transactions
```
CREATE TABLE clio.nf_token_transactions (
token_id blob, # The NFT's ID
@@ -214,7 +260,5 @@ CREATE TABLE clio.nf_token_transactions (
PRIMARY KEY (token_id, seq_idx)
) WITH CLUSTERING ORDER BY (seq_idx DESC) ...
```
This table is the NFT equivalent of `account_tx`. It's motivated by the exact
same reasons and serves the analogous purpose here. It drives the
`nft_history` API.

The `nf_token_transactions` table serves as the NFT counterpart to `account_tx`, inspired by the same motivations and fulfilling a similar role within this context. It drives the `nft_history` API.

@@ -41,21 +41,10 @@ namespace data::cassandra {
*/
template <typename T>
concept SomeSettingsProvider = requires(T a) {
{
a.getSettings()
} -> std::same_as<Settings>;
{
a.getKeyspace()
} -> std::same_as<std::string>;
{
a.getTablePrefix()
} -> std::same_as<std::optional<std::string>>;
{
a.getReplicationFactor()
} -> std::same_as<uint16_t>;
{
a.getTtl()
} -> std::same_as<uint16_t>;
{ a.getSettings() } -> std::same_as<Settings>;
{ a.getKeyspace() } -> std::same_as<std::string>;
{ a.getTablePrefix() } -> std::same_as<std::optional<std::string>>;
{ a.getReplicationFactor() } -> std::same_as<uint16_t>;
};

/**
@@ -71,42 +60,18 @@ concept SomeExecutionStrategy = requires(
PreparedStatement prepared,
boost::asio::yield_context token
) {
{
T(settings, handle)
};
{
a.sync()
} -> std::same_as<void>;
{
a.isTooBusy()
} -> std::same_as<bool>;
{
a.writeSync(statement)
} -> std::same_as<ResultOrError>;
{
a.writeSync(prepared)
} -> std::same_as<ResultOrError>;
{
a.write(prepared)
} -> std::same_as<void>;
{
a.write(std::move(statements))
} -> std::same_as<void>;
{
a.read(token, prepared)
} -> std::same_as<ResultOrError>;
{
a.read(token, statement)
} -> std::same_as<ResultOrError>;
{
a.read(token, statements)
} -> std::same_as<ResultOrError>;
{
a.readEach(token, statements)
} -> std::same_as<std::vector<Result>>;
{
a.stats()
} -> std::same_as<boost::json::object>;
{ T(settings, handle) };
{ a.sync() } -> std::same_as<void>;
{ a.isTooBusy() } -> std::same_as<bool>;
{ a.writeSync(statement) } -> std::same_as<ResultOrError>;
{ a.writeSync(prepared) } -> std::same_as<ResultOrError>;
{ a.write(prepared) } -> std::same_as<void>;
{ a.write(std::move(statements)) } -> std::same_as<void>;
{ a.read(token, prepared) } -> std::same_as<ResultOrError>;
{ a.read(token, statement) } -> std::same_as<ResultOrError>;
{ a.read(token, statements) } -> std::same_as<ResultOrError>;
{ a.readEach(token, statements) } -> std::same_as<std::vector<Result>>;
{ a.stats() } -> std::same_as<boost::json::object>;
};

/**
@@ -114,12 +79,8 @@ concept SomeExecutionStrategy = requires(
*/
template <typename T>
concept SomeRetryPolicy = requires(T a, boost::asio::io_context ioc, CassandraError err, uint32_t attempt) {
{
T(ioc)
};
{
a.shouldRetry(err)
} -> std::same_as<bool>;
{ T(ioc) };
{ a.shouldRetry(err) } -> std::same_as<bool>;
{
a.retry([]() {})
} -> std::same_as<void>;
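For readers less familiar with the compact compound-requirement syntax this change adopts, here is a tiny standalone example of the same pattern (a toy concept, unrelated to Clio):

```cpp
#include <concepts>
#include <string>

// Same `{ expr } -> std::same_as<T>` style as the concepts above.
template <typename T>
concept SomeNameProvider = requires(T a) {
    { a.name() } -> std::same_as<std::string>;
};

struct Widget {
    std::string name() const { return "widget"; }
};

static_assert(SomeNameProvider<Widget>);   // has name() returning std::string
static_assert(not SomeNameProvider<int>);  // int has no name()
```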

@@ -98,10 +98,8 @@ public:
PRIMARY KEY (key, sequence)
)
WITH CLUSTERING ORDER BY (sequence DESC)
AND default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "objects"),
settingsProvider_.get().getTtl()
qualifiedTableName(settingsProvider_.get(), "objects")
));

statements.emplace_back(fmt::format(
@@ -114,10 +112,8 @@ public:
transaction blob,
metadata blob
)
WITH default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "transactions"),
settingsProvider_.get().getTtl()
qualifiedTableName(settingsProvider_.get(), "transactions")
));

statements.emplace_back(fmt::format(
@@ -128,10 +124,8 @@ public:
hash blob,
PRIMARY KEY (ledger_sequence, hash)
)
WITH default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "ledger_transactions"),
settingsProvider_.get().getTtl()
qualifiedTableName(settingsProvider_.get(), "ledger_transactions")
));

statements.emplace_back(fmt::format(
@@ -143,10 +137,8 @@ public:
next blob,
PRIMARY KEY (key, seq)
)
WITH default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "successor"),
settingsProvider_.get().getTtl()
qualifiedTableName(settingsProvider_.get(), "successor")
));

statements.emplace_back(fmt::format(
@@ -157,10 +149,8 @@ public:
key blob,
PRIMARY KEY (seq, key)
)
WITH default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "diff"),
settingsProvider_.get().getTtl()
qualifiedTableName(settingsProvider_.get(), "diff")
));

statements.emplace_back(fmt::format(
@@ -173,10 +163,8 @@ public:
PRIMARY KEY (account, seq_idx)
)
WITH CLUSTERING ORDER BY (seq_idx DESC)
AND default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "account_tx"),
settingsProvider_.get().getTtl()
qualifiedTableName(settingsProvider_.get(), "account_tx")
));

statements.emplace_back(fmt::format(
@@ -186,10 +174,8 @@ public:
sequence bigint PRIMARY KEY,
header blob
)
WITH default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "ledgers"),
settingsProvider_.get().getTtl()
qualifiedTableName(settingsProvider_.get(), "ledgers")
));

statements.emplace_back(fmt::format(
@@ -199,10 +185,8 @@ public:
hash blob PRIMARY KEY,
sequence bigint
)
WITH default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "ledger_hashes"),
settingsProvider_.get().getTtl()
qualifiedTableName(settingsProvider_.get(), "ledger_hashes")
));

statements.emplace_back(fmt::format(
@@ -227,10 +211,8 @@ public:
PRIMARY KEY (token_id, sequence)
)
WITH CLUSTERING ORDER BY (sequence DESC)
AND default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "nf_tokens"),
settingsProvider_.get().getTtl()
qualifiedTableName(settingsProvider_.get(), "nf_tokens")
));

statements.emplace_back(fmt::format(
@@ -243,10 +225,8 @@ public:
PRIMARY KEY (issuer, taxon, token_id)
)
WITH CLUSTERING ORDER BY (taxon ASC, token_id ASC)
AND default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2"),
settingsProvider_.get().getTtl()
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")
));

statements.emplace_back(fmt::format(
@@ -259,10 +239,8 @@ public:
PRIMARY KEY (token_id, sequence)
)
WITH CLUSTERING ORDER BY (sequence DESC)
AND default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "nf_token_uris"),
settingsProvider_.get().getTtl()
qualifiedTableName(settingsProvider_.get(), "nf_token_uris")
));

statements.emplace_back(fmt::format(
@@ -275,10 +253,8 @@ public:
PRIMARY KEY (token_id, seq_idx)
)
WITH CLUSTERING ORDER BY (seq_idx DESC)
AND default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions"),
settingsProvider_.get().getTtl()
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions")
));

return statements;
@@ -586,6 +562,32 @@ public:
));
}();

PreparedStatement selectAccountFromBegining = [this]() {
return handle_.get().prepare(fmt::format(
R"(
SELECT account
FROM {}
WHERE token(account) > 0
PER PARTITION LIMIT 1
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "account_tx")
));
}();

PreparedStatement selectAccountFromToken = [this]() {
return handle_.get().prepare(fmt::format(
R"(
SELECT account
FROM {}
WHERE token(account) > token(?)
PER PARTITION LIMIT 1
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "account_tx")
));
}();

PreparedStatement selectAccountTxForward = [this]() {
return handle_.get().prepare(fmt::format(
R"(

@@ -68,12 +68,11 @@ tag_invoke(boost::json::value_to_tag<Settings::SecureConnectionBundle>, boost::j
}
} // namespace impl

SettingsProvider::SettingsProvider(util::Config const& cfg, uint16_t ttl)
SettingsProvider::SettingsProvider(util::Config const& cfg)
: config_{cfg}
, keyspace_{cfg.valueOr<std::string>("keyspace", "clio")}
, tablePrefix_{cfg.maybeValue<std::string>("table_prefix")}
, replicationFactor_{cfg.valueOr<uint16_t>("replication_factor", 3)}
, ttl_{ttl}
, settings_{parseSettings()}
{
}

@@ -39,7 +39,6 @@ class SettingsProvider {
std::string keyspace_;
std::optional<std::string> tablePrefix_;
uint16_t replicationFactor_;
uint16_t ttl_;
Settings settings_;

public:
@@ -47,9 +46,8 @@ public:
* @brief Create a settings provider from the specified config.
*
* @param cfg The config of Clio to use
* @param ttl Time to live setting
*/
explicit SettingsProvider(util::Config const& cfg, uint16_t ttl = 0);
explicit SettingsProvider(util::Config const& cfg);

/**
* @return The cluster settings
@@ -60,7 +58,7 @@ public:
/**
* @return The specified keyspace
*/
[[nodiscard]] inline std::string
[[nodiscard]] std::string
getKeyspace() const
{
return keyspace_;
@@ -69,7 +67,7 @@ public:
/**
* @return The optional table prefix to use in all queries
*/
[[nodiscard]] inline std::optional<std::string>
[[nodiscard]] std::optional<std::string>
getTablePrefix() const
{
return tablePrefix_;
@@ -78,21 +76,12 @@ public:
/**
* @return The replication factor
*/
[[nodiscard]] inline uint16_t
[[nodiscard]] uint16_t
getReplicationFactor() const
{
return replicationFactor_;
}

/**
* @return The default time to live to use in all `create` queries
*/
[[nodiscard]] inline uint16_t
getTtl() const
{
return ttl_;
}

private:
[[nodiscard]] std::optional<std::string>
parseOptionalCertificate() const;

@@ -19,9 +19,8 @@

#pragma once

#include "util/Expected.hpp"

#include <cstdint>
#include <expected>

namespace data::cassandra {

@@ -59,8 +58,8 @@ struct Limit {
class Handle;
class CassandraError;

using MaybeError = util::Expected<void, CassandraError>;
using ResultOrError = util::Expected<Result, CassandraError>;
using Error = util::Unexpected<CassandraError>;
using MaybeError = std::expected<void, CassandraError>;
using ResultOrError = std::expected<Result, CassandraError>;
using Error = std::unexpected<CassandraError>;

} // namespace data::cassandra

@@ -23,10 +23,10 @@
#include "data/cassandra/Types.hpp"
#include "data/cassandra/impl/ManagedObject.hpp"
#include "data/cassandra/impl/Statement.hpp"
#include "util/Expected.hpp"

#include <cassandra.h>

#include <expected>
#include <stdexcept>
#include <vector>


@@ -49,7 +49,7 @@ struct Settings {
*/
struct ContactPoints {
std::string contactPoints = "127.0.0.1"; // defaults to localhost
std::optional<uint16_t> port = {};
std::optional<uint16_t> port;
};

/**
@@ -87,16 +87,16 @@ struct Settings {
std::size_t writeBatchSize = DEFAULT_BATCH_SIZE;

/** @brief Size of the IO queue */
std::optional<uint32_t> queueSizeIO{};
std::optional<uint32_t> queueSizeIO = std::nullopt; // NOLINT(readability-redundant-member-init)

/** @brief SSL certificate */
std::optional<std::string> certificate{}; // ssl context
std::optional<std::string> certificate = std::nullopt; // NOLINT(readability-redundant-member-init)

/** @brief Username/login */
std::optional<std::string> username{};
std::optional<std::string> username = std::nullopt; // NOLINT(readability-redundant-member-init)

/** @brief Password to match the `username` */
std::optional<std::string> password{};
std::optional<std::string> password = std::nullopt; // NOLINT(readability-redundant-member-init)

/**
* @brief Creates a new Settings object as a copy of the current one with overridden contact points.
@@ -105,7 +105,7 @@ struct Settings {
withContactPoints(std::string_view contactPoints)
{
auto tmp = *this;
tmp.connectionInfo = ContactPoints{std::string{contactPoints}};
tmp.connectionInfo = ContactPoints{.contactPoints = std::string{contactPoints}, .port = std::nullopt};
return tmp;
}


@@ -121,7 +121,8 @@ public:
// reinterpret_cast is needed here :'(
auto const rc = bindBytes(reinterpret_cast<unsigned char const*>(value.data()), value.size());
throwErrorIfNeeded(rc, "Bind string (as bytes)");
} else if constexpr (std::is_same_v<DecayedType, UintTupleType> || std::is_same_v<DecayedType, UintByteTupleType>) {
} else if constexpr (std::is_same_v<DecayedType, UintTupleType> ||
std::is_same_v<DecayedType, UintByteTupleType>) {
auto const rc = cass_statement_bind_tuple(*this, idx, Tuple{std::forward<Type>(value)});
throwErrorIfNeeded(rc, "Bind tuple<uint32, uint32> or <uint32_t, ripple::uint256>");
} else if constexpr (std::is_same_v<DecayedType, ByteVectorType>) {

@@ -2,11 +2,13 @@ add_library(clio_etl)

target_sources(
clio_etl
PRIVATE NFTHelpers.cpp
PRIVATE CacheLoaderSettings.cpp
ETLHelpers.cpp
ETLService.cpp
ETLState.cpp
LoadBalancer.cpp
CacheLoaderSettings.cpp
NetworkValidatedLedgers.cpp
NFTHelpers.cpp
Source.cpp
impl/ForwardingCache.cpp
impl/ForwardingSource.cpp

@@ -22,6 +22,9 @@
#include "data/BackendInterface.hpp"
#include "etl/CacheLoaderSettings.hpp"
#include "etl/impl/CacheLoader.hpp"
#include "etl/impl/CursorFromAccountProvider.hpp"
#include "etl/impl/CursorFromDiffProvider.hpp"
#include "etl/impl/CursorFromFixDiffNumProvider.hpp"
#include "util/Assert.hpp"
#include "util/async/context/BasicExecutionContext.hpp"
#include "util/log/Logger.hpp"
@@ -41,10 +44,7 @@ namespace etl {
* @tparam CursorProviderType The type of the cursor provider to use
* @tparam ExecutionContextType The type of the execution context to use
*/
template <
typename CacheType,
typename CursorProviderType = impl::CursorProvider,
typename ExecutionContextType = util::async::CoroExecutionContext>
template <typename CacheType, typename ExecutionContextType = util::async::CoroExecutionContext>
class CacheLoader {
using CacheLoaderType = impl::CacheLoaderImpl<CacheType>;

@@ -88,7 +88,22 @@ public:
return;
}

auto const provider = CursorProviderType{backend_, settings_.numCacheDiffs};
std::shared_ptr<impl::BaseCursorProvider> provider;
if (settings_.numCacheCursorsFromDiff != 0) {
LOG(log_.info()) << "Loading cache with cursor from num_cursors_from_diff="
<< settings_.numCacheCursorsFromDiff;
provider = std::make_shared<impl::CursorFromDiffProvider>(backend_, settings_.numCacheCursorsFromDiff);
} else if (settings_.numCacheCursorsFromAccount != 0) {
LOG(log_.info()) << "Loading cache with cursor from num_cursors_from_account="
<< settings_.numCacheCursorsFromAccount;
provider = std::make_shared<impl::CursorFromAccountProvider>(
backend_, settings_.numCacheCursorsFromAccount, settings_.cachePageFetchSize
);
} else {
LOG(log_.info()) << "Loading cache with cursor from num_diffs=" << settings_.numCacheDiffs;
provider = std::make_shared<impl::CursorFromFixDiffNumProvider>(backend_, settings_.numCacheDiffs);
}

loader_ = std::make_unique<CacheLoaderType>(
ctx_,
backend_,
@@ -96,7 +111,7 @@ public:
seq,
settings_.numCacheMarkers,
settings_.cachePageFetchSize,
provider.getCursors(seq)
provider->getCursors(seq)
);

if (settings_.isSync()) {

@@ -53,7 +53,13 @@ make_CacheLoaderSettings(util::Config const& config)
settings.numThreads = config.valueOr("io_threads", settings.numThreads);
if (config.contains("cache")) {
auto const cache = config.section("cache");
// Given diff number to generate cursors
settings.numCacheDiffs = cache.valueOr<size_t>("num_diffs", settings.numCacheDiffs);
// Given cursors number fetching from diff
settings.numCacheCursorsFromDiff = cache.valueOr<size_t>("num_cursors_from_diff", 0);
// Given cursors number fetching from account
settings.numCacheCursorsFromAccount = cache.valueOr<size_t>("num_cursors_from_account", 0);

settings.numCacheMarkers = cache.valueOr<size_t>("num_markers", settings.numCacheMarkers);
settings.cachePageFetchSize = cache.valueOr<size_t>("page_fetch_size", settings.cachePageFetchSize);


@@ -32,10 +32,12 @@ struct CacheLoaderSettings {
/** @brief Ways to load the cache */
enum class LoadStyle { ASYNC, SYNC, NONE };

size_t numCacheDiffs = 32; /**< number of diffs to use to generate cursors */
size_t numCacheMarkers = 48; /**< number of markers to use at one time to traverse the ledger */
size_t cachePageFetchSize = 512; /**< number of ledger objects to fetch concurrently per marker */
size_t numThreads = 2; /**< number of threads to use for loading cache */
size_t numCacheDiffs = 32; /**< number of diffs to use to generate cursors */
size_t numCacheMarkers = 48; /**< number of markers to use at one time to traverse the ledger */
size_t cachePageFetchSize = 512; /**< number of ledger objects to fetch concurrently per marker */
size_t numThreads = 2; /**< number of threads to use for loading cache */
size_t numCacheCursorsFromDiff = 0; /**< number of cursors to fetch from diff */
size_t numCacheCursorsFromAccount = 0; /**< number of cursors to fetch from account_tx */

LoadStyle loadStyle = LoadStyle::ASYNC; /**< how to load the cache */

src/etl/CorruptionDetector.hpp (new file, 67 lines)
@@ -0,0 +1,67 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2024, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include "etl/SystemState.hpp"
#include "util/log/Logger.hpp"

#include <functional>

namespace etl {

/**
* @brief A helper to notify Clio operator about a corruption in the DB
*
* @tparam CacheType The type of the cache to disable on corruption
*/
template <typename CacheType>
class CorruptionDetector {
std::reference_wrapper<SystemState> state_;
std::reference_wrapper<CacheType> cache_;

util::Logger log_{"ETL"};

public:
/**
* @brief Construct a new Corruption Detector object
*
* @param state The system state
* @param cache The cache to disable on corruption
*/
CorruptionDetector(SystemState& state, CacheType& cache) : state_{std::ref(state)}, cache_{std::ref(cache)}
{
}

/**
* @brief Notify the operator about a corruption in the DB.
*/
void
onCorruptionDetected()
{
if (not state_.get().isCorruptionDetected) {
state_.get().isCorruptionDetected = true;

LOG(log_.error()) << "Disabling the cache to avoid corrupting the DB further. Please investigate.";
cache_.get().setDisabled();
}
}
};

} // namespace etl
src/etl/ETLHelpers.cpp (new file, 46 lines)
@@ -0,0 +1,46 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2024, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include "etl/ETLHelpers.hpp"

#include "util/Assert.hpp"

#include <ripple/basics/base_uint.h>

#include <cstddef>
#include <vector>

namespace etl {
std::vector<ripple::uint256>
getMarkers(size_t numMarkers)
{
ASSERT(numMarkers <= 256, "Number of markers must be <= 256. Got: {}", numMarkers);

unsigned char const incr = 256 / numMarkers;

std::vector<ripple::uint256> markers;
markers.reserve(numMarkers);
ripple::uint256 base{0};
for (size_t i = 0; i < numMarkers; ++i) {
markers.push_back(base);
base.data()[0] += incr;
}
return markers;
}
} // namespace etl
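As a quick illustration of what this helper produces (assuming the implementation above), four markers partition the 256-bit key space evenly by the first byte:

```cpp
// Hypothetical usage; prints 0, 64, 128, 192 - the first byte of each marker.
#include "etl/ETLHelpers.hpp"

#include <iostream>

int
main()
{
    for (auto const& marker : etl::getMarkers(4))
        std::cout << static_cast<int>(marker.data()[0]) << "\n";
}
```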
@@ -20,11 +20,8 @@
/** @file */
#pragma once

#include "util/Assert.hpp"

#include <ripple/basics/base_uint.h>

#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <cstdint>
@@ -35,83 +32,6 @@
#include <vector>

namespace etl {
/**
* @brief This datastructure is used to keep track of the sequence of the most recent ledger validated by the network.
*
* There are two methods that will wait until certain conditions are met. This datastructure is able to be "stopped".
* When the datastructure is stopped, any threads currently waiting are unblocked.
* Any later calls to methods of this datastructure will not wait. Once the datastructure is stopped, the datastructure
* remains stopped for the rest of its lifetime.
*/
class NetworkValidatedLedgers {
// max sequence validated by network
std::optional<uint32_t> max_;

mutable std::mutex m_;
std::condition_variable cv_;

public:
/**
* @brief A factory function for NetworkValidatedLedgers
*
* @return A shared pointer to a new instance of NetworkValidatedLedgers
*/
static std::shared_ptr<NetworkValidatedLedgers>
make_ValidatedLedgers()
{
return std::make_shared<NetworkValidatedLedgers>();
}

/**
* @brief Notify the datastructure that idx has been validated by the network.
*
* @param idx Sequence validated by network
*/
void
push(uint32_t idx)
{
std::lock_guard const lck(m_);
if (!max_ || idx > *max_)
max_ = idx;
cv_.notify_all();
}

/**
* @brief Get most recently validated sequence.
*
* If no ledgers are known to have been validated, this function waits until the next ledger is validated
*
* @return Sequence of most recently validated ledger. empty optional if the datastructure has been stopped
*/
std::optional<uint32_t>
getMostRecent()
{
std::unique_lock lck(m_);
cv_.wait(lck, [this]() { return max_; });
return max_;
}

/**
* @brief Waits for the sequence to be validated by the network.
*
* @param sequence The sequence to wait for
* @param maxWaitMs Maximum time to wait for the sequence to be validated. If empty, wait indefinitely
* @return true if sequence was validated, false otherwise a return value of false means the datastructure has been
* stopped
*/
bool
waitUntilValidatedByNetwork(uint32_t sequence, std::optional<uint32_t> maxWaitMs = {})
{
std::unique_lock lck(m_);
auto pred = [sequence, this]() -> bool { return (max_ && sequence <= *max_); };
if (maxWaitMs) {
cv_.wait_for(lck, std::chrono::milliseconds(*maxWaitMs));
} else {
cv_.wait(lck, pred);
}
return pred();
}
};

// TODO: does the note make sense? lockfree queues provide the same blocking behaviour just without mutex, don't they?
/**
@@ -228,20 +148,7 @@ public:
* @param numMarkers Total markers to partition for
* @return The markers
*/
inline std::vector<ripple::uint256>
getMarkers(size_t numMarkers)
{
ASSERT(numMarkers <= 256, "Number of markers must be <= 256. Got: {}", numMarkers);
std::vector<ripple::uint256>
getMarkers(size_t numMarkers);

unsigned char const incr = 256 / numMarkers;

std::vector<ripple::uint256> markers;
markers.reserve(numMarkers);
ripple::uint256 base{0};
for (size_t i = 0; i < numMarkers; ++i) {
markers.push_back(base);
base.data()[0] += incr;
}
return markers;
}
} // namespace etl

@@ -20,6 +20,10 @@
|
||||
#include "etl/ETLService.hpp"
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/LedgerCache.hpp"
|
||||
#include "etl/CorruptionDetector.hpp"
|
||||
#include "etl/NetworkValidatedLedgersInterface.hpp"
|
||||
#include "feed/SubscriptionManagerInterface.hpp"
|
||||
#include "util/Assert.hpp"
|
||||
#include "util/Constants.hpp"
|
||||
#include "util/config/Config.hpp"
|
||||
@@ -130,7 +134,8 @@ ETLService::monitor()
|
||||
}
|
||||
} catch (std::runtime_error const& e) {
|
||||
LOG(log_.fatal()) << "Failed to load initial ledger: " << e.what();
|
||||
return amendmentBlockHandler_.onAmendmentBlock();
|
||||
amendmentBlockHandler_.onAmendmentBlock();
|
||||
return;
|
||||
}
|
||||
|
||||
if (ledger) {
|
||||
@@ -150,8 +155,7 @@ ETLService::monitor()
|
||||
ASSERT(rng.has_value(), "Ledger range can't be null");
|
||||
uint32_t nextSequence = rng->maxSequence + 1;
|
||||
|
||||
LOG(log_.debug()) << "Database is populated. "
|
||||
<< "Starting monitor loop. sequence = " << nextSequence;
|
||||
LOG(log_.debug()) << "Database is populated. Starting monitor loop. sequence = " << nextSequence;
|
||||
|
||||
while (not isStopping()) {
|
||||
nextSequence = publishNextSequence(nextSequence);
|
||||
@@ -204,7 +208,7 @@ ETLService::monitorReadOnly()
|
||||
|
||||
if (!rng) {
|
||||
if (auto net = networkValidatedLedgers_->getMostRecent()) {
|
||||
return *net;
|
||||
return net;
|
||||
}
|
||||
return std::nullopt;
|
||||
}
|
||||
@@ -261,9 +265,9 @@ ETLService::ETLService(
|
||||
util::Config const& config,
|
||||
boost::asio::io_context& ioc,
|
||||
std::shared_ptr<BackendInterface> backend,
|
||||
std::shared_ptr<SubscriptionManagerType> subscriptions,
|
||||
std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions,
|
||||
std::shared_ptr<LoadBalancerType> balancer,
|
||||
std::shared_ptr<NetworkValidatedLedgersType> ledgers
|
||||
std::shared_ptr<NetworkValidatedLedgersInterface> ledgers
|
||||
)
|
||||
: backend_(backend)
|
||||
, loadBalancer_(balancer)
|
||||
@@ -276,8 +280,11 @@ ETLService::ETLService(
|
||||
{
|
||||
startSequence_ = config.maybeValue<uint32_t>("start_sequence");
|
||||
finishSequence_ = config.maybeValue<uint32_t>("finish_sequence");
|
||||
state_.isReadOnly = config.valueOr("read_only", state_.isReadOnly);
|
||||
state_.isReadOnly = config.valueOr("read_only", static_cast<bool>(state_.isReadOnly));
|
||||
extractorThreads_ = config.valueOr<uint32_t>("extractor_threads", extractorThreads_);
|
||||
txnThreshold_ = config.valueOr<size_t>("txn_threshold", txnThreshold_);
|
||||
|
||||
// This should probably be done in the backend factory but we don't have state available until here
|
||||
backend_->setCorruptionDetector(CorruptionDetector<data::LedgerCache>{state_, backend->cache()});
|
||||
}
|
||||
} // namespace etl
|
||||
|
||||
@@ -33,7 +33,7 @@
#include "etl/impl/LedgerLoader.hpp"
#include "etl/impl/LedgerPublisher.hpp"
#include "etl/impl/Transformer.hpp"
#include "feed/SubscriptionManager.hpp"
#include "feed/SubscriptionManagerInterface.hpp"
#include "util/log/Logger.hpp"

#include <boost/asio/io_context.hpp>
@@ -77,16 +77,14 @@ namespace etl {
*/
class ETLService {
// TODO: make these template parameters in ETLService
using SubscriptionManagerType = feed::SubscriptionManager;
using LoadBalancerType = LoadBalancer;
using NetworkValidatedLedgersType = NetworkValidatedLedgers;
using DataPipeType = etl::impl::ExtractionDataPipe<org::xrpl::rpc::v1::GetLedgerResponse>;
using CacheType = data::LedgerCache;
using CacheLoaderType = etl::CacheLoader<CacheType>;
using LedgerFetcherType = etl::impl::LedgerFetcher<LoadBalancerType>;
using ExtractorType = etl::impl::Extractor<DataPipeType, NetworkValidatedLedgersType, LedgerFetcherType>;
using ExtractorType = etl::impl::Extractor<DataPipeType, LedgerFetcherType>;
using LedgerLoaderType = etl::impl::LedgerLoader<LoadBalancerType, LedgerFetcherType>;
using LedgerPublisherType = etl::impl::LedgerPublisher<SubscriptionManagerType, CacheType>;
using LedgerPublisherType = etl::impl::LedgerPublisher<CacheType>;
using AmendmentBlockHandlerType = etl::impl::AmendmentBlockHandler<>;
using TransformerType =
etl::impl::Transformer<DataPipeType, LedgerLoaderType, LedgerPublisherType, AmendmentBlockHandlerType>;
@@ -95,7 +93,7 @@ class ETLService {

std::shared_ptr<BackendInterface> backend_;
std::shared_ptr<LoadBalancerType> loadBalancer_;
std::shared_ptr<NetworkValidatedLedgersType> networkValidatedLedgers_;
std::shared_ptr<NetworkValidatedLedgersInterface> networkValidatedLedgers_;

std::uint32_t extractorThreads_ = 1;
std::thread worker_;
@@ -128,9 +126,9 @@ public:
util::Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManagerType> subscriptions,
std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions,
std::shared_ptr<LoadBalancerType> balancer,
std::shared_ptr<NetworkValidatedLedgersType> ledgers
std::shared_ptr<NetworkValidatedLedgersInterface> ledgers
);

/**
@@ -151,9 +149,9 @@ public:
util::Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManagerType> subscriptions,
std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions,
std::shared_ptr<LoadBalancerType> balancer,
std::shared_ptr<NetworkValidatedLedgersType> ledgers
std::shared_ptr<NetworkValidatedLedgersInterface> ledgers
)
{
auto etl = std::make_shared<ETLService>(config, ioc, backend, subscriptions, balancer, ledgers);
@@ -201,6 +199,17 @@ public:
return state_.isAmendmentBlocked;
}

/**
* @brief Check whether Clio detected DB corruptions.
*
* @return true if corruption of DB was detected and cache was stopped.
*/
bool
isCorruptionDetected() const
{
return state_.isCorruptionDetected;
}

/**
* @brief Get state of ETL as a JSON object
*
@@ -212,8 +221,8 @@ public:
boost::json::object result;

result["etl_sources"] = loadBalancer_->toJson();
result["is_writer"] = state_.isWriting.load();
result["read_only"] = state_.isReadOnly;
result["is_writer"] = static_cast<int>(state_.isWriting);
result["read_only"] = static_cast<int>(state_.isReadOnly);
auto last = ledgerPublisher_.getLastPublish();
if (last.time_since_epoch().count() != 0)
result["last_publish_age_seconds"] = std::to_string(ledgerPublisher_.lastPublishAgeSeconds());
@@ -27,21 +27,23 @@
#include <ripple/protocol/jss.h>

#include <cstdint>
#include <optional>

namespace etl {

ETLState
tag_invoke(boost::json::value_to_tag<ETLState>, boost::json::value const& jv)
std::optional<ETLState>
tag_invoke(boost::json::value_to_tag<std::optional<ETLState>>, boost::json::value const& jv)
{
ETLState state;
auto const& jsonObject = jv.as_object();

if (!jsonObject.contains(JS(error))) {
if (jsonObject.contains(JS(result)) && jsonObject.at(JS(result)).as_object().contains(JS(info))) {
auto const rippledInfo = jsonObject.at(JS(result)).as_object().at(JS(info)).as_object();
if (rippledInfo.contains(JS(network_id)))
state.networkID.emplace(boost::json::value_to<int64_t>(rippledInfo.at(JS(network_id))));
}
if (jsonObject.contains(JS(error)))
return std::nullopt;

if (jsonObject.contains(JS(result)) && jsonObject.at(JS(result)).as_object().contains(JS(info))) {
auto const rippledInfo = jsonObject.at(JS(result)).as_object().at(JS(info)).as_object();
if (rippledInfo.contains(JS(network_id)))
state.networkID.emplace(boost::json::value_to<int64_t>(rippledInfo.at(JS(network_id))));
}

return state;

@@ -47,11 +47,11 @@ struct ETLState {
fetchETLStateFromSource(Forward& source) noexcept
{
auto const serverInfoRippled = data::synchronous([&source](auto yield) {
return source.forwardToRippled({{"command", "server_info"}}, std::nullopt, yield);
return source.forwardToRippled({{"command", "server_info"}}, std::nullopt, {}, yield);
});

if (serverInfoRippled)
return boost::json::value_to<ETLState>(boost::json::value(*serverInfoRippled));
return boost::json::value_to<std::optional<ETLState>>(boost::json::value(*serverInfoRippled));

return std::nullopt;
}
@@ -63,7 +63,7 @@ struct ETLState {
* @param jv The json value to convert
* @return The ETLState
*/
ETLState
tag_invoke(boost::json::value_to_tag<ETLState>, boost::json::value const& jv);
std::optional<ETLState>
tag_invoke(boost::json::value_to_tag<std::optional<ETLState>>, boost::json::value const& jv);

} // namespace etl
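As an illustration of the new conversion's behavior, here is a hedged sketch (the JSON literal is fabricated for the example; only `boost::json::value_to` and the `tag_invoke` overload above come from the diff):

```cpp
#include "etl/ETLState.hpp"

#include <boost/json.hpp>
#include <iostream>
#include <optional>

// Sketch only: a response containing an "error" field converts to std::nullopt;
// otherwise network_id (when present) lands in ETLState::networkID.
void
demo()
{
    auto const jv = boost::json::parse(R"({"result": {"info": {"network_id": 21337}}})");
    auto const state = boost::json::value_to<std::optional<etl::ETLState>>(jv);
    if (state && state->networkID)
        std::cout << "network id: " << *state->networkID << "\n";
}
```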
@@ -20,10 +20,11 @@
#include "etl/LoadBalancer.hpp"

#include "data/BackendInterface.hpp"
#include "etl/ETLHelpers.hpp"
#include "etl/ETLService.hpp"
#include "etl/ETLState.hpp"
#include "etl/NetworkValidatedLedgersInterface.hpp"
#include "etl/Source.hpp"
#include "feed/SubscriptionManagerInterface.hpp"
#include "util/Assert.hpp"
#include "util/Random.hpp"
#include "util/log/Logger.hpp"

@@ -55,31 +56,34 @@ LoadBalancer::make_LoadBalancer(
Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<feed::SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers
std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions,
std::shared_ptr<NetworkValidatedLedgersInterface> validatedLedgers,
SourceFactory sourceFactory
)
{
return std::make_shared<LoadBalancer>(config, ioc, backend, subscriptions, validatedLedgers);
return std::make_shared<LoadBalancer>(
config, ioc, std::move(backend), std::move(subscriptions), std::move(validatedLedgers), std::move(sourceFactory)
);
}

LoadBalancer::LoadBalancer(
Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<feed::SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers
std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions,
std::shared_ptr<NetworkValidatedLedgersInterface> validatedLedgers,
SourceFactory sourceFactory
)
{
auto const forwardingCacheTimeout = config.valueOr<float>("forwarding_cache_timeout", 0.f);
auto const forwardingCacheTimeout = config.valueOr<float>("forwarding.cache_timeout", 0.f);
if (forwardingCacheTimeout > 0.f) {
forwardingCache_ = impl::ForwardingCache{
std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::duration<float>{forwardingCacheTimeout})
};
forwardingCache_ = impl::ForwardingCache{Config::toMilliseconds(forwardingCacheTimeout)};
}

static constexpr std::uint32_t MAX_DOWNLOAD = 256;
if (auto value = config.maybeValue<uint32_t>("num_markers"); value) {
downloadRanges_ = std::clamp(*value, 1u, MAX_DOWNLOAD);
ASSERT(*value > 0 and *value <= MAX_DOWNLOAD, "'num_markers' value in config must be in range 1-256");
downloadRanges_ = *value;
} else if (backend->fetchLedgerRange()) {
downloadRanges_ = 4;
}
@@ -95,30 +99,37 @@ LoadBalancer::LoadBalancer(
}
};

auto const forwardingTimeout = Config::toMilliseconds(config.valueOr<float>("forwarding.request_timeout", 10.));
for (auto const& entry : config.array("etl_sources")) {
auto source = make_Source(
auto source = sourceFactory(
entry,
ioc,
backend,
subscriptions,
validatedLedgers,
forwardingTimeout,
[this]() {
if (not hasForwardingSource_)
if (not hasForwardingSource_.lock().get())
chooseForwardingSource();
},
[this]() { chooseForwardingSource(); },
[this]() { forwardingCache_->invalidate(); }
[this](bool wasForwarding) {
if (wasForwarding)
chooseForwardingSource();
},
[this]() {
if (forwardingCache_.has_value())
forwardingCache_->invalidate();
}
);

// checking etl node validity
auto const stateOpt = ETLState::fetchETLStateFromSource(source);
auto const stateOpt = ETLState::fetchETLStateFromSource(*source);

if (!stateOpt) {
checkOnETLFailure(fmt::format(
"Failed to fetch ETL state from source = {} Please check the configuration and network",
source.toString()
));
} else if (etlState_ && etlState_->networkID && stateOpt->networkID && etlState_->networkID != stateOpt->networkID) {
LOG(log_.warn()) << "Failed to fetch ETL state from source = " << source->toString()
<< " Please check the configuration and network";
} else if (etlState_ && etlState_->networkID && stateOpt->networkID &&
etlState_->networkID != stateOpt->networkID) {
checkOnETLFailure(fmt::format(
"ETL sources must be on the same network. Source network id = {} does not match others network id = {}",
*(stateOpt->networkID),
@@ -129,16 +140,19 @@ LoadBalancer::LoadBalancer(
}

sources_.push_back(std::move(source));
LOG(log_.info()) << "Added etl source - " << sources_.back().toString();
LOG(log_.info()) << "Added etl source - " << sources_.back()->toString();
}

if (!etlState_)
checkOnETLFailure("Failed to fetch ETL state from any source. Please check the configuration and network");

if (sources_.empty())
checkOnETLFailure("No ETL sources configured. Please check the configuration");

// This is made separate from source creation to prevent UB in case one of the sources calls
// chooseForwardingSource while we are still filling the sources_ vector
for (auto& source : sources_) {
source.run();
for (auto const& source : sources_) {
source->run();
}
}
@@ -147,59 +161,64 @@ LoadBalancer::~LoadBalancer()
sources_.clear();
}

std::pair<std::vector<std::string>, bool>
LoadBalancer::loadInitialLedger(uint32_t sequence, bool cacheOnly)
std::vector<std::string>
LoadBalancer::loadInitialLedger(uint32_t sequence, bool cacheOnly, std::chrono::steady_clock::duration retryAfter)
{
std::vector<std::string> response;
auto const success = execute(
execute(
[this, &response, &sequence, cacheOnly](auto& source) {
auto [data, res] = source.loadInitialLedger(sequence, downloadRanges_, cacheOnly);
auto [data, res] = source->loadInitialLedger(sequence, downloadRanges_, cacheOnly);

if (!res) {
LOG(log_.error()) << "Failed to download initial ledger."
<< " Sequence = " << sequence << " source = " << source.toString();
<< " Sequence = " << sequence << " source = " << source->toString();
} else {
response = std::move(data);
}

return res;
},
sequence
sequence,
retryAfter
);
return {std::move(response), success};
return response;
}

LoadBalancer::OptionalGetLedgerResponseType
LoadBalancer::fetchLedger(uint32_t ledgerSequence, bool getObjects, bool getObjectNeighbors)
LoadBalancer::fetchLedger(
uint32_t ledgerSequence,
bool getObjects,
bool getObjectNeighbors,
std::chrono::steady_clock::duration retryAfter
)
{
GetLedgerResponseType response;
bool const success = execute(
execute(
[&response, ledgerSequence, getObjects, getObjectNeighbors, log = log_](auto& source) {
auto [status, data] = source.fetchLedger(ledgerSequence, getObjects, getObjectNeighbors);
auto [status, data] = source->fetchLedger(ledgerSequence, getObjects, getObjectNeighbors);
response = std::move(data);
if (status.ok() && response.validated()) {
LOG(log.info()) << "Successfully fetched ledger = " << ledgerSequence
<< " from source = " << source.toString();
<< " from source = " << source->toString();
return true;
}

LOG(log.warn()) << "Could not fetch ledger " << ledgerSequence << ", Reply: " << response.DebugString()
<< ", error_code: " << status.error_code() << ", error_msg: " << status.error_message()
<< ", source = " << source.toString();
<< ", source = " << source->toString();
return false;
},
ledgerSequence
ledgerSequence,
retryAfter
);
if (success) {
return response;
}
return {};
return response;
}
std::optional<boost::json::object>
LoadBalancer::forwardToRippled(
boost::json::object const& request,
std::optional<std::string> const& clientIp,
bool isAdmin,
boost::asio::yield_context yield
)
{
@@ -209,15 +228,16 @@ LoadBalancer::forwardToRippled(
}
}

std::size_t sourceIdx = 0;
if (!sources_.empty())
sourceIdx = util::Random::uniform(0ul, sources_.size() - 1);
ASSERT(not sources_.empty(), "ETL sources must be configured to forward requests.");
std::size_t sourceIdx = util::Random::uniform(0ul, sources_.size() - 1);

auto numAttempts = 0u;

auto xUserValue = isAdmin ? ADMIN_FORWARDING_X_USER_VALUE : USER_FORWARDING_X_USER_VALUE;

std::optional<boost::json::object> response;
while (numAttempts < sources_.size()) {
if (auto res = sources_[sourceIdx].forwardToRippled(request, clientIp, yield)) {
if (auto res = sources_[sourceIdx]->forwardToRippled(request, clientIp, xUserValue, yield)) {
response = std::move(res);
break;
}
@@ -237,54 +257,51 @@ LoadBalancer::toJson() const
{
boost::json::array ret;
for (auto& src : sources_)
ret.push_back(src.toJson());
ret.push_back(src->toJson());

return ret;
}
template <typename Func>
bool
LoadBalancer::execute(Func f, uint32_t ledgerSequence)
void
LoadBalancer::execute(Func f, uint32_t ledgerSequence, std::chrono::steady_clock::duration retryAfter)
{
std::size_t sourceIdx = 0;
if (!sources_.empty())
sourceIdx = util::Random::uniform(0ul, sources_.size() - 1);
ASSERT(not sources_.empty(), "ETL sources must be configured to execute functions.");
size_t sourceIdx = util::Random::uniform(0ul, sources_.size() - 1);

auto numAttempts = 0;
size_t numAttempts = 0;

while (true) {
auto& source = sources_[sourceIdx];

LOG(log_.debug()) << "Attempting to execute func. ledger sequence = " << ledgerSequence
<< " - source = " << source.toString();
<< " - source = " << source->toString();
// Originally, this condition was (source->hasLedger(ledgerSequence) || true).
/* Sometimes rippled has the ledger but doesn't actually know it. However,
this does NOT happen in the normal case, so the workaround is safe to remove;
the || true is only needed when loading full history standalone. */
if (source.hasLedger(ledgerSequence)) {
if (source->hasLedger(ledgerSequence)) {
bool const res = f(source);
if (res) {
LOG(log_.debug()) << "Successfully executed func at source = " << source.toString()
LOG(log_.debug()) << "Successfully executed func at source = " << source->toString()
<< " - ledger sequence = " << ledgerSequence;
break;
}

LOG(log_.warn()) << "Failed to execute func at source = " << source.toString()
LOG(log_.warn()) << "Failed to execute func at source = " << source->toString()
<< " - ledger sequence = " << ledgerSequence;
} else {
LOG(log_.warn()) << "Ledger not present at source = " << source.toString()
LOG(log_.warn()) << "Ledger not present at source = " << source->toString()
<< " - ledger sequence = " << ledgerSequence;
}
sourceIdx = (sourceIdx + 1) % sources_.size();
numAttempts++;
if (numAttempts % sources_.size() == 0) {
LOG(log_.info()) << "Ledger sequence " << ledgerSequence
<< " is not yet available from any configured sources. "
<< "Sleeping and trying again";
std::this_thread::sleep_for(std::chrono::seconds(2));
<< " is not yet available from any configured sources. Sleeping and trying again";
std::this_thread::sleep_for(retryAfter);
}
}
return true;
}

std::optional<ETLState>
@@ -300,12 +317,15 @@ LoadBalancer::getETLState() noexcept
void
LoadBalancer::chooseForwardingSource()
{
hasForwardingSource_ = false;
LOG(log_.info()) << "Choosing a new source to forward subscriptions";
auto hasForwardingSourceLock = hasForwardingSource_.lock();
hasForwardingSourceLock.get() = false;
for (auto& source : sources_) {
if (source.isConnected()) {
source.setForwarding(true);
hasForwardingSource_ = true;
return;
if (not hasForwardingSourceLock.get() and source->isConnected()) {
source->setForwarding(true);
hasForwardingSourceLock.get() = true;
} else {
source->setForwarding(false);
}
}
}
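For illustration, here is a hypothetical call site (not part of the diff) showing what the new `retryAfter` parameter buys: tests can shrink the sleep between full passes over the sources instead of waiting the hard-coded 2 seconds.

```cpp
#include "etl/LoadBalancer.hpp"

#include <chrono>
#include <cstdint>

// Hypothetical usage: retry every 10ms in a test instead of the 2s default.
void
fetchQuickly(etl::LoadBalancer& balancer, std::uint32_t sequence)
{
    auto const response = balancer.fetchLedger(
        sequence,
        /* getObjects = */ true,
        /* getObjectNeighbors = */ false,
        std::chrono::milliseconds{10}
    );
}
```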
@@ -20,11 +20,12 @@
#pragma once

#include "data/BackendInterface.hpp"
#include "etl/ETLHelpers.hpp"
#include "etl/ETLState.hpp"
#include "etl/NetworkValidatedLedgersInterface.hpp"
#include "etl/Source.hpp"
#include "etl/impl/ForwardingCache.hpp"
#include "feed/SubscriptionManager.hpp"
#include "feed/SubscriptionManagerInterface.hpp"
#include "util/Mutex.hpp"
#include "util/config/Config.hpp"
#include "util/log/Logger.hpp"

@@ -38,22 +39,14 @@
#include <org/xrpl/rpc/v1/ledger.pb.h>
#include <ripple/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>

#include <atomic>
#include <chrono>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <string_view>
#include <vector>

namespace etl {
class ProbingSource;
} // namespace etl

namespace feed {
class SubscriptionManager;
} // namespace feed

namespace etl {

/**
@@ -73,15 +66,30 @@ private:
static constexpr std::uint32_t DEFAULT_DOWNLOAD_RANGES = 16;

util::Logger log_{"ETL"};
// Forwarding cache must be destroyed after sources because sources have a callnack to invalidate cache
// Forwarding cache must be destroyed after sources because sources have a callback to invalidate cache
std::optional<impl::ForwardingCache> forwardingCache_;
std::vector<Source> sources_;
std::optional<std::string> forwardingXUserValue_;

std::vector<SourcePtr> sources_;
std::optional<ETLState> etlState_;
std::uint32_t downloadRanges_ =
DEFAULT_DOWNLOAD_RANGES; /*< The number of markers to use when downloading initial ledger */
std::atomic_bool hasForwardingSource_{false};

// Using mutex instead of atomic_bool because choosing a new source to
// forward messages should be done with a mutual exclusion otherwise there will be a race condition
util::Mutex<bool> hasForwardingSource_{false};

public:
/**
* @brief Value for the X-User header when forwarding admin requests
*/
static constexpr std::string_view ADMIN_FORWARDING_X_USER_VALUE = "clio_admin";

/**
* @brief Value for the X-User header when forwarding user requests
*/
static constexpr std::string_view USER_FORWARDING_X_USER_VALUE = "clio_user";

/**
* @brief Create an instance of the load balancer.
*
@@ -90,13 +98,15 @@ public:
* @param backend BackendInterface implementation
* @param subscriptions Subscription manager
* @param validatedLedgers The network validated ledgers datastructure
* @param sourceFactory A factory function to create a source
*/
LoadBalancer(
util::Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<feed::SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers
std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions,
std::shared_ptr<NetworkValidatedLedgersInterface> validatedLedgers,
SourceFactory sourceFactory = make_Source
);

/**
@@ -107,6 +117,7 @@ public:
* @param backend BackendInterface implementation
* @param subscriptions Subscription manager
* @param validatedLedgers The network validated ledgers datastructure
* @param sourceFactory A factory function to create a source
* @return A shared pointer to a new instance of LoadBalancer
*/
static std::shared_ptr<LoadBalancer>
@@ -114,22 +125,28 @@ public:
util::Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<feed::SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers
std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions,
std::shared_ptr<NetworkValidatedLedgersInterface> validatedLedgers,
SourceFactory sourceFactory = make_Source
);

~LoadBalancer();

/**
* @brief Load the initial ledger, writing data to the queue.
* @note This function will retry indefinitely until the ledger is downloaded.
*
* @param sequence Sequence of ledger to download
* @param cacheOnly Whether to only write to cache and not to the DB; defaults to false
* @return A std::pair<std::vector<std::string>, bool> The ledger data and a bool indicating whether the download
* was successful
* @param retryAfter Time to wait between retries (2 seconds by default)
* @return A std::vector<std::string> The ledger data
*/
std::pair<std::vector<std::string>, bool>
loadInitialLedger(uint32_t sequence, bool cacheOnly = false);
std::vector<std::string>
loadInitialLedger(
uint32_t sequence,
bool cacheOnly = false,
std::chrono::steady_clock::duration retryAfter = std::chrono::seconds{2}
);

/**
* @brief Fetch data for a specific ledger.
@@ -140,11 +157,17 @@ public:
* @param ledgerSequence Sequence of the ledger to fetch
* @param getObjects Whether to get the account state diff between this ledger and the prior one
* @param getObjectNeighbors Whether to request object neighbors
* @param retryAfter Time to wait between retries (2 seconds by default)
* @return The extracted data, if extraction was successful. If the ledger was found
* in the database or the server is shutting down, the optional will be empty
*/
OptionalGetLedgerResponseType
fetchLedger(uint32_t ledgerSequence, bool getObjects, bool getObjectNeighbors);
fetchLedger(
uint32_t ledgerSequence,
bool getObjects,
bool getObjectNeighbors,
std::chrono::steady_clock::duration retryAfter = std::chrono::seconds{2}
);

/**
* @brief Represent the state of this load balancer as a JSON object
@@ -159,6 +182,7 @@ public:
*
* @param request JSON-RPC request to forward
* @param clientIp The IP address of the peer, if known
* @param isAdmin Whether the request is from an admin
* @param yield The coroutine context
* @return Response received from rippled node as JSON object on success; nullopt on failure
*/
@@ -166,6 +190,7 @@ public:
forwardToRippled(
boost::json::object const& request,
std::optional<std::string> const& clientIp,
bool isAdmin,
boost::asio::yield_context yield
);

@@ -186,12 +211,12 @@ private:
*
* @param f Function to execute. This function takes the ETL source as an argument, and returns a bool
* @param ledgerSequence f is executed for each Source that has this ledger
* @return true if f was eventually executed successfully. false if the ledger was found in the database or the
* @param retryAfter Time to wait between retries (2 seconds by default)
* server is shutting down
*/
template <typename Func>
bool
execute(Func f, uint32_t ledgerSequence);
void
execute(Func f, uint32_t ledgerSequence, std::chrono::steady_clock::duration retryAfter = std::chrono::seconds{2});

/**
* @brief Choose a new source to forward requests
src/etl/NetworkValidatedLedgers.cpp (new file, 65 lines)
@@ -0,0 +1,65 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2024, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include "etl/NetworkValidatedLedgers.hpp"

#include <chrono>
#include <cstdint>
#include <memory>
#include <mutex>
#include <optional>

namespace etl {
std::shared_ptr<NetworkValidatedLedgers>
NetworkValidatedLedgers::make_ValidatedLedgers()
{
return std::make_shared<NetworkValidatedLedgers>();
}

void
NetworkValidatedLedgers::push(uint32_t idx)
{
std::lock_guard const lck(m_);
if (!max_ || idx > *max_)
max_ = idx;
cv_.notify_all();
}

std::optional<uint32_t>
NetworkValidatedLedgers::getMostRecent()
{
std::unique_lock lck(m_);
cv_.wait(lck, [this]() { return max_; });
return max_;
}

bool
NetworkValidatedLedgers::waitUntilValidatedByNetwork(uint32_t sequence, std::optional<uint32_t> maxWaitMs)
{
std::unique_lock lck(m_);
auto pred = [sequence, this]() -> bool { return (max_ && sequence <= *max_); };
if (maxWaitMs) {
cv_.wait_for(lck, std::chrono::milliseconds(*maxWaitMs));
} else {
cv_.wait(lck, pred);
}
return pred();
}

} // namespace etl
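A small usage sketch (the thread roles are illustrative; the member functions are the ones defined above). Note that the timed branch performs a single `wait_for` without a predicate and then re-checks the predicate once, so a spurious wakeup can end the wait early:

```cpp
#include "etl/NetworkValidatedLedgers.hpp"

#include <thread>

// Illustrative wiring: one thread publishes validated sequences (in Clio this
// is driven by the rippled "ledgers" stream), another waits on them.
void
demo()
{
    auto ledgers = etl::NetworkValidatedLedgers::make_ValidatedLedgers();

    std::thread producer([&ledgers] { ledgers->push(100); });

    // Blocks until sequence 100 is validated or roughly 500ms elapse;
    // false means the sequence was not seen in time.
    if (ledgers->waitUntilValidatedByNetwork(100, 500)) {
        // ... proceed with extraction of ledger 100 ...
    }
    producer.join();
}
```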
src/etl/NetworkValidatedLedgers.hpp (new file, 86 lines)
@@ -0,0 +1,86 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include "etl/NetworkValidatedLedgersInterface.hpp"

#include <condition_variable>
#include <cstdint>
#include <memory>
#include <mutex>
#include <optional>

namespace etl {

/**
* @brief This datastructure is used to keep track of the sequence of the most recent ledger validated by the network.
*
* There are two methods that will wait until certain conditions are met. This datastructure is able to be "stopped".
* When the datastructure is stopped, any threads currently waiting are unblocked.
* Any later calls to methods of this datastructure will not wait. Once the datastructure is stopped, the datastructure
* remains stopped for the rest of its lifetime.
*/
class NetworkValidatedLedgers : public NetworkValidatedLedgersInterface {
// max sequence validated by network
std::optional<uint32_t> max_;

mutable std::mutex m_;
std::condition_variable cv_;

public:
/**
* @brief A factory function for NetworkValidatedLedgers
*
* @return A shared pointer to a new instance of NetworkValidatedLedgers
*/
static std::shared_ptr<NetworkValidatedLedgers>
make_ValidatedLedgers();

/**
* @brief Notify the datastructure that idx has been validated by the network.
*
* @param idx Sequence validated by network
*/
void
push(uint32_t idx) final;

/**
* @brief Get most recently validated sequence.
*
* If no ledgers are known to have been validated, this function waits until the next ledger is validated
*
* @return Sequence of most recently validated ledger. empty optional if the datastructure has been stopped
*/
std::optional<uint32_t>
getMostRecent() final;

/**
* @brief Waits for the sequence to be validated by the network.
*
* @param sequence The sequence to wait for
* @param maxWaitMs Maximum time to wait for the sequence to be validated. If empty, wait indefinitely
* @return true if sequence was validated; a return value of false means the datastructure has been stopped
*/
bool
waitUntilValidatedByNetwork(uint32_t sequence, std::optional<uint32_t> maxWaitMs = {}) final;
};

} // namespace etl
src/etl/NetworkValidatedLedgersInterface.hpp (new file, 64 lines)
@@ -0,0 +1,64 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

/** @file */
#pragma once

#include <cstdint>
#include <optional>

namespace etl {

/**
* @brief An interface for NetworkValidatedLedgers
*/
class NetworkValidatedLedgersInterface {
public:
virtual ~NetworkValidatedLedgersInterface() = default;

/**
* @brief Notify the datastructure that idx has been validated by the network.
*
* @param idx Sequence validated by network
*/
virtual void
push(uint32_t idx) = 0;

/**
* @brief Get most recently validated sequence.
*
* If no ledgers are known to have been validated, this function waits until the next ledger is validated
*
* @return Sequence of most recently validated ledger. empty optional if the datastructure has been stopped
*/
virtual std::optional<uint32_t>
getMostRecent() = 0;

/**
* @brief Waits for the sequence to be validated by the network.
*
* @param sequence The sequence to wait for
* @param maxWaitMs Maximum time to wait for the sequence to be validated. If empty, wait indefinitely
* @return true if sequence was validated; a return value of false means the datastructure has been stopped
*/
virtual bool
waitUntilValidatedByNetwork(uint32_t sequence, std::optional<uint32_t> maxWaitMs = {}) = 0;
};

} // namespace etl
@@ -1,31 +1,40 @@
# ETL subsystem

A single clio node has one or more ETL sources, specified in the config
file. clio will subscribe to the `ledgers` stream of each of the ETL
sources. This stream sends a message whenever a new ledger is validated. Upon
receiving a message on the stream, clio will then fetch the data associated
with the newly validated ledger from one of the ETL sources. The fetch is
performed via a gRPC request (`GetLedger`). This request returns the ledger
header, transactions+metadata blobs, and every ledger object
added/modified/deleted as part of this ledger. ETL then writes all of this data
to the databases, and moves on to the next ledger. ETL does not apply
transactions, but rather extracts the already computed results of those
transactions (all of the added/modified/deleted SHAMap leaf nodes of the state
tree).
A single Clio node has one or more ETL sources specified in the config file. Clio subscribes to the `ledgers` stream of each of the ETL sources. The stream sends a message whenever a new ledger is validated.

If the database is entirely empty, ETL must download an entire ledger in full
(as opposed to just the diff, as described above). This download is done via the
`GetLedgerData` gRPC request. `GetLedgerData` allows clients to page through an
entire ledger over several RPC calls. ETL will page through an entire ledger,
and write each object to the database.
Upon receiving a message on the stream, Clio fetches the data associated with the newly validated ledger from one of the ETL sources. The fetch is performed via a gRPC request called `GetLedger`. This request returns the ledger header, transactions and metadata blobs, and every ledger object added/modified/deleted as part of this ledger. The ETL subsystem then writes all of this data to the databases, and moves on to the next ledger.

If the database is not empty, clio will first come up in a "soft"
read-only mode. In read-only mode, the server does not perform ETL and simply
publishes new ledgers as they are written to the database.
If the database is not updated within a certain time period
(currently hard coded at 20 seconds), clio will begin the ETL
process and start writing to the database. The database will report an error when
trying to write a record with a key that already exists. ETL uses this error to
determine that another process is writing to the database, and subsequently
falls back to a soft read-only mode. clio can also operate in strict
read-only mode, in which case they will never write to the database.
If the database is not empty, clio will first come up in a "soft" read-only mode. In read-only mode, the server does not perform ETL and simply publishes new ledgers as they are written to the database. If the database is not updated within a certain time period (currently hard coded at 20 seconds), clio will begin the ETL process and start writing to the database. The database will report an error when trying to write a record with a key that already exists. ETL uses this error to determine that another process is writing to the database, and subsequently falls back to a soft read-only mode. clio can also operate in strict read-only mode, in which case it will never write to the database.
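In pseudocode, the takeover-and-fallback cycle described above looks roughly like this (a simplified sketch; the helper names are illustrative, not Clio's actual symbols):

```cpp
// Simplified sketch of the soft read-only fallback described above.
while (running) {
    if (!strictReadOnly && lastDbUpdateAge() > std::chrono::seconds{20}) {
        // Nothing was written for too long: try to become the writer.
        if (!writeLedger(nextSequence)) {
            // The key already existed, so another process is writing;
            // fall back to soft read-only mode.
            becomeSoftReadOnly();
        }
    } else {
        publishNewLedgersFromDb();  // read-only: publish what others wrote
    }
}
```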
## Ledger cache

To efficiently reduce database load and improve RPC performance, we maintain a ledger cache in memory. The cache stores all entities of the latest ledger as a map of index to object, and is updated whenever a new ledger is validated.

The `successor` table stores each ledger's object indexes as a Linked List.



The Linked List is used by the cache loader to load all ledger objects belonging to the latest ledger into memory concurrently. The head of the Linked List is data::firstKey (**0x0000000000000000000000000000000000000000000000000000000000000000**), and the tail is data::lastKey (**0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF**).

The Linked List is partitioned into multiple segments by cursors, and each segment is picked up by a coroutine to load. In total, `cache.num_markers` coroutines load the ledger objects concurrently. A coroutine picks a segment from a queue and loads it in steps of `cache.page_fetch_size` until the queue is empty.

For example, if segment **0x08581464C55B0B2C8C4FA27FA8DE0ED695D3BE019E7BE0969C925F868FE27A51-0x08A67682E62229DA4D597D308C8F028ECF47962D5068A78802E22B258DC25D22** is assigned to a coroutine, the coroutine will load the ledger objects from index **0x08581464C55B0B2C8C4FA27FA8DE0ED695D3BE019E7BE0969C925F868FE27A51** to
**0x08A67682E62229DA4D597D308C8F028ECF47962D5068A78802E22B258DC25D22**. The coroutine will continuously request `cache.page_fetch_size` objects from the database until it reaches the end of the segment. After the coroutine finishes loading this segment, it will fetch the next segment in the queue and repeat.
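A condensed sketch of that loading loop (mirroring the `CacheLoader` changes later in this diff; the queue type and the `cache.update` call are simplified assumptions):

```cpp
// Sketch: each of the cache.num_markers workers pops a [start, end] segment
// from the queue and pages through it cache.page_fetch_size keys at a time.
while (auto cursor = queue.tryPop()) {
    auto [start, end] = *cursor;
    while (not token.isStopRequested()) {
        auto const page = backend->fetchLedgerPage(start, seq, pageFetchSize, false, token);
        cache.update(page.objects, seq);  // assumed cache API, simplified

        if (not page.cursor or *page.cursor >= end)
            break;             // finished this segment; pop the next one
        start = *page.cursor;  // resume from where the last page ended
    }
}
```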
Because of the nature of the Linked List, the cursors are crucial to balancing the workload of each coroutine. There are 3 types of cursor generation that can be used:

- **cache.num_diffs**: Cursors are generated from the objects changed in the latest `cache.num_diffs` ledgers (a simplified sketch of this strategy follows the list). The default value is 32. On *mainnet*, this type works well because the network is fairly busy and the number of changed objects in each ledger is relatively stable, so we are able to get enough cursors after removing the deleted objects.
For other networks, like *devnet*, the number of changed objects in each ledger is not stable. When the network is quiet, one coroutine may load a large number of objects while the other coroutines are idle. Below is a comparison of the number of cursors and loading time on *devnet*:

| Cursors | Loading time (seconds) |
| ------- | ---------------------- |
| 11 | 2072 |
| 33 | 983 |
| 120 | 953 |
| 200 | 843 |
| 250 | 816 |
| 500 | 792 |

- **cache.num_cursors_from_diff**: Cursors are generated from the objects changed in recent ledgers. The generator keeps reading earlier ledgers until it has `cache.num_cursors_from_diff` cursors. This type is the evolved version of `cache.num_diffs`: it removes the network-busyness factor and considers only the number of cursors, so cache loading can be tuned precisely with this configuration.

- **cache.num_cursors_from_account**: If the server does not have enough historical ledgers, another option is to generate the cursors from accounts. The generator keeps reading accounts from the `account_tx` table until there are `cache.num_cursors_from_account` cursors.
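A simplified sketch of the first strategy (`fetchLedgerDiff` is a real backend call; the surrounding scaffolding is illustrative):

```cpp
// Sketch: derive cursors from objects changed in the last numDiffs ledgers,
// skipping deletions, then sort them so adjacent keys delimit segments.
std::vector<ripple::uint256> cursors;
for (uint32_t i = 0; i < numDiffs; ++i) {
    for (auto const& obj : backend->fetchLedgerDiff(seq - i, yield)) {
        if (not obj.blob.empty())  // deleted objects come back with empty blobs
            cursors.push_back(obj.key);
    }
}
std::sort(cursors.begin(), cursors.end());
// Each pair of adjacent cursors becomes a [start, end] segment for a worker.
```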
@@ -20,37 +20,41 @@
#include "etl/Source.hpp"

#include "data/BackendInterface.hpp"
#include "etl/ETLHelpers.hpp"
#include "feed/SubscriptionManager.hpp"
#include "etl/NetworkValidatedLedgersInterface.hpp"
#include "etl/impl/ForwardingSource.hpp"
#include "etl/impl/GrpcSource.hpp"
#include "etl/impl/SourceImpl.hpp"
#include "etl/impl/SubscriptionSource.hpp"
#include "feed/SubscriptionManagerInterface.hpp"
#include "util/config/Config.hpp"

#include <boost/asio/io_context.hpp>

#include <chrono>
#include <memory>
#include <string>
#include <utility>

namespace etl {

template class SourceImpl<>;

Source
SourcePtr
make_Source(
util::Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<feed::SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers,
Source::OnDisconnectHook onDisconnect,
Source::OnConnectHook onConnect,
Source::OnLedgerClosedHook onLedgerClosed
std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions,
std::shared_ptr<NetworkValidatedLedgersInterface> validatedLedgers,
std::chrono::steady_clock::duration forwardingTimeout,
SourceBase::OnConnectHook onConnect,
SourceBase::OnDisconnectHook onDisconnect,
SourceBase::OnLedgerClosedHook onLedgerClosed
)
{
auto const ip = config.valueOr<std::string>("ip", {});
auto const wsPort = config.valueOr<std::string>("ws_port", {});
auto const grpcPort = config.valueOr<std::string>("grpc_port", {});

impl::ForwardingSource forwardingSource{ip, wsPort};
impl::ForwardingSource forwardingSource{ip, wsPort, forwardingTimeout};
impl::GrpcSource grpcSource{ip, grpcPort, std::move(backend)};
auto subscriptionSource = std::make_unique<impl::SubscriptionSource>(
ioc,
@@ -63,9 +67,9 @@ make_Source(
std::move(onLedgerClosed)
);

return Source{
return std::make_unique<impl::SourceImpl<>>(
ip, wsPort, grpcPort, std::move(grpcSource), std::move(subscriptionSource), std::move(forwardingSource)
};
);
}

} // namespace etl
@@ -20,11 +20,8 @@
#pragma once

#include "data/BackendInterface.hpp"
#include "etl/ETLHelpers.hpp"
#include "etl/impl/ForwardingSource.hpp"
#include "etl/impl/GrpcSource.hpp"
#include "etl/impl/SubscriptionSource.hpp"
#include "feed/SubscriptionManager.hpp"
#include "etl/NetworkValidatedLedgersInterface.hpp"
#include "feed/SubscriptionManagerInterface.hpp"
#include "util/config/Config.hpp"
#include "util/log/Logger.hpp"

@@ -37,9 +34,11 @@
#include <chrono>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
@@ -48,122 +47,48 @@ namespace etl {
/**
* @brief Provides an implementation of an ETL source
*
* @tparam GrpcSourceType The type of the gRPC source
* @tparam SubscriptionSourceTypePtr The type of the subscription source
* @tparam ForwardingSourceType The type of the forwarding source
*/
template <
typename GrpcSourceType = impl::GrpcSource,
typename SubscriptionSourceTypePtr = std::unique_ptr<impl::SubscriptionSource>,
typename ForwardingSourceType = impl::ForwardingSource>
class SourceImpl {
std::string ip_;
std::string wsPort_;
std::string grpcPort_;

GrpcSourceType grpcSource_;
SubscriptionSourceTypePtr subscriptionSource_;
ForwardingSourceType forwardingSource_;

class SourceBase {
public:
using OnConnectHook = impl::SubscriptionSource::OnConnectHook;
using OnDisconnectHook = impl::SubscriptionSource::OnDisconnectHook;
using OnLedgerClosedHook = impl::SubscriptionSource::OnLedgerClosedHook;
using OnConnectHook = std::function<void()>;
using OnDisconnectHook = std::function<void(bool)>;
using OnLedgerClosedHook = std::function<void()>;

/**
* @brief Construct a new SourceImpl object
*
* @param ip The IP of the source
* @param wsPort The web socket port of the source
* @param grpcPort The gRPC port of the source
* @param grpcSource The gRPC source
* @param subscriptionSource The subscription source
* @param forwardingSource The forwarding source
*/
template <typename SomeGrpcSourceType, typename SomeForwardingSourceType>
requires std::is_same_v<GrpcSourceType, SomeGrpcSourceType> and
std::is_same_v<ForwardingSourceType, SomeForwardingSourceType>
SourceImpl(
std::string ip,
std::string wsPort,
std::string grpcPort,
SomeGrpcSourceType&& grpcSource,
SubscriptionSourceTypePtr subscriptionSource,
SomeForwardingSourceType&& forwardingSource
)
: ip_(std::move(ip))
, wsPort_(std::move(wsPort))
, grpcPort_(std::move(grpcPort))
, grpcSource_(std::forward<SomeGrpcSourceType>(grpcSource))
, subscriptionSource_(std::move(subscriptionSource))
, forwardingSource_(std::forward<SomeForwardingSourceType>(forwardingSource))
{
}
virtual ~SourceBase() = default;

/**
* @brief Run subscriptions loop of the source
*/
void
run()
{
subscriptionSource_->run();
}
virtual void
run() = 0;

/**
* @brief Check if source is connected
*
* @return true if source is connected; false otherwise
*/
bool
isConnected() const
{
return subscriptionSource_->isConnected();
}
virtual bool
isConnected() const = 0;

/**
* @brief Set the forwarding state of the source.
*
* @param isForwarding Whether to forward or not
*/
void
setForwarding(bool isForwarding)
{
subscriptionSource_->setForwarding(isForwarding);
}
virtual void
setForwarding(bool isForwarding) = 0;

/**
* @brief Represent the source as a JSON object
*
* @return JSON representation of the source
*/
boost::json::object
toJson() const
{
boost::json::object res;

res["validated_range"] = subscriptionSource_->validatedRange();
res["is_connected"] = std::to_string(static_cast<int>(subscriptionSource_->isConnected()));
res["ip"] = ip_;
res["ws_port"] = wsPort_;
res["grpc_port"] = grpcPort_;

auto last = subscriptionSource_->lastMessageTime();
if (last.time_since_epoch().count() != 0) {
res["last_msg_age_seconds"] = std::to_string(
std::chrono::duration_cast<std::chrono::seconds>(std::chrono::steady_clock::now() - last).count()
);
}

return res;
}
virtual boost::json::object
toJson() const = 0;

/** @return String representation of the source (for debug) */
std::string
toString() const
{
return "{validated range: " + subscriptionSource_->validatedRange() + ", ip: " + ip_ +
", web socket port: " + wsPort_ + ", grpc port: " + grpcPort_ + "}";
}
virtual std::string
toString() const = 0;

/**
* @brief Check if ledger is known by this source.
@@ -171,11 +96,8 @@ public:
* @param sequence The ledger sequence to check
* @return true if ledger is in the range of this source; false otherwise
*/
bool
hasLedger(uint32_t sequence) const
{
return subscriptionSource_->hasLedger(sequence);
}
virtual bool
hasLedger(uint32_t sequence) const = 0;

/**
* @brief Fetch data for a specific ledger.
@@ -188,11 +110,8 @@ public:
* @param getObjectNeighbors Whether to request object neighbors; defaults to false
* @return A std::pair of the response status and the response itself
*/
std::pair<grpc::Status, org::xrpl::rpc::v1::GetLedgerResponse>
fetchLedger(uint32_t sequence, bool getObjects = true, bool getObjectNeighbors = false)
{
return grpcSource_.fetchLedger(sequence, getObjects, getObjectNeighbors);
}
virtual std::pair<grpc::Status, org::xrpl::rpc::v1::GetLedgerResponse>
fetchLedger(uint32_t sequence, bool getObjects = true, bool getObjectNeighbors = false) = 0;

/**
* @brief Download a ledger in full.
@@ -202,34 +121,40 @@ public:
* @param cacheOnly Only insert into cache, not the DB; defaults to false
* @return A std::pair of the data and a bool indicating whether the download was successful
*/
std::pair<std::vector<std::string>, bool>
loadInitialLedger(uint32_t sequence, std::uint32_t numMarkers, bool cacheOnly = false)
{
return grpcSource_.loadInitialLedger(sequence, numMarkers, cacheOnly);
}
virtual std::pair<std::vector<std::string>, bool>
loadInitialLedger(uint32_t sequence, std::uint32_t numMarkers, bool cacheOnly = false) = 0;

/**
* @brief Forward a request to rippled.
*
* @param request The request to forward
* @param forwardToRippledClientIp IP of the client forwarding this request if known
* @param xUserValue Value of the X-User header
* @param yield The coroutine context
* @return Response wrapped in an optional on success; nullopt otherwise
*/
std::optional<boost::json::object>
virtual std::optional<boost::json::object>
forwardToRippled(
boost::json::object const& request,
std::optional<std::string> const& forwardToRippledClientIp,
std::string_view xUserValue,
boost::asio::yield_context yield
) const
{
return forwardingSource_.forwardToRippled(request, forwardToRippledClientIp, yield);
}
) const = 0;
};

extern template class SourceImpl<>;
using SourcePtr = std::unique_ptr<SourceBase>;

using Source = SourceImpl<>;
using SourceFactory = std::function<SourcePtr(
util::Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions,
std::shared_ptr<NetworkValidatedLedgersInterface> validatedLedgers,
std::chrono::steady_clock::duration forwardingTimeout,
SourceBase::OnConnectHook onConnect,
SourceBase::OnDisconnectHook onDisconnect,
SourceBase::OnLedgerClosedHook onLedgerClosed
)>;

/**
* @brief Create a source
@@ -239,22 +164,24 @@ using Source = SourceImpl<>;
* @param backend BackendInterface implementation
* @param subscriptions Subscription manager
* @param validatedLedgers The network validated ledgers data structure
* @param onDisconnect The hook to call on disconnect
* @param forwardingTimeout The timeout for forwarding to rippled
* @param onConnect The hook to call on connect
* @param onDisconnect The hook to call on disconnect
* @param onLedgerClosed The hook to call on ledger closed. This is called when a ledger is closed and the source is set
* as forwarding.
* @return The created source
*/
Source
SourcePtr
make_Source(
util::Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<feed::SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers,
Source::OnDisconnectHook onDisconnect,
Source::OnConnectHook onConnect,
Source::OnLedgerClosedHook onLedgerClosed
std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions,
std::shared_ptr<NetworkValidatedLedgersInterface> validatedLedgers,
std::chrono::steady_clock::duration forwardingTimeout,
SourceBase::OnConnectHook onConnect,
SourceBase::OnDisconnectHook onDisconnect,
SourceBase::OnLedgerClosedHook onLedgerClosed
);

} // namespace etl
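One payoff of the `SourceBase`/`SourceFactory` split is dependency injection: tests can hand `LoadBalancer` a factory that produces mocks instead of network-backed sources. A hypothetical sketch (`MockSource` is not part of the diff):

```cpp
// Hypothetical test wiring: MockSource would derive from etl::SourceBase.
etl::SourceFactory mockFactory =
    [](util::Config const&,
       boost::asio::io_context&,
       std::shared_ptr<BackendInterface>,
       std::shared_ptr<feed::SubscriptionManagerInterface>,
       std::shared_ptr<etl::NetworkValidatedLedgersInterface>,
       std::chrono::steady_clock::duration,
       etl::SourceBase::OnConnectHook,
       etl::SourceBase::OnDisconnectHook,
       etl::SourceBase::OnLedgerClosedHook) -> etl::SourcePtr {
        return std::make_unique<MockSource>();
    };

auto balancer = etl::LoadBalancer::make_LoadBalancer(
    config, ioc, backend, subscriptions, validatedLedgers, std::move(mockFactory)
);
```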
@@ -19,6 +19,10 @@

#pragma once

#include "util/prometheus/Bool.hpp"
#include "util/prometheus/Label.hpp"
#include "util/prometheus/Prometheus.hpp"

#include <atomic>

namespace etl {
@@ -33,9 +37,19 @@ struct SystemState {
* In strict read-only mode, the process will never attempt to become the ETL writer, and will only publish ledgers
* as they are written to the database.
*/
bool isReadOnly = false;
util::prometheus::Bool isReadOnly = PrometheusService::boolMetric(
"read_only",
util::prometheus::Labels{},
"Whether the process is in strict read-only mode"
);

/** @brief Whether the process is writing to the database. */
util::prometheus::Bool isWriting = PrometheusService::boolMetric(
"etl_writing",
util::prometheus::Labels{},
"Whether the process is writing to the database"
);

std::atomic_bool isWriting = false; /**< @brief Whether the process is writing to the database. */
std::atomic_bool isStopping = false; /**< @brief Whether the software is stopping. */
std::atomic_bool writeConflict = false; /**< @brief Whether a write conflict was detected. */

@@ -46,7 +60,23 @@ struct SystemState {
* arrived from rippled and therefore can't extract the ledger diff. When this happens, Clio can't proceed with ETL
* and should log this error and only handle RPC requests.
*/
std::atomic_bool isAmendmentBlocked = false;
util::prometheus::Bool isAmendmentBlocked = PrometheusService::boolMetric(
"etl_amendment_blocked",
util::prometheus::Labels{},
"Whether clio detected an amendment block"
);

/**
* @brief Whether clio detected a corruption that needs manual attention.
*
* When corruption is detected, Clio should disable cache and stop the cache loading process in order to prevent
* further corruption.
*/
util::prometheus::Bool isCorruptionDetected = PrometheusService::boolMetric(
"etl_corruption_detected",
util::prometheus::Labels{},
"Whether clio detected a corruption that needs manual attention"
);
};

} // namespace etl
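The `util::prometheus::Bool` members keep bool-like semantics for callers while also exporting a gauge. A short usage sketch, assuming only the assignment and conversions already visible in this diff:

```cpp
// Sketch: SystemState metrics still read and write like plain bools.
etl::SystemState state;
state.isWriting = true;  // flips the flag and the exported metric together

// Explicit conversions, as used in ETLService::ETLService and toJson() above:
bool const readOnly = static_cast<bool>(state.isReadOnly);
int const writerFlag = static_cast<int>(state.isWriting);
```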
@@ -106,8 +106,8 @@ public:
return CallStatus::ERRORED;
}
if (!status_.ok()) {
LOG(log_.error()) << "AsyncCallData status_ not ok: "
<< " code = " << status_.error_code() << " message = " << status_.error_message();
LOG(log_.error()) << "AsyncCallData status_ not ok: code = " << status_.error_code()
<< " message = " << status_.error_message();
return CallStatus::ERRORED;
}
if (!next_->is_unlimited()) {
@@ -19,37 +19,20 @@

 #pragma once

-#include <any>
-#include <type_traits>
+#include <ripple/basics/base_uint.h>

-// Note: This is a workaround for util::Expected. This is not needed when using std::expected.
-// Will be removed after the migration to std::expected is complete (#1173)
-// Issue to track this removal can be found here: https://github.com/XRPLF/clio/issues/1174
+#include <cstdint>
+#include <vector>

-namespace util::async::impl {
+namespace etl::impl {

-/**
- * @brief A wrapper for std::any to workaround issues with boost.outcome
- */
-class Any {
-    std::any value_;
-
-public:
-    Any() = default;
-    Any(Any const&) = default;
-    Any(Any&&) = default;
-
-    // note: this needs to be `auto` instead of `std::any` because of a bug in gcc 11.4
-    Any(auto&& v)
-        requires(std::is_same_v<std::decay_t<decltype(v)>, std::any>)
-        : value_{std::forward<decltype(v)>(v)}
-    {
-    }
-
-    operator std::any&() noexcept
-    {
-        return value_;
-    }
-};
+struct CursorPair {
+    ripple::uint256 start;
+    ripple::uint256 end;
+};

-} // namespace util::async::impl
+struct BaseCursorProvider {
+    [[nodiscard]] std::vector<CursorPair> virtual getCursors(uint32_t seq) const = 0;
+    virtual ~BaseCursorProvider() = default;
+};
+
+} // namespace etl::impl
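To make the new interface concrete, here is a stand-alone implementation sketch. ripple::uint256 is replaced with a plain 64-bit key so it compiles without rippled headers, and the even keyspace split is an invented strategy, not either of the providers added later in this diff:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

using Key = std::uint64_t;  // stand-in for ripple::uint256

struct CursorPair {
    Key start;
    Key end;
};

struct BaseCursorProvider {
    [[nodiscard]] virtual std::vector<CursorPair> getCursors(std::uint32_t seq) const = 0;
    virtual ~BaseCursorProvider() = default;
};

// Invented strategy: split the keyspace into numCursors_ equal ranges.
class EvenSplitProvider : public BaseCursorProvider {
    std::size_t numCursors_;

public:
    explicit EvenSplitProvider(std::size_t numCursors) : numCursors_{numCursors}
    {
    }

    [[nodiscard]] std::vector<CursorPair>
    getCursors(std::uint32_t /*seq*/) const override
    {
        std::vector<CursorPair> pairs;
        Key const step = ~Key{0} / numCursors_;
        Key start = 0;
        for (std::size_t i = 0; i < numCursors_; ++i) {
            // the last range runs all the way to the maximum key
            Key const end = (i + 1 == numCursors_) ? ~Key{0} : start + step;
            pairs.push_back({start, end});
            start = end;
        }
        return pairs;
    }
};

int
main()
{
    EvenSplitProvider const provider{4};
    for (auto const& [start, end] : provider.getCursors(1))
        std::cout << std::hex << start << " -> " << end << '\n';
}

Each worker can then own one CursorPair and page through its [start, end) slice independently, which mirrors how the cache loader below distributes cursors to workers.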
@@ -21,12 +21,13 @@

 #include "data/BackendInterface.hpp"
 #include "etl/ETLHelpers.hpp"
-#include "etl/impl/CursorProvider.hpp"
+#include "etl/impl/BaseCursorProvider.hpp"
 #include "util/async/AnyExecutionContext.hpp"
 #include "util/async/AnyOperation.hpp"
 #include "util/log/Logger.hpp"

 #include <boost/algorithm/string/predicate.hpp>
+#include <boost/context/detail/config.hpp>
 #include <ripple/basics/Blob.h>
 #include <ripple/basics/base_uint.h>
 #include <ripple/basics/strHex.h>
@@ -38,7 +39,6 @@
 #include <cstdint>
 #include <functional>
 #include <memory>
-#include <optional>
 #include <ranges>
 #include <string>
 #include <vector>
@@ -86,7 +86,7 @@ public:
    stop() noexcept
    {
        for (auto& t : tasks_)
-            t.requestStop();
+            t.abort();
    }

    void
@@ -113,7 +113,7 @@ private:
    spawnWorker(uint32_t const seq, size_t cachePageFetchSize)
    {
        return ctx_.execute([this, seq, cachePageFetchSize](auto token) {
-            while (not token.isStopRequested()) {
+            while (not token.isStopRequested() and not cache_.get().isDisabled()) {
                auto cursor = queue_.tryPop();
                if (not cursor.has_value()) {
                    return; // queue is empty
@@ -122,7 +122,7 @@ private:
            auto [start, end] = cursor.value();
            LOG(log_.debug()) << "Starting a cursor: " << ripple::strHex(start);

-            while (not token.isStopRequested()) {
+            while (not token.isStopRequested() and not cache_.get().isDisabled()) {
                auto res = data::retryOnTimeout([this, seq, cachePageFetchSize, &start, token]() {
                    return backend_->fetchLedgerPage(start, seq, cachePageFetchSize, false, token);
                });
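The essence of both changed loops is: stop draining the cursor queue as soon as a stop is requested or the cache is disabled (for example after corruption is detected). A reduced, runnable model of that worker shape using std::jthread's stop token; the queue, the int items, and the corruption trigger are invented for the demo and stand in for clio's thread-safe queue and execution context:

#include <atomic>
#include <iostream>
#include <mutex>
#include <optional>
#include <queue>
#include <thread>

int
main()
{
    std::queue<int> queue;
    for (int i = 0; i < 8; ++i)
        queue.push(i);

    std::mutex m;
    std::atomic_bool cacheDisabled{false};

    // minimal tryPop, mimicking the shape of a thread-safe queue
    auto tryPop = [&]() -> std::optional<int> {
        std::scoped_lock const lock{m};
        if (queue.empty())
            return std::nullopt;
        int const v = queue.front();
        queue.pop();
        return v;
    };

    std::jthread worker{[&](std::stop_token token) {
        // the extra `not cacheDisabled` condition is the essence of the fix:
        // a disabled cache stops the workers instead of letting them keep fetching
        while (not token.stop_requested() and not cacheDisabled) {
            auto const item = tryPop();
            if (not item.has_value())
                return;  // queue is empty
            std::cout << "processed " << *item << '\n';
            if (*item == 3)
                cacheDisabled = true;  // simulate corruption being detected
        }
    }};
}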
src/etl/impl/CursorFromAccountProvider.hpp (new file, 82 lines)
@@ -0,0 +1,82 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2024, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include "data/BackendInterface.hpp"
#include "data/Types.hpp"
#include "etl/impl/BaseCursorProvider.hpp"

#include <ripple/basics/base_uint.h>

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <set>
#include <vector>

namespace etl::impl {

class CursorFromAccountProvider : public BaseCursorProvider {
    std::shared_ptr<BackendInterface> backend_;
    size_t numCursors_;
    size_t pageSize_;

public:
    CursorFromAccountProvider(std::shared_ptr<BackendInterface> const& backend, size_t numCursors, size_t pageSize)
        : backend_{backend}, numCursors_{numCursors}, pageSize_{pageSize}
    {
    }

    [[nodiscard]] std::vector<CursorPair>
    getCursors(uint32_t const seq) const override
    {
        namespace rg = std::ranges;

        auto accountRoots = [this, seq]() {
            return data::synchronousAndRetryOnTimeout([this, seq](auto yield) {
                return backend_->fetchAccountRoots(numCursors_, pageSize_, seq, yield);
            });
        }();

        rg::sort(accountRoots);
        std::vector<ripple::uint256> cursors{data::firstKey};
        rg::copy(accountRoots.begin(), accountRoots.end(), std::back_inserter(cursors));
        rg::sort(cursors);
        cursors.push_back(data::lastKey);

        std::vector<CursorPair> pairs;
        pairs.reserve(cursors.size());

        // FIXME: this should be `cursors | vs::pairwise` (C++23)
        std::transform(
            std::begin(cursors),
            std::prev(std::end(cursors)),
            std::next(std::begin(cursors)),
            std::back_inserter(pairs),
            [](auto&& a, auto&& b) -> CursorPair { return {a, b}; }
        );

        return pairs;
    }
};

} // namespace etl::impl
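Both new providers end with the same adjacent-pairs transform flagged by the FIXME comment. In isolation, with int keys instead of ripple::uint256 for brevity, the idiom looks like this; in C++23 the whole call collapses to `cursors | std::views::pairwise`:

#include <algorithm>
#include <iostream>
#include <iterator>
#include <utility>
#include <vector>

int
main()
{
    std::vector<int> const cursors{0, 10, 25, 40, 100};

    std::vector<std::pair<int, int>> pairs;
    pairs.reserve(cursors.size() - 1);

    // zip the range with itself shifted by one: [a,b,c,d] -> (a,b),(b,c),(c,d)
    std::transform(
        std::begin(cursors),
        std::prev(std::end(cursors)),    // first elements: all but the last
        std::next(std::begin(cursors)),  // second elements: all but the first
        std::back_inserter(pairs),
        [](int a, int b) { return std::pair{a, b}; }
    );

    for (auto const& [start, end] : pairs)
        std::cout << '[' << start << ", " << end << ")\n";
}

Note the `reserve(cursors.size() - 1)` here: N cursors produce N - 1 pairs, so the providers' `reserve(cursors.size())` above over-reserves by one element (harmless, but worth noting).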
src/etl/impl/CursorFromDiffProvider.hpp (new file, 109 lines)
@@ -0,0 +1,109 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2024, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include "data/BackendInterface.hpp"
#include "data/Types.hpp"
#include "etl/impl/BaseCursorProvider.hpp"
#include "util/Assert.hpp"

#include <ripple/basics/base_uint.h>

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <ranges>
#include <set>
#include <vector>

namespace etl::impl {

class CursorFromDiffProvider : public BaseCursorProvider {
    std::shared_ptr<BackendInterface> backend_;
    size_t numCursors_;

public:
    CursorFromDiffProvider(std::shared_ptr<BackendInterface> const& backend, size_t numCursors)
        : backend_{backend}, numCursors_{numCursors}
    {
    }

    [[nodiscard]] std::vector<CursorPair>
    getCursors(uint32_t const seq) const override
    {
        namespace rg = std::ranges;
        namespace vs = std::views;

        auto const fetchDiff = [this, seq](uint32_t offset) {
            return data::synchronousAndRetryOnTimeout([this, seq, offset](auto yield) {
                return backend_->fetchLedgerDiff(seq - offset, yield);
            });
        };

        auto const range = backend_->fetchLedgerRange();
        ASSERT(range.has_value(), "Ledger range is not available when cache is loading");

        std::set<ripple::uint256> liveCursors;
        std::set<ripple::uint256> deletedCursors;
        auto i = 0;
        while (liveCursors.size() < numCursors_ and seq - i >= range->minSequence) {
            auto diffs = fetchDiff(i++);
            rg::copy(
                diffs  //
                    | vs::filter([&deletedCursors](auto const& obj) {
                          return not obj.blob.empty() and !deletedCursors.contains(obj.key);
                      })  //
                    | vs::transform([](auto const& obj) { return obj.key; }),
                std::inserter(liveCursors, std::begin(liveCursors))
            );

            // track the deleted objects
            rg::copy(
                diffs  //
                    | vs::filter([](auto const& obj) { return obj.blob.empty(); })  //
                    | vs::transform([](auto const& obj) { return obj.key; }),
                std::inserter(deletedCursors, std::begin(deletedCursors))
            );
        }

        std::vector<ripple::uint256> cursors{data::firstKey};
        rg::copy(liveCursors | vs::take(std::min(liveCursors.size(), numCursors_)), std::back_inserter(cursors));
        rg::sort(cursors);
        cursors.push_back(data::lastKey);

        std::vector<CursorPair> pairs;
        pairs.reserve(cursors.size());

        // FIXME: this should be `cursors | vs::pairwise` (C++23)
        std::transform(
            std::begin(cursors),
            std::prev(std::end(cursors)),
            std::next(std::begin(cursors)),
            std::back_inserter(pairs),
            [](auto&& a, auto&& b) -> CursorPair { return {a, b}; }
        );

        return pairs;
    }
};

} // namespace etl::impl
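The key subtlety in CursorFromDiffProvider is the deletedCursors set: diffs are walked from the newest ledger backwards, and a key deleted in a newer diff must not be resurrected by an older live version of the same object. A miniature, runnable model of that bookkeeping; Obj, the sample diffs, and the 64-bit keys are invented for the demo:

#include <cstdint>
#include <iostream>
#include <set>
#include <string>
#include <vector>

struct Obj {
    std::uint64_t key;
    std::string blob;  // empty blob == object deleted in that diff
};

int
main()
{
    // diffs[0] is the newest ledger's diff, diffs[1] the one before, ...
    std::vector<std::vector<Obj>> const diffs{
        {{1, ""}, {2, "live"}},       // key 1 was deleted most recently
        {{1, "stale"}, {3, "live"}},  // older live version of key 1 must be ignored
    };

    std::set<std::uint64_t> liveCursors;
    std::set<std::uint64_t> deletedCursors;

    for (auto const& diff : diffs) {
        for (auto const& obj : diff) {
            if (obj.blob.empty())
                deletedCursors.insert(obj.key);
            else if (not deletedCursors.contains(obj.key))
                liveCursors.insert(obj.key);
        }
    }

    for (auto const key : liveCursors)
        std::cout << "cursor candidate: " << key << '\n';  // prints 2 and 3
}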