Mirror of https://github.com/XRPLF/clio.git (synced 2025-11-04 20:05:51 +00:00)

Compare commits (140 commits)
| SHA1 |
|---|
| b1aafbaacb |
| 965a2883fe |
| 1e9eaee311 |
| a15ee2a8cc |
| 26ed78fc05 |
| 8575f786a8 |
| 08b02c64cb |
| b358649cf9 |
| 6bd72355db |
| a1699d7484 |
| 957aadd25a |
| 8f89a5913d |
| ecfe5e84e5 |
| 03c0940649 |
| dc5aacfe39 |
| 3fda74e3f7 |
| df27c4e629 |
| 37ee74c293 |
| ec335176bb |
| ab33b26ec4 |
| 28c8fa2a9a |
| 12bbed194c |
| 1fa09006f8 |
| e3b6fc4bd4 |
| 34594ff8c0 |
| 40eeb57920 |
| 3eb36c049c |
| 81602e8ae7 |
| 0cef9e0620 |
| 81d1b30607 |
| 923d021c83 |
| cd2b09ffb7 |
| 3c62a1f42c |
| f97e0690c8 |
| eeaccbabd9 |
| 13d2d4e2ca |
| 350a45e7e2 |
| ce86572274 |
| ac97788db8 |
| 2893492569 |
| b63e98bda0 |
| f4df5c2185 |
| 93d5c12b14 |
| 2514b7986e |
| d30e63d49a |
| 61f1e0853d |
| eb1831c489 |
| 07bd4b0760 |
| e26a1e37b5 |
| e89640bcfb |
| ae135759ef |
| 28188aa0f9 |
| af485a0634 |
| b609298870 |
| d077093a8d |
| 781f3b3c48 |
| a8bae96ad4 |
| fe9649d872 |
| 431b5f5ab8 |
| b1dc2775fb |
| dd35a7cfd2 |
| a9d685d5c0 |
| 6065d324b5 |
| fe7b5fe18f |
| 1c663988f5 |
| d11d566121 |
| a467cb2526 |
| f62e36dc94 |
| d933ce2a29 |
| db751e3807 |
| 3c4a8f0cfb |
| 397ce97175 |
| ac6ad13f6c |
| 7d1d1749bc |
| acf359d631 |
| a34e107b86 |
| b886586de3 |
| a57abb15a3 |
| c87586a265 |
| 8172670c93 |
| 3fdcd3315b |
| dd018f1c5e |
| c2b462da75 |
| 252920ec57 |
| 9ef6801c55 |
| 24c562fa2a |
| 35f119a268 |
| 1be368dcaf |
| a5fbb01299 |
| 3b75d88a35 |
| f0224581a5 |
| b998473673 |
| 8ebe2d6a80 |
| 3bab90ca7a |
| 74660aebf1 |
| db08de466a |
| 1bacad9e49 |
| ca16858878 |
| feae85782c |
| b016c1d7ba |
| 0597a9d685 |
| 05bea6a971 |
| fa660ef400 |
| 25d9e3cc36 |
| 58f13e1660 |
| a16b680a7a |
| 320ebaa5d2 |
| 058df4d12a |
| 5145d07693 |
| 5e9e5f6f65 |
| 1ce7bcbc28 |
| 243858df12 |
| b363cc93af |
| 200d97f0de |
| 1ec5d3e5a3 |
| e062121917 |
| 1aab2b94b1 |
| 5de87b9ef8 |
| 398db13f4d |
| 5e8ffb66b4 |
| 939740494b |
| ff3d2b5600 |
| 7080b4d549 |
| 8d783ecd6a |
| 5e6682ddc7 |
| fca29694a0 |
| a541e6d00e |
| 9bd38dd290 |
| f683b25f76 |
| 91ad1ffc3b |
| 64b4a908da |
| ac752c656e |
| 4fe868aaeb |
| 59eb40a1f2 |
| 0b5f667e4a |
| fa42c5c900 |
| 0818b6ce5b |
| e2cc56d25a |
| caaa01bf0f |
| 4b53bef1f5 |
.clang-format

@@ -1,7 +1,7 @@
---
Language: Cpp
AccessModifierOffset: -4
AlignAfterOpenBracket: AlwaysBreak
AlignAfterOpenBracket: BlockIndent
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
AlignEscapedNewlinesLeft: true
@@ -18,20 +18,8 @@ AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: true
BinPackArguments: false
BinPackParameters: false
BraceWrapping:
  AfterClass: true
  AfterControlStatement: true
  AfterEnum: false
  AfterFunction: true
  AfterNamespace: false
  AfterObjCDeclaration: true
  AfterStruct: true
  AfterUnion: true
  BeforeCatch: true
  BeforeElse: true
  IndentBraces: false
BreakBeforeBinaryOperators: false
BreakBeforeBraces: Custom
BreakBeforeBraces: WebKit
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: true
ColumnLimit: 120
@@ -43,13 +31,15 @@ Cpp11BracedListStyle: true
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
FixNamespaceComments: true
ForEachMacros: [ Q_FOREACH, BOOST_FOREACH ]
IncludeBlocks: Regroup
IncludeCategories:
  - Regex: '^<(BeastConfig)'
    Priority: 0
  - Regex: '^<(ripple)/'
  - Regex: '^".*"$'
    Priority: 1
  - Regex: '^<.*\.(h|hpp)>$'
    Priority: 2
  - Regex: '^<(boost)/'
  - Regex: '^<.*>$'
    Priority: 3
  - Regex: '.*'
    Priority: 4
@@ -58,6 +48,8 @@ IndentCaseLabels: true
IndentFunctionDeclarationAfterType: false
IndentWidth: 4
IndentWrappedFunctionNames: false
IndentRequiresClause: true
RequiresClausePosition: OwnLine
KeepEmptyLinesAtTheStartOfBlocks: false
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
@@ -70,6 +62,7 @@ PenaltyBreakString: 1000
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 200
PointerAlignment: Left
QualifierAlignment: Right
ReflowComments: true
SortIncludes: true
SpaceAfterCStyleCast: false
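For reference, the whole tree can be reformatted against this config the same way the pre-commit hook (shown further down) does; a minimal sketch, assuming clang-format 17 or newer is on PATH:

```bash
# Reformat all sources in place with the repository's .clang-format
# (same file globs as the pre-commit hook).
find src unittests -type f \( -name '*.cpp' -o -name '*.hpp' -o -name '*.ipp' \) -print0 \
    | xargs -0 clang-format -i
```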
.clang-tidy (new file, 133 lines)

@@ -0,0 +1,133 @@
---
Checks: '-*,
    bugprone-argument-comment,
    bugprone-assert-side-effect,
    bugprone-bad-signal-to-kill-thread,
    bugprone-bool-pointer-implicit-conversion,
    bugprone-copy-constructor-init,
    bugprone-dangling-handle,
    bugprone-dynamic-static-initializers,
    bugprone-empty-catch,
    bugprone-fold-init-type,
    bugprone-forward-declaration-namespace,
    bugprone-inaccurate-erase,
    bugprone-incorrect-roundings,
    bugprone-infinite-loop,
    bugprone-integer-division,
    bugprone-lambda-function-name,
    bugprone-macro-parentheses,
    bugprone-macro-repeated-side-effects,
    bugprone-misplaced-operator-in-strlen-in-alloc,
    bugprone-misplaced-pointer-arithmetic-in-alloc,
    bugprone-misplaced-widening-cast,
    bugprone-move-forwarding-reference,
    bugprone-multiple-new-in-one-expression,
    bugprone-multiple-statement-macro,
    bugprone-no-escape,
    bugprone-non-zero-enum-to-bool-conversion,
    bugprone-parent-virtual-call,
    bugprone-posix-return,
    bugprone-redundant-branch-condition,
    bugprone-reserved-identifier,
    bugprone-unused-return-value,
    bugprone-shared-ptr-array-mismatch,
    bugprone-signal-handler,
    bugprone-signed-char-misuse,
    bugprone-sizeof-container,
    bugprone-sizeof-expression,
    bugprone-spuriously-wake-up-functions,
    bugprone-standalone-empty,
    bugprone-string-constructor,
    bugprone-string-integer-assignment,
    bugprone-string-literal-with-embedded-nul,
    bugprone-stringview-nullptr,
    bugprone-suspicious-enum-usage,
    bugprone-suspicious-include,
    bugprone-suspicious-memory-comparison,
    bugprone-suspicious-memset-usage,
    bugprone-suspicious-missing-comma,
    bugprone-suspicious-realloc-usage,
    bugprone-suspicious-semicolon,
    bugprone-suspicious-string-compare,
    bugprone-swapped-arguments,
    bugprone-switch-missing-default-case,
    bugprone-terminating-continue,
    bugprone-throw-keyword-missing,
    bugprone-too-small-loop-variable,
    bugprone-undefined-memory-manipulation,
    bugprone-undelegated-constructor,
    bugprone-unhandled-exception-at-new,
    bugprone-unhandled-self-assignment,
    bugprone-unique-ptr-array-mismatch,
    bugprone-unsafe-functions,
    bugprone-unused-raii,
    bugprone-use-after-move,
    bugprone-virtual-near-miss,
    cppcoreguidelines-init-variables,
    cppcoreguidelines-misleading-capture-default-by-value,
    cppcoreguidelines-pro-type-member-init,
    cppcoreguidelines-pro-type-static-cast-downcast,
    cppcoreguidelines-rvalue-reference-param-not-moved,
    cppcoreguidelines-use-default-member-init,
    cppcoreguidelines-virtual-class-destructor,
    llvm-namespace-comment,
    misc-const-correctness,
    misc-definitions-in-headers,
    misc-header-include-cycle,
    misc-include-cleaner,
    misc-misplaced-const,
    misc-redundant-expression,
    misc-static-assert,
    misc-throw-by-value-catch-by-reference,
    misc-unused-alias-decls,
    misc-unused-using-decls,
    modernize-concat-nested-namespaces,
    modernize-deprecated-headers,
    modernize-make-shared,
    modernize-make-unique,
    modernize-pass-by-value,
    modernize-type-traits,
    modernize-use-emplace,
    modernize-use-equals-default,
    modernize-use-equals-delete,
    modernize-use-override,
    modernize-use-using,
    performance-faster-string-find,
    performance-for-range-copy,
    performance-implicit-conversion-in-loop,
    performance-inefficient-vector-operation,
    performance-move-const-arg,
    performance-move-constructor-init,
    performance-no-automatic-move,
    performance-trivially-destructible,
    readability-avoid-const-params-in-decls,
    readability-braces-around-statements,
    readability-const-return-type,
    readability-container-contains,
    readability-container-size-empty,
    readability-convert-member-functions-to-static,
    readability-duplicate-include,
    readability-else-after-return,
    readability-implicit-bool-conversion,
    readability-inconsistent-declaration-parameter-name,
    readability-make-member-function-const,
    readability-misleading-indentation,
    readability-non-const-parameter,
    readability-redundant-declaration,
    readability-redundant-member-init,
    readability-redundant-string-init,
    readability-simplify-boolean-expr,
    readability-static-accessed-through-instance,
    readability-static-definition-in-anonymous-namespace,
    readability-suspicious-call-argument
'

CheckOptions:
    readability-braces-around-statements.ShortStatementLines: 2
    bugprone-unsafe-functions.ReportMoreUnsafeFunctions: true
    bugprone-unused-return-value.CheckedReturnTypes: ::std::error_code;::std::error_condition;::std::errc;::std::expected
    misc-include-cleaner.IgnoreHeaders: '.*/(detail|impl)/.*'

HeaderFilterRegex: '^.*/(src|unittests)/.*\.(h|hpp)$'
WarningsAsErrors: '*'
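To apply these checks locally, one can mirror the CI invocation that appears in clang-tidy.yml further down; a minimal sketch, assuming clang-tidy 17 and a build directory containing a compile_commands.json:

```bash
# Run the configured checks over the whole compilation database.
# CI runs the same command with -fix; the -j count here is a local choice.
run-clang-tidy-17 -p build -j "$(nproc)" -quiet
```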
.clangd (new file, 5 lines)

@@ -0,0 +1,5 @@
Diagnostics:
  UnusedIncludes: Strict
  MissingIncludes: Strict
Includes:
  IgnoreHeader: ".*/(detail|impl)/.*"
.codecov.yml (new file, 11 lines)

@@ -0,0 +1,11 @@
coverage:
  status:
    project:
      default:
        target: 50%
        threshold: 2%

    patch:
      default:
        target: 20% # Need to bump this number https://docs.codecov.com/docs/commit-status#patch-status
        threshold: 2%
.githooks/ensure_release_tag (deleted file)

@@ -1,20 +0,0 @@
#!/bin/bash

# Pushing a release branch requires an annotated tag at the released commit
branch=$(git rev-parse --abbrev-ref HEAD)

if [[ $branch =~ master ]]; then
    # check if HEAD commit is tagged
    if ! git describe --exact-match HEAD; then
        echo "Commits to master must be tagged"
        exit 1
    fi
elif [[ $branch =~ release/* ]]; then
    IFS=/ read -r branch rel_ver <<< ${branch}
    tag=$(git describe --tags --abbrev=0)
    if [[ "${rel_ver}" != "${tag}" ]]; then
        echo "release/${rel_ver} branches must have annotated tag ${rel_ver}"
        echo "git tag -am\"${rel_ver}\" ${rel_ver}"
        exit 1
    fi
fi
.githooks/pre-commit

@@ -4,10 +4,61 @@ exec 1>&2

# paths to check and re-format
sources="src unittests"
formatter="clang-format-11 -i"
formatter="clang-format -i"
version=$($formatter --version | grep -o '[0-9\.]*')

if [[ "17.0.0" > "$version" ]]; then
    cat <<EOF

ERROR
-----------------------------------------------------------------------------
A minimum of version 17 of `which clang-format` is required.
Your version is $version.
Please fix paths and run again.
-----------------------------------------------------------------------------

EOF
    exit 3
fi

# check there is no .h headers, only .hpp
wrong_headers=$(find $sources -name "*.h" | sed 's/^/ - /')
if [[ ! -z "$wrong_headers" ]]; then
    cat <<EOF

ERROR
-----------------------------------------------------------------------------
Found .h headers in the source code. Please rename them to .hpp:

$wrong_headers
-----------------------------------------------------------------------------

EOF
    exit 2
fi

function grep_code {
    grep -l "${1}" ${sources} -r --include \*.hpp --include \*.cpp
}

if [[ "$OSTYPE" == "darwin"* ]]; then
    # make all includes to be <...> style
    grep_code '#include ".*"' | xargs sed -i '' -E 's|#include "(.*)"|#include <\1>|g'

    # make local includes to be "..." style
    main_src_dirs=$(find ./src -maxdepth 1 -type d -exec basename {} \; | tr '\n' '|' | sed 's/|$//' | sed 's/|/\\|/g')
    grep_code "#include <\($main_src_dirs\)/.*>" | xargs sed -i '' -E "s|#include <(($main_src_dirs)/.*)>|#include \"\1\"|g"
else
    # make all includes to be <...> style
    grep_code '#include ".*"' | xargs sed -i -E 's|#include "(.*)"|#include <\1>|g'

    # make local includes to be "..." style
    main_src_dirs=$(find ./src -type d -maxdepth 1 -exec basename {} \; | paste -sd '|' | sed 's/|/\\|/g')
    grep_code "#include <\($main_src_dirs\)/.*>" | xargs sed -i -E "s|#include <(($main_src_dirs)/.*)>|#include \"\1\"|g"
fi

first=$(git diff $sources)
find $sources -type f \( -name '*.cpp' -o -name '*.h' -o -name '*.ipp' \) -print0 | xargs -0 $formatter
find $sources -type f \( -name '*.cpp' -o -name '*.hpp' -o -name '*.ipp' \) -print0 | xargs -0 $formatter
second=$(git diff $sources)
changes=$(diff <(echo "$first") <(echo "$second") | wc -l | sed -e 's/^[[:space:]]*//')

@@ -24,4 +75,3 @@ EOF
    exit 1
fi
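The two sed passes in the hook first widen every quoted include to angle brackets, then narrow includes of top-level src/ directories back to quotes. A minimal round-trip sketch, assuming a hypothetical top-level directory named util (real names come from the find over ./src):

```bash
# Hypothetical demo of the hook's include rewriting ("util" is an assumed dir name).
echo '#include "util/Assert.hpp"' | sed -E 's|#include "(.*)"|#include <\1>|g'
# -> #include <util/Assert.hpp>
echo '#include <util/Assert.hpp>' | sed -E 's|#include <((util)/.*)>|#include "\1"|g'
# -> #include "util/Assert.hpp"
```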
.github/actions/build_clio/action.yml (32 changed lines)

@@ -1,36 +1,18 @@
name: Build clio
description: Build clio in build directory
inputs:
  conan_profile:
    description: Conan profile name
    required: true
    default: default
  conan_cache_hit:
    description: Whether conan cache has been downloaded
    required: true
  target:
    description: Build target name
    default: all
runs:
  using: composite
  steps:
    - name: Get number of threads on mac
      id: mac_threads
      if: ${{ runner.os == 'macOS' }}
      shell: bash
      run: echo "num=$(($(sysctl -n hw.logicalcpu) - 2))" >> $GITHUB_OUTPUT

    - name: Get number of threads on Linux
      id: linux_threads
      if: ${{ runner.os == 'Linux' }}
      shell: bash
      run: echo "num=$(($(nproc) - 2))" >> $GITHUB_OUTPUT
    - name: Get number of threads
      uses: ./.github/actions/get_number_of_threads
      id: number_of_threads

    - name: Build Clio
      shell: bash
      env:
        BUILD_OPTION: "${{ inputs.conan_cache_hit == 'true' && 'missing' || '' }}"
      run: |
        mkdir -p build
        cd build
        threads_num=${{ steps.mac_threads.outputs.num || steps.linux_threads.outputs.num }}
        conan install .. -of . -b $BUILD_OPTION -s build_type=Release -o clio:tests=True --profile ${{ inputs.conan_profile }}
        cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release .. -G Ninja
        cmake --build . --parallel $threads_num
        cmake --build . --parallel ${{ steps.number_of_threads.outputs.threads_number }} --target ${{ inputs.target }}
.github/actions/clang_format/action.yml (29 changed lines)

@@ -1,11 +1,27 @@
name: Check format
description: Check format using clang-format-11
description: Check format using clang-format-17
runs:
  using: composite
  steps:
    # Github's ubuntu-20.04 image already has clang-format-11 installed
    - run: |
        find src unittests -type f \( -name '*.cpp' -o -name '*.h' -o -name '*.ipp' \) -print0 | xargs -0 clang-format-11 -i
    - name: Add llvm repo
      run: |
        echo 'deb http://apt.llvm.org/focal/ llvm-toolchain-focal-17 main' | sudo tee -a /etc/apt/sources.list
        wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
      shell: bash

    - name: Install packages
      run: |
        sudo apt update -qq
        sudo apt install -y jq clang-format-17
        sudo rm /usr/bin/clang-format
        sudo ln -s /usr/bin/clang-format-17 /usr/bin/clang-format
      shell: bash

    - name: Run formatter
      continue-on-error: true
      id: run_formatter
      run: |
        ./.githooks/pre-commit
      shell: bash

    - name: Check for differences
@@ -13,3 +29,8 @@ runs:
      shell: bash
      run: |
        git diff --color --exit-code | tee "clang-format.patch"

    - name: Fail job
      if: ${{ steps.run_formatter.outcome != 'success' }}
      shell: bash
      run: exit 1
.github/actions/code_coverage/action.yml (new file, 33 lines)

@@ -0,0 +1,33 @@
name: Generate code coverage report
description: Run tests, generate code coverage report and upload it to codecov.io
runs:
  using: composite
  steps:
    - name: Run tests
      shell: bash
      run: |
        build/clio_tests --backend_host=scylladb

    - name: Run gcovr
      shell: bash
      run: |
        gcovr -e unittests --xml build/coverage_report.xml -j8 --exclude-throw-branches

    - name: Archive coverage report
      uses: actions/upload-artifact@v3
      with:
        name: coverage-report.xml
        path: build/coverage_report.xml
        retention-days: 30

    - name: Upload coverage report
      uses: wandalen/wretry.action@v1.3.0
      with:
        action: codecov/codecov-action@v3
        with: |
          files: build/coverage_report.xml
          fail_ci_if_error: true
          verbose: true
          token: ${{ env.CODECOV_TOKEN }}
        attempt_limit: 5
        attempt_delay: 10000
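The same gcovr invocation can produce a human-readable report locally; a sketch, assuming the coverage-instrumented test run has already populated build/ and that gcovr's standard --html-details output is wanted instead of the Cobertura XML uploaded to codecov:

```bash
# Local variant of the action's gcovr step: HTML report instead of XML.
gcovr -e unittests --exclude-throw-branches -j8 --html-details build/coverage_report.html
```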
.github/actions/generate/action.yml (new file, 41 lines)

@@ -0,0 +1,41 @@
name: Run conan and cmake
description: Run conan and cmake
inputs:
  conan_profile:
    description: Conan profile name
    required: true
  conan_cache_hit:
    description: Whether conan cache has been downloaded
    required: true
    default: 'false'
  build_type:
    description: Build type for third-party libraries and clio. Could be 'Release', 'Debug'
    required: true
    default: 'Release'
  code_coverage:
    description: Whether conan's coverage option should be on or not
    required: true
    default: 'false'
runs:
  using: composite
  steps:
    - name: Create build directory
      shell: bash
      run: mkdir -p build

    - name: Run conan
      shell: bash
      env:
        BUILD_OPTION: "${{ inputs.conan_cache_hit == 'true' && 'missing' || '' }}"
        CODE_COVERAGE: "${{ inputs.code_coverage == 'true' && 'True' || 'False' }}"
      run: |
        cd build
        conan install .. -of . -b $BUILD_OPTION -s build_type=${{ inputs.build_type }} -o clio:tests=True -o clio:lint=False -o clio:coverage="${CODE_COVERAGE}" --profile ${{ inputs.conan_profile }}

    - name: Run cmake
      shell: bash
      env:
        BUILD_TYPE: "${{ inputs.build_type }}"
      run: |
        cd build
        cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=${{ inputs.build_type }} ${{ inputs.extra_cmake_args }} .. -G Ninja
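Outside CI the generate step reduces to the same two commands; a sketch for a Release build with coverage off, assuming the conan profile is named "default":

```bash
# Local equivalent of the generate action (profile name "default" is an assumption).
mkdir -p build && cd build
conan install .. -of . -b missing -s build_type=Release \
    -o clio:tests=True -o clio:lint=False -o clio:coverage=False --profile default
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
    -DCMAKE_BUILD_TYPE=Release .. -G Ninja
```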
.github/actions/get_number_of_threads/action.yml (new file, 26 lines)

@@ -0,0 +1,26 @@
name: Get number of threads
description: Determines number of threads to use on macOS and Linux
outputs:
  threads_number:
    description: Number of threads to use
    value: ${{ steps.number_of_threads_export.outputs.num }}
runs:
  using: composite
  steps:
    - name: Get number of threads on mac
      id: mac_threads
      if: ${{ runner.os == 'macOS' }}
      shell: bash
      run: echo "num=$(($(sysctl -n hw.logicalcpu) - 2))" >> $GITHUB_OUTPUT

    - name: Get number of threads on Linux
      id: linux_threads
      if: ${{ runner.os == 'Linux' }}
      shell: bash
      run: echo "num=$(($(nproc) - 2))" >> $GITHUB_OUTPUT

    - name: Export output variable
      shell: bash
      id: number_of_threads_export
      run: |
        echo "num=${{ steps.mac_threads.outputs.num || steps.linux_threads.outputs.num }}" >> $GITHUB_OUTPUT
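Collapsed into one script, the action's heuristic (logical CPUs minus two, leaving headroom for the runner itself) looks like this:

```bash
# Same thread-count heuristic as the action, for local use.
if [[ "$OSTYPE" == "darwin"* ]]; then
    threads_number=$(($(sysctl -n hw.logicalcpu) - 2))
else
    threads_number=$(($(nproc) - 2))
fi
echo "threads_number=${threads_number}"
```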
.github/actions/prepare_runner/action.yml (new file, 47 lines)

@@ -0,0 +1,47 @@
name: Prepare runner
description: Install packages, set environment variables, create directories
inputs:
  disable_ccache:
    description: Whether ccache should be disabled
    required: true
runs:
  using: composite
  steps:
    - name: Install packages on mac
      if: ${{ runner.os == 'macOS' }}
      shell: bash
      run: |
        brew install llvm@14 pkg-config ninja bison cmake ccache jq gh

    - name: Fix git permissions on Linux
      if: ${{ runner.os == 'Linux' }}
      shell: bash
      run: git config --global --add safe.directory $PWD

    - name: Set env variables for macOS
      if: ${{ runner.os == 'macOS' }}
      shell: bash
      run: |
        echo "CCACHE_DIR=${{ github.workspace }}/.ccache" >> $GITHUB_ENV
        echo "CONAN_USER_HOME=${{ github.workspace }}" >> $GITHUB_ENV

    - name: Set env variables for Linux
      if: ${{ runner.os == 'Linux' }}
      shell: bash
      run: |
        echo "CCACHE_DIR=/root/.ccache" >> $GITHUB_ENV
        echo "CONAN_USER_HOME=/root/" >> $GITHUB_ENV

    - name: Set CCACHE_DISABLE=1
      if: ${{ inputs.disable_ccache == 'true' }}
      shell: bash
      run: |
        echo "CCACHE_DISABLE=1" >> $GITHUB_ENV

    - name: Create directories
      shell: bash
      run: |
        mkdir -p $CCACHE_DIR
        mkdir -p $CONAN_USER_HOME/.conan
.github/actions/restore_cache/action.yml (15 changed lines)

@@ -7,6 +7,14 @@ inputs:
  ccache_dir:
    description: Path to .ccache directory
    required: true
  build_type:
    description: Current build type (e.g. Release, Debug)
    required: true
    default: Release
  code_coverage:
    description: Whether code coverage is on
    required: true
    default: 'false'
outputs:
  conan_hash:
    description: Hash to use as a part of conan cache key
@@ -28,7 +36,7 @@ runs:
      id: conan_hash
      shell: bash
      run: |
        conan info . -j info.json
        conan info . -j info.json -o clio:tests=True
        packages_info=$(cat info.json | jq '.[] | "\(.display_name): \(.id)"' | grep -v 'clio')
        echo "$packages_info"
        hash=$(echo "$packages_info" | shasum -a 256 | cut -d ' ' -f 1)
@@ -40,11 +48,12 @@ runs:
      id: conan_cache
      with:
        path: ${{ inputs.conan_dir }}/data
        key: clio-conan_data-${{ runner.os }}-develop-${{ steps.conan_hash.outputs.hash }}
        key: clio-conan_data-${{ runner.os }}-${{ inputs.build_type }}-develop-${{ steps.conan_hash.outputs.hash }}

    - name: Restore ccache cache
      uses: actions/cache/restore@v3
      id: ccache_cache
      if: ${{ env.CCACHE_DISABLE != '1' }}
      with:
        path: ${{ inputs.ccache_dir }}
        key: clio-ccache-${{ runner.os }}-develop-${{ steps.git_common_ancestor.outputs.commit }}
        key: clio-ccache-${{ runner.os }}-${{ inputs.build_type }}${{ inputs.code_coverage == 'true' && '-code_coverage' || '' }}-develop-${{ steps.git_common_ancestor.outputs.commit }}
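The conan part of the cache key is a digest of the resolved dependency graph, so any dependency bump invalidates it. The hash can be reproduced locally with the action's own pipeline; the key suffix below instantiates runner.os and build_type with example values:

```bash
# Reproduce the conan cache-key hash locally (mirrors the action's commands).
conan info . -j info.json -o clio:tests=True
packages_info=$(cat info.json | jq '.[] | "\(.display_name): \(.id)"' | grep -v 'clio')
hash=$(echo "$packages_info" | shasum -a 256 | cut -d ' ' -f 1)
echo "clio-conan_data-Linux-Release-develop-${hash}"  # example OS/build_type
```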
.github/actions/save_cache/action.yml (16 changed lines)

@@ -16,6 +16,16 @@ inputs:
  ccache_cache_hit:
    description: Whether conan cache has been downloaded
    required: true
  ccache_cache_miss_rate:
    description: How many cache misses happened
  build_type:
    description: Current build type (e.g. Release, Debug)
    required: true
    default: Release
  code_coverage:
    description: Whether code coverage is on
    required: true
    default: 'false'
runs:
  using: composite
  steps:
@@ -34,13 +44,13 @@ runs:
      uses: actions/cache/save@v3
      with:
        path: ${{ inputs.conan_dir }}/data
        key: clio-conan_data-${{ runner.os }}-develop-${{ inputs.conan_hash }}
        key: clio-conan_data-${{ runner.os }}-${{ inputs.build_type }}-develop-${{ inputs.conan_hash }}

    - name: Save ccache cache
      if: ${{ inputs.ccache_cache_hit != 'true' }}
      if: ${{ inputs.ccache_cache_hit != 'true' || inputs.ccache_cache_miss_rate == '100.0' }}
      uses: actions/cache/save@v3
      with:
        path: ${{ inputs.ccache_dir }}
        key: clio-ccache-${{ runner.os }}-develop-${{ steps.git_common_ancestor.outputs.commit }}
        key: clio-ccache-${{ runner.os }}-${{ inputs.build_type }}${{ inputs.code_coverage == 'true' && '-code_coverage' || '' }}-develop-${{ steps.git_common_ancestor.outputs.commit }}
.github/actions/setup_conan/action.yml (3 changed lines)

@@ -31,9 +31,6 @@ runs:
      shell: bash
      id: conan_setup_linux
      run: |
        conan profile new default --detect
        conan profile update settings.compiler.cppstd=20 default
        conan profile update settings.compiler.libcxx=libstdc++11 default
        echo "created_conan_profile=default" >> $GITHUB_OUTPUT

    - name: Export output variable
.github/workflows/build.yml (189 changed lines)

@@ -1,4 +1,4 @@
name: Build Clio
name: Build
on:
  push:
    branches: [master, release/*, develop]
@@ -8,28 +8,54 @@ on:

jobs:
  lint:
    name: Lint
    name: Check format
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v3
      - uses: actions/checkout@v4
      - name: Run clang-format
        uses: ./.github/actions/clang_format

  build_mac:
    name: Build macOS
  build:
    name: Build
    needs: lint
    runs-on: [self-hosted, macOS]
    env:
      CCACHE_DIR: ${{ github.workspace }}/.ccache
      CONAN_USER_HOME: ${{ github.workspace }}
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
    strategy:
      fail-fast: false
      matrix:
        include:
          - os: heavy
            container:
              image: rippleci/clio_ci:latest
            build_type: Release
            code_coverage: false
          - os: heavy
            container:
              image: rippleci/clio_ci:latest
            build_type: Debug
            code_coverage: true
          # - os: macOS
          #   build_type: Release
          #   code_coverage: false
    runs-on: [self-hosted, "${{ matrix.os }}"]
    container: ${{ matrix.container }}

      - name: Install packages
        run: |
          brew install llvm@14 pkg-config ninja bison cmake ccache jq
    services:
      scylladb:
        image: ${{ (matrix.code_coverage) && 'scylladb/scylla' || '' }}
        options: >-
          --health-cmd "cqlsh -e 'describe cluster'"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5

    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Prepare runner
        uses: ./.github/actions/prepare_runner
        with:
          disable_ccache: false

      - name: Setup conan
        uses: ./.github/actions/setup_conan
@@ -41,83 +67,44 @@ jobs:
        with:
          conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
          ccache_dir: ${{ env.CCACHE_DIR }}
          build_type: ${{ matrix.build_type }}
          code_coverage: ${{ matrix.code_coverage }}

      - name: Build Clio
        uses: ./.github/actions/build_clio
      - name: Run conan and cmake
        uses: ./.github/actions/generate
        with:
          conan_profile: ${{ steps.conan.outputs.conan_profile }}
          conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}

      - name: Strip tests
        run: strip build/clio_tests

      - name: Upload clio_tests
        uses: actions/upload-artifact@v3
        with:
          name: clio_tests_mac
          path: build/clio_tests

      - name: Save cache
        uses: ./.github/actions/save_cache
        with:
          conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
          conan_hash: ${{ steps.restore_cache.outputs.conan_hash }}
          conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
          ccache_dir: ${{ env.CCACHE_DIR }}
          ccache_cache_hit: ${{ steps.restore_cache.outputs.ccache_cache_hit }}

  build_linux:
    name: Build linux
    needs: lint
    runs-on: [self-hosted, Linux]
    container:
      image: conanio/gcc11:1.61.0
      options: --user root
    env:
      CCACHE_DIR: /root/.ccache
      CONAN_USER_HOME: /root/
    steps:
      - name: Get Clio
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Install packages
        run: |
          apt update -qq
          apt install -y jq

      - name: Install ccache
        run: |
          wget https://github.com/ccache/ccache/releases/download/v4.8.3/ccache-4.8.3-linux-x86_64.tar.xz
          tar xf ./ccache-4.8.3-linux-x86_64.tar.xz
          mv ./ccache-4.8.3-linux-x86_64/ccache /usr/bin/ccache

      - name: Fix git permissions
        run: git config --global --add safe.directory $PWD

      - name: Setup conan
        uses: ./.github/actions/setup_conan

      - name: Restore cache
        uses: ./.github/actions/restore_cache
        id: restore_cache
        with:
          conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
          ccache_dir: ${{ env.CCACHE_DIR }}
          build_type: ${{ matrix.build_type }}
          code_coverage: ${{ matrix.code_coverage }}

      - name: Build Clio
        uses: ./.github/actions/build_clio
        with:
          conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}

      - name: Show ccache's statistics
        shell: bash
        id: ccache_stats
        run: |
          ccache -s > /tmp/ccache.stats
          miss_rate=$(cat /tmp/ccache.stats | grep 'Misses' | head -n1 | sed 's/.*(\(.*\)%).*/\1/')
          echo "miss_rate=${miss_rate}" >> $GITHUB_OUTPUT
          cat /tmp/ccache.stats

      - name: Strip tests
        if: ${{ !matrix.code_coverage }}
        run: strip build/clio_tests

      - name: Upload clio_tests
      - name: Upload clio_server
        uses: actions/upload-artifact@v3
        with:
          name: clio_tests_linux
          name: clio_server_${{ runner.os }}_${{ matrix.build_type }}
          path: build/clio_server

      - name: Upload clio_tests
        if: ${{ !matrix.code_coverage }}
        uses: actions/upload-artifact@v3
        with:
          name: clio_tests_${{ runner.os }}
          path: build/clio_tests

      - name: Save cache
@@ -128,26 +115,36 @@ jobs:
          conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
          ccache_dir: ${{ env.CCACHE_DIR }}
          ccache_cache_hit: ${{ steps.restore_cache.outputs.ccache_cache_hit }}
          ccache_cache_miss_rate: ${{ steps.ccache_stats.outputs.miss_rate }}
          build_type: ${{ matrix.build_type }}
          code_coverage: ${{ matrix.code_coverage }}

      # TODO: This is not a part of build process but it is the easiest way to do it here.
      # It will be refactored in https://github.com/XRPLF/clio/issues/1075
      - name: Run code coverage
        if: ${{ matrix.code_coverage }}
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
        uses: ./.github/actions/code_coverage

  test:
    name: Run Tests
    needs: build
    strategy:
      fail-fast: false
      matrix:
        include:
          - os: heavy
            container:
              image: rippleci/clio_ci:latest
          # - os: macOS
    runs-on: [self-hosted, "${{ matrix.os }}"]
    container: ${{ matrix.container }}

  test_mac:
    needs: build_mac
    runs-on: [self-hosted, macOS]
    steps:
      - uses: actions/download-artifact@v3
        with:
          name: clio_tests_mac
      - name: Run clio_tests
        run: |
          chmod +x ./clio_tests
          ./clio_tests --gtest_filter="-BackendCassandraBaseTest*:BackendCassandraTest*:BackendCassandraFactoryTestWithDB*"

  test_linux:
    needs: build_linux
    runs-on: [self-hosted, x-heavy]
    steps:
      - uses: actions/download-artifact@v3
        with:
          name: clio_tests_linux
          name: clio_tests_${{ runner.os }}
      - name: Run clio_tests
        run: |
          chmod +x ./clio_tests
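The coverage job's scylladb service container can be reproduced outside CI; a hypothetical sketch, assuming Docker and the default CQL port 9042 (in CI the tests reach the service by its hostname, hence --backend_host=scylladb above):

```bash
# Hypothetical local stand-in for the workflow's scylladb service container.
docker run -d --name scylla -p 9042:9042 scylladb/scylla
# Same readiness probe as the service's health-cmd:
until docker exec scylla cqlsh -e 'describe cluster' >/dev/null 2>&1; do sleep 10; done
build/clio_tests --backend_host=127.0.0.1
```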
.github/workflows/clang-tidy.yml (new file, 116 lines)

@@ -0,0 +1,116 @@
name: Clang-tidy check
on:
  schedule:
    - cron: "0 6 * * 1-5"
  workflow_dispatch:
  pull_request:
    branches: [develop]
    paths:
      - .clang_tidy
      - .github/workflows/clang-tidy.yml
  workflow_call:

jobs:
  clang_tidy:
    runs-on: [self-hosted, Linux]
    container:
      image: rippleci/clio_ci:latest
    permissions:
      contents: write
      issues: write
      pull-requests: write

    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Prepare runner
        uses: ./.github/actions/prepare_runner
        with:
          disable_ccache: true

      - name: Setup conan
        uses: ./.github/actions/setup_conan
        id: conan

      - name: Restore cache
        uses: ./.github/actions/restore_cache
        id: restore_cache
        with:
          conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
          ccache_dir: ${{ env.CCACHE_DIR }}

      - name: Run conan and cmake
        uses: ./.github/actions/generate
        with:
          conan_profile: ${{ steps.conan.outputs.conan_profile }}
          conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
          build_type: Release

      - name: Get number of threads
        uses: ./.github/actions/get_number_of_threads
        id: number_of_threads

      - name: Run clang-tidy
        continue-on-error: true
        shell: bash
        id: run_clang_tidy
        run: |
          run-clang-tidy-17 -p build -j ${{ steps.number_of_threads.outputs.threads_number }} -fix -quiet 1>output.txt

      - name: Run pre-commit hook
        if: ${{ steps.run_clang_tidy.outcome != 'success' }}
        continue-on-error: true
        shell: bash
        run: ./.githooks/pre-commit

      - name: Print issues found
        if: ${{ steps.run_clang_tidy.outcome != 'success' }}
        shell: bash
        run: |
          sed -i '/error\||/!d' ./output.txt
          cat output.txt
          rm output.txt

      - name: Create an issue
        if: ${{ steps.run_clang_tidy.outcome != 'success' }}
        id: create_issue
        shell: bash
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
          echo -e 'Clang-tidy found issues in the code:\n' > issue.md
          echo -e "List of the issues found: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}/" >> issue.md
          gh issue create --assignee 'cindyyan317,godexsoft,kuznetsss' --label bug --title 'Clang-tidy found bugs in code🐛' --body-file ./issue.md > create_issue.log
          created_issue=$(cat create_issue.log | sed 's|.*/||')
          echo "created_issue=$created_issue" >> $GITHUB_OUTPUT
          rm create_issue.log issue.md

      - uses: crazy-max/ghaction-import-gpg@v5
        with:
          gpg_private_key: ${{ secrets.ACTIONS_GPG_PRIVATE_KEY }}
          passphrase: ${{ secrets.ACTIONS_GPG_PASSPHRASE }}
          git_user_signingkey: true
          git_commit_gpgsign: true

      - name: Create PR with fixes
        if: ${{ steps.run_clang_tidy.outcome != 'success' }}
        uses: peter-evans/create-pull-request@v5
        env:
          GH_REPO: ${{ github.repository }}
          GH_TOKEN: ${{ github.token }}
        with:
          commit-message: "[CI] clang-tidy auto fixes"
          committer: Clio CI <skuznetsov@ripple.com>
          branch: "clang_tidy/autofix"
          branch-suffix: timestamp
          delete-branch: true
          title: "[CI] clang-tidy auto fixes"
          body: "Fixes #${{ steps.create_issue.outputs.created_issue }}. Please review and commit clang-tidy fixes."
          reviewers: "cindyyan317,godexsoft,kuznetsss"

      - name: Fail the job
        if: ${{ steps.run_clang_tidy.outcome != 'success' }}
        shell: bash
        run: exit 1
.github/workflows/clang-tidy_on_fix_merged.yml (new file, 29 lines)

@@ -0,0 +1,29 @@
name: Restart clang-tidy workflow
on:
  push:
    branches: [develop]
  workflow_dispatch:

jobs:
  restart_clang_tidy:
    runs-on: ubuntu-20.04

    permissions:
      actions: write

    steps:
      - uses: actions/checkout@v4
      - name: Check last commit matches clang-tidy auto fixes
        id: check
        shell: bash
        run: |
          passed=$(if [[ $(git log -1 --pretty=format:%s | grep '\[CI\] clang-tidy auto fixes') ]]; then echo 'true' ; else echo 'false' ; fi)
          echo "passed=$passed" >> $GITHUB_OUTPUT

      - name: Run clang-tidy workflow
        if: ${{ contains(steps.check.outputs.passed, 'true') }}
        shell: bash
        env:
          GH_TOKEN: ${{ github.token }}
          GH_REPO: ${{ github.repository }}
        run: gh workflow run clang-tidy.yml
.github/workflows/nightly.yml (new file, 138 lines)

@@ -0,0 +1,138 @@
name: Nightly release
on:
  schedule:
    - cron: '0 5 * * 1-5'
  workflow_dispatch:

jobs:
  build:
    name: Build clio
    strategy:
      fail-fast: false
      matrix:
        include:
          # - os: macOS
          #   build_type: Release
          - os: heavy
            build_type: Release
            container:
              image: rippleci/clio_ci:latest
          - os: heavy
            build_type: Debug
            container:
              image: rippleci/clio_ci:latest
    runs-on: [self-hosted, "${{ matrix.os }}"]
    container: ${{ matrix.container }}

    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Prepare runner
        uses: ./.github/actions/prepare_runner
        with:
          disable_ccache: true

      - name: Setup conan
        uses: ./.github/actions/setup_conan
        id: conan

      - name: Run conan and cmake
        uses: ./.github/actions/generate
        with:
          conan_profile: ${{ steps.conan.outputs.conan_profile }}
          conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
          build_type: ${{ matrix.build_type }}

      - name: Build Clio
        uses: ./.github/actions/build_clio

      - name: Strip tests
        run: strip build/clio_tests

      - name: Upload clio_tests
        uses: actions/upload-artifact@v3
        with:
          name: clio_tests_${{ runner.os }}_${{ matrix.build_type }}
          path: build/clio_tests

      - name: Compress clio_server
        shell: bash
        run: |
          cd build
          tar czf ./clio_server_${{ runner.os }}_${{ matrix.build_type }}.tar.gz ./clio_server

      - name: Upload clio_server
        uses: actions/upload-artifact@v3
        with:
          name: clio_server_${{ runner.os }}_${{ matrix.build_type }}
          path: build/clio_server_${{ runner.os }}_${{ matrix.build_type }}.tar.gz


  run_tests:
    needs: build
    strategy:
      fail-fast: false
      matrix:
        include:
          # - os: macOS
          #   build_type: Release
          - os: heavy
            build_type: Release
          - os: heavy
            build_type: Debug
    runs-on: [self-hosted, "${{ matrix.os }}"]

    steps:
      - uses: actions/download-artifact@v3
        with:
          name: clio_tests_${{ runner.os }}_${{ matrix.build_type }}

      - name: Run clio_tests
        run: |
          chmod +x ./clio_tests
          ./clio_tests --gtest_filter="-BackendCassandraBaseTest*:BackendCassandraTest*:BackendCassandraFactoryTestWithDB*"

  nightly_release:
    needs: run_tests
    runs-on: ubuntu-20.04
    env:
      GH_REPO: ${{ github.repository }}
      GH_TOKEN: ${{ github.token }}
    permissions:
      contents: write
    steps:
      - uses: actions/checkout@v4

      - uses: actions/download-artifact@v3
        with:
          path: nightly_release

      - name: Prepare files
        shell: bash
        run: |
          cp ${{ github.workspace }}/.github/workflows/nightly_notes.md "${RUNNER_TEMP}/nightly_notes.md"
          cd nightly_release
          rm -r clio_tests*
          for d in $(ls); do
            archive_name=$(ls $d)
            mv ${d}/${archive_name} ./
            rm -r $d
            sha256sum ./$archive_name > ./${archive_name}.sha256sum
            cat ./$archive_name.sha256sum >> "${RUNNER_TEMP}/nightly_notes.md"
          done
          echo '```' >> "${RUNNER_TEMP}/nightly_notes.md"

      - name: Remove current nightly release and nightly tag
        shell: bash
        run: |
          gh release delete nightly --yes || true
          git push origin :nightly || true

      - name: Publish nightly release
        shell: bash
        run: |
          gh release create nightly --prerelease --title "Clio development (nightly) build" \
            --target $GITHUB_SHA --notes-file "${RUNNER_TEMP}/nightly_notes.md" \
            ./nightly_release/clio_server*
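Each published archive ships with a .sha256sum generated as above, so a downloaded nightly can be verified before unpacking; a sketch using the Linux/Release artifact name as an example:

```bash
# Verify and unpack a nightly artifact (names follow the workflow's
# clio_server_<OS>_<BuildType>.tar.gz pattern).
sha256sum --check clio_server_Linux_Release.tar.gz.sha256sum
tar xzf clio_server_Linux_Release.tar.gz
```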
.github/workflows/nightly_notes.md (new file, 6 lines)

@@ -0,0 +1,6 @@
> **Note:** Please remember that this is a development release and it is not recommended for production use.

Changelog (including previous releases): https://github.com/XRPLF/clio/commits/nightly

## SHA256 checksums
```
.github/workflows/update_docker_ci.yml (new file, 40 lines)

@@ -0,0 +1,40 @@
name: Update CI docker image
on:
  push:
    branches: [develop]
    paths:
      - 'docker/ci/**'
      - .github/workflows/update_docker_ci.yml
  workflow_dispatch:

jobs:
  build_and_push:
    name: Build and push docker image
    runs-on: ubuntu-20.04
    steps:
      - name: Login to DockerHub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USER }}
          password: ${{ secrets.DOCKERHUB_PW }}

      - uses: actions/checkout@v4
      - uses: docker/setup-qemu-action@v3
      - uses: docker/setup-buildx-action@v3
      - uses: docker/metadata-action@v5
        id: meta
        with:
          images: rippleci/clio_ci
          tags: |
            type=raw,value=latest
            type=raw,value=gcc_11
            type=raw,value=${{ env.GITHUB_SHA }}

      - name: Build and push
        uses: docker/build-push-action@v5
        with:
          context: ${{ github.workspace }}/docker/ci
          platforms: linux/amd64,linux/arm64
          push: true
          tags: ${{ steps.meta.outputs.tags }}
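The same multi-arch image can be built locally with buildx; a sketch mirroring the workflow's context and platforms (--push is omitted here, since pushing needs DockerHub credentials):

```bash
# Local multi-arch build of the CI image (results stay in the buildx cache without --push).
docker buildx build --platform linux/amd64,linux/arm64 \
    -t rippleci/clio_ci:latest docker/ci
```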
.gitignore (1 changed line)

@@ -1,5 +1,6 @@
*clio*.log
/build*/
.devcontainer
.build
.cache
.vscode
@@ -17,7 +17,9 @@
*/
//==============================================================================

#include <main/Build.h>
#include "main/Build.hpp"

#include <string>

namespace Build {
static constexpr char versionString[] = "@VERSION@";
CMake/ClangTidy.cmake (new file, 31 lines)

@@ -0,0 +1,31 @@
if (lint)

  # Find clang-tidy binary
  if (DEFINED ENV{CLIO_CLANG_TIDY_BIN})
    set (_CLANG_TIDY_BIN $ENV{CLIO_CLANG_TIDY_BIN})
    if ((NOT EXISTS ${_CLANG_TIDY_BIN}) OR IS_DIRECTORY ${_CLANG_TIDY_BIN})
      message (FATAL_ERROR "$ENV{CLIO_CLANG_TIDY_BIN} no such file. Check CLIO_CLANG_TIDY_BIN env variable")
    endif ()
    message (STATUS "Using clang-tidy from CLIO_CLANG_TIDY_BIN")
  else ()
    find_program (_CLANG_TIDY_BIN NAMES "clang-tidy-17" "clang-tidy" REQUIRED)
  endif ()

  if (NOT _CLANG_TIDY_BIN)
    message (FATAL_ERROR
      "clang-tidy binary not found. Please set the CLIO_CLANG_TIDY_BIN environment variable or install clang-tidy.")
  endif ()

  # Support for https://github.com/matus-chochlik/ctcache
  find_program (CLANG_TIDY_CACHE_PATH NAMES "clang-tidy-cache")
  if (CLANG_TIDY_CACHE_PATH)
    set (_CLANG_TIDY_CMD
      "${CLANG_TIDY_CACHE_PATH};${_CLANG_TIDY_BIN}"
      CACHE STRING "A combined command to run clang-tidy with caching wrapper")
  else ()
    set(_CLANG_TIDY_CMD "${_CLANG_TIDY_BIN}")
  endif ()

  set (CMAKE_CXX_CLANG_TIDY "${_CLANG_TIDY_CMD};--quiet")
  message (STATUS "Using clang-tidy: ${CMAKE_CXX_CLANG_TIDY}")
endif ()
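In practice the override works like this; a sketch, assuming an LLVM 17 install path (any clang-tidy at version 17 or newer works) and that the lint option reaches CMake as a cache variable:

```bash
# Point the linted build at a specific clang-tidy binary (the path is an assumption).
export CLIO_CLANG_TIDY_BIN=/usr/lib/llvm-17/bin/clang-tidy
cd build && cmake -Dlint=ON .. -G Ninja && cmake --build .
```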
440
CMake/CodeCoverage.cmake
Normal file
440
CMake/CodeCoverage.cmake
Normal file
@@ -0,0 +1,440 @@
|
||||
# Copyright (c) 2012 - 2017, Lars Bilke
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without modification,
|
||||
# are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its contributors
|
||||
# may be used to endorse or promote products derived from this software without
|
||||
# specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
|
||||
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
# CHANGES:
|
||||
#
|
||||
# 2012-01-31, Lars Bilke
|
||||
# - Enable Code Coverage
|
||||
#
|
||||
# 2013-09-17, Joakim Söderberg
|
||||
# - Added support for Clang.
|
||||
# - Some additional usage instructions.
|
||||
#
|
||||
# 2016-02-03, Lars Bilke
|
||||
# - Refactored functions to use named parameters
|
||||
#
|
||||
# 2017-06-02, Lars Bilke
|
||||
# - Merged with modified version from github.com/ufz/ogs
|
||||
#
|
||||
# 2019-05-06, Anatolii Kurotych
|
||||
# - Remove unnecessary --coverage flag
|
||||
#
|
||||
# 2019-12-13, FeRD (Frank Dana)
|
||||
# - Deprecate COVERAGE_LCOVR_EXCLUDES and COVERAGE_GCOVR_EXCLUDES lists in favor
|
||||
# of tool-agnostic COVERAGE_EXCLUDES variable, or EXCLUDE setup arguments.
|
||||
# - CMake 3.4+: All excludes can be specified relative to BASE_DIRECTORY
|
||||
# - All setup functions: accept BASE_DIRECTORY, EXCLUDE list
|
||||
# - Set lcov basedir with -b argument
|
||||
# - Add automatic --demangle-cpp in lcovr, if 'c++filt' is available (can be
|
||||
# overridden with NO_DEMANGLE option in setup_target_for_coverage_lcovr().)
|
||||
# - Delete output dir, .info file on 'make clean'
|
||||
# - Remove Python detection, since version mismatches will break gcovr
|
||||
# - Minor cleanup (lowercase function names, update examples...)
|
||||
#
|
||||
# 2019-12-19, FeRD (Frank Dana)
|
||||
# - Rename Lcov outputs, make filtered file canonical, fix cleanup for targets
|
||||
#
|
||||
# 2020-01-19, Bob Apthorpe
|
||||
# - Added gfortran support
|
||||
#
|
||||
# 2020-02-17, FeRD (Frank Dana)
|
||||
# - Make all add_custom_target()s VERBATIM to auto-escape wildcard characters
|
||||
# in EXCLUDEs, and remove manual escaping from gcovr targets
|
||||
#
|
||||
# 2021-01-19, Robin Mueller
|
||||
# - Add CODE_COVERAGE_VERBOSE option which will allow to print out commands which are run
|
||||
# - Added the option for users to set the GCOVR_ADDITIONAL_ARGS variable to supply additional
|
||||
# flags to the gcovr command
|
||||
#
|
||||
# 2020-05-04, Mihchael Davis
|
||||
# - Add -fprofile-abs-path to make gcno files contain absolute paths
|
||||
# - Fix BASE_DIRECTORY not working when defined
|
||||
# - Change BYPRODUCT from folder to index.html to stop ninja from complaining about double defines
|
||||
#
|
||||
# 2021-05-10, Martin Stump
|
||||
# - Check if the generator is multi-config before warning about non-Debug builds
|
||||
#
|
||||
# 2022-02-22, Marko Wehle
|
||||
# - Change gcovr output from -o <filename> for --xml <filename> and --html <filename> output respectively.
|
||||
# This will allow for Multiple Output Formats at the same time by making use of GCOVR_ADDITIONAL_ARGS, e.g. GCOVR_ADDITIONAL_ARGS "--txt".
|
||||
#
|
||||
# 2022-09-28, Sebastian Mueller
|
||||
# - fix append_coverage_compiler_flags_to_target to correctly add flags
|
||||
# - replace "-fprofile-arcs -ftest-coverage" with "--coverage" (equivalent)
|
||||
#
|
||||
# 2023-12-15, Bronek Kozicki
|
||||
# - remove setup_target_for_coverage_lcov (slow) and setup_target_for_coverage_fastcov (no support for Clang)
|
||||
# - fix Clang support by adding find_program( ... llvm-cov )
|
||||
# - add Apple Clang support by adding execute_process( COMMAND xcrun -f llvm-cov ... )
|
||||
# - add CODE_COVERAGE_GCOV_TOOL to explicitly select gcov tool and disable find_program
|
||||
# - replace both functions setup_target_for_coverage_gcovr_* with single setup_target_for_coverage_gcovr
|
||||
# - add support for all gcovr output formats
|
||||
#
|
||||
# USAGE:
|
||||
#
|
||||
# 1. Copy this file into your cmake modules path.
|
||||
#
|
||||
# 2. Add the following line to your CMakeLists.txt (best inside an if-condition
|
||||
# using a CMake option() to enable it just optionally):
|
||||
# include(CodeCoverage)
|
||||
#
|
||||
# 3. Append necessary compiler flags for all supported source files:
|
||||
# append_coverage_compiler_flags()
|
||||
# Or for specific target:
|
||||
# append_coverage_compiler_flags_to_target(YOUR_TARGET_NAME)
|
||||
#
|
||||
# 3.a (OPTIONAL) Set appropriate optimization flags, e.g. -O0, -O1 or -Og
|
||||
#
|
||||
# 4. If you need to exclude additional directories from the report, specify them
|
||||
# using full paths in the COVERAGE_EXCLUDES variable before calling
|
||||
# setup_target_for_coverage_*().
|
||||
# Example:
|
||||
# set(COVERAGE_EXCLUDES
|
||||
# '${PROJECT_SOURCE_DIR}/src/dir1/*'
|
||||
# '/path/to/my/src/dir2/*')
|
||||
# Or, use the EXCLUDE argument to setup_target_for_coverage_*().
|
||||
# Example:
|
||||
# setup_target_for_coverage_gcovr(
|
||||
# NAME coverage
|
||||
# EXECUTABLE testrunner
|
||||
# EXCLUDE "${PROJECT_SOURCE_DIR}/src/dir1/*" "/path/to/my/src/dir2/*")
|
||||
#
|
||||
# 4.a NOTE: With CMake 3.4+, COVERAGE_EXCLUDES or EXCLUDE can also be set
|
||||
# relative to the BASE_DIRECTORY (default: PROJECT_SOURCE_DIR)
|
||||
# Example:
|
||||
# set(COVERAGE_EXCLUDES "dir1/*")
|
||||
# setup_target_for_coverage_gcovr(
|
||||
# NAME coverage
|
||||
# EXECUTABLE testrunner
|
||||
# FORMAT html-details
|
||||
# BASE_DIRECTORY "${PROJECT_SOURCE_DIR}/src"
|
||||
# EXCLUDE "dir2/*")
|
||||
#
|
||||
# 4.b If you need to pass specific options to gcovr, specify them in
|
||||
# GCOVR_ADDITIONAL_ARGS variable.
|
||||
# Example:
|
||||
# set (GCOVR_ADDITIONAL_ARGS --exclude-throw-branches --exclude-noncode-lines -s)
|
||||
# setup_target_for_coverage_gcovr(
|
||||
# NAME coverage
|
||||
# EXECUTABLE testrunner
|
||||
# EXCLUDE "src/dir1" "src/dir2")
|
||||
#
|
||||
# 5. Use the functions described below to create a custom make target which
|
||||
# runs your test executable and produces a code coverage report.
|
||||
#
|
||||
# 6. Build a Debug build:
|
||||
# cmake -DCMAKE_BUILD_TYPE=Debug ..
|
||||
# make
|
||||
# make my_coverage_target
|
||||
|
||||
include(CMakeParseArguments)
|
||||
|
||||
option(CODE_COVERAGE_VERBOSE "Verbose information" FALSE)
|
||||
|
||||
# Check prereqs
|
||||
find_program( GCOVR_PATH gcovr PATHS ${CMAKE_SOURCE_DIR}/scripts/test)
|
||||
|
||||
if (DEFINED CODE_COVERAGE_GCOV_TOOL)
|
||||
set(GCOV_TOOL "${CODE_COVERAGE_GCOV_TOOL}")
|
||||
elseif (DEFINED ENV{CODE_COVERAGE_GCOV_TOOL})
|
||||
set(GCOV_TOOL "$ENV{CODE_COVERAGE_GCOV_TOOL}")
|
||||
elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang")
|
||||
if (APPLE)
|
||||
execute_process( COMMAND xcrun -f llvm-cov
|
||||
OUTPUT_VARIABLE LLVMCOV_PATH
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE
|
||||
)
|
||||
else()
|
||||
find_program( LLVMCOV_PATH llvm-cov )
|
||||
endif()
|
||||
if(LLVMCOV_PATH)
|
||||
set(GCOV_TOOL "${LLVMCOV_PATH} gcov")
|
||||
endif()
|
||||
elseif ("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU")
|
||||
find_program( GCOV_PATH gcov )
|
||||
set(GCOV_TOOL "${GCOV_PATH}")
|
||||
endif()
|
||||
|
||||
# Check supported compiler (Clang, GNU and Flang)
get_property(LANGUAGES GLOBAL PROPERTY ENABLED_LANGUAGES)
foreach(LANG ${LANGUAGES})
    if("${CMAKE_${LANG}_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang")
        if("${CMAKE_${LANG}_COMPILER_VERSION}" VERSION_LESS 3)
            message(FATAL_ERROR "Clang version must be 3.0.0 or greater! Aborting...")
        endif()
    elseif(NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES "GNU"
           AND NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES "(LLVM)?[Ff]lang")
        message(FATAL_ERROR "Compiler is not GNU or Flang! Aborting...")
    endif()
endforeach()

set(COVERAGE_COMPILER_FLAGS "-g --coverage"
    CACHE INTERNAL "")
if(CMAKE_CXX_COMPILER_ID MATCHES "(GNU|Clang)")
    include(CheckCXXCompilerFlag)
    check_cxx_compiler_flag(-fprofile-abs-path HAVE_cxx_fprofile_abs_path)
    if(HAVE_cxx_fprofile_abs_path)
        set(COVERAGE_CXX_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-abs-path")
    endif()
    include(CheckCCompilerFlag)
    check_c_compiler_flag(-fprofile-abs-path HAVE_c_fprofile_abs_path)
    if(HAVE_c_fprofile_abs_path)
        set(COVERAGE_C_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-abs-path")
    endif()
endif()

set(CMAKE_Fortran_FLAGS_COVERAGE
    ${COVERAGE_COMPILER_FLAGS}
    CACHE STRING "Flags used by the Fortran compiler during coverage builds."
    FORCE )
set(CMAKE_CXX_FLAGS_COVERAGE
    ${COVERAGE_COMPILER_FLAGS}
    CACHE STRING "Flags used by the C++ compiler during coverage builds."
    FORCE )
set(CMAKE_C_FLAGS_COVERAGE
    ${COVERAGE_COMPILER_FLAGS}
    CACHE STRING "Flags used by the C compiler during coverage builds."
    FORCE )
set(CMAKE_EXE_LINKER_FLAGS_COVERAGE
    ""
    CACHE STRING "Flags used for linking binaries during coverage builds."
    FORCE )
set(CMAKE_SHARED_LINKER_FLAGS_COVERAGE
    ""
    CACHE STRING "Flags used by the shared libraries linker during coverage builds."
    FORCE )
mark_as_advanced(
    CMAKE_Fortran_FLAGS_COVERAGE
    CMAKE_CXX_FLAGS_COVERAGE
    CMAKE_C_FLAGS_COVERAGE
    CMAKE_EXE_LINKER_FLAGS_COVERAGE
    CMAKE_SHARED_LINKER_FLAGS_COVERAGE )

get_property(GENERATOR_IS_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
if(NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR GENERATOR_IS_MULTI_CONFIG))
    message(WARNING "Code coverage results with an optimised (non-Debug) build may be misleading")
endif() # NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR GENERATOR_IS_MULTI_CONFIG)

if(CMAKE_C_COMPILER_ID STREQUAL "GNU" OR CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
    link_libraries(gcov)
endif()

# Defines a target for running and collecting code coverage information.
# Builds dependencies, runs the given executable and outputs reports.
# NOTE! The executable should always have a ZERO as exit code otherwise
# the coverage generation will not complete.
#
# setup_target_for_coverage_gcovr(
#     NAME ctest_coverage                    # New target name
#     EXECUTABLE ctest -j ${PROCESSOR_COUNT} # Executable in PROJECT_BINARY_DIR
#     DEPENDENCIES executable_target         # Dependencies to build first
#     BASE_DIRECTORY "../"                   # Base directory for report
#                                            #  (defaults to PROJECT_SOURCE_DIR)
#     FORMAT "cobertura"                     # Output format, one of:
#                                            #  xml cobertura sonarqube json-summary
#                                            #  json-details coveralls csv txt
#                                            #  html-single html-nested html-details
#                                            #  (xml is an alias to cobertura;
#                                            #  if no format is set, defaults to xml)
#     EXCLUDE "src/dir1/*" "src/dir2/*"      # Patterns to exclude (can be relative
#                                            #  to BASE_DIRECTORY, with CMake 3.4+)
# )
# The user can set the variable GCOVR_ADDITIONAL_ARGS to supply additional flags to the
# GCOVR command.
function(setup_target_for_coverage_gcovr)
    set(options NONE)
    set(oneValueArgs BASE_DIRECTORY NAME FORMAT)
    set(multiValueArgs EXCLUDE EXECUTABLE EXECUTABLE_ARGS DEPENDENCIES)
    cmake_parse_arguments(Coverage "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})

    if(NOT GCOV_TOOL)
        message(FATAL_ERROR "Could not find gcov or llvm-cov tool! Aborting...")
    endif()

    if(NOT GCOVR_PATH)
        message(FATAL_ERROR "Could not find gcovr tool! Aborting...")
    endif()

    # Set base directory (as absolute path), or default to PROJECT_SOURCE_DIR
    if(DEFINED Coverage_BASE_DIRECTORY)
        get_filename_component(BASEDIR ${Coverage_BASE_DIRECTORY} ABSOLUTE)
    else()
        set(BASEDIR ${PROJECT_SOURCE_DIR})
    endif()

    if(NOT DEFINED Coverage_FORMAT)
        set(Coverage_FORMAT xml)
    endif()

    if("--output" IN_LIST GCOVR_ADDITIONAL_ARGS)
        message(FATAL_ERROR "Unsupported --output option detected in GCOVR_ADDITIONAL_ARGS! Aborting...")
    else()
        if((Coverage_FORMAT STREQUAL "html-details")
           OR (Coverage_FORMAT STREQUAL "html-nested"))
            set(GCOVR_OUTPUT_FILE ${PROJECT_BINARY_DIR}/${Coverage_NAME}/index.html)
            set(GCOVR_CREATE_FOLDER ${PROJECT_BINARY_DIR}/${Coverage_NAME})
        elseif(Coverage_FORMAT STREQUAL "html-single")
            set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.html)
        elseif((Coverage_FORMAT STREQUAL "json-summary")
               OR (Coverage_FORMAT STREQUAL "json-details")
               OR (Coverage_FORMAT STREQUAL "coveralls"))
            set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.json)
        elseif(Coverage_FORMAT STREQUAL "txt")
            set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.txt)
        elseif(Coverage_FORMAT STREQUAL "csv")
            set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.csv)
        else()
            set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.xml)
        endif()
    endif()

    if ((Coverage_FORMAT STREQUAL "cobertura")
        OR (Coverage_FORMAT STREQUAL "xml"))
        list(APPEND GCOVR_ADDITIONAL_ARGS --cobertura "${GCOVR_OUTPUT_FILE}" )
        list(APPEND GCOVR_ADDITIONAL_ARGS --cobertura-pretty )
        set(Coverage_FORMAT cobertura) # overwrite xml
    elseif(Coverage_FORMAT STREQUAL "sonarqube")
        list(APPEND GCOVR_ADDITIONAL_ARGS --sonarqube "${GCOVR_OUTPUT_FILE}" )
    elseif(Coverage_FORMAT STREQUAL "json-summary")
        list(APPEND GCOVR_ADDITIONAL_ARGS --json-summary "${GCOVR_OUTPUT_FILE}" )
        list(APPEND GCOVR_ADDITIONAL_ARGS --json-summary-pretty)
    elseif(Coverage_FORMAT STREQUAL "json-details")
        list(APPEND GCOVR_ADDITIONAL_ARGS --json "${GCOVR_OUTPUT_FILE}" )
        list(APPEND GCOVR_ADDITIONAL_ARGS --json-pretty)
    elseif(Coverage_FORMAT STREQUAL "coveralls")
        list(APPEND GCOVR_ADDITIONAL_ARGS --coveralls "${GCOVR_OUTPUT_FILE}" )
        list(APPEND GCOVR_ADDITIONAL_ARGS --coveralls-pretty)
    elseif(Coverage_FORMAT STREQUAL "csv")
        list(APPEND GCOVR_ADDITIONAL_ARGS --csv "${GCOVR_OUTPUT_FILE}" )
    elseif(Coverage_FORMAT STREQUAL "txt")
        list(APPEND GCOVR_ADDITIONAL_ARGS --txt "${GCOVR_OUTPUT_FILE}" )
    elseif(Coverage_FORMAT STREQUAL "html-single")
        list(APPEND GCOVR_ADDITIONAL_ARGS --html "${GCOVR_OUTPUT_FILE}" )
        list(APPEND GCOVR_ADDITIONAL_ARGS --html-self-contained)
    elseif(Coverage_FORMAT STREQUAL "html-nested")
        list(APPEND GCOVR_ADDITIONAL_ARGS --html-nested "${GCOVR_OUTPUT_FILE}" )
    elseif(Coverage_FORMAT STREQUAL "html-details")
        list(APPEND GCOVR_ADDITIONAL_ARGS --html-details "${GCOVR_OUTPUT_FILE}" )
    else()
        message(FATAL_ERROR "Unsupported output style ${Coverage_FORMAT}! Aborting...")
    endif()

    # Collect excludes (CMake 3.4+: Also compute absolute paths)
    set(GCOVR_EXCLUDES "")
    foreach(EXCLUDE ${Coverage_EXCLUDE} ${COVERAGE_EXCLUDES} ${COVERAGE_GCOVR_EXCLUDES})
        if(CMAKE_VERSION VERSION_GREATER 3.4)
            get_filename_component(EXCLUDE ${EXCLUDE} ABSOLUTE BASE_DIR ${BASEDIR})
        endif()
        list(APPEND GCOVR_EXCLUDES "${EXCLUDE}")
    endforeach()
    list(REMOVE_DUPLICATES GCOVR_EXCLUDES)

    # Combine excludes to several -e arguments
    set(GCOVR_EXCLUDE_ARGS "")
    foreach(EXCLUDE ${GCOVR_EXCLUDES})
        list(APPEND GCOVR_EXCLUDE_ARGS "-e")
        list(APPEND GCOVR_EXCLUDE_ARGS "${EXCLUDE}")
    endforeach()

    # Set up commands which will be run to generate coverage data
    # Run tests
    set(GCOVR_EXEC_TESTS_CMD
        ${Coverage_EXECUTABLE} ${Coverage_EXECUTABLE_ARGS}
    )

    # Create folder
    if(DEFINED GCOVR_CREATE_FOLDER)
        set(GCOVR_FOLDER_CMD
            ${CMAKE_COMMAND} -E make_directory ${GCOVR_CREATE_FOLDER})
    else()
        set(GCOVR_FOLDER_CMD echo) # dummy
    endif()

    # Running gcovr
    set(GCOVR_CMD
        ${GCOVR_PATH}
        --gcov-executable ${GCOV_TOOL}
        --gcov-ignore-parse-errors=negative_hits.warn_once_per_file
        -r ${BASEDIR}
        ${GCOVR_ADDITIONAL_ARGS}
        ${GCOVR_EXCLUDE_ARGS}
        --object-directory=${PROJECT_BINARY_DIR}
    )

    if(CODE_COVERAGE_VERBOSE)
        message(STATUS "Executed command report")

        message(STATUS "Command to run tests: ")
        string(REPLACE ";" " " GCOVR_EXEC_TESTS_CMD_SPACED "${GCOVR_EXEC_TESTS_CMD}")
        message(STATUS "${GCOVR_EXEC_TESTS_CMD_SPACED}")

        if(NOT GCOVR_FOLDER_CMD STREQUAL "echo")
            message(STATUS "Command to create a folder: ")
            string(REPLACE ";" " " GCOVR_FOLDER_CMD_SPACED "${GCOVR_FOLDER_CMD}")
            message(STATUS "${GCOVR_FOLDER_CMD_SPACED}")
        endif()

        message(STATUS "Command to generate gcovr coverage data: ")
        string(REPLACE ";" " " GCOVR_CMD_SPACED "${GCOVR_CMD}")
        message(STATUS "${GCOVR_CMD_SPACED}")
    endif()

    add_custom_target(${Coverage_NAME}
        COMMAND ${GCOVR_EXEC_TESTS_CMD}
        COMMAND ${GCOVR_FOLDER_CMD}
        COMMAND ${GCOVR_CMD}

        BYPRODUCTS ${GCOVR_OUTPUT_FILE}
        WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
        DEPENDS ${Coverage_DEPENDENCIES}
        VERBATIM # Protect arguments to commands
        COMMENT "Running gcovr to produce code coverage report."
    )

    # Show info where to find the report
    add_custom_command(TARGET ${Coverage_NAME} POST_BUILD
        COMMAND ;
        COMMENT "Code coverage report saved in ${GCOVR_OUTPUT_FILE} formatted as ${Coverage_FORMAT}"
    )
endfunction() # setup_target_for_coverage_gcovr

function(append_coverage_compiler_flags)
    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
    set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
    message(STATUS "Appending code coverage compiler flags: ${COVERAGE_COMPILER_FLAGS}")
endfunction() # append_coverage_compiler_flags

# Setup coverage for specific library
function(append_coverage_compiler_flags_to_target name)
    separate_arguments(_flag_list NATIVE_COMMAND "${COVERAGE_COMPILER_FLAGS}")
    target_compile_options(${name} PRIVATE ${_flag_list})
    if(CMAKE_C_COMPILER_ID STREQUAL "GNU" OR CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
        target_link_libraries(${name} PRIVATE gcov)
    endif()
endfunction()
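# Example usage of the helper above (a sketch; `my_lib` is a hypothetical
# target name, not something defined in this repository):
#     append_coverage_compiler_flags_to_target(my_lib)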
CMake/Coverage.cmake (deleted)
@@ -1,125 +0,0 @@
# call add_coverage(module_name) to add coverage targets for the given module
function (add_coverage module)
    if ("${CMAKE_C_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang"
        OR "${CMAKE_CXX_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang")
        message ("[Coverage] Building with llvm Code Coverage Tools")
        # Using llvm gcov ; llvm installed by xcode
        set (LLVM_COV_PATH /Library/Developer/CommandLineTools/usr/bin)
        if (NOT EXISTS ${LLVM_COV_PATH}/llvm-cov)
            message (FATAL_ERROR "llvm-cov not found! Aborting.")
        endif ()

        # set Flags
        target_compile_options (${module} PRIVATE
            -fprofile-instr-generate
            -fcoverage-mapping)

        target_link_options (${module} PUBLIC
            -fprofile-instr-generate
            -fcoverage-mapping)

        target_compile_options (clio PRIVATE
            -fprofile-instr-generate
            -fcoverage-mapping)

        target_link_options (clio PUBLIC
            -fprofile-instr-generate
            -fcoverage-mapping)

        # llvm-cov
        add_custom_target (${module}-ccov-preprocessing
            COMMAND LLVM_PROFILE_FILE=${module}.profraw $<TARGET_FILE:${module}>
            COMMAND ${LLVM_COV_PATH}/llvm-profdata merge -sparse ${module}.profraw -o
                    ${module}.profdata
            DEPENDS ${module})

        add_custom_target (${module}-ccov-show
            COMMAND ${LLVM_COV_PATH}/llvm-cov show $<TARGET_FILE:${module}>
                    -instr-profile=${module}.profdata -show-line-counts-or-regions
            DEPENDS ${module}-ccov-preprocessing)

        # add summary for CI parse
        add_custom_target (${module}-ccov-report
            COMMAND
                ${LLVM_COV_PATH}/llvm-cov report $<TARGET_FILE:${module}>
                -instr-profile=${module}.profdata
                -ignore-filename-regex=".*_makefiles|.*unittests|.*_deps"
                -show-region-summary=false
            DEPENDS ${module}-ccov-preprocessing)

        # exclude libs and unittests self
        add_custom_target (${module}-ccov
            COMMAND
                ${LLVM_COV_PATH}/llvm-cov show $<TARGET_FILE:${module}>
                -instr-profile=${module}.profdata -show-line-counts-or-regions
                -output-dir=${module}-llvm-cov -format="html"
                -ignore-filename-regex=".*_makefiles|.*unittests|.*_deps" > /dev/null 2>&1
            DEPENDS ${module}-ccov-preprocessing)

        add_custom_command (
            TARGET ${module}-ccov
            POST_BUILD
            COMMENT
                "Open ${module}-llvm-cov/index.html in your browser to view the coverage report."
        )
    elseif ("${CMAKE_C_COMPILER_ID}" MATCHES "GNU" OR "${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU")
        message ("[Coverage] Building with Gcc Code Coverage Tools")

        find_program (GCOV_PATH gcov)
        if (NOT GCOV_PATH)
            message (FATAL_ERROR "gcov not found! Aborting...")
        endif () # NOT GCOV_PATH
        find_program (GCOVR_PATH gcovr)
        if (NOT GCOVR_PATH)
            message (FATAL_ERROR "gcovr not found! Aborting...")
        endif () # NOT GCOVR_PATH

        set (COV_OUTPUT_PATH ${module}-gcc-cov)
        target_compile_options (${module} PRIVATE -fprofile-arcs -ftest-coverage
                                                  -fPIC)
        target_link_libraries (${module} PRIVATE gcov)

        target_compile_options (clio PRIVATE -fprofile-arcs -ftest-coverage
                                             -fPIC)
        target_link_libraries (clio PRIVATE gcov)
        # this target is used for CI as well; it generates the summary out.xml we send
        # to a github action to generate markdown, which we can paste into comments or
        # the readme
        add_custom_target (${module}-ccov
            COMMAND ${module} ${TEST_PARAMETER}
            COMMAND rm -rf ${COV_OUTPUT_PATH}
            COMMAND mkdir ${COV_OUTPUT_PATH}
            COMMAND
                gcovr -r ${CMAKE_SOURCE_DIR} --object-directory=${PROJECT_BINARY_DIR} -x
                ${COV_OUTPUT_PATH}/out.xml --exclude='${CMAKE_SOURCE_DIR}/unittests/'
                --exclude='${PROJECT_BINARY_DIR}/'
            COMMAND
                gcovr -r ${CMAKE_SOURCE_DIR} --object-directory=${PROJECT_BINARY_DIR}
                --html ${COV_OUTPUT_PATH}/report.html
                --exclude='${CMAKE_SOURCE_DIR}/unittests/'
                --exclude='${PROJECT_BINARY_DIR}/'
            WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
            COMMENT "Running gcovr to produce Cobertura code coverage report.")

        # generate the detail report
        add_custom_target (${module}-ccov-report
            COMMAND ${module} ${TEST_PARAMETER}
            COMMAND rm -rf ${COV_OUTPUT_PATH}
            COMMAND mkdir ${COV_OUTPUT_PATH}
            COMMAND
                gcovr -r ${CMAKE_SOURCE_DIR} --object-directory=${PROJECT_BINARY_DIR}
                --html-details ${COV_OUTPUT_PATH}/index.html
                --exclude='${CMAKE_SOURCE_DIR}/unittests/'
                --exclude='${PROJECT_BINARY_DIR}/'
            WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
            COMMENT "Running gcovr to produce Cobertura code coverage report.")
        add_custom_command (
            TARGET ${module}-ccov-report
            POST_BUILD
            COMMENT
                "Open ${COV_OUTPUT_PATH}/index.html in your browser to view the coverage report."
        )
    else ()
        message (FATAL_ERROR "Compiler not supported yet")
    endif ()
endfunction ()
@@ -17,20 +17,34 @@ set(COMPILER_FLAGS
    -pedantic
    -Wpedantic
    -Wunused
    # FIXME: The following bunch are needed for gcc12 atm.
    -Wno-missing-requires
    -Wno-restrict
    -Wno-null-dereference
    -Wno-maybe-uninitialized
    -Wno-unknown-warning-option # and this to work with clang
    # TODO: Address these and others in https://github.com/XRPLF/clio/issues/1273
)

if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
#TODO: reenable when we change CI #884
# if (is_gcc AND NOT lint)
#     list(APPEND COMPILER_FLAGS
#         -Wduplicated-branches
#         -Wduplicated-cond
#         -Wlogical-op
#         -Wuseless-cast
#     )
# endif ()

if (is_clang)
    list(APPEND COMPILER_FLAGS
        -Wshadow # gcc is too aggressive with shadowing https://gcc.gnu.org/bugzilla/show_bug.cgi?id=78147
    )
endif ()

if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
if (is_appleclang)
    list(APPEND COMPILER_FLAGS
        -Wduplicated-branches
        -Wduplicated-cond
        -Wlogical-op
        -Wuseless-cast
        -Wreorder-init-list
    )
endif ()

CMake/deps/libbacktrace.cmake (new file, 3 lines)
@@ -0,0 +1,3 @@
target_compile_definitions(clio PUBLIC BOOST_STACKTRACE_LINK)
target_compile_definitions(clio PUBLIC BOOST_STACKTRACE_USE_BACKTRACE)
find_package(libbacktrace REQUIRED)
CMakeLists.txt (110 lines changed)
@@ -9,6 +9,7 @@ option (tests "Build tests" FALSE)
option (docs "Generate doxygen docs" FALSE)
option (coverage "Build test coverage report" FALSE)
option (packaging "Create distribution packages" FALSE)
option (lint "Run clang-tidy checks during compilation" FALSE)
# ========================================================================== #
set (san "" CACHE STRING "Add sanitizer instrumentation")
set (CMAKE_EXPORT_COMPILE_COMMANDS TRUE)
@@ -18,6 +19,13 @@ set_property (CACHE san PROPERTY STRINGS ";undefined;memory;address;thread")
# Include required modules
include (CMake/Ccache.cmake)
include (CheckCXXCompilerFlag)
include (CMake/ClangTidy.cmake)

# Set coverage build options
if (tests AND coverage)
    include (CMake/CodeCoverage.cmake)
    append_coverage_compiler_flags()
endif()

if (verbose)
    set (CMAKE_VERBOSE_MAKEFILE TRUE)
@@ -42,6 +50,7 @@ include (CMake/deps/OpenSSL.cmake)
include (CMake/deps/Threads.cmake)
include (CMake/deps/libfmt.cmake)
include (CMake/deps/cassandra.cmake)
include (CMake/deps/libbacktrace.cmake)

# TODO: Include directory will be wrong when installed.
target_include_directories (clio PUBLIC src)
@@ -54,11 +63,14 @@ target_link_libraries (clio
    PUBLIC Boost::system
    PUBLIC Boost::log
    PUBLIC Boost::log_setup
    PUBLIC Boost::stacktrace_backtrace
    PUBLIC cassandra-cpp-driver::cassandra-cpp-driver
    PUBLIC fmt::fmt
    PUBLIC OpenSSL::Crypto
    PUBLIC OpenSSL::SSL
    PUBLIC xrpl::libxrpl
    PUBLIC dl
    PUBLIC libbacktrace::libbacktrace

    INTERFACE Threads::Threads
)
@@ -72,6 +84,7 @@ target_sources (clio PRIVATE
    ## Main
    src/main/impl/Build.cpp
    ## Backend
    src/data/BackendCounters.cpp
    src/data/BackendInterface.cpp
    src/data/LedgerCache.cpp
    src/data/cassandra/impl/Future.cpp
@@ -87,15 +100,23 @@ target_sources (clio PRIVATE
    src/etl/ProbingSource.cpp
    src/etl/NFTHelpers.cpp
    src/etl/ETLService.cpp
    src/etl/ETLState.cpp
    src/etl/LoadBalancer.cpp
    src/etl/impl/ForwardCache.cpp
    ## Feed
    src/feed/SubscriptionManager.cpp
    src/feed/impl/TransactionFeed.cpp
    src/feed/impl/LedgerFeed.cpp
    src/feed/impl/ProposedTransactionFeed.cpp
    src/feed/impl/SingleFeedBase.cpp
    ## Web
    src/web/impl/AdminVerificationStrategy.cpp
    src/web/IntervalSweepHandler.cpp
    src/web/Resolver.cpp
    ## RPC
    src/rpc/Errors.cpp
    src/rpc/Factories.cpp
    src/rpc/AMMHelpers.cpp
    src/rpc/RPCHelpers.cpp
    src/rpc/Counters.cpp
    src/rpc/WorkQueue.cpp
@@ -113,6 +134,7 @@ target_sources (clio PRIVATE
    src/rpc/handlers/AccountObjects.cpp
    src/rpc/handlers/AccountOffers.cpp
    src/rpc/handlers/AccountTx.cpp
    src/rpc/handlers/AMMInfo.cpp
    src/rpc/handlers/BookChanges.cpp
    src/rpc/handlers/BookOffers.cpp
    src/rpc/handlers/DepositAuthorized.cpp
@@ -121,6 +143,7 @@ target_sources (clio PRIVATE
    src/rpc/handlers/LedgerData.cpp
    src/rpc/handlers/LedgerEntry.cpp
    src/rpc/handlers/LedgerRange.cpp
    src/rpc/handlers/NFTsByIssuer.cpp
    src/rpc/handlers/NFTBuyOffers.cpp
    src/rpc/handlers/NFTHistory.cpp
    src/rpc/handlers/NFTInfo.cpp
@@ -129,11 +152,26 @@ target_sources (clio PRIVATE
    src/rpc/handlers/NoRippleCheck.cpp
    src/rpc/handlers/Random.cpp
    src/rpc/handlers/TransactionEntry.cpp
    src/rpc/handlers/Tx.cpp
    ## Util
    src/util/config/Config.cpp
    src/util/log/Logger.cpp
    src/util/Taggable.cpp)
    src/util/prometheus/Http.cpp
    src/util/prometheus/Label.cpp
    src/util/prometheus/MetricBase.cpp
    src/util/prometheus/MetricBuilder.cpp
    src/util/prometheus/MetricsFamily.cpp
    src/util/prometheus/OStream.cpp
    src/util/prometheus/Prometheus.cpp
    src/util/Random.cpp
    src/util/requests/RequestBuilder.cpp
    src/util/requests/Types.cpp
    src/util/requests/WsConnection.cpp
    src/util/requests/impl/SslContext.cpp
    src/util/Taggable.cpp
    src/util/TerminationHandler.cpp
    src/util/TxUtils.cpp
    src/util/LedgerUtils.cpp
)

# Clio server
add_executable (clio_server src/main/Main.cpp)
@@ -155,22 +193,39 @@ if (tests)
    unittests/ProfilerTests.cpp
    unittests/JsonUtilTests.cpp
    unittests/DOSGuardTests.cpp
    unittests/SubscriptionTests.cpp
    unittests/SubscriptionManagerTests.cpp
    unittests/util/TestGlobals.cpp
    unittests/util/AssertTests.cpp
    unittests/util/BatchingTests.cpp
    unittests/util/TestHttpServer.cpp
    unittests/util/TestObject.cpp
    unittests/util/TestWsServer.cpp
    unittests/util/TxUtilTests.cpp
    unittests/util/StringUtils.cpp
    unittests/util/LedgerUtilsTests.cpp
    unittests/util/prometheus/CounterTests.cpp
    unittests/util/prometheus/GaugeTests.cpp
    unittests/util/prometheus/HistogramTests.cpp
    unittests/util/prometheus/HttpTests.cpp
    unittests/util/prometheus/LabelTests.cpp
    unittests/util/prometheus/MetricBuilderTests.cpp
    unittests/util/prometheus/MetricsFamilyTests.cpp
    unittests/util/prometheus/OStreamTests.cpp
    unittests/util/requests/RequestBuilderTests.cpp
    unittests/util/requests/SslContextTests.cpp
    unittests/util/requests/WsConnectionTests.cpp
    # ETL
    unittests/etl/ExtractionDataPipeTests.cpp
    unittests/etl/ExtractorTests.cpp
    unittests/etl/TransformerTests.cpp
    unittests/etl/CacheLoaderTests.cpp
    unittests/etl/AmendmentBlockHandlerTests.cpp
    unittests/etl/LedgerPublisherTests.cpp
    unittests/etl/ETLStateTests.cpp
    # RPC
    unittests/rpc/ErrorTests.cpp
    unittests/rpc/BaseTests.cpp
    unittests/rpc/RPCHelpersTests.cpp
    unittests/rpc/CountersTests.cpp
    unittests/rpc/AdminVerificationTests.cpp
    unittests/rpc/APIVersionTests.cpp
    unittests/rpc/ForwardingProxyTests.cpp
    unittests/rpc/WorkQueueTests.cpp
@@ -199,6 +254,7 @@ if (tests)
    unittests/rpc/handlers/RandomTests.cpp
    unittests/rpc/handlers/NFTInfoTests.cpp
    unittests/rpc/handlers/NFTBuyOffersTests.cpp
    unittests/rpc/handlers/NFTsByIssuerTest.cpp
    unittests/rpc/handlers/NFTSellOffersTests.cpp
    unittests/rpc/handlers/NFTHistoryTests.cpp
    unittests/rpc/handlers/SubscribeTests.cpp
@@ -208,8 +264,11 @@ if (tests)
    unittests/rpc/handlers/BookChangesTests.cpp
    unittests/rpc/handlers/LedgerTests.cpp
    unittests/rpc/handlers/VersionHandlerTests.cpp
    unittests/rpc/handlers/AMMInfoTests.cpp
    # Backend
    unittests/data/BackendFactoryTests.cpp
    unittests/data/BackendInterfaceTests.cpp
    unittests/data/BackendCountersTests.cpp
    unittests/data/cassandra/BaseTests.cpp
    unittests/data/cassandra/BackendTests.cpp
    unittests/data/cassandra/RetryPolicyTests.cpp
@@ -217,10 +276,20 @@ if (tests)
    unittests/data/cassandra/ExecutionStrategyTests.cpp
    unittests/data/cassandra/AsyncExecutorTests.cpp
    # Webserver
    unittests/web/AdminVerificationTests.cpp
    unittests/web/ServerTests.cpp
    unittests/web/RPCServerHandlerTests.cpp
    unittests/web/WhitelistHandlerTests.cpp
    unittests/web/SweepHandlerTests.cpp)
    unittests/web/SweepHandlerTests.cpp
    # Feed
    unittests/feed/SubscriptionManagerTests.cpp
    unittests/feed/SingleFeedBaseTests.cpp
    unittests/feed/ProposedTransactionFeedTests.cpp
    unittests/feed/BookChangesFeedTests.cpp
    unittests/feed/LedgerFeedTests.cpp
    unittests/feed/TransactionFeedTests.cpp
    unittests/feed/ForwardFeedTests.cpp
    unittests/feed/TrackableSignalTests.cpp)

include (CMake/deps/gtest.cmake)

@@ -234,12 +303,31 @@ if (tests)
    target_include_directories (${TEST_TARGET} PRIVATE unittests)
    target_link_libraries (${TEST_TARGET} PUBLIC clio gtest::gtest)

    # Generate `clio_tests-ccov` if coverage is enabled
    # Note: use `make clio_tests-ccov` to generate report
    # Generate `coverage_report` target if coverage is enabled
    if (coverage)
        target_compile_definitions(${TEST_TARGET} PRIVATE COVERAGE_ENABLED)
        include (CMake/Coverage.cmake)
        add_coverage (${TEST_TARGET})
        if (DEFINED CODE_COVERAGE_REPORT_FORMAT)
            set(CODE_COVERAGE_FORMAT ${CODE_COVERAGE_REPORT_FORMAT})
        else()
            set(CODE_COVERAGE_FORMAT html-details)
        endif()

        if (DEFINED CODE_COVERAGE_TESTS_ARGS)
            set(TESTS_ADDITIONAL_ARGS ${CODE_COVERAGE_TESTS_ARGS})
            separate_arguments(TESTS_ADDITIONAL_ARGS)
        else()
            set(TESTS_ADDITIONAL_ARGS "")
        endif()

        set (GCOVR_ADDITIONAL_ARGS --exclude-throw-branches -s)

        setup_target_for_coverage_gcovr(
            NAME coverage_report
            FORMAT ${CODE_COVERAGE_FORMAT}
            EXECUTABLE clio_tests
            EXECUTABLE_ARGS --gtest_brief=1 ${TESTS_ADDITIONAL_ARGS}
            EXCLUDE "unittests"
            DEPENDENCIES clio_tests
        )
    endif ()
endif ()

@@ -62,6 +62,11 @@ git commit --amend -S
git push --force
```

## Use ccache (optional)
Clio uses ccache to speed up compilation. If you want to use it, please make sure it is installed on your machine.
CMake will automatically detect it and use it if it's available.

## Fixing issues found during code review
While your code is in review, it's possible that some changes will be requested by reviewer(s).
This section describes the process of adding your fixes.
@@ -91,7 +96,7 @@ The button for that is near the bottom of the PR's page on GitHub.
This is a non-exhaustive list of recommended style guidelines. These are not always strictly enforced and serve as a way to keep the codebase coherent.

## Formatting
Code must conform to `clang-format` version 10, unless the result would be unreasonably difficult to read or maintain.
Code must conform to `clang-format` version 17, unless the result would be unreasonably difficult to read or maintain.
To change your code to conform, use `clang-format -i <your changed files>`.

## Avoid
@@ -126,6 +131,7 @@ Existing maintainers can resign, or be subject to a vote for removal at the behe

* [cindyyan317](https://github.com/cindyyan317) (Ripple)
* [godexsoft](https://github.com/godexsoft) (Ripple)
* [kuznetsss](https://github.com/kuznetsss) (Ripple)
* [legleux](https://github.com/legleux) (Ripple)

## Honorable ex-Maintainers

Doxyfile (2 lines changed)
@@ -1,6 +1,6 @@
PROJECT_NAME = "Clio"
INPUT = ../src ../unittests
EXCLUDE_PATTERNS = *Test*.cpp *Test*.h
EXCLUDE_PATTERNS = *Test*.cpp *Test*.hpp
RECURSIVE = YES
HAVE_DOT = YES

README.md (111 lines changed)
@@ -1,4 +1,8 @@
# Clio
[](https://github.com/XRPLF/clio/actions/workflows/build.yml?query=branch%3Adevelop)
[](https://github.com/XRPLF/clio/actions/workflows/nightly.yml?query=branch%3Adevelop)
[](https://github.com/XRPLF/clio/actions/workflows/clang-tidy.yml?query=branch%3Adevelop)
[](https://app.codecov.io/gh/XRPLF/clio)

Clio is an XRP Ledger API server. Clio is optimized for RPC calls, over WebSocket or JSON-RPC.
Validated historical ledger and transaction data are stored in a more space-efficient format,
@@ -15,6 +19,10 @@ To access non-validated data for *any* request, simply add `ledger_index: "curre
Clio does not connect to the peer-to-peer network. Instead, Clio extracts data from a group of specified rippled nodes. Running Clio requires access to at least one rippled node
from which data can be extracted. The rippled node does not need to be running on the same machine as Clio.

## Help
Feel free to open an [issue](https://github.com/XRPLF/clio/issues) if you have a feature request or something doesn't work as expected.
If you have any questions about building, running, contributing, using clio, or anything else, you can always start a new [discussion](https://github.com/XRPLF/clio/discussions).

## Requirements
1. Access to a Cassandra cluster or ScyllaDB cluster. Can be local or remote.
2. Access to one or more rippled nodes. Can be local or remote.
@@ -32,6 +40,7 @@ It is written in C++20 and therefore requires a modern compiler.
- [Conan 1.55](https://conan.io/downloads.html)
- [CMake 3.16](https://cmake.org/download/)
- [**Optional**] [GCovr](https://gcc.gnu.org/onlinedocs/gcc/Gcov.html) (needed for code coverage generation)
- [**Optional**] [CCache](https://ccache.dev/) (speeds up compilation if you are going to compile Clio often)

| Compiler | Version |
|-------------|---------|
@@ -77,10 +86,11 @@ conan remote add --insert 0 conan-non-prod http://18.143.149.228:8081/artifactor
```
Now you should be able to download the prebuilt `xrpl` package on some platforms.

You might need to edit the `~/.conan/remotes.json` file to ensure that this newly added artifactory is listed last. Otherwise you might see compilation errors when building the project with gcc version 13 (or newer).

2. Remove old packages you may have cached:
```sh
conan remove -f xrpl
conan remove -f cassandra-cpp-driver
```

## Building Clio
@@ -88,7 +98,7 @@ conan remove -f cassandra-cpp-driver
Navigate to Clio's root directory and run:
```sh
mkdir build && cd build
conan install .. --output-folder . --build missing --settings build_type=Release -o tests=True
conan install .. --output-folder . --build missing --settings build_type=Release -o tests=True -o lint=False
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release ..
cmake --build . --parallel 8 # or without the number if you feel extra adventurous
```
@@ -98,6 +108,18 @@ If all goes well, `conan install` will find required packages and `cmake` will d

> **Tip:** To generate a Code Coverage report, include `-o coverage=True` in the `conan install` command above, along with `-o tests=True` to enable tests. After running the `cmake` commands, execute `make clio_tests-ccov`. The coverage report will be found at `clio_tests-llvm-cov/index.html`.

## Building Clio with Docker

It is possible to build Clio using Docker if you don't want to install all the dependencies on your machine.
```sh
docker run -it rippleci/clio_ci:latest
git clone https://github.com/XRPLF/clio
mkdir build && cd build
conan install .. --output-folder . --build missing --settings build_type=Release -o tests=True -o lint=False
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release ..
cmake --build . --parallel 8 # or without the number if you feel extra adventurous
```

## Running
```sh
./clio_server config.json
@@ -237,6 +259,91 @@ Clio will fallback to hardcoded defaults when not specified in the config file o
of the minimum and maximum supported versions hardcoded in `src/rpc/common/APIVersion.h`.
> **Note:** See `example-config.json` for more details.

## Admin rights for requests

By default, clio grants admin privileges based on the IP address of the request (only `127.0.0.1` is considered an admin).
This is not very secure because the IP address can be spoofed.
For better security, an `admin_password` can be provided in the `server` section of clio's config:
```json
"server": {
    "admin_password": "secret"
}
```
If a password is present in the config, clio checks the Authorization header (if any) of each request for the password.
The Authorization header should contain the type `Password` followed by the password from the config, e.g. `Password secret`.
Only an exactly matching password grants admin rights to the request or WebSocket connection.

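For example, an admin request can be issued with `curl` (a sketch; `secret` is the sample password above, and the port assumes clio's default `51233` from `example-config.json`):
```sh
curl -H "Authorization: Password secret" \
     -d '{"method": "server_info", "params": [{}]}' \
     http://127.0.0.1:51233/
```
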
## Prometheus metrics collection

Clio natively supports Prometheus metrics collection. It accepts Prometheus requests on the port configured in the `server` section of the config.
Prometheus metrics are enabled by default. To disable them, add `"prometheus": { "enabled": false }` to the config.
It is important to know that clio responds to Prometheus requests only if they are admin requests, so Prometheus should be configured to send the admin password in a header.
There is an example docker-compose file, with Prometheus and Grafana configs, in [examples/infrastructure](examples/infrastructure).

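You can also scrape the metrics endpoint manually to verify the setup (a sketch; reuses the `Password secret` example above and assumes the default `/metrics` path that Prometheus scrapes):
```sh
curl -H "Authorization: Password secret" http://127.0.0.1:51233/metrics
```
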
## Using clang-tidy for static analysis

The minimum clang-tidy version required is 17.0.
Clang-tidy can be run by CMake while building the project.
To enable it, provide the option `-o lint=True` to the `conan install` command:
```sh
conan install .. --output-folder . --build missing --settings build_type=Release -o tests=True -o lint=True
```
By default, CMake will try to find clang-tidy automatically on your system.
To force CMake to use a specific binary, set the `CLIO_CLANG_TIDY_BIN` environment variable to the path of a clang-tidy binary.
E.g.:
```sh
export CLIO_CLANG_TIDY_BIN=/opt/homebrew/opt/llvm@17/bin/clang-tidy
```

## Coverage report

The coverage report is intended for developers using the GCC or Clang compilers (including Apple Clang).
It is generated by the build target `coverage_report`, which is only enabled when both the `tests`
and `coverage` options are set, e.g. with `-o coverage=True -o tests=True` in `conan`.

Prerequisites for the coverage report:

- [gcovr tool](https://gcovr.com/en/stable/getting-started.html) (can be installed e.g. with `pip install gcovr`)
- `gcov` for GCC (installed with the compiler by default) or
- `llvm-cov` for Clang (installed with the compiler by default, also on Apple)
- `Debug` build type

The coverage report is created when the following steps are completed, in order:

1. the `clio_tests` binary is built with instrumentation data, enabled by the `coverage` option mentioned above
2. the unit tests are run to completion, which populates the coverage capture data
3. the `gcovr` tool is run (internally invoking either `gcov` or `llvm-cov`) to assemble both the instrumentation data and the coverage capture data into a coverage report

The above steps are automated into a single target, `coverage_report`. The instrumented
`clio_tests` binary can also be used for running regular unit tests. In case of a
spurious failure of unit tests, it is possible to re-run the `coverage_report` target without
rebuilding the `clio_tests` binary (since it is simply a dependency of the coverage report target).

The default coverage report format is `html-details`, but developers
can override it to any of the formats listed in `CMake/CodeCoverage.cmake`
by setting the `CODE_COVERAGE_REPORT_FORMAT` variable in `cmake`. For example, CI
sets this parameter to `xml` for the [codecov](codecov.io) integration.

If some unit tests predictably fail, e.g. due to the absence of a Cassandra database, it is possible
to set unit test options in the `CODE_COVERAGE_TESTS_ARGS` cmake variable, as demonstrated below:

```sh
cd .build
conan install .. --output-folder . --build missing --settings build_type=Debug -o tests=True -o coverage=True
cmake -DCODE_COVERAGE_REPORT_FORMAT=json-details -DCMAKE_BUILD_TYPE=Debug -DCODE_COVERAGE_TESTS_ARGS="--gtest_filter=-BackendCassandra*" -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake ..
cmake --build . --target coverage_report
```

After the `coverage_report` target completes, the generated coverage report will be
stored inside the build directory, as either:

- a file named `coverage_report.*`, with a suitable extension for the report format, or
- a directory named `coverage_report`, with `index.html` and other files inside, for the `html-details` or `html-nested` report formats.

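For instance, generating the default `html-details` report and opening it could look like this (a sketch; `open` is macOS-specific, use `xdg-open` on Linux):
```sh
cmake --build . --target coverage_report
open coverage_report/index.html
```
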
## Developing against `rippled` in standalone mode

If you wish to develop against a `rippled` instance running in standalone

conanfile.py (17 lines changed)
@@ -1,6 +1,6 @@
from conan import ConanFile
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
import re


class Clio(ConanFile):
    name = 'clio'
@@ -11,20 +11,23 @@ class Clio(ConanFile):
    settings = 'os', 'compiler', 'build_type', 'arch'
    options = {
        'fPIC': [True, False],
        'verbose': [True, False],
        'verbose': [True, False],
        'tests': [True, False],  # build unit tests; create `clio_tests` binary
        'docs': [True, False],  # doxygen API docs; create custom target 'docs'
        'packaging': [True, False],  # create distribution packages
        'coverage': [True, False],  # build for test coverage report; create custom target `clio_tests-ccov`
        'lint': [True, False],  # run clang-tidy checks during compilation
    }

    requires = [
        'boost/1.82.0',
        'cassandra-cpp-driver/2.16.2',
        'fmt/10.0.0',
        'cassandra-cpp-driver/2.17.0',
        'fmt/10.1.1',
        'protobuf/3.21.9',
        'grpc/1.50.1',
        'openssl/1.1.1u',
        'xrpl/1.12.0',
        'xrpl/2.2.0-rc3',
        'libbacktrace/cci.20210118'
    ]

    default_options = {
@@ -33,6 +36,7 @@ class Clio(ConanFile):
        'tests': False,
        'packaging': False,
        'coverage': False,
        'lint': False,
        'docs': False,

        'xrpl/*:tests': False,
@@ -55,7 +59,7 @@ class Clio(ConanFile):

    def requirements(self):
        if self.options.tests:
            self.requires('gtest/1.13.0')
            self.requires('gtest/1.14.0')

    def configure(self):
        if self.settings.compiler == 'apple-clang':
@@ -73,6 +77,7 @@ class Clio(ConanFile):
        tc.variables['verbose'] = self.options.verbose
        tc.variables['tests'] = self.options.tests
        tc.variables['coverage'] = self.options.coverage
        tc.variables['lint'] = self.options.lint
        tc.variables['docs'] = self.options.docs
        tc.variables['packaging'] = self.options.packaging
        tc.generate()

docker/ci/dockerfile (new file, 67 lines)
@@ -0,0 +1,67 @@
FROM ubuntu:focal
ARG DEBIAN_FRONTEND=noninteractive
ARG TARGETARCH

USER root
WORKDIR /root/

ENV GCC_VERSION=11 \
    CCACHE_VERSION=4.8.3 \
    LLVM_TOOLS_VERSION=17 \
    GH_VERSION=2.40.0

# Add repositories
RUN apt-get -qq update \
    && apt-get -qq install -y --no-install-recommends --no-install-suggests gnupg wget curl software-properties-common \
    && add-apt-repository -y ppa:ubuntu-toolchain-r/test \
    && wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null | apt-key add - \
    && apt-add-repository 'deb https://apt.kitware.com/ubuntu/ focal main' \
    && echo "deb http://apt.llvm.org/focal/ llvm-toolchain-focal-${LLVM_TOOLS_VERSION} main" >> /etc/apt/sources.list \
    && wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add -

# Install packages
RUN apt update -qq \
    && apt install -y --no-install-recommends --no-install-suggests cmake python3 python3-pip sudo git \
       ninja-build make pkg-config libzstd-dev libzstd1 g++-${GCC_VERSION} jq \
       clang-format-${LLVM_TOOLS_VERSION} clang-tidy-${LLVM_TOOLS_VERSION} clang-tools-${LLVM_TOOLS_VERSION} \
    && update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-${GCC_VERSION} 100 \
    && update-alternatives --install /usr/bin/c++ c++ /usr/bin/g++-${GCC_VERSION} 100 \
    && update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-${GCC_VERSION} 100 \
    && update-alternatives --install /usr/bin/cc cc /usr/bin/gcc-${GCC_VERSION} 100 \
    && update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-${GCC_VERSION} 100 \
    && update-alternatives --install /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-${GCC_VERSION} 100 \
    && update-alternatives --install /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-${GCC_VERSION} 100 \
    && apt-get clean && apt remove -y software-properties-common \
    && pip3 install -q --upgrade --no-cache-dir pip \
    && pip3 install -q --no-cache-dir conan==1.62 gcovr

# Install ccache from source
WORKDIR /tmp
RUN wget "https://github.com/ccache/ccache/releases/download/v${CCACHE_VERSION}/ccache-${CCACHE_VERSION}.tar.gz" \
    && tar xf "ccache-${CCACHE_VERSION}.tar.gz" \
    && cd "ccache-${CCACHE_VERSION}" \
    && mkdir build && cd build \
    && cmake -GNinja -DCMAKE_BUILD_TYPE=Release .. \
    && ninja && cp ./ccache /usr/bin/ccache

# Install gh
RUN wget https://github.com/cli/cli/releases/download/v${GH_VERSION}/gh_${GH_VERSION}_linux_${TARGETARCH}.tar.gz \
    && tar xf gh_${GH_VERSION}_linux_${TARGETARCH}.tar.gz \
    && mv gh_${GH_VERSION}_linux_${TARGETARCH}/bin/gh /usr/bin/gh

# Clean up
RUN rm -rf /tmp/* /var/tmp/*

WORKDIR /root/
# Using root by default is not very secure but github checkout action doesn't work with any other user
# https://github.com/actions/checkout/issues/956
# And Github Actions doc recommends using root
# https://docs.github.com/en/actions/creating-actions/dockerfile-support-for-github-actions#user

# Setup conan
RUN conan profile new default --detect \
    && conan profile update settings.compiler.cppstd=20 default \
    && conan profile update settings.compiler.libcxx=libstdc++11 default \
    && conan remote add --insert 0 conan-non-prod http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod

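A local build of this CI image can be sketched as follows (the `clio_ci` tag is illustrative; the published image referenced in the README is `rippleci/clio_ci:latest`):
```sh
docker build -t clio_ci -f docker/ci/dockerfile docker/ci
docker run -it clio_ci
```
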
example-config.json
@@ -16,7 +16,8 @@
//
// Advanced options. USE AT OWN RISK:
// ---
"core_connections_per_host": 1 // Defaults to 1
"core_connections_per_host": 1, // Defaults to 1
"write_batch_size": 20 // Defaults to 20
//
// Below options will use defaults from cassandra driver if left unspecified.
// See https://docs.datastax.com/en/developer/cpp-driver/2.17/api/struct.CassCluster/ for details.
@@ -26,6 +27,7 @@
// ---
}
},
"allow_no_etl": false, // Allow Clio to run without a valid ETL source, otherwise Clio will stop if the ETL check fails
"etl_sources": [
{
"ip": "127.0.0.1",
@@ -64,7 +66,13 @@
"port": 51233,
// Max number of requests to queue up before rejecting further requests.
// Defaults to 0, which disables the limit.
"max_queue_size": 500
"max_queue_size": 500,
// If a request contains an Authorization header, Clio will check whether it matches 'Password ' + the sha256 hash of this value
// If it matches, the request will be considered an admin request
"admin_password": "xrp",
// If local_admin is true, Clio will consider requests coming from 127.0.0.1 as admin requests
// It's true by default unless admin_password is set; 'local_admin': true and 'admin_password' cannot be set at the same time
"local_admin": false
},
// Overrides the log level on a per logging channel basis.
// Defaults to the global "log_level" for each unspecified channel.
@@ -94,6 +102,10 @@
"log_level": "trace"
}
],
"prometheus": {
"enabled": true,
"compress_reply": true
},
"log_level": "info",
// Log format (this is the default format)
"log_format": "%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% %Message%",

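The sha256 hash referenced in the comments above can be computed locally; for the sample password `xrp` it matches the `credentials` value used in `examples/infrastructure/prometheus.yaml`:
```sh
echo -n 'xrp' | shasum -a 256
# 0e1dcf1ff020cceabf8f4a60a32e814b5b46ee0bb8cd4af5c814e4071bd86a18
```
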
examples/infrastructure/README.md (new file, 25 lines)
@@ -0,0 +1,25 @@
# Example of clio monitoring infrastructure

This directory contains an example of docker-based infrastructure to collect and visualise metrics from clio.

The structure of the directory:
- `compose.yaml`
  Docker-compose file with Prometheus and Grafana set up.
- `prometheus.yaml`
  Defines metrics collection from Clio and Prometheus itself.
  Demonstrates how to set up the Clio target and Clio's admin authorisation in Prometheus.
- `grafana/clio_dashboard.json`
  JSON file containing a preconfigured dashboard in Grafana format.
- `grafana/dashboard_local.yaml`
  Grafana configuration file defining the directory to search for dashboard JSON files.
- `grafana/datasources.yaml`
  Grafana configuration file defining Prometheus as a data source for Grafana.

## How to try

1. Make sure you have `docker` and `docker-compose` installed.
2. Run `docker-compose up -d` from this directory. It will start docker containers with Prometheus and Grafana.
3. Open [http://localhost:3000/dashboards](http://localhost:3000/dashboards). The Grafana login is `admin`, password `grafana`.
   There will be a preconfigured Clio dashboard.

If Clio is not yet running, launch it to see metrics. Some of the metrics may appear only after requests to Clio.
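Once the stack is up, you can check that Prometheus sees the clio scrape target via its HTTP API (a sketch; uses the port mapped in `compose.yaml`, plus `jq` if installed):
```sh
curl -s http://localhost:9090/api/v1/targets | jq '.data.activeTargets[].health'
```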
examples/infrastructure/compose.yaml (new file, 20 lines)
@@ -0,0 +1,20 @@
services:
  prometheus:
    image: prom/prometheus
    ports:
      - 9090:9090
    volumes:
      - ./prometheus.yaml:/etc/prometheus/prometheus.yml
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
  grafana:
    image: grafana/grafana
    ports:
      - 3000:3000
    environment:
      - GF_SECURITY_ADMIN_USER=admin
      - GF_SECURITY_ADMIN_PASSWORD=grafana
    volumes:
      - ./grafana/datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml
      - ./grafana/dashboard_local.yaml:/etc/grafana/provisioning/dashboards/local.yaml
      - ./grafana/clio_dashboard.json:/var/lib/grafana/dashboards/clio_dashboard.json
examples/infrastructure/grafana/clio_dashboard.json (new file, 1240 lines)
File diff suppressed because it is too large.

examples/infrastructure/grafana/dashboard_local.yaml (new file, 23 lines)
@@ -0,0 +1,23 @@
apiVersion: 1

providers:
  - name: 'Clio dashboard'
    # <int> Org id. Default to 1
    orgId: 1
    # <string> name of the dashboard folder.
    folder: ''
    # <string> folder UID. will be automatically generated if not specified
    folderUid: ''
    # <string> provider type. Default to 'file'
    type: file
    # <bool> disable dashboard deletion
    disableDeletion: false
    # <int> how often Grafana will scan for changed dashboards
    updateIntervalSeconds: 10
    # <bool> allow updating provisioned dashboards from the UI
    allowUiUpdates: false
    options:
      # <string, required> path to dashboard files on disk. Required when using the 'file' type
      path: /var/lib/grafana/dashboards
      # <bool> use folder names from filesystem to create folders in Grafana
      foldersFromFilesStructure: true
examples/infrastructure/grafana/datasources.yaml (new file, 8 lines)
@@ -0,0 +1,8 @@
apiVersion: 1

datasources:
  - name: Prometheus
    type: prometheus
    url: http://prometheus:9090
    isDefault: true
    access: proxy
examples/infrastructure/prometheus.yaml (new file, 19 lines)
@@ -0,0 +1,19 @@
scrape_configs:
  - job_name: clio
    scrape_interval: 5s
    scrape_timeout: 5s
    authorization:
      type: Password
      # sha256sum from password `xrp`
      # use echo -n 'your_password' | shasum -a 256 to get hash
      credentials: 0e1dcf1ff020cceabf8f4a60a32e814b5b46ee0bb8cd4af5c814e4071bd86a18
    static_configs:
      - targets:
          - host.docker.internal:51233
  - job_name: prometheus
    honor_timestamps: true
    scrape_interval: 15s
    scrape_timeout: 10s
    static_configs:
      - targets:
          - localhost:9090
external/cassandra/conanfile.py (vendored, 87 lines deleted)
@@ -1,87 +0,0 @@
from conan import ConanFile, tools
from conan.tools.cmake import CMake, CMakeToolchain

class Cassandra(ConanFile):
    name = 'cassandra-cpp-driver'
    version = '2.16.2'
    license = 'Apache-2.0'
    url = 'https://github.com/conan-io/conan-center-index'
    homepage = 'https://docs.datastax.com/en/developer/cpp-driver/'
    description = 'Cassandra C++ Driver'
    topics = ('conan', 'cassandra', 'driver')

    settings = 'os', 'arch', 'compiler', 'build_type'
    options = {
        'shared': [True, False],
        'fPIC': [True, False],
        'install_header_in_subdir': [True, False],
        'use_atomic': [None, 'boost', 'std'],
        'with_openssl': [True, False],
        'with_zlib': [True, False],
        'with_kerberos': [True, False],
        'use_timerfd': [True, False],
    }
    default_options = {
        'shared': False,
        'fPIC': True,
        'install_header_in_subdir': False,
        'use_atomic': None,
        'with_openssl': True,
        'with_zlib': True,
        'with_kerberos': False,
        'use_timerfd': True,
    }

    def requirements(self):
        self.requires('libuv/1.44.1')
        self.requires('http_parser/2.9.4')
        if self.options.with_openssl:
            self.requires('openssl/1.1.1q')
        if self.options.with_zlib:
            self.requires('minizip/1.2.12')
            self.requires('zlib/1.2.13')
        if self.options.use_atomic == 'boost':
            self.requires('boost/1.79.0')

    exports_sources = ['CMakeLists.txt']

    def config_options(self):
        if self.settings.os == 'Windows':
            del self.options.fPIC

    def configure(self):
        self.options['libuv'].shared = self.options.shared

    def generate(self):
        tc = CMakeToolchain(self)
        if self.settings.get_safe('compiler.cppstd') == '20':
            tc.blocks['cppstd'].values['cppstd'] = 17
        tc.variables['CASS_BUILD_STATIC'] = not self.options.shared
        tc.variables['CASS_USE_STATIC_LIBS'] = not self.options.shared
        tc.variables['CASS_BUILD_SHARED'] = self.options.shared
        tc.variables['LIBUV_ROOT_DIR'] = self.deps_cpp_info['libuv'].rootpath
        if self.options.with_openssl:
            tc.variables['OPENSSL_ROOT_DIR'] = self.deps_cpp_info['openssl'].rootpath
        tc.generate()

    def source(self):
        tools.files.get(self, f'https://github.com/datastax/cpp-driver/archive/refs/tags/{self.version}.tar.gz', strip_root=True)

    def build(self):
        cmake = CMake(self)
        cmake.configure()
        cmake.build()

    def package(self):
        cmake = CMake(self)
        cmake.install()

    def package_info(self):
        if self.options.shared:
            self.cpp_info.libs = ['cassandra']
        else:
            self.cpp_info.libs = ['cassandra_static']
        if self.settings.os == 'Windows':
            self.cpp_info.libs.extend(['iphlpapi', 'psapi', 'wsock32', 'crypt32', 'ws2_32', 'userenv'])
        if not self.options.shared:
            self.cpp_info.defines = ['CASS_STATIC']
metrics.py (183 lines deleted)
@@ -1,183 +0,0 @@
#!/usr/bin/python3
import argparse

from datetime import datetime

def getTime(line):
    bracketOpen = line.find("[")
    bracketClose = line.find("]")
    timestampSub = line[bracketOpen+1:bracketClose]
    timestamp = datetime.strptime(timestampSub, '%Y-%m-%d %H:%M:%S.%f')
    return timestamp.timestamp()

def parseAccountTx(filename):

    with open(filename) as f:
        totalProcTime = 0.0
        totalTxnTime = 0.0
        numCalls = 0
        for line in f:
            if "executed stored_procedure" in line:
                idx = line.find("in ")
                idx = idx + 3
                idx2 = line.find("num")
                procTime = float(line[idx:idx2])
                totalProcTime += procTime
            if "fetchTransactions fetched" in line:
                idx = line.find("took ")
                idx = idx + 5
                txnTime = float(line[idx:])
                totalTxnTime += txnTime
                numCalls = numCalls + 1
        print(totalProcTime)
        print(totalProcTime/numCalls)
        print(totalTxnTime)
        print(totalTxnTime/numCalls)

def parseLogs(filename, interval, minTxnCount = 0):

    with open(filename) as f:

        totalTime = 0
        totalTxns = 0
        totalObjs = 0
        totalLoadTime = 0

        start = 0
        end = 0
        totalLedgers = 0

        intervalTime = 0
        intervalTxns = 0
        intervalObjs = 0
        intervalLoadTime = 0

        intervalStart = 0
        intervalEnd = 0
        intervalLedgers = 0
        ledgersPerSecond = 0

        print("ledgers, transactions, objects, loadTime, loadTime/ledger, ledgers/sec, txns/sec, objs/sec")
        for line in f:
            if "Load phase" in line:
                sequenceIdx = line.find("Sequence : ")
                hashIdx = line.find(" Hash :")
                sequence = line[sequenceIdx + len("Sequence : "):hashIdx]
                txnCountSubstr = "txn count = "
                objCountSubstr = ". object count = "
                loadTimeSubstr = ". load time = "
                txnsSubstr = ". load txns per second = "
                objsSubstr = ". load objs per second = "
                txnCountIdx = line.find(txnCountSubstr)
                objCountIdx = line.find(objCountSubstr)
                loadTimeIdx = line.find(loadTimeSubstr)
                txnsIdx = line.find(txnsSubstr)
                objsIdx = line.find(objsSubstr)
                txnCount = line[txnCountIdx + len(txnCountSubstr):objCountIdx]
                objCount = line[objCountIdx + len(objCountSubstr):loadTimeIdx]
                loadTime = line[loadTimeIdx + len(loadTimeSubstr):txnsIdx]
                txnsPerSecond = line[txnsIdx + len(txnsSubstr):objsIdx]
                objsPerSecond = line[objsIdx + len(objsSubstr):-1]
                if int(txnCount) >= minTxnCount:
                    totalTime += float(loadTime)
                    totalTxns += float(txnCount)
                    totalObjs += float(objCount)
                    intervalTime += float(loadTime)
                    intervalTxns += float(txnCount)
                    intervalObjs += float(objCount)

                totalLoadTime += float(loadTime)
                intervalLoadTime += float(loadTime)

                if start == 0:
                    start = getTime(line)

                prevEnd = end
                end = getTime(line)

                if intervalStart == 0:
                    intervalStart = getTime(line)

                intervalEnd = getTime(line)

                totalLedgers += 1
                intervalLedgers += 1
                ledgersPerSecond = 0
                if end != start:
                    ledgersPerSecond = float(totalLedgers) / float((end - start))
                intervalLedgersPerSecond = 0
                if intervalEnd != intervalStart:
                    intervalLedgersPerSecond = float(intervalLedgers) / float((intervalEnd - intervalStart))

                if int(sequence) % interval == 0:
                    # print("Sequence = " + sequence + " : [time, txCount, objCount, txPerSec, objsPerSec]")
                    # print(loadTime + " , "
                    #     + txnCount + " , "
                    #     + objCount + " , "
                    #     + txnsPerSecond + " , "
                    #     + objsPerSecond)
                    # print("Interval Aggregate ( " + str(interval) + " ) [ledgers, txns, objects, elapsedTime, ledgersPerSec, avgLoadTime, txPerSec, objsPerSec]: ")
                    print(str(intervalLedgers) + " , "
                        + str(intervalTxns) + " , "
                        + str(intervalObjs) + " , "
                        + str(intervalLoadTime) + " , "
                        + str(intervalLoadTime/intervalLedgers) + " , "
                        + str(intervalLedgers/intervalLoadTime) + " , "
                        + str(intervalTxns/intervalLoadTime) + " , "
                        + str(intervalObjs/intervalLoadTime))
                    # print("Total Aggregate: [ledgers, txns, objects, elapsedTime, ledgersPerSec, avgLoadTime, txPerSec, objsPerSec]")
                    # print(str(totalLedgers) + " , "
                    #     + str(totalTxns) + " , "
                    #     + str(totalObjs) + " , "
                    #     + str(end-start) + " , "
                    #     + str(ledgersPerSecond) + " , "
                    #     + str(totalLoadTime/totalLedgers) + " , "
                    #     + str(totalTxns/totalTime) + " , "
                    #     + str(totalObjs/totalTime))
                if int(sequence) % interval == 0:
                    intervalTime = 0
                    intervalTxns = 0
                    intervalObjs = 0
                    intervalStart = 0
                    intervalEnd = 0
                    intervalLedgers = 0
                    intervalLoadTime = 0
        print("Total Aggregate: [ledgers, elapsedTime, ledgersPerSec, avgLoadTime, txPerSec, objsPerSec]")
        print(totalLedgers)
        print(totalLoadTime)
        print(str(totalLedgers) + " : "
            + str(end-start) + " : "
            + str(ledgersPerSecond) + " : "
            + str(totalLoadTime/totalLedgers) + " : "
            + str(totalTxns/totalTime) + " : "
            + str(totalObjs/totalTime))

parser = argparse.ArgumentParser(description='parses logs')
parser.add_argument("--filename")
parser.add_argument("--interval", default=100000)
parser.add_argument("--minTxnCount", default=0)
parser.add_argument("--account_tx", default=False)

args = parser.parse_args()

def run(args):
    if args.account_tx:
        parseAccountTx(args.filename)
    else:
        parseLogs(args.filename, int(args.interval))

run(args)
240  src/data/BackendCounters.cpp  Normal file
@@ -0,0 +1,240 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include "data/BackendCounters.hpp"

#include "util/Assert.hpp"
#include "util/prometheus/Label.hpp"
#include "util/prometheus/Prometheus.hpp"

#include <boost/json/object.hpp>

#include <chrono>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>

namespace data {

namespace {

std::vector<std::int64_t> const histogramBuckets{1, 2, 5, 10, 20, 50, 100, 200, 500, 700, 1000};

std::int64_t
durationInMillisecondsSince(std::chrono::steady_clock::time_point const startTime)
{
    return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - startTime).count();
}

}  // namespace

using namespace util::prometheus;

BackendCounters::BackendCounters()
    : tooBusyCounter_(PrometheusService::counterInt(
          "backend_too_busy_total_number",
          Labels(),
          "The total number of times the backend was too busy to process a request"
      ))
    , writeSyncCounter_(PrometheusService::counterInt(
          "backend_operations_total_number",
          Labels({Label{"operation", "write_sync"}}),
          "The total number of times the backend had to write synchronously"
      ))
    , writeSyncRetryCounter_(PrometheusService::counterInt(
          "backend_operations_total_number",
          Labels({Label{"operation", "write_sync_retry"}}),
          "The total number of times the backend had to retry a synchronous write"
      ))
    , asyncWriteCounters_{"write_async"}
    , asyncReadCounters_{"read_async"}
    , readDurationHistogram_(PrometheusService::histogramInt(
          "backend_duration_milliseconds_histogram",
          Labels({Label{"operation", "read"}}),
          histogramBuckets,
          "The duration of backend read operations including retries"
      ))
    , writeDurationHistogram_(PrometheusService::histogramInt(
          "backend_duration_milliseconds_histogram",
          Labels({Label{"operation", "write"}}),
          histogramBuckets,
          "The duration of backend write operations including retries"
      ))
{
}

BackendCounters::PtrType
BackendCounters::make()
{
    struct EnableMakeShared : public BackendCounters {};
    return std::make_shared<EnableMakeShared>();
}

void
BackendCounters::registerTooBusy()
{
    ++tooBusyCounter_.get();
}

void
BackendCounters::registerWriteSync(std::chrono::steady_clock::time_point const startTime)
{
    ++writeSyncCounter_.get();
    writeDurationHistogram_.get().observe(durationInMillisecondsSince(startTime));
}

void
BackendCounters::registerWriteSyncRetry()
{
    ++writeSyncRetryCounter_.get();
}

void
BackendCounters::registerWriteStarted()
{
    asyncWriteCounters_.registerStarted(1u);
}

void
BackendCounters::registerWriteFinished(std::chrono::steady_clock::time_point const startTime)
{
    asyncWriteCounters_.registerFinished(1u);
    auto const duration = durationInMillisecondsSince(startTime);
    writeDurationHistogram_.get().observe(duration);
}

void
BackendCounters::registerWriteRetry()
{
    asyncWriteCounters_.registerRetry(1u);
}

void
BackendCounters::registerReadStarted(std::uint64_t const count)
{
    asyncReadCounters_.registerStarted(count);
}

void
BackendCounters::registerReadFinished(std::chrono::steady_clock::time_point const startTime, std::uint64_t const count)
{
    asyncReadCounters_.registerFinished(count);
    auto const duration = durationInMillisecondsSince(startTime);
    for (std::uint64_t i = 0; i < count; ++i)
        readDurationHistogram_.get().observe(duration);
}

void
BackendCounters::registerReadRetry(std::uint64_t const count)
{
    asyncReadCounters_.registerRetry(count);
}

void
BackendCounters::registerReadError(std::uint64_t const count)
{
    asyncReadCounters_.registerError(count);
}

boost::json::object
BackendCounters::report() const
{
    boost::json::object result;
    result["too_busy"] = tooBusyCounter_.get().value();
    result["write_sync"] = writeSyncCounter_.get().value();
    result["write_sync_retry"] = writeSyncRetryCounter_.get().value();
    for (auto const& [key, value] : asyncWriteCounters_.report())
        result[key] = value;
    for (auto const& [key, value] : asyncReadCounters_.report())
        result[key] = value;
    return result;
}

BackendCounters::AsyncOperationCounters::AsyncOperationCounters(std::string name)
    : name_(std::move(name))
    , pendingCounter_(PrometheusService::gaugeInt(
          "backend_operations_current_number",
          Labels({{"operation", name_}, {"status", "pending"}}),
          "The current number of pending " + name_ + " operations"
      ))
    , completedCounter_(PrometheusService::counterInt(
          "backend_operations_total_number",
          Labels({{"operation", name_}, {"status", "completed"}}),
          "The total number of completed " + name_ + " operations"
      ))
    , retryCounter_(PrometheusService::counterInt(
          "backend_operations_total_number",
          Labels({{"operation", name_}, {"status", "retry"}}),
          "The total number of retried " + name_ + " operations"
      ))
    , errorCounter_(PrometheusService::counterInt(
          "backend_operations_total_number",
          Labels({{"operation", name_}, {"status", "error"}}),
          "The total number of errored " + name_ + " operations"
      ))
{
}

void
BackendCounters::AsyncOperationCounters::registerStarted(std::uint64_t const count)
{
    pendingCounter_.get() += count;
}

void
BackendCounters::AsyncOperationCounters::registerFinished(std::uint64_t const count)
{
    ASSERT(
        pendingCounter_.get().value() >= static_cast<std::int64_t>(count),
        "Finished operations can't be more than pending"
    );
    pendingCounter_.get() -= count;
    completedCounter_.get() += count;
}

void
BackendCounters::AsyncOperationCounters::registerRetry(std::uint64_t count)
{
    retryCounter_.get() += count;
}

void
BackendCounters::AsyncOperationCounters::registerError(std::uint64_t count)
{
    ASSERT(
        pendingCounter_.get().value() >= static_cast<std::int64_t>(count), "Error operations can't be more than pending"
    );
    pendingCounter_.get() -= count;
    errorCounter_.get() += count;
}

boost::json::object
BackendCounters::AsyncOperationCounters::report() const
{
    return boost::json::object{
        {name_ + "_pending", pendingCounter_.get().value()},
        {name_ + "_completed", completedCounter_.get().value()},
        {name_ + "_retry", retryCounter_.get().value()},
        {name_ + "_error", errorCounter_.get().value()}
    };
}

}  // namespace data
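As a reading aid (not part of the diff): a minimal sketch of how a backend implementation might drive these counters around one asynchronous read. The `counters` instance and the `succeeded` flag are hypothetical; only the BackendCounters calls come from the file above.

#include "data/BackendCounters.hpp"

#include <chrono>

void
sketchInstrumentedRead(data::BackendCounters::PtrType const& counters, bool succeeded)
{
    auto const startTime = std::chrono::steady_clock::now();
    counters->registerReadStarted();  // "pending" gauge goes up by one

    // ... the actual backend read would happen here ...

    if (succeeded) {
        // moves one operation from pending to completed and records the
        // elapsed milliseconds in the read duration histogram
        counters->registerReadFinished(startTime);
    } else {
        counters->registerReadError();  // pending goes down, error counter goes up
    }
}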
164  src/data/BackendCounters.hpp  Normal file
@@ -0,0 +1,164 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include "util/prometheus/Counter.hpp"
#include "util/prometheus/Gauge.hpp"
#include "util/prometheus/Histogram.hpp"

#include <boost/json/object.hpp>

#include <chrono>
#include <cstdint>
#include <functional>
#include <memory>
#include <string>

namespace data {

/**
 * @brief A concept for a class that can be used to count backend operations.
 */
template <typename T>
concept SomeBackendCounters = requires(T a) {
    typename T::PtrType;
    {
        a.registerTooBusy()
    } -> std::same_as<void>;
    {
        a.registerWriteSync(std::chrono::steady_clock::time_point{})
    } -> std::same_as<void>;
    {
        a.registerWriteSyncRetry()
    } -> std::same_as<void>;
    {
        a.registerWriteStarted()
    } -> std::same_as<void>;
    {
        a.registerWriteFinished(std::chrono::steady_clock::time_point{})
    } -> std::same_as<void>;
    {
        a.registerWriteRetry()
    } -> std::same_as<void>;
    {
        a.registerReadStarted(std::uint64_t{})
    } -> std::same_as<void>;
    {
        a.registerReadFinished(std::chrono::steady_clock::time_point{}, std::uint64_t{})
    } -> std::same_as<void>;
    {
        a.registerReadRetry(std::uint64_t{})
    } -> std::same_as<void>;
    {
        a.registerReadError(std::uint64_t{})
    } -> std::same_as<void>;
    {
        a.report()
    } -> std::same_as<boost::json::object>;
};

/**
 * @brief Holds statistics about the backend.
 *
 * @note This class is thread-safe.
 */
class BackendCounters {
public:
    using PtrType = std::shared_ptr<BackendCounters>;

    static PtrType
    make();

    void
    registerTooBusy();

    void
    registerWriteSync(std::chrono::steady_clock::time_point startTime);

    void
    registerWriteSyncRetry();

    void
    registerWriteStarted();

    void
    registerWriteFinished(std::chrono::steady_clock::time_point startTime);

    void
    registerWriteRetry();

    void
    registerReadStarted(std::uint64_t count = 1u);

    void
    registerReadFinished(std::chrono::steady_clock::time_point startTime, std::uint64_t count = 1u);

    void
    registerReadRetry(std::uint64_t count = 1u);

    void
    registerReadError(std::uint64_t count = 1u);

    boost::json::object
    report() const;

private:
    BackendCounters();

    class AsyncOperationCounters {
    public:
        AsyncOperationCounters(std::string name);

        void
        registerStarted(std::uint64_t count);

        void
        registerFinished(std::uint64_t count);

        void
        registerRetry(std::uint64_t count);

        void
        registerError(std::uint64_t count);

        boost::json::object
        report() const;

    private:
        std::string name_;
        std::reference_wrapper<util::prometheus::GaugeInt> pendingCounter_;
        std::reference_wrapper<util::prometheus::CounterInt> completedCounter_;
        std::reference_wrapper<util::prometheus::CounterInt> retryCounter_;
        std::reference_wrapper<util::prometheus::CounterInt> errorCounter_;
    };

    std::reference_wrapper<util::prometheus::CounterInt> tooBusyCounter_;

    std::reference_wrapper<util::prometheus::CounterInt> writeSyncCounter_;
    std::reference_wrapper<util::prometheus::CounterInt> writeSyncRetryCounter_;

    AsyncOperationCounters asyncWriteCounters_{"write_async"};
    AsyncOperationCounters asyncReadCounters_{"read_async"};

    std::reference_wrapper<util::prometheus::HistogramInt> readDurationHistogram_;
    std::reference_wrapper<util::prometheus::HistogramInt> writeDurationHistogram_;
};

}  // namespace data
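For orientation (not part of the diff): the concept above is shaped to match BackendCounters itself, so conformance can be verified at compile time with a one-line check like this sketch.

#include "data/BackendCounters.hpp"

// fails to compile if BackendCounters ever drifts out of sync with the concept
static_assert(data::SomeBackendCounters<data::BackendCounters>);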
@@ -19,12 +19,18 @@

 #pragma once

-#include <data/BackendInterface.h>
-#include <data/CassandraBackend.h>
-#include <util/config/Config.h>
-#include <util/log/Logger.h>
+#include "data/BackendInterface.hpp"
+#include "data/CassandraBackend.hpp"
+#include "data/cassandra/SettingsProvider.hpp"
+#include "util/config/Config.hpp"
+#include "util/log/Logger.hpp"

+#include <boost/algorithm/string.hpp>
+#include <boost/algorithm/string/predicate.hpp>
+
+#include <memory>
+#include <stdexcept>
+#include <string>

 namespace data {

@@ -34,10 +40,10 @@ namespace data {
  * @param config The clio config to use
  * @return A shared_ptr<BackendInterface> with the selected implementation
  */
-std::shared_ptr<BackendInterface>
+inline std::shared_ptr<BackendInterface>
 make_Backend(util::Config const& config)
 {
-    static util::Logger log{"Backend"};
+    static util::Logger const log{"Backend"};
     LOG(log.info()) << "Constructing BackendInterface";

     auto const readOnly = config.valueOr("read_only", false);
@@ -46,8 +52,7 @@ make_Backend(util::Config const& config)
     std::shared_ptr<BackendInterface> backend = nullptr;

     // TODO: retire `cassandra-new` by next release after 2.0
-    if (boost::iequals(type, "cassandra") or boost::iequals(type, "cassandra-new"))
-    {
+    if (boost::iequals(type, "cassandra") or boost::iequals(type, "cassandra-new")) {
         auto cfg = config.section("database." + type);
         backend = std::make_shared<data::cassandra::CassandraBackend>(data::cassandra::SettingsProvider{cfg}, readOnly);
     }
@@ -57,10 +62,7 @@ make_Backend(util::Config const& config)

     auto const rng = backend->hardFetchLedgerRangeNoThrow();
     if (rng)
-    {
-        backend->updateRange(rng->minSequence);
-        backend->updateRange(rng->maxSequence);
-    }
+        backend->setRange(rng->minSequence, rng->maxSequence);

     LOG(log.info()) << "Constructed BackendInterface Successfully";
     return backend;
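For context (not part of the diff): a caller-side sketch of the factory. The header name in the include is an assumption; only make_Backend and util::Config come from the hunks above.

#include "data/BackendFactory.hpp"  // assumed header name for make_Backend

#include <memory>

std::shared_ptr<data::BackendInterface>
sketchStartup(util::Config const& config)
{
    // picks the implementation named in "database.type" (currently cassandra)
    // and seeds the stored ledger range via setRange, as shown above
    return data::make_Backend(config);
}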
@@ -17,11 +17,31 @@
  */
 //==============================================================================

-#include <data/BackendInterface.h>
-#include <util/log/Logger.h>
+#include "data/BackendInterface.hpp"
+
+#include "data/Types.hpp"
+#include "util/Assert.hpp"
+#include "util/log/Logger.hpp"
+
+#include <boost/asio/spawn.hpp>
+#include <ripple/basics/base_uint.h>
+#include <ripple/basics/strHex.h>
+#include <ripple/protocol/Fees.h>
+#include <ripple/protocol/Indexes.h>
+#include <ripple/protocol/SField.h>
+#include <ripple/protocol/STLedgerEntry.h>
+#include <ripple/protocol/Serializer.h>
+
+#include <chrono>
+#include <cstddef>
+#include <cstdint>
+#include <mutex>
+#include <optional>
+#include <shared_mutex>
+#include <sstream>
+#include <string>
+#include <utility>
+#include <vector>

 // local to compilation unit loggers
 namespace {
@@ -34,8 +54,7 @@ BackendInterface::finishWrites(std::uint32_t const ledgerSequence)
 {
     LOG(gLog.debug()) << "Want finish writes for " << ledgerSequence;
     auto commitRes = doFinishWrites();
-    if (commitRes)
-    {
+    if (commitRes) {
         LOG(gLog.debug()) << "Successfully committed. Updating range now to " << ledgerSequence;
         updateRange(ledgerSequence);
     }
@@ -44,7 +63,7 @@ BackendInterface::finishWrites(std::uint32_t const ledgerSequence)
 void
 BackendInterface::writeLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob)
 {
-    assert(key.size() == sizeof(ripple::uint256));
+    ASSERT(key.size() == sizeof(ripple::uint256), "Key must be 256 bits");
     doWriteLedgerObject(std::move(key), seq, std::move(blob));
 }

@@ -59,52 +78,49 @@ std::optional<Blob>
 BackendInterface::fetchLedgerObject(
     ripple::uint256 const& key,
     std::uint32_t const sequence,
-    boost::asio::yield_context yield) const
+    boost::asio::yield_context yield
+) const
 {
     auto obj = cache_.get(key, sequence);
-    if (obj)
-    {
+    if (obj) {
         LOG(gLog.trace()) << "Cache hit - " << ripple::strHex(key);
         return *obj;
     }
-    else
-    {
-        LOG(gLog.trace()) << "Cache miss - " << ripple::strHex(key);
-        auto dbObj = doFetchLedgerObject(key, sequence, yield);
-        if (!dbObj)
-            LOG(gLog.trace()) << "Missed cache and missed in db";
-        else
-            LOG(gLog.trace()) << "Missed cache but found in db";
-        return dbObj;
-    }
+
+    LOG(gLog.trace()) << "Cache miss - " << ripple::strHex(key);
+    auto dbObj = doFetchLedgerObject(key, sequence, yield);
+    if (!dbObj) {
+        LOG(gLog.trace()) << "Missed cache and missed in db";
+    } else {
+        LOG(gLog.trace()) << "Missed cache but found in db";
+    }
+    return dbObj;
 }

 std::vector<Blob>
 BackendInterface::fetchLedgerObjects(
     std::vector<ripple::uint256> const& keys,
     std::uint32_t const sequence,
-    boost::asio::yield_context yield) const
+    boost::asio::yield_context yield
+) const
 {
     std::vector<Blob> results;
     results.resize(keys.size());
     std::vector<ripple::uint256> misses;
-    for (size_t i = 0; i < keys.size(); ++i)
-    {
+    for (size_t i = 0; i < keys.size(); ++i) {
         auto obj = cache_.get(keys[i], sequence);
-        if (obj)
+        if (obj) {
             results[i] = *obj;
-        else
+        } else {
             misses.push_back(keys[i]);
+        }
     }
     LOG(gLog.trace()) << "Cache hits = " << keys.size() - misses.size() << " - cache misses = " << misses.size();

-    if (misses.size())
-    {
+    if (!misses.empty()) {
         auto objs = doFetchLedgerObjects(misses, sequence, yield);
-        for (size_t i = 0, j = 0; i < results.size(); ++i)
-        {
-            if (results[i].size() == 0)
-            {
+        for (size_t i = 0, j = 0; i < results.size(); ++i) {
+            if (results[i].empty()) {
                 results[i] = objs[j];
                 ++j;
             }
@@ -113,18 +129,21 @@ BackendInterface::fetchLedgerObjects(

     return results;
 }

 // Fetches the successor to key/index
 std::optional<ripple::uint256>
 BackendInterface::fetchSuccessorKey(
     ripple::uint256 key,
     std::uint32_t const ledgerSequence,
-    boost::asio::yield_context yield) const
+    boost::asio::yield_context yield
+) const
 {
     auto succ = cache_.getSuccessor(key, ledgerSequence);
-    if (succ)
+    if (succ) {
         LOG(gLog.trace()) << "Cache hit - " << ripple::strHex(key);
-    else
+    } else {
         LOG(gLog.trace()) << "Cache miss - " << ripple::strHex(key);
+    }
     return succ ? succ->key : doFetchSuccessorKey(key, ledgerSequence, yield);
 }

@@ -132,11 +151,11 @@ std::optional<LedgerObject>
 BackendInterface::fetchSuccessorObject(
     ripple::uint256 key,
     std::uint32_t const ledgerSequence,
-    boost::asio::yield_context yield) const
+    boost::asio::yield_context yield
+) const
 {
     auto succ = fetchSuccessorKey(key, ledgerSequence, yield);
-    if (succ)
-    {
+    if (succ) {
         auto obj = fetchLedgerObject(*succ, ledgerSequence, yield);
         if (!obj)
             return {{*succ, {}}};
@@ -151,12 +170,13 @@ BackendInterface::fetchBookOffers(
     ripple::uint256 const& book,
     std::uint32_t const ledgerSequence,
     std::uint32_t const limit,
-    boost::asio::yield_context yield) const
+    boost::asio::yield_context yield
+) const
 {
     // TODO try to speed this up. This can take a few seconds. The goal is
     // to get it down to a few hundred milliseconds.
     BookOffersPage page;
-    const ripple::uint256 bookEnd = ripple::getQualityNext(book);
+    ripple::uint256 const bookEnd = ripple::getQualityNext(book);
     ripple::uint256 uTipIndex = book;
     std::vector<ripple::uint256> keys;
     auto getMillis = [](auto diff) { return std::chrono::duration_cast<std::chrono::milliseconds>(diff).count(); };
@@ -165,34 +185,32 @@ BackendInterface::fetchBookOffers(
     std::uint32_t numPages = 0;
     long succMillis = 0;
     long pageMillis = 0;
-    while (keys.size() < limit)
-    {
+    while (keys.size() < limit) {
         auto mid1 = std::chrono::system_clock::now();
         auto offerDir = fetchSuccessorObject(uTipIndex, ledgerSequence, yield);
         auto mid2 = std::chrono::system_clock::now();
         numSucc++;
         succMillis += getMillis(mid2 - mid1);
-        if (!offerDir || offerDir->key >= bookEnd)
-        {
+        if (!offerDir || offerDir->key >= bookEnd) {
             LOG(gLog.trace()) << "offerDir.has_value() " << offerDir.has_value() << " breaking";
             break;
         }
         uTipIndex = offerDir->key;
-        while (keys.size() < limit)
-        {
+        while (keys.size() < limit) {
             ++numPages;
-            ripple::STLedgerEntry sle{ripple::SerialIter{offerDir->blob.data(), offerDir->blob.size()}, offerDir->key};
+            ripple::STLedgerEntry const sle{
+                ripple::SerialIter{offerDir->blob.data(), offerDir->blob.size()}, offerDir->key
+            };
             auto indexes = sle.getFieldV256(ripple::sfIndexes);
             keys.insert(keys.end(), indexes.begin(), indexes.end());
             auto next = sle.getFieldU64(ripple::sfIndexNext);
-            if (!next)
-            {
+            if (next == 0u) {
                 LOG(gLog.trace()) << "Next is empty. breaking";
                 break;
             }
             auto nextKey = ripple::keylet::page(uTipIndex, next);
             auto nextDir = fetchLedgerObject(nextKey.key, ledgerSequence, yield);
-            assert(nextDir);
+            ASSERT(nextDir.has_value(), "Next dir must exist");
             offerDir->blob = *nextDir;
             offerDir->key = nextKey.key;
         }
@@ -201,11 +219,10 @@ BackendInterface::fetchBookOffers(
     }
     auto mid = std::chrono::system_clock::now();
     auto objs = fetchLedgerObjects(keys, ledgerSequence, yield);
-    for (size_t i = 0; i < keys.size() && i < limit; ++i)
-    {
+    for (size_t i = 0; i < keys.size() && i < limit; ++i) {
         LOG(gLog.trace()) << "Key = " << ripple::strHex(keys[i]) << " blob = " << ripple::strHex(objs[i])
                           << " ledgerSequence = " << ledgerSequence;
-        assert(objs[i].size());
+        ASSERT(!objs[i].empty(), "Ledger object can't be empty");
         page.offers.push_back({keys[i], objs[i]});
     }
     auto end = std::chrono::system_clock::now();
@@ -231,19 +248,40 @@ BackendInterface::hardFetchLedgerRange() const
 std::optional<LedgerRange>
 BackendInterface::fetchLedgerRange() const
 {
-    std::shared_lock lck(rngMtx_);
+    std::shared_lock const lck(rngMtx_);
     return range;
 }

 void
 BackendInterface::updateRange(uint32_t newMax)
 {
-    std::scoped_lock lck(rngMtx_);
-    assert(!range || newMax >= range->maxSequence);
-    if (!range)
+    std::scoped_lock const lck(rngMtx_);
+
+    ASSERT(
+        !range || newMax >= range->maxSequence,
+        "Range shouldn't exist yet or newMax should be greater. newMax = {}, range->maxSequence = {}",
+        newMax,
+        range->maxSequence
+    );
+
+    if (!range) {
         range = {newMax, newMax};
-    else
+    } else {
         range->maxSequence = newMax;
+    }
 }

+void
+BackendInterface::setRange(uint32_t min, uint32_t max, bool force)
+{
+    std::scoped_lock const lck(rngMtx_);
+
+    if (!force) {
+        ASSERT(min <= max, "Range min must be less than or equal to max");
+        ASSERT(not range.has_value(), "Range was already set");
+    }
+
+    range = {min, max};
+}

 LedgerPage
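A small sketch (not part of the diff) of the intended split between the new setRange and the existing updateRange; the `backend`, `rng` and `newLedgerSeq` names are hypothetical stand-ins for the caller's state.

void
sketchRangeTracking(data::BackendInterface& backend, data::LedgerRange const& rng, uint32_t newLedgerSeq)
{
    backend.setRange(rng.minSequence, rng.maxSequence);  // once, when the stored range is first learned
    backend.updateRange(newLedgerSeq);                   // thereafter, per committed ledger; must not move backwards
}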
@@ -252,41 +290,39 @@ BackendInterface::fetchLedgerPage(
     std::uint32_t const ledgerSequence,
     std::uint32_t const limit,
     bool outOfOrder,
-    boost::asio::yield_context yield) const
+    boost::asio::yield_context yield
+) const
 {
     LedgerPage page;

     std::vector<ripple::uint256> keys;
     bool reachedEnd = false;
-    while (keys.size() < limit && !reachedEnd)
-    {
-        ripple::uint256 const& curCursor = keys.size() ? keys.back() : cursor ? *cursor : firstKey;
+    while (keys.size() < limit && !reachedEnd) {
+        ripple::uint256 const& curCursor = !keys.empty() ? keys.back() : (cursor ? *cursor : firstKey);
         std::uint32_t const seq = outOfOrder ? range->maxSequence : ledgerSequence;
         auto succ = fetchSuccessorKey(curCursor, seq, yield);
-        if (!succ)
+        if (!succ) {
             reachedEnd = true;
-        else
-            keys.push_back(std::move(*succ));
+        } else {
+            keys.push_back(*succ);
+        }
     }

     auto objects = fetchLedgerObjects(keys, ledgerSequence, yield);
-    for (size_t i = 0; i < objects.size(); ++i)
-    {
-        if (objects[i].size())
-            page.objects.push_back({std::move(keys[i]), std::move(objects[i])});
-        else if (!outOfOrder)
-        {
+    for (size_t i = 0; i < objects.size(); ++i) {
+        if (!objects[i].empty()) {
+            page.objects.push_back({keys[i], std::move(objects[i])});
+        } else if (!outOfOrder) {
             LOG(gLog.error()) << "Deleted or non-existent object in successor table. key = " << ripple::strHex(keys[i])
                               << " - seq = " << ledgerSequence;
             std::stringstream msg;
-            for (size_t j = 0; j < objects.size(); ++j)
-            {
+            for (size_t j = 0; j < objects.size(); ++j) {
                 msg << " - " << ripple::strHex(keys[j]);
             }
             LOG(gLog.error()) << msg.str();
         }
     }
-    if (keys.size() && !reachedEnd)
+    if (!keys.empty() && !reachedEnd)
         page.cursor = keys.back();

     return page;
@@ -300,23 +336,50 @@ BackendInterface::fetchFees(std::uint32_t const seq, boost::asio::yield_context
     auto key = ripple::keylet::fees().key;
     auto bytes = fetchLedgerObject(key, seq, yield);

-    if (!bytes)
-    {
+    if (!bytes) {
         LOG(gLog.error()) << "Could not find fees";
         return {};
     }

     ripple::SerialIter it(bytes->data(), bytes->size());
-    ripple::SLE sle{it, key};
+    ripple::SLE const sle{it, key};

-    if (sle.getFieldIndex(ripple::sfBaseFee) != -1)
-        fees.base = sle.getFieldU64(ripple::sfBaseFee);
-
-    if (sle.getFieldIndex(ripple::sfReserveBase) != -1)
-        fees.reserve = sle.getFieldU32(ripple::sfReserveBase);
-
-    if (sle.getFieldIndex(ripple::sfReserveIncrement) != -1)
-        fees.increment = sle.getFieldU32(ripple::sfReserveIncrement);
+    // XRPFees amendment introduced new fields for fees calculations.
+    // New fields are set and the old fields are removed via `set_fees` tx.
+    // Fallback to old fields if `set_fees` was not yet used to update the fields on this tx.
+    auto hasNewFields = false;
+    {
+        auto const baseFeeXRP = sle.at(~ripple::sfBaseFeeDrops);
+        auto const reserveBaseXRP = sle.at(~ripple::sfReserveBaseDrops);
+        auto const reserveIncrementXRP = sle.at(~ripple::sfReserveIncrementDrops);
+
+        if (baseFeeXRP)
+            fees.base = baseFeeXRP->xrp();
+
+        if (reserveBaseXRP)
+            fees.reserve = reserveBaseXRP->xrp();
+
+        if (reserveIncrementXRP)
+            fees.increment = reserveIncrementXRP->xrp();
+
+        hasNewFields = baseFeeXRP || reserveBaseXRP || reserveIncrementXRP;
+    }
+
+    if (not hasNewFields) {
+        // Fallback to old fields
+        auto const baseFee = sle.at(~ripple::sfBaseFee);
+        auto const reserveBase = sle.at(~ripple::sfReserveBase);
+        auto const reserveIncrement = sle.at(~ripple::sfReserveIncrement);
+
+        if (baseFee)
+            fees.base = baseFee.value();
+
+        if (reserveBase)
+            fees.reserve = reserveBase.value();
+
+        if (reserveIncrement)
+            fees.increment = reserveIncrement.value();
+    }

     return fees;
 }
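For context (not part of the diff): a coroutine-side sketch of calling the reworked fetchFees. The `backend`, `ctx` and `seq` names are assumptions; XRPAmount::drops() is used to print the base fee, which keeps the sketch independent of stream-operator support.

#include <boost/asio/spawn.hpp>

#include <iostream>

void
sketchPrintFees(data::BackendInterface const& backend, boost::asio::io_context& ctx, std::uint32_t seq)
{
    boost::asio::spawn(ctx, [&backend, seq](boost::asio::yield_context yield) {
        // picks the XRPFees-style *Drops fields when present, otherwise the legacy fields
        if (auto const fees = backend.fetchFees(seq, yield); fees)
            std::cout << "base fee drops = " << fees->base.drops() << '\n';
    });
}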
@@ -19,35 +19,48 @@

 #pragma once

-#include <data/DBHelpers.h>
-#include <data/LedgerCache.h>
-#include <data/Types.h>
-#include <util/config/Config.h>
-#include <util/log/Logger.h>
+#include "data/DBHelpers.hpp"
+#include "data/LedgerCache.hpp"
+#include "data/Types.hpp"
+#include "util/log/Logger.hpp"

-#include <ripple/protocol/Fees.h>
-#include <ripple/protocol/LedgerHeader.h>
+#include <boost/asio/executor_work_guard.hpp>
+#include <boost/asio/io_context.hpp>
+#include <boost/asio/spawn.hpp>
+#include <boost/json.hpp>
+#include <boost/json/object.hpp>
+#include <boost/utility/result_of.hpp>
+#include <ripple/basics/base_uint.h>
+#include <ripple/protocol/AccountID.h>
+#include <ripple/protocol/Fees.h>
+#include <ripple/protocol/LedgerHeader.h>

+#include <chrono>
+#include <cstddef>
+#include <cstdint>
+#include <exception>
+#include <optional>
+#include <shared_mutex>
+#include <string>
+#include <thread>
+#include <type_traits>
+#include <vector>

 namespace data {

 /**
  * @brief Represents a database timeout error.
  */
-class DatabaseTimeout : public std::exception
-{
+class DatabaseTimeout : public std::exception {
 public:
-    const char*
+    char const*
     what() const throw() override
     {
         return "Database read timed out. Please retry the request";
     }
 };

+static constexpr std::size_t DEFAULT_WAIT_BETWEEN_RETRY = 500;
+
 /**
  * @brief A helper function that catches DatabaseTimeout exceptions and retries indefinitely.
  *
@@ -58,18 +71,14 @@ public:
  */
 template <class FnType>
 auto
-retryOnTimeout(FnType func, size_t waitMs = 500)
+retryOnTimeout(FnType func, size_t waitMs = DEFAULT_WAIT_BETWEEN_RETRY)
 {
-    static util::Logger log{"Backend"};
+    static util::Logger const log{"Backend"};

-    while (true)
-    {
-        try
-        {
+    while (true) {
+        try {
             return func();
-        }
-        catch (DatabaseTimeout const&)
-        {
+        } catch (DatabaseTimeout const&) {
             LOG(log.error()) << "Database request timed out. Sleeping and retrying ... ";
             std::this_thread::sleep_for(std::chrono::milliseconds(waitMs));
         }
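A usage sketch (not part of the diff) of the retry helper, assuming a `backend` reference and a live yield context as in the surrounding code.

std::optional<data::LedgerRange>
sketchFetchRangeWithRetry(data::BackendInterface const& backend, boost::asio::yield_context yield)
{
    // re-invokes the lambda, sleeping DEFAULT_WAIT_BETWEEN_RETRY ms between
    // attempts, until it returns without throwing DatabaseTimeout
    return data::retryOnTimeout([&] { return backend.hardFetchLedgerRange(yield); });
}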
@@ -90,17 +99,15 @@ synchronous(FnType&& func)
     boost::asio::io_context ctx;

     using R = typename boost::result_of<FnType(boost::asio::yield_context)>::type;
-    if constexpr (!std::is_same<R, void>::value)
-    {
+    if constexpr (!std::is_same_v<R, void>) {
         R res;
-        boost::asio::spawn(
-            ctx, [_ = boost::asio::make_work_guard(ctx), &func, &res](auto yield) { res = func(yield); });
+        boost::asio::spawn(ctx, [_ = boost::asio::make_work_guard(ctx), &func, &res](auto yield) {
+            res = func(yield);
+        });

         ctx.run();
         return res;
-    }
-    else
-    {
+    } else {
         boost::asio::spawn(ctx, [_ = boost::asio::make_work_guard(ctx), &func](auto yield) { func(yield); });
         ctx.run();
     }
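A usage sketch (not part of the diff) of the helper above: running one coroutine-based call from plain blocking code. The `backend` and `seq` parameters are hypothetical.

std::optional<ripple::LedgerHeader>
sketchBlockingFetch(data::BackendInterface const& backend, std::uint32_t seq)
{
    // spins up a private io_context, runs the coroutine to completion,
    // and hands the result back to the blocking caller
    return data::synchronous([&](boost::asio::yield_context yield) {
        return backend.fetchLedgerBySequence(seq, yield);
    });
}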
@@ -123,8 +130,7 @@ synchronousAndRetryOnTimeout(FnType&& func)
 /**
  * @brief The interface to the database used by Clio.
  */
-class BackendInterface
-{
+class BackendInterface {
 protected:
     mutable std::shared_mutex rngMtx_;
     std::optional<LedgerRange> range;
@@ -161,7 +167,7 @@ public:
      * @return The ripple::LedgerHeader if found; nullopt otherwise
      */
     virtual std::optional<ripple::LedgerHeader>
-    fetchLedgerBySequence(std::uint32_t const sequence, boost::asio::yield_context yield) const = 0;
+    fetchLedgerBySequence(std::uint32_t sequence, boost::asio::yield_context yield) const = 0;

     /**
      * @brief Fetches a specific ledger by hash.
@@ -198,6 +204,16 @@ public:
     void
     updateRange(uint32_t newMax);

+    /**
+     * @brief Sets the range of sequences that are stored in the DB.
+     *
+     * @param min The new minimum sequence available
+     * @param max The new maximum sequence available
+     * @param force If set to true, the range will be set even if it's already set
+     */
+    void
+    setRange(uint32_t min, uint32_t max, bool force = false);
+
     /**
      * @brief Fetch the fees from a specific ledger sequence.
      *
@@ -206,7 +222,7 @@ public:
      * @return ripple::Fees if fees are found; nullopt otherwise
      */
     std::optional<ripple::Fees>
-    fetchFees(std::uint32_t const seq, boost::asio::yield_context yield) const;
+    fetchFees(std::uint32_t seq, boost::asio::yield_context yield) const;

     /**
      * @brief Fetches a specific transaction.
@@ -241,10 +257,11 @@ public:
     virtual TransactionsAndCursor
     fetchAccountTransactions(
         ripple::AccountID const& account,
-        std::uint32_t const limit,
+        std::uint32_t limit,
         bool forward,
         std::optional<TransactionsCursor> const& cursor,
-        boost::asio::yield_context yield) const = 0;
+        boost::asio::yield_context yield
+    ) const = 0;

     /**
      * @brief Fetches all transactions from a specific ledger.
@@ -254,7 +271,7 @@ public:
      * @return Results as a vector of TransactionAndMetadata
      */
     virtual std::vector<TransactionAndMetadata>
-    fetchAllTransactionsInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const = 0;
+    fetchAllTransactionsInLedger(std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;

     /**
      * @brief Fetches all transaction hashes from a specific ledger.
@@ -264,7 +281,7 @@ public:
      * @return Hashes as ripple::uint256 in a vector
      */
     virtual std::vector<ripple::uint256>
-    fetchAllTransactionHashesInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const = 0;
+    fetchAllTransactionHashesInLedger(std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;

     /**
      * @brief Fetches a specific NFT.
@@ -275,8 +292,7 @@ public:
      * @return NFT object on success; nullopt otherwise
      */
     virtual std::optional<NFT>
-    fetchNFT(ripple::uint256 const& tokenID, std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
-        const = 0;
+    fetchNFT(ripple::uint256 const& tokenID, std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;

     /**
      * @brief Fetches all transactions for a specific NFT.
@@ -291,10 +307,33 @@ public:
     virtual TransactionsAndCursor
     fetchNFTTransactions(
         ripple::uint256 const& tokenID,
-        std::uint32_t const limit,
-        bool const forward,
+        std::uint32_t limit,
+        bool forward,
         std::optional<TransactionsCursor> const& cursorIn,
-        boost::asio::yield_context yield) const = 0;
+        boost::asio::yield_context yield
+    ) const = 0;
+
+    /**
+     * @brief Fetches all NFTs issued by a given address.
+     *
+     * @param issuer AccountID of issuer you wish to query.
+     * @param taxon Optional taxon of NFTs by which you wish to filter.
+     * @param limit Paging limit.
+     * @param cursorIn Optional cursor to allow us to pick up from where we
+     * last left off.
+     * @param yield Currently executing coroutine.
+     * @return std::vector<NFT> of NFTs issued by this account, or
+     * this issuer/taxon combination if taxon is passed and an optional marker
+     */
+    virtual NFTsAndCursor
+    fetchNFTsByIssuer(
+        ripple::AccountID const& issuer,
+        std::optional<std::uint32_t> const& taxon,
+        std::uint32_t ledgerSequence,
+        std::uint32_t limit,
+        std::optional<ripple::uint256> const& cursorIn,
+        boost::asio::yield_context yield
+    ) const = 0;

     /**
      * @brief Fetches a specific ledger object.
@@ -308,7 +347,7 @@ public:
      * @return The object as a Blob on success; nullopt otherwise
      */
     std::optional<Blob>
-    fetchLedgerObject(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context yield) const;
+    fetchLedgerObject(ripple::uint256 const& key, std::uint32_t sequence, boost::asio::yield_context yield) const;

     /**
      * @brief Fetches all ledger objects by their keys.
@@ -324,8 +363,9 @@ public:
     std::vector<Blob>
     fetchLedgerObjects(
         std::vector<ripple::uint256> const& keys,
-        std::uint32_t const sequence,
-        boost::asio::yield_context yield) const;
+        std::uint32_t sequence,
+        boost::asio::yield_context yield
+    ) const;

     /**
      * @brief The database-specific implementation for fetching a ledger object.
@@ -336,8 +376,7 @@ public:
      * @return The object as a Blob on success; nullopt otherwise
      */
     virtual std::optional<Blob>
-    doFetchLedgerObject(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context yield)
-        const = 0;
+    doFetchLedgerObject(ripple::uint256 const& key, std::uint32_t sequence, boost::asio::yield_context yield) const = 0;

     /**
      * @brief The database-specific implementation for fetching ledger objects.
@@ -350,8 +389,9 @@ public:
     virtual std::vector<Blob>
     doFetchLedgerObjects(
         std::vector<ripple::uint256> const& keys,
-        std::uint32_t const sequence,
-        boost::asio::yield_context yield) const = 0;
+        std::uint32_t sequence,
+        boost::asio::yield_context yield
+    ) const = 0;

     /**
      * @brief Returns the difference between ledgers.
@@ -361,7 +401,7 @@ public:
      * @return A vector of LedgerObject representing the diff
      */
     virtual std::vector<LedgerObject>
-    fetchLedgerDiff(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const = 0;
+    fetchLedgerDiff(std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;

     /**
      * @brief Fetches a page of ledger objects, ordered by key/index.
@@ -376,10 +416,11 @@ public:
     LedgerPage
     fetchLedgerPage(
         std::optional<ripple::uint256> const& cursor,
-        std::uint32_t const ledgerSequence,
-        std::uint32_t const limit,
+        std::uint32_t ledgerSequence,
+        std::uint32_t limit,
         bool outOfOrder,
-        boost::asio::yield_context yield) const;
+        boost::asio::yield_context yield
+    ) const;

     /**
      * @brief Fetches the successor object.
@@ -390,8 +431,7 @@ public:
      * @return The successor on success; nullopt otherwise
      */
     std::optional<LedgerObject>
-    fetchSuccessorObject(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
-        const;
+    fetchSuccessorObject(ripple::uint256 key, std::uint32_t ledgerSequence, boost::asio::yield_context yield) const;

     /**
      * @brief Fetches the successor key.
@@ -405,7 +445,7 @@ public:
      * @return The successor key on success; nullopt otherwise
      */
     std::optional<ripple::uint256>
-    fetchSuccessorKey(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const;
+    fetchSuccessorKey(ripple::uint256 key, std::uint32_t ledgerSequence, boost::asio::yield_context yield) const;

     /**
      * @brief Database-specific implementation of fetching the successor key
@@ -416,8 +456,7 @@ public:
      * @return The successor on success; nullopt otherwise
      */
     virtual std::optional<ripple::uint256>
-    doFetchSuccessorKey(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
-        const = 0;
+    doFetchSuccessorKey(ripple::uint256 key, std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;

     /**
      * @brief Fetches book offers.
@@ -431,9 +470,10 @@ public:
     BookOffersPage
     fetchBookOffers(
         ripple::uint256 const& book,
-        std::uint32_t const ledgerSequence,
-        std::uint32_t const limit,
-        boost::asio::yield_context yield) const;
+        std::uint32_t ledgerSequence,
+        std::uint32_t limit,
+        boost::asio::yield_context yield
+    ) const;

     /**
      * @brief Synchronously fetches the ledger range from DB.
@@ -478,7 +518,7 @@ public:
      * @param blob The data to write
      */
     virtual void
-    writeLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob);
+    writeLedgerObject(std::string&& key, std::uint32_t seq, std::string&& blob);

     /**
      * @brief Writes a new transaction.
@@ -492,10 +532,11 @@ public:
     virtual void
     writeTransaction(
         std::string&& hash,
-        std::uint32_t const seq,
-        std::uint32_t const date,
+        std::uint32_t seq,
+        std::uint32_t date,
         std::string&& transaction,
-        std::string&& metadata) = 0;
+        std::string&& metadata
+    ) = 0;

     /**
      * @brief Writes NFTs to the database.
@@ -503,7 +544,7 @@ public:
      * @param data A vector of NFTsData objects representing the NFTs
      */
     virtual void
-    writeNFTs(std::vector<NFTsData>&& data) = 0;
+    writeNFTs(std::vector<NFTsData> const& data) = 0;

     /**
      * @brief Write a new set of account transactions.
@@ -511,7 +552,7 @@ public:
      * @param data A vector of AccountTransactionsData objects representing the account transactions
      */
     virtual void
-    writeAccountTransactions(std::vector<AccountTransactionsData>&& data) = 0;
+    writeAccountTransactions(std::vector<AccountTransactionsData> data) = 0;

     /**
      * @brief Write NFTs transactions.
@@ -519,7 +560,7 @@ public:
      * @param data A vector of NFTTransactionsData objects
      */
     virtual void
-    writeNFTTransactions(std::vector<NFTTransactionsData>&& data) = 0;
+    writeNFTTransactions(std::vector<NFTTransactionsData> const& data) = 0;

     /**
      * @brief Write a new successor.
@@ -529,7 +570,7 @@ public:
      * @param successor The successor data to write
      */
     virtual void
-    writeSuccessor(std::string&& key, std::uint32_t const seq, std::string&& successor) = 0;
+    writeSuccessor(std::string&& key, std::uint32_t seq, std::string&& successor) = 0;

     /**
      * @brief Starts a write transaction with the DB. No-op for cassandra.
@@ -548,7 +589,7 @@ public:
      * @return true on success; false otherwise
      */
     bool
-    finishWrites(std::uint32_t const ledgerSequence);
+    finishWrites(std::uint32_t ledgerSequence);

     /**
      * @return true if database is overwhelmed; false otherwise
@@ -556,9 +597,15 @@ public:
     virtual bool
     isTooBusy() const = 0;

+    /**
+     * @return json object containing backend usage statistics
+     */
+    virtual boost::json::object
+    stats() const = 0;
+
 private:
     virtual void
-    doWriteLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob) = 0;
+    doWriteLedgerObject(std::string&& key, std::uint32_t seq, std::string&& blob) = 0;

     virtual bool
     doFinishWrites() = 0;
@@ -19,19 +19,42 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <data/BackendInterface.h>
|
||||
#include <data/cassandra/Concepts.h>
|
||||
#include <data/cassandra/Handle.h>
|
||||
#include <data/cassandra/Schema.h>
|
||||
#include <data/cassandra/SettingsProvider.h>
|
||||
#include <data/cassandra/impl/ExecutionStrategy.h>
|
||||
#include <util/LedgerUtils.h>
|
||||
#include <util/Profiler.h>
|
||||
#include <util/log/Logger.h>
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/DBHelpers.hpp"
|
||||
#include "data/Types.hpp"
|
||||
#include "data/cassandra/Concepts.hpp"
|
||||
#include "data/cassandra/Handle.hpp"
|
||||
#include "data/cassandra/Schema.hpp"
|
||||
#include "data/cassandra/SettingsProvider.hpp"
|
||||
#include "data/cassandra/Types.hpp"
|
||||
#include "data/cassandra/impl/ExecutionStrategy.hpp"
|
||||
#include "util/Assert.hpp"
|
||||
#include "util/LedgerUtils.hpp"
|
||||
#include "util/Profiler.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
|
||||
#include <boost/asio/spawn.hpp>
|
||||
#include <boost/json/object.hpp>
|
||||
#include <cassandra.h>
|
||||
#include <ripple/basics/Blob.h>
|
||||
#include <ripple/basics/base_uint.h>
|
||||
#include <ripple/basics/strHex.h>
|
||||
#include <ripple/protocol/AccountID.h>
|
||||
#include <ripple/protocol/LedgerHeader.h>
|
||||
#include <ripple/protocol/nft.h>
|
||||
#include <boost/asio/spawn.hpp>
|
||||
|
||||
#include <atomic>
|
||||
#include <chrono>
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
#include <iterator>
|
||||
#include <limits>
|
||||
#include <optional>
|
||||
#include <stdexcept>
|
||||
#include <string>
|
||||
#include <tuple>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
namespace data::cassandra {
|
||||
|
||||
@@ -44,8 +67,7 @@ namespace data::cassandra {
|
||||
* @tparam ExecutionStrategyType The execution strategy type to use
|
||||
*/
|
||||
template <SomeSettingsProvider SettingsProviderType, SomeExecutionStrategy ExecutionStrategyType>
|
||||
class BasicCassandraBackend : public BackendInterface
|
||||
{
|
||||
class BasicCassandraBackend : public BackendInterface {
|
||||
util::Logger log_{"Backend"};
|
||||
|
||||
SettingsProviderType settingsProvider_;
|
||||
@@ -73,12 +95,10 @@ public:
|
||||
if (auto const res = handle_.connect(); not res)
|
||||
throw std::runtime_error("Could not connect to Cassandra: " + res.error());
|
||||
|
||||
if (not readOnly)
|
||||
{
|
||||
if (auto const res = handle_.execute(schema_.createKeyspace); not res)
|
||||
{
|
||||
// on datastax, creation of keyspaces can be configured to only be done thru the admin interface.
|
||||
// this does not mean that the keyspace does not already exist tho.
|
||||
if (not readOnly) {
|
||||
if (auto const res = handle_.execute(schema_.createKeyspace); not res) {
|
||||
// on datastax, creation of keyspaces can be configured to only be done thru the admin
|
||||
// interface. this does not mean that the keyspace does not already exist tho.
|
||||
if (res.error().code() != CASS_ERROR_SERVER_UNAUTHORIZED)
|
||||
throw std::runtime_error("Could not create keyspace: " + res.error());
|
||||
}
|
||||
@@ -87,12 +107,9 @@ public:
|
||||
throw std::runtime_error("Could not create schema: " + res.error());
|
||||
}
|
||||
|
||||
try
|
||||
{
|
||||
try {
|
||||
schema_.prepareStatements(handle_);
|
||||
}
|
||||
catch (std::runtime_error const& ex)
|
||||
{
|
||||
} catch (std::runtime_error const& ex) {
|
||||
LOG(log_.error()) << "Failed to prepare the statements: " << ex.what() << "; readOnly: " << readOnly;
|
||||
throw;
|
||||
}
|
||||
@@ -106,28 +123,26 @@ public:
|
||||
std::uint32_t const limit,
|
||||
bool forward,
|
||||
std::optional<TransactionsCursor> const& cursorIn,
|
||||
boost::asio::yield_context yield) const override
|
||||
boost::asio::yield_context yield
|
||||
) const override
|
||||
{
|
||||
auto rng = fetchLedgerRange();
|
||||
if (!rng)
|
||||
return {{}, {}};
|
||||
|
||||
Statement statement = [this, forward, &account]() {
|
||||
Statement const statement = [this, forward, &account]() {
|
||||
if (forward)
|
||||
return schema_->selectAccountTxForward.bind(account);
|
||||
else
|
||||
return schema_->selectAccountTx.bind(account);
|
||||
|
||||
return schema_->selectAccountTx.bind(account);
|
||||
}();
|
||||
|
||||
auto cursor = cursorIn;
|
||||
if (cursor)
|
||||
{
|
||||
if (cursor) {
|
||||
statement.bindAt(1, cursor->asTuple());
|
||||
LOG(log_.debug()) << "account = " << ripple::strHex(account) << " tuple = " << cursor->ledgerSequence
|
||||
<< cursor->transactionIndex;
|
||||
}
|
||||
else
|
||||
{
|
||||
} else {
|
||||
auto const seq = forward ? rng->minSequence : rng->maxSequence;
|
||||
auto const placeHolder = forward ? 0u : std::numeric_limits<std::uint32_t>::max();
|
||||
|
||||
@@ -142,8 +157,7 @@ public:
|
||||
statement.bindAt(2, Limit{limit});
|
||||
auto const res = executor_.read(yield, statement);
|
||||
auto const& results = res.value();
|
||||
if (not results.hasRows())
|
||||
{
|
||||
if (not results.hasRows()) {
|
||||
LOG(log_.debug()) << "No rows returned";
|
||||
return {};
|
||||
}
|
||||
@@ -152,11 +166,9 @@ public:
|
||||
auto numRows = results.numRows();
|
||||
LOG(log_.info()) << "num_rows = " << numRows;
|
||||
|
||||
for (auto [hash, data] : extract<ripple::uint256, std::tuple<uint32_t, uint32_t>>(results))
|
||||
{
|
||||
for (auto [hash, data] : extract<ripple::uint256, std::tuple<uint32_t, uint32_t>>(results)) {
|
||||
hashes.push_back(hash);
|
||||
if (--numRows == 0)
|
||||
{
|
||||
if (--numRows == 0) {
|
||||
LOG(log_.debug()) << "Setting cursor";
|
||||
cursor = data;
|
||||
|
||||
@@ -170,8 +182,7 @@ public:
|
||||
auto const txns = fetchTransactions(hashes, yield);
|
||||
LOG(log_.debug()) << "Txns = " << txns.size();
|
||||
|
||||
if (txns.size() == limit)
|
||||
{
|
||||
if (txns.size() == limit) {
|
||||
LOG(log_.debug()) << "Returning cursor";
|
||||
return {txns, cursor};
|
||||
}
|
||||
@@ -185,13 +196,11 @@ public:
// wait for other threads to finish their writes
executor_.sync();

if (!range)
{
if (!range) {
executor_.writeSync(schema_->updateLedgerRange, ledgerSequence_, false, ledgerSequence_);
}

if (not executeSyncUpdate(schema_->updateLedgerRange.bind(ledgerSequence_, true, ledgerSequence_ - 1)))
{
if (not executeSyncUpdate(schema_->updateLedgerRange.bind(ledgerSequence_, true, ledgerSequence_ - 1))) {
LOG(log_.warn()) << "Update failed for ledger " << ledgerSequence_;
return false;
}
@@ -213,10 +222,8 @@ public:
std::optional<std::uint32_t>
fetchLatestLedgerSequence(boost::asio::yield_context yield) const override
{
if (auto const res = executor_.read(yield, schema_->selectLatestLedger); res)
{
if (auto const& result = res.value(); result)
{
if (auto const res = executor_.read(yield, schema_->selectLatestLedger); res) {
if (auto const& result = res.value(); result) {
if (auto const maybeValue = result.template get<uint32_t>(); maybeValue)
return maybeValue;

@@ -225,9 +232,7 @@ public:
}

LOG(log_.error()) << "Could not fetch latest ledger - no result";
}
else
{
} else {
LOG(log_.error()) << "Could not fetch latest ledger: " << res.error();
}

@@ -238,12 +243,9 @@ public:
fetchLedgerBySequence(std::uint32_t const sequence, boost::asio::yield_context yield) const override
{
auto const res = executor_.read(yield, schema_->selectLedgerBySeq, sequence);
if (res)
{
if (auto const& result = res.value(); result)
{
if (auto const maybeValue = result.template get<std::vector<unsigned char>>(); maybeValue)
{
if (res) {
if (auto const& result = res.value(); result) {
if (auto const maybeValue = result.template get<std::vector<unsigned char>>(); maybeValue) {
return util::deserializeHeader(ripple::makeSlice(*maybeValue));
}

@@ -252,9 +254,7 @@ public:
}

LOG(log_.error()) << "Could not fetch ledger by sequence - no result";
}
else
{
} else {
LOG(log_.error()) << "Could not fetch ledger by sequence: " << res.error();
}

@@ -264,10 +264,8 @@ public:
std::optional<ripple::LedgerHeader>
fetchLedgerByHash(ripple::uint256 const& hash, boost::asio::yield_context yield) const override
{
if (auto const res = executor_.read(yield, schema_->selectLedgerByHash, hash); res)
{
if (auto const& result = res.value(); result)
{
if (auto const res = executor_.read(yield, schema_->selectLedgerByHash, hash); res) {
if (auto const& result = res.value(); result) {
if (auto const maybeValue = result.template get<uint32_t>(); maybeValue)
return fetchLedgerBySequence(*maybeValue, yield);

@@ -276,9 +274,7 @@ public:
}

LOG(log_.error()) << "Could not fetch ledger by hash - no result";
}
else
{
} else {
LOG(log_.error()) << "Could not fetch ledger by hash: " << res.error();
}

@@ -288,11 +284,10 @@ public:
std::optional<LedgerRange>
hardFetchLedgerRange(boost::asio::yield_context yield) const override
{
if (auto const res = executor_.read(yield, schema_->selectLedgerRange); res)
{
auto const res = executor_.read(yield, schema_->selectLedgerRange);
if (res) {
auto const& results = res.value();
if (not results.hasRows())
{
if (not results.hasRows()) {
LOG(log_.debug()) << "Could not fetch ledger range - no rows";
return std::nullopt;
}
@@ -302,12 +297,12 @@ public:
// least use tuple<int, int>?
LedgerRange range;
std::size_t idx = 0;
for (auto [seq] : extract<uint32_t>(results))
{
if (idx == 0)
for (auto [seq] : extract<uint32_t>(results)) {
if (idx == 0) {
range.maxSequence = range.minSequence = seq;
else if (idx == 1)
} else if (idx == 1) {
range.maxSequence = seq;
}

++idx;
}
@@ -319,10 +314,7 @@ public:
<< range.maxSequence;
return range;
}
else
{
LOG(log_.error()) << "Could not fetch ledger range: " << res.error();
}
LOG(log_.error()) << "Could not fetch ledger range: " << res.error();

return std::nullopt;
}
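hardFetchLedgerRange above decodes a two-row convention: the query yields the minimum sequence first and, when a second row exists, the maximum. A hedged illustration of that decoding (the row values are made up):

    std::vector<uint32_t> const rows = {42u, 1337u};     // as returned by selectLedgerRange
    LedgerRange range;
    range.maxSequence = range.minSequence = rows.at(0);  // a single row means min == max
    if (rows.size() > 1)
        range.maxSequence = rows.at(1);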
@@ -341,15 +333,13 @@ public:
auto start = std::chrono::system_clock::now();
auto const res = executor_.read(yield, schema_->selectAllTransactionHashesInLedger, ledgerSequence);

if (not res)
{
if (not res) {
LOG(log_.error()) << "Could not fetch all transaction hashes: " << res.error();
return {};
}

auto const& result = res.value();
if (not result.hasRows())
{
if (not result.hasRows()) {
LOG(log_.error()) << "Could not fetch all transaction hashes - no rows; ledger = "
<< std::to_string(ledgerSequence);
return {};
@@ -375,8 +365,7 @@ public:
if (not res)
return std::nullopt;

if (auto const maybeRow = res->template get<uint32_t, ripple::AccountID, bool>(); maybeRow)
{
if (auto const maybeRow = res->template get<uint32_t, ripple::AccountID, bool>(); maybeRow) {
auto [seq, owner, isBurned] = *maybeRow;
auto result = std::make_optional<NFT>(tokenID, seq, owner, isBurned);

@@ -392,8 +381,7 @@ public:
// even though we are returning a blank URI, the NFT might have had
// one.
auto uriRes = executor_.read(yield, schema_->selectNFTURI, tokenID, ledgerSequence);
if (uriRes)
{
if (uriRes) {
if (auto const maybeUri = uriRes->template get<ripple::Blob>(); maybeUri)
result->uri = *maybeUri;
}
@@ -411,28 +399,26 @@ public:
std::uint32_t const limit,
bool const forward,
std::optional<TransactionsCursor> const& cursorIn,
boost::asio::yield_context yield) const override
boost::asio::yield_context yield
) const override
{
auto rng = fetchLedgerRange();
if (!rng)
return {{}, {}};

Statement statement = [this, forward, &tokenID]() {
Statement const statement = [this, forward, &tokenID]() {
if (forward)
return schema_->selectNFTTxForward.bind(tokenID);
else
return schema_->selectNFTTx.bind(tokenID);

return schema_->selectNFTTx.bind(tokenID);
}();

auto cursor = cursorIn;
if (cursor)
{
if (cursor) {
statement.bindAt(1, cursor->asTuple());
LOG(log_.debug()) << "token_id = " << ripple::strHex(tokenID) << " tuple = " << cursor->ledgerSequence
<< cursor->transactionIndex;
}
else
{
} else {
auto const seq = forward ? rng->minSequence : rng->maxSequence;
auto const placeHolder = forward ? 0 : std::numeric_limits<std::uint32_t>::max();

@@ -445,8 +431,7 @@ public:

auto const res = executor_.read(yield, statement);
auto const& results = res.value();
if (not results.hasRows())
{
if (not results.hasRows()) {
LOG(log_.debug()) << "No rows returned";
return {};
}
@@ -455,11 +440,9 @@ public:
auto numRows = results.numRows();
LOG(log_.info()) << "num_rows = " << numRows;

for (auto [hash, data] : extract<ripple::uint256, std::tuple<uint32_t, uint32_t>>(results))
{
for (auto [hash, data] : extract<ripple::uint256, std::tuple<uint32_t, uint32_t>>(results)) {
hashes.push_back(hash);
if (--numRows == 0)
{
if (--numRows == 0) {
LOG(log_.debug()) << "Setting cursor";
cursor = data;

@@ -473,8 +456,7 @@ public:
auto const txns = fetchTransactions(hashes, yield);
LOG(log_.debug()) << "NFT Txns = " << txns.size();

if (txns.size() == limit)
{
if (txns.size() == limit) {
LOG(log_.debug()) << "Returning cursor";
return {txns, cursor};
}
@@ -482,25 +464,107 @@ public:
return {txns, {}};
}

NFTsAndCursor
fetchNFTsByIssuer(
ripple::AccountID const& issuer,
std::optional<std::uint32_t> const& taxon,
std::uint32_t const ledgerSequence,
std::uint32_t const limit,
std::optional<ripple::uint256> const& cursorIn,
boost::asio::yield_context yield
) const override
{
NFTsAndCursor ret;

Statement const idQueryStatement = [&taxon, &issuer, &cursorIn, &limit, this]() {
if (taxon.has_value()) {
auto r = schema_->selectNFTIDsByIssuerTaxon.bind(issuer);
r.bindAt(1, *taxon);
r.bindAt(2, cursorIn.value_or(ripple::uint256(0)));
r.bindAt(3, Limit{limit});
return r;
}

auto r = schema_->selectNFTIDsByIssuer.bind(issuer);
r.bindAt(
1,
std::make_tuple(
cursorIn.has_value() ? ripple::nft::toUInt32(ripple::nft::getTaxon(*cursorIn)) : 0,
cursorIn.value_or(ripple::uint256(0))
)
);
r.bindAt(2, Limit{limit});
return r;
}();

// Query for all the NFTs issued by the account, potentially filtered by the taxon
auto const res = executor_.read(yield, idQueryStatement);

auto const& idQueryResults = res.value();
if (not idQueryResults.hasRows()) {
LOG(log_.debug()) << "No rows returned";
return {};
}

std::vector<ripple::uint256> nftIDs;
for (auto const [nftID] : extract<ripple::uint256>(idQueryResults))
nftIDs.push_back(nftID);

if (nftIDs.empty())
return ret;

if (nftIDs.size() == limit)
ret.cursor = nftIDs.back();

std::vector<Statement> selectNFTStatements;
selectNFTStatements.reserve(nftIDs.size());

std::transform(
std::cbegin(nftIDs),
std::cend(nftIDs),
std::back_inserter(selectNFTStatements),
[&](auto const& nftID) { return schema_->selectNFT.bind(nftID, ledgerSequence); }
);

auto const nftInfos = executor_.readEach(yield, selectNFTStatements);

std::vector<Statement> selectNFTURIStatements;
selectNFTURIStatements.reserve(nftIDs.size());

std::transform(
std::cbegin(nftIDs),
std::cend(nftIDs),
std::back_inserter(selectNFTURIStatements),
[&](auto const& nftID) { return schema_->selectNFTURI.bind(nftID, ledgerSequence); }
);

auto const nftUris = executor_.readEach(yield, selectNFTURIStatements);

for (auto i = 0u; i < nftIDs.size(); i++) {
if (auto const maybeRow = nftInfos[i].template get<uint32_t, ripple::AccountID, bool>(); maybeRow) {
auto [seq, owner, isBurned] = *maybeRow;
NFT nft(nftIDs[i], seq, owner, isBurned);
if (auto const maybeUri = nftUris[i].template get<ripple::Blob>(); maybeUri)
nft.uri = *maybeUri;
ret.nfts.push_back(nft);
}
}
return ret;
}
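fetchNFTsByIssuer resolves the page in two fan-outs: one selectNFT statement and one selectNFTURI statement per NFT ID, both executed via readEach so results stay index-aligned with nftIDs. The same pattern in isolation (a sketch reusing the names above; `keys` is assumed):

    std::vector<Statement> statements;
    statements.reserve(keys.size());
    for (auto const& key : keys)
        statements.push_back(schema_->selectNFT.bind(key, ledgerSequence));
    auto const results = executor_.readEach(yield, statements);  // results[i] pairs with keys[i]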
std::optional<Blob>
doFetchLedgerObject(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context yield)
const override
{
LOG(log_.debug()) << "Fetching ledger object for seq " << sequence << ", key = " << ripple::to_string(key);
if (auto const res = executor_.read(yield, schema_->selectObject, key, sequence); res)
{
if (auto const result = res->template get<Blob>(); result)
{
if (auto const res = executor_.read(yield, schema_->selectObject, key, sequence); res) {
if (auto const result = res->template get<Blob>(); result) {
if (result->size())
return *result;
}
else
{
} else {
LOG(log_.debug()) << "Could not fetch ledger object - no rows";
}
}
else
{
} else {
LOG(log_.error()) << "Could not fetch ledger object: " << res.error();
}

@@ -510,20 +574,14 @@ public:
std::optional<TransactionAndMetadata>
fetchTransaction(ripple::uint256 const& hash, boost::asio::yield_context yield) const override
{
if (auto const res = executor_.read(yield, schema_->selectTransaction, hash); res)
{
if (auto const maybeValue = res->template get<Blob, Blob, uint32_t, uint32_t>(); maybeValue)
{
if (auto const res = executor_.read(yield, schema_->selectTransaction, hash); res) {
if (auto const maybeValue = res->template get<Blob, Blob, uint32_t, uint32_t>(); maybeValue) {
auto [transaction, meta, seq, date] = *maybeValue;
return std::make_optional<TransactionAndMetadata>(transaction, meta, seq, date);
}
else
{
LOG(log_.debug()) << "Could not fetch transaction - no rows";
}
}
else
{

LOG(log_.debug()) << "Could not fetch transaction - no rows";
} else {
LOG(log_.error()) << "Could not fetch transaction: " << res.error();
}

@@ -534,21 +592,15 @@ public:
doFetchSuccessorKey(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
const override
{
if (auto const res = executor_.read(yield, schema_->selectSuccessor, key, ledgerSequence); res)
{
if (auto const result = res->template get<ripple::uint256>(); result)
{
if (auto const res = executor_.read(yield, schema_->selectSuccessor, key, ledgerSequence); res) {
if (auto const result = res->template get<ripple::uint256>(); result) {
if (*result == lastKey)
return std::nullopt;
return *result;
}
else
{
LOG(log_.debug()) << "Could not fetch successor - no rows";
}
}
else
{

LOG(log_.debug()) << "Could not fetch successor - no rows";
} else {
LOG(log_.error()) << "Could not fetch successor: " << res.error();
}

@@ -558,7 +610,7 @@ public:
std::vector<TransactionAndMetadata>
fetchTransactions(std::vector<ripple::uint256> const& hashes, boost::asio::yield_context yield) const override
{
if (hashes.size() == 0)
if (hashes.empty())
return {};

auto const numHashes = hashes.size();
@@ -571,9 +623,11 @@ public:
auto const timeDiff = util::timed([this, yield, &results, &hashes, &statements]() {
// TODO: seems like a job for "hash IN (list of hashes)" instead?
std::transform(
std::cbegin(hashes), std::cend(hashes), std::back_inserter(statements), [this](auto const& hash) {
return schema_->selectTransaction.bind(hash);
});
std::cbegin(hashes),
std::cend(hashes),
std::back_inserter(statements),
[this](auto const& hash) { return schema_->selectTransaction.bind(hash); }
);

auto const entries = executor_.readEach(yield, statements);
std::transform(
@@ -583,12 +637,13 @@ public:
[](auto const& res) -> TransactionAndMetadata {
if (auto const maybeRow = res.template get<Blob, Blob, uint32_t, uint32_t>(); maybeRow)
return *maybeRow;
else
return {};
});

return {};
}
);
});

assert(numHashes == results.size());
ASSERT(numHashes == results.size(), "Number of hashes and results must match");
LOG(log_.debug()) << "Fetched " << numHashes << " transactions from Cassandra in " << timeDiff
<< " milliseconds";
return results;
@@ -598,9 +653,10 @@ public:
doFetchLedgerObjects(
std::vector<ripple::uint256> const& keys,
std::uint32_t const sequence,
boost::asio::yield_context yield) const override
boost::asio::yield_context yield
) const override
{
if (keys.size() == 0)
if (keys.empty())
return {};

auto const numKeys = keys.size();
@@ -614,18 +670,24 @@ public:

// TODO: seems like a job for "key IN (list of keys)" instead?
std::transform(
std::cbegin(keys), std::cend(keys), std::back_inserter(statements), [this, &sequence](auto const& key) {
return schema_->selectObject.bind(key, sequence);
});
std::cbegin(keys),
std::cend(keys),
std::back_inserter(statements),
[this, &sequence](auto const& key) { return schema_->selectObject.bind(key, sequence); }
);

auto const entries = executor_.readEach(yield, statements);
std::transform(
std::cbegin(entries), std::cend(entries), std::back_inserter(results), [](auto const& res) -> Blob {
std::cbegin(entries),
std::cend(entries),
std::back_inserter(results),
[](auto const& res) -> Blob {
if (auto const maybeValue = res.template get<Blob>(); maybeValue)
return *maybeValue;
else
return {};
});

return {};
}
);

LOG(log_.trace()) << "Fetched " << numKeys << " objects";
return results;
@@ -636,15 +698,13 @@ public:
{
auto const [keys, timeDiff] = util::timed([this, &ledgerSequence, yield]() -> std::vector<ripple::uint256> {
auto const res = executor_.read(yield, schema_->selectDiff, ledgerSequence);
if (not res)
{
if (not res) {
LOG(log_.error()) << "Could not fetch ledger diff: " << res.error() << "; ledger = " << ledgerSequence;
return {};
}

auto const& results = res.value();
if (not results)
{
if (not results) {
LOG(log_.error()) << "Could not fetch ledger diff - no rows; ledger = " << ledgerSequence;
return {};
}
@@ -674,7 +734,8 @@ public:
std::back_inserter(results),
[](auto const& key, auto const& obj) {
return LedgerObject{key, obj};
});
}
);

return results;
}
@@ -695,44 +756,46 @@ public:
{
LOG(log_.trace()) << "Writing successor. key = " << key.size() << " bytes. "
<< " seq = " << std::to_string(seq) << " successor = " << successor.size() << " bytes.";
assert(key.size() != 0);
assert(successor.size() != 0);
ASSERT(!key.empty(), "Key must not be empty");
ASSERT(!successor.empty(), "Successor must not be empty");

executor_.write(schema_->insertSuccessor, std::move(key), seq, std::move(successor));
}

void
writeAccountTransactions(std::vector<AccountTransactionsData>&& data) override
writeAccountTransactions(std::vector<AccountTransactionsData> data) override
{
std::vector<Statement> statements;
statements.reserve(data.size() * 10); // assume 10 transactions avg

for (auto& record : data)
{
for (auto& record : data) {
std::transform(
std::begin(record.accounts),
std::end(record.accounts),
std::back_inserter(statements),
[this, &record](auto&& account) {
return schema_->insertAccountTx.bind(
std::move(account),
std::forward<decltype(account)>(account),
std::make_tuple(record.ledgerSequence, record.transactionIndex),
record.txHash);
});
record.txHash
);
}
);
}

executor_.write(std::move(statements));
}

void
writeNFTTransactions(std::vector<NFTTransactionsData>&& data) override
writeNFTTransactions(std::vector<NFTTransactionsData> const& data) override
{
std::vector<Statement> statements;
statements.reserve(data.size());

std::transform(std::cbegin(data), std::cend(data), std::back_inserter(statements), [this](auto const& record) {
return schema_->insertNFTTx.bind(
record.tokenID, std::make_tuple(record.ledgerSequence, record.transactionIndex), record.txHash);
record.tokenID, std::make_tuple(record.ledgerSequence, record.transactionIndex), record.txHash
);
});

executor_.write(std::move(statements));
@@ -744,39 +807,42 @@ public:
std::uint32_t const seq,
std::uint32_t const date,
std::string&& transaction,
std::string&& metadata) override
std::string&& metadata
) override
{
LOG(log_.trace()) << "Writing txn to cassandra";

executor_.write(schema_->insertLedgerTransaction, seq, hash);
executor_.write(
schema_->insertTransaction, std::move(hash), seq, date, std::move(transaction), std::move(metadata));
schema_->insertTransaction, std::move(hash), seq, date, std::move(transaction), std::move(metadata)
);
}

void
writeNFTs(std::vector<NFTsData>&& data) override
writeNFTs(std::vector<NFTsData> const& data) override
{
std::vector<Statement> statements;
statements.reserve(data.size() * 3);

for (NFTsData const& record : data)
{
for (NFTsData const& record : data) {
statements.push_back(
schema_->insertNFT.bind(record.tokenID, record.ledgerSequence, record.owner, record.isBurned));
schema_->insertNFT.bind(record.tokenID, record.ledgerSequence, record.owner, record.isBurned)
);

// If `uri` is set (and it can be set to an empty uri), we know this
// is a net-new NFT. That is, this NFT has not been seen before by
// us _OR_ it is in the extreme edge case of a re-minted NFT ID with
// the same NFT ID as an already-burned token. In this case, we need
// to record the URI and link to the issuer_nf_tokens table.
if (record.uri)
{
if (record.uri) {
statements.push_back(schema_->insertIssuerNFT.bind(
ripple::nft::getIssuer(record.tokenID),
static_cast<uint32_t>(ripple::nft::getTaxon(record.tokenID)),
record.tokenID));
record.tokenID
));
statements.push_back(
schema_->insertNFTURI.bind(record.tokenID, record.ledgerSequence, record.uri.value()));
schema_->insertNFTURI.bind(record.tokenID, record.ledgerSequence, record.uri.value())
);
}
}

@@ -796,20 +862,24 @@ public:
return executor_.isTooBusy();
}

boost::json::object
stats() const override
{
return executor_.stats();
}

private:
bool
executeSyncUpdate(Statement statement)
{
auto const res = executor_.writeSync(statement);
auto maybeSuccess = res->template get<bool>();
if (not maybeSuccess)
{
if (not maybeSuccess) {
LOG(log_.error()) << "executeSyncUpdate - error getting result - no row";
return false;
}

if (not maybeSuccess.value())
{
if (not maybeSuccess.value()) {
LOG(log_.warn()) << "Update failed. Checking if DB state is what we expect";

// error may indicate that another writer wrote something.
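executeSyncUpdate pairs with the conditional updateLedgerRange statement (`IF sequence IN (?, null)`): Cassandra reports the outcome of such a lightweight transaction as a boolean `[applied]` column, which is what `res->template get<bool>()` decodes here. A hedged sketch of the happy path:

    // Sketch only: a missing row means no result at all, while a present
    // `false` means another writer won the conditional update.
    if (auto const applied = res->template get<bool>(); applied && *applied) {
        // this writer's update took effect
    }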
@@ -20,24 +20,32 @@
/** @file */
#pragma once

#include <ripple/basics/Log.h>
#include <ripple/basics/StringUtilities.h>
#include <ripple/protocol/SField.h>
#include <ripple/protocol/STAccount.h>
#include <ripple/protocol/TxMeta.h>
#include "util/Assert.hpp"

#include <boost/container/flat_set.hpp>
#include <ripple/basics/Blob.h>
#include <ripple/basics/Log.h>
#include <ripple/basics/StringUtilities.h>
#include <ripple/basics/base_uint.h>
#include <ripple/protocol/AccountID.h>
#include <ripple/protocol/SField.h>
#include <ripple/protocol/STAccount.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <ripple/protocol/Serializer.h>
#include <ripple/protocol/TxMeta.h>

#include <data/Types.h>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>

/**
* @brief Struct used to keep track of what to write to account_transactions/account_tx tables.
*/
struct AccountTransactionsData
{
struct AccountTransactionsData {
boost::container::flat_set<ripple::AccountID> accounts;
std::uint32_t ledgerSequence;
std::uint32_t transactionIndex;
std::uint32_t ledgerSequence{};
std::uint32_t transactionIndex{};
ripple::uint256 txHash;

AccountTransactionsData(ripple::TxMeta& meta, ripple::uint256 const& txHash)
@@ -56,8 +64,7 @@ struct AccountTransactionsData
*
* Gets written to nf_token_transactions table and the like.
*/
struct NFTTransactionsData
{
struct NFTTransactionsData {
ripple::uint256 tokenID;
std::uint32_t ledgerSequence;
std::uint32_t transactionIndex;
@@ -74,8 +81,7 @@ struct NFTTransactionsData
*
* Gets written to nf_tokens table and the like.
*/
struct NFTsData
{
struct NFTsData {
ripple::uint256 tokenID;
std::uint32_t ledgerSequence;

@@ -107,7 +113,8 @@ struct NFTsData
ripple::uint256 const& tokenID,
ripple::AccountID const& owner,
ripple::Blob const& uri,
ripple::TxMeta const& meta)
ripple::TxMeta const& meta
)
: tokenID(tokenID), ledgerSequence(meta.getLgrSeq()), transactionIndex(meta.getIndex()), owner(owner), uri(uri)
{
}
@@ -133,7 +140,8 @@ struct NFTsData
ripple::uint256 const& tokenID,
std::uint32_t const ledgerSequence,
ripple::AccountID const& owner,
ripple::Blob const& uri)
ripple::Blob const& uri
)
: tokenID(tokenID), ledgerSequence(ledgerSequence), owner(owner), uri(uri)
{
}
@@ -149,8 +157,11 @@ template <class T>
inline bool
isOffer(T const& object)
{
short offer_bytes = (object[1] << 8) | object[2];
return offer_bytes == 0x006f;
static constexpr short OFFER_OFFSET = 0x006f;
static constexpr short SHIFT = 8;

short offer_bytes = (object[1] << SHIFT) | object[2];
return offer_bytes == OFFER_OFFSET;
}
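For readers unfamiliar with the encoding: bytes 1 and 2 of the serialized ledger entry carry its type code, and 0x006f is the offer type ('o' in ASCII). A worked example with hypothetical bytes:

    unsigned char const object[] = {0x11, 0x00, 0x6f};       // illustrative prefix only
    short const offer_bytes = (object[1] << 8) | object[2];  // (0x00 << 8) | 0x6f == 0x006f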
/**
@@ -179,8 +190,9 @@ template <class T>
inline bool
isDirNode(T const& object)
{
short spaceKey = (object.data()[1] << 8) | object.data()[2];
return spaceKey == 0x0064;
static constexpr short DIR_NODE_SPACE_KEY = 0x0064;
short const spaceKey = (object.data()[1] << 8) | object.data()[2];
return spaceKey == DIR_NODE_SPACE_KEY;
}

/**
@@ -212,7 +224,7 @@ inline ripple::uint256
getBook(T const& offer)
{
ripple::SerialIter it{offer.data(), offer.size()};
ripple::SLE sle{it, {}};
ripple::SLE const sle{it, {}};
ripple::uint256 book = sle.getFieldH256(ripple::sfBookDirectory);

return book;
@@ -228,10 +240,12 @@ template <class T>
inline ripple::uint256
getBookBase(T const& key)
{
assert(key.size() == ripple::uint256::size());
static constexpr size_t KEY_SIZE = 24;

ASSERT(key.size() == ripple::uint256::size(), "Invalid key size {}", key.size());

ripple::uint256 ret;
for (size_t i = 0; i < 24; ++i)
for (size_t i = 0; i < KEY_SIZE; ++i)
ret.data()[i] = key.data()[i];

return ret;
@@ -246,7 +260,7 @@ getBookBase(T const& key)
inline std::string
uint256ToString(ripple::uint256 const& input)
{
return {reinterpret_cast<const char*>(input.data()), input.size()};
return {reinterpret_cast<char const*>(input.data()), ripple::uint256::size()};
}

/** @brief The ripple epoch start timestamp. Midnight on 1st January 2000. */
@@ -17,17 +17,39 @@
*/
//==============================================================================

#include <data/LedgerCache.h>
#include "data/LedgerCache.hpp"

#include "data/Types.hpp"
#include "util/Assert.hpp"

#include <ripple/basics/base_uint.h>

#include <cstddef>
#include <cstdint>
#include <mutex>
#include <optional>
#include <shared_mutex>
#include <vector>

namespace data {

uint32_t
LedgerCache::latestLedgerSequence() const
{
std::shared_lock lck{mtx_};
std::shared_lock const lck{mtx_};
return latestSeq_;
}

void
LedgerCache::waitUntilCacheContainsSeq(uint32_t seq)
{
if (disabled_)
return;
std::unique_lock lock(mtx_);
cv_.wait(lock, [this, seq] { return latestSeq_ >= seq; });
return;
}

void
LedgerCache::update(std::vector<LedgerObject> const& objs, uint32_t seq, bool isBackground)
{
@@ -35,32 +57,32 @@ LedgerCache::update(std::vector<LedgerObject> const& objs, uint32_t seq, bool is
return;

{
std::scoped_lock lck{mtx_};
if (seq > latestSeq_)
{
assert(seq == latestSeq_ + 1 || latestSeq_ == 0);
std::scoped_lock const lck{mtx_};
if (seq > latestSeq_) {
ASSERT(
seq == latestSeq_ + 1 || latestSeq_ == 0,
"New sequence must be either next or first. seq = {}, latestSeq_ = {}",
seq,
latestSeq_
);
latestSeq_ = seq;
}
for (auto const& obj : objs)
{
if (obj.blob.size())
{
if (isBackground && deletes_.count(obj.key))
for (auto const& obj : objs) {
if (!obj.blob.empty()) {
if (isBackground && deletes_.contains(obj.key))
continue;

auto& e = map_[obj.key];
if (seq > e.seq)
{
if (seq > e.seq) {
e = {seq, obj.blob};
}
}
else
{
} else {
map_.erase(obj.key);
if (!full_ && !isBackground)
deletes_.insert(obj.key);
}
}
cv_.notify_all();
}
}

@@ -69,14 +91,14 @@ LedgerCache::getSuccessor(ripple::uint256 const& key, uint32_t seq) const
{
if (!full_)
return {};
std::shared_lock lck{mtx_};
successorReqCounter_++;
std::shared_lock const lck{mtx_};
++successorReqCounter_.get();
if (seq != latestSeq_)
return {};
auto e = map_.upper_bound(key);
if (e == map_.end())
return {};
successorHitCounter_++;
++successorHitCounter_.get();
return {{e->first, e->second.blob}};
}

@@ -85,7 +107,7 @@ LedgerCache::getPredecessor(ripple::uint256 const& key, uint32_t seq) const
{
if (!full_)
return {};
std::shared_lock lck{mtx_};
std::shared_lock const lck{mtx_};
if (seq != latestSeq_)
return {};
auto e = map_.lower_bound(key);
@@ -98,16 +120,16 @@ LedgerCache::getPredecessor(ripple::uint256 const& key, uint32_t seq) const
std::optional<Blob>
LedgerCache::get(ripple::uint256 const& key, uint32_t seq) const
{
std::shared_lock lck{mtx_};
std::shared_lock const lck{mtx_};
if (seq > latestSeq_)
return {};
objectReqCounter_++;
++objectReqCounter_.get();
auto e = map_.find(key);
if (e == map_.end())
return {};
if (seq < e->second.seq)
return {};
objectHitCounter_++;
++objectHitCounter_.get();
return {e->second.blob};
}

@@ -124,7 +146,7 @@ LedgerCache::setFull()
return;

full_ = true;
std::scoped_lock lck{mtx_};
std::scoped_lock const lck{mtx_};
deletes_.clear();
}

@@ -137,24 +159,24 @@ LedgerCache::isFull() const
size_t
LedgerCache::size() const
{
std::shared_lock lck{mtx_};
std::shared_lock const lck{mtx_};
return map_.size();
}

float
LedgerCache::getObjectHitRate() const
{
if (!objectReqCounter_)
if (objectReqCounter_.get().value() == 0u)
return 1;
return static_cast<float>(objectHitCounter_) / objectReqCounter_;
return static_cast<float>(objectHitCounter_.get().value()) / objectReqCounter_.get().value();
}

float
LedgerCache::getSuccessorHitRate() const
{
if (!successorReqCounter_)
if (successorReqCounter_.get().value() == 0u)
return 1;
return static_cast<float>(successorHitCounter_) / successorReqCounter_;
return static_cast<float>(successorHitCounter_.get().value()) / successorReqCounter_.get().value();
}

} // namespace data
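The hit-rate getters divide two monotonic Prometheus counters, returning 1 by convention when nothing has been requested yet. Illustrative arithmetic only (no new API assumed):

    // 80 cache hits out of 100 requests -> 0.8; zero requests -> 1.0 by convention
    uint64_t const requests = 100, hits = 80;
    float const rate = requests == 0 ? 1.0f : static_cast<float>(hits) / requests;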
@@ -19,13 +19,23 @@

#pragma once

#include "data/Types.hpp"
#include "util/prometheus/Counter.hpp"
#include "util/prometheus/Label.hpp"
#include "util/prometheus/Prometheus.hpp"

#include <ripple/basics/base_uint.h>
#include <ripple/basics/hardened_hash.h>
#include <data/Types.h>

#include <atomic>
#include <condition_variable>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <map>
#include <mutex>
#include <optional>
#include <shared_mutex>
#include <utility>
#include <unordered_set>
#include <vector>

namespace data {
@@ -33,25 +43,38 @@ namespace data {
/**
* @brief Cache for an entire ledger.
*/
class LedgerCache
{
struct CacheEntry
{
class LedgerCache {
struct CacheEntry {
uint32_t seq = 0;
Blob blob;
};

// counters for fetchLedgerObject(s) hit rate
mutable std::atomic_uint32_t objectReqCounter_ = 0;
mutable std::atomic_uint32_t objectHitCounter_ = 0;
std::reference_wrapper<util::prometheus::CounterInt> objectReqCounter_{PrometheusService::counterInt(
"ledger_cache_counter_total_number",
util::prometheus::Labels({{"type", "request"}, {"fetch", "ledger_objects"}}),
"LedgerCache statistics"
)};
std::reference_wrapper<util::prometheus::CounterInt> objectHitCounter_{PrometheusService::counterInt(
"ledger_cache_counter_total_number",
util::prometheus::Labels({{"type", "cache_hit"}, {"fetch", "ledger_objects"}})
)};

// counters for fetchSuccessorKey hit rate
mutable std::atomic_uint32_t successorReqCounter_ = 0;
mutable std::atomic_uint32_t successorHitCounter_ = 0;
std::reference_wrapper<util::prometheus::CounterInt> successorReqCounter_{PrometheusService::counterInt(
"ledger_cache_counter_total_number",
util::prometheus::Labels({{"type", "request"}, {"fetch", "successor_key"}}),
"ledgerCache"
)};
std::reference_wrapper<util::prometheus::CounterInt> successorHitCounter_{PrometheusService::counterInt(
"ledger_cache_counter_total_number",
util::prometheus::Labels({{"type", "cache_hit"}, {"fetch", "successor_key"}})
)};

std::map<ripple::uint256, CacheEntry> map_;

mutable std::shared_mutex mtx_;
std::condition_variable_any cv_;
uint32_t latestSeq_ = 0;
std::atomic_bool full_ = false;
std::atomic_bool disabled_ = false;
@@ -68,7 +91,7 @@ public:
* @param isBackground Should be set to true when writing old data from a background thread
*/
void
update(std::vector<LedgerObject> const& blobs, uint32_t seq, bool isBackground = false);
update(std::vector<LedgerObject> const& objs, uint32_t seq, bool isBackground = false);

/**
* @brief Fetch a cached object by its key and sequence number.
@@ -149,6 +172,9 @@ public:
*/
float
getSuccessorHitRate() const;

void
waitUntilCacheContainsSeq(uint32_t seq);
};

} // namespace data
@@ -22,8 +22,11 @@
#include <ripple/basics/base_uint.h>
#include <ripple/protocol/AccountID.h>

#include <cstdint>
#include <optional>
#include <string>
#include <string_view>
#include <tuple>
#include <utility>
#include <vector>

namespace data {
@@ -33,13 +36,12 @@ using Blob = std::vector<unsigned char>;
/**
* @brief Represents an object in the ledger.
*/
struct LedgerObject
{
struct LedgerObject {
ripple::uint256 key;
Blob blob;

bool
operator==(const LedgerObject& other) const
operator==(LedgerObject const& other) const
{
return key == other.key && blob == other.blob;
}
@@ -48,8 +50,7 @@ struct LedgerObject
/**
* @brief Represents a page of LedgerObjects.
*/
struct LedgerPage
{
struct LedgerPage {
std::vector<LedgerObject> objects;
std::optional<ripple::uint256> cursor;
};
@@ -57,8 +58,7 @@ struct LedgerPage
/**
* @brief Represents a page of book offer objects.
*/
struct BookOffersPage
{
struct BookOffersPage {
std::vector<LedgerObject> offers;
std::optional<ripple::uint256> cursor;
};
@@ -66,20 +66,15 @@ struct BookOffersPage
/**
* @brief Represents a transaction and its metadata bundled together.
*/
struct TransactionAndMetadata
{
struct TransactionAndMetadata {
Blob transaction;
Blob metadata;
std::uint32_t ledgerSequence = 0;
std::uint32_t date = 0;

TransactionAndMetadata() = default;
TransactionAndMetadata(
Blob const& transaction,
Blob const& metadata,
std::uint32_t ledgerSequence,
std::uint32_t date)
: transaction{transaction}, metadata{metadata}, ledgerSequence{ledgerSequence}, date{date}
TransactionAndMetadata(Blob transaction, Blob metadata, std::uint32_t ledgerSequence, std::uint32_t date)
: transaction{std::move(transaction)}, metadata{std::move(metadata)}, ledgerSequence{ledgerSequence}, date{date}
{
}

@@ -92,7 +87,7 @@ struct TransactionAndMetadata
}

bool
operator==(const TransactionAndMetadata& other) const
operator==(TransactionAndMetadata const& other) const
{
return transaction == other.transaction && metadata == other.metadata &&
ledgerSequence == other.ledgerSequence && date == other.date;
@@ -102,8 +97,7 @@ struct TransactionAndMetadata
/**
* @brief Represents a cursor into the transactions table.
*/
struct TransactionsCursor
{
struct TransactionsCursor {
std::uint32_t ledgerSequence = 0;
std::uint32_t transactionIndex = 0;

@@ -118,11 +112,6 @@ struct TransactionsCursor
{
}

TransactionsCursor(TransactionsCursor const&) = default;

TransactionsCursor&
operator=(TransactionsCursor const&) = default;

bool
operator==(TransactionsCursor const& other) const = default;

@@ -136,8 +125,7 @@ struct TransactionsCursor
/**
* @brief Represents a bundle of transactions with metadata and a cursor to the next page.
*/
struct TransactionsAndCursor
{
struct TransactionsAndCursor {
std::vector<TransactionAndMetadata> txns;
std::optional<TransactionsCursor> cursor;
};
@@ -145,21 +133,20 @@ struct TransactionsAndCursor
/**
* @brief Represents an NFToken.
*/
struct NFT
{
struct NFT {
ripple::uint256 tokenID;
std::uint32_t ledgerSequence;
std::uint32_t ledgerSequence{};
ripple::AccountID owner;
Blob uri;
bool isBurned;
bool isBurned{};

NFT() = default;
NFT(ripple::uint256 const& tokenID,
std::uint32_t ledgerSequence,
ripple::AccountID const& owner,
Blob const& uri,
Blob uri,
bool isBurned)
: tokenID{tokenID}, ledgerSequence{ledgerSequence}, owner{owner}, uri{uri}, isBurned{isBurned}
: tokenID{tokenID}, ledgerSequence{ledgerSequence}, owner{owner}, uri{std::move(uri)}, isBurned{isBurned}
{
}

@@ -177,11 +164,15 @@ struct NFT
}
};

struct NFTsAndCursor {
std::vector<NFT> nfts;
std::optional<ripple::uint256> cursor;
};

/**
* @brief Stores a range of sequences as a min and max pair.
*/
struct LedgerRange
{
struct LedgerRange {
std::uint32_t minSequence = 0;
std::uint32_t maxSequence = 0;
};
@@ -19,70 +19,113 @@

#pragma once

#include <data/cassandra/Types.h>
#include "data/cassandra/Types.hpp"

#include <boost/asio/io_context.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/json.hpp>
#include <boost/json/object.hpp>

#include <chrono>
#include <concepts>
#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <vector>

namespace data::cassandra {

/**
* @brief The requirements of a settings provider.
*/
// clang-format off
template <typename T>
concept SomeSettingsProvider = requires(T a) {
{ a.getSettings() } -> std::same_as<Settings>;
{ a.getKeyspace() } -> std::same_as<std::string>;
{ a.getTablePrefix() } -> std::same_as<std::optional<std::string>>;
{ a.getReplicationFactor() } -> std::same_as<uint16_t>;
{ a.getTtl() } -> std::same_as<uint16_t>;
{
a.getSettings()
} -> std::same_as<Settings>;
{
a.getKeyspace()
} -> std::same_as<std::string>;
{
a.getTablePrefix()
} -> std::same_as<std::optional<std::string>>;
{
a.getReplicationFactor()
} -> std::same_as<uint16_t>;
{
a.getTtl()
} -> std::same_as<uint16_t>;
};
// clang-format on
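A minimal model of the concept above, for illustration (`FakeSettingsProvider` is hypothetical and assumes `Settings` is default-constructible; the real provider is data::cassandra::SettingsProvider):

    struct FakeSettingsProvider {
        Settings getSettings() const { return {}; }
        std::string getKeyspace() const { return "clio"; }
        std::optional<std::string> getTablePrefix() const { return std::nullopt; }
        uint16_t getReplicationFactor() const { return 3; }
        uint16_t getTtl() const { return 0; }
    };
    static_assert(SomeSettingsProvider<FakeSettingsProvider>);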
/**
* @brief The requirements of an execution strategy.
*/
// clang-format off
template <typename T>
concept SomeExecutionStrategy = requires(
T a,
Settings settings,
Handle handle,
Statement statement,
T a,
Settings settings,
Handle handle,
Statement statement,
std::vector<Statement> statements,
PreparedStatement prepared,
boost::asio::yield_context token
) {
{ T(settings, handle) };
{ a.sync() } -> std::same_as<void>;
{ a.isTooBusy() } -> std::same_as<bool>;
{ a.writeSync(statement) } -> std::same_as<ResultOrError>;
{ a.writeSync(prepared) } -> std::same_as<ResultOrError>;
{ a.write(prepared) } -> std::same_as<void>;
{ a.write(std::move(statements)) } -> std::same_as<void>;
{ a.read(token, prepared) } -> std::same_as<ResultOrError>;
{ a.read(token, statement) } -> std::same_as<ResultOrError>;
{ a.read(token, statements) } -> std::same_as<ResultOrError>;
{ a.readEach(token, statements) } -> std::same_as<std::vector<Result>>;
{
T(settings, handle)
};
{
a.sync()
} -> std::same_as<void>;
{
a.isTooBusy()
} -> std::same_as<bool>;
{
a.writeSync(statement)
} -> std::same_as<ResultOrError>;
{
a.writeSync(prepared)
} -> std::same_as<ResultOrError>;
{
a.write(prepared)
} -> std::same_as<void>;
{
a.write(std::move(statements))
} -> std::same_as<void>;
{
a.read(token, prepared)
} -> std::same_as<ResultOrError>;
{
a.read(token, statement)
} -> std::same_as<ResultOrError>;
{
a.read(token, statements)
} -> std::same_as<ResultOrError>;
{
a.readEach(token, statements)
} -> std::same_as<std::vector<Result>>;
{
a.stats()
} -> std::same_as<boost::json::object>;
};
// clang-format on

/**
* @brief The requirements of a retry policy.
*/
// clang-format off
template <typename T>
concept SomeRetryPolicy = requires(T a, boost::asio::io_context ioc, CassandraError err, uint32_t attempt) {
{ T(ioc) };
{ a.shouldRetry(err) } -> std::same_as<bool>;
{ a.retry([](){}) } -> std::same_as<void>;
{ a.calculateDelay(attempt) } -> std::same_as<std::chrono::milliseconds>;
{
T(ioc)
};
{
a.shouldRetry(err)
} -> std::same_as<bool>;
{
a.retry([]() {})
} -> std::same_as<void>;
{
a.calculateDelay(attempt)
} -> std::same_as<std::chrono::milliseconds>;
};
// clang-format on
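One shape of calculateDelay that would satisfy the retry-policy concept above is capped exponential backoff; a sketch (illustrative only, not the policy this diff ships):

    std::chrono::milliseconds
    calculateDelay(uint32_t attempt)
    {
        // 1ms, 2ms, 4ms, ... capped at roughly one second
        return std::chrono::milliseconds{std::min(1024u, 1u << std::min(attempt, 10u))};
    }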
|
||||
} // namespace data::cassandra
|
||||
@@ -21,34 +21,38 @@
|
||||
|
||||
#include <cassandra.h>
|
||||
|
||||
#include <cstdint>
|
||||
#include <ostream>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
|
||||
namespace data::cassandra {
|
||||
|
||||
/**
|
||||
* @brief A simple container for both error message and error code.
|
||||
*/
|
||||
class CassandraError
|
||||
{
|
||||
class CassandraError {
|
||||
std::string message_;
|
||||
uint32_t code_;
|
||||
uint32_t code_{};
|
||||
|
||||
public:
|
||||
CassandraError() = default; // default constructible required by Expected
|
||||
CassandraError(std::string message, uint32_t code) : message_{message}, code_{code}
|
||||
CassandraError(std::string message, uint32_t code) : message_{std::move(message)}, code_{code}
|
||||
{
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
friend std::string
|
||||
operator+(T const& lhs, CassandraError const& rhs) requires std::is_convertible_v<T, std::string>
|
||||
operator+(T const& lhs, CassandraError const& rhs)
|
||||
requires std::is_convertible_v<T, std::string>
|
||||
{
|
||||
return lhs + rhs.message();
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
friend bool
|
||||
operator==(T const& lhs, CassandraError const& rhs) requires std::is_convertible_v<T, std::string>
|
||||
operator==(T const& lhs, CassandraError const& rhs)
|
||||
requires std::is_convertible_v<T, std::string>
|
||||
{
|
||||
return lhs == rhs.message();
|
||||
}
|
||||
@@ -91,11 +95,9 @@ public:
|
||||
bool
|
||||
isTimeout() const
|
||||
{
|
||||
if (code_ == CASS_ERROR_LIB_NO_HOSTS_AVAILABLE or code_ == CASS_ERROR_LIB_REQUEST_TIMED_OUT or
|
||||
return code_ == CASS_ERROR_LIB_NO_HOSTS_AVAILABLE or code_ == CASS_ERROR_LIB_REQUEST_TIMED_OUT or
|
||||
code_ == CASS_ERROR_SERVER_UNAVAILABLE or code_ == CASS_ERROR_SERVER_OVERLOADED or
|
||||
code_ == CASS_ERROR_SERVER_READ_TIMEOUT)
|
||||
return true;
|
||||
return false;
|
||||
code_ == CASS_ERROR_SERVER_READ_TIMEOUT;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -17,7 +17,18 @@
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include <data/cassandra/Handle.h>
|
||||
#include "data/cassandra/Handle.hpp"
|
||||
|
||||
#include "data/cassandra/Types.hpp"
|
||||
|
||||
#include <cassandra.h>
|
||||
|
||||
#include <functional>
|
||||
#include <stdexcept>
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
namespace data::cassandra {
|
||||
|
||||
@@ -88,17 +99,17 @@ std::vector<Handle::FutureType>
|
||||
Handle::asyncExecuteEach(std::vector<Statement> const& statements) const
|
||||
{
|
||||
std::vector<Handle::FutureType> futures;
|
||||
futures.reserve(statements.size());
|
||||
for (auto const& statement : statements)
|
||||
futures.push_back(cass_session_execute(session_, statement));
|
||||
futures.emplace_back(cass_session_execute(session_, statement));
|
||||
return futures;
|
||||
}
|
||||
|
||||
Handle::MaybeErrorType
|
||||
Handle::executeEach(std::vector<Statement> const& statements) const
|
||||
{
|
||||
for (auto futures = asyncExecuteEach(statements); auto const& future : futures)
|
||||
{
|
||||
if (auto const rc = future.await(); not rc)
|
||||
for (auto futures = asyncExecuteEach(statements); auto const& future : futures) {
|
||||
if (auto rc = future.await(); not rc)
|
||||
return rc;
|
||||
}
|
||||
|
||||
@@ -145,11 +156,12 @@ Handle::asyncExecute(std::vector<Statement> const& statements, std::function<voi
|
||||
Handle::PreparedStatementType
|
||||
Handle::prepare(std::string_view query) const
|
||||
{
|
||||
Handle::FutureType future = cass_session_prepare(session_, query.data());
|
||||
if (auto const rc = future.await(); rc)
|
||||
Handle::FutureType const future = cass_session_prepare(session_, query.data());
|
||||
auto const rc = future.await();
|
||||
if (rc)
|
||||
return cass_future_get_prepared(future);
|
||||
else
|
||||
throw std::runtime_error(rc.error().message());
|
||||
|
||||
throw std::runtime_error(rc.error().message());
|
||||
}
|
||||
|
||||
} // namespace data::cassandra
|
||||
|
||||
@@ -19,22 +19,20 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <data/cassandra/Error.h>
|
||||
#include <data/cassandra/Types.h>
|
||||
#include <data/cassandra/impl/Batch.h>
|
||||
#include <data/cassandra/impl/Cluster.h>
|
||||
#include <data/cassandra/impl/Future.h>
|
||||
#include <data/cassandra/impl/ManagedObject.h>
|
||||
#include <data/cassandra/impl/Result.h>
|
||||
#include <data/cassandra/impl/Session.h>
|
||||
#include <data/cassandra/impl/Statement.h>
|
||||
#include <util/Expected.h>
|
||||
#include "data/cassandra/Error.hpp"
|
||||
#include "data/cassandra/Types.hpp"
|
||||
#include "data/cassandra/impl/Batch.hpp"
|
||||
#include "data/cassandra/impl/Cluster.hpp"
|
||||
#include "data/cassandra/impl/Future.hpp"
|
||||
#include "data/cassandra/impl/ManagedObject.hpp"
|
||||
#include "data/cassandra/impl/Result.hpp"
|
||||
#include "data/cassandra/impl/Session.hpp"
|
||||
#include "data/cassandra/impl/Statement.hpp"
|
||||
|
||||
#include <cassandra.h>
|
||||
|
||||
#include <chrono>
|
||||
#include <compare>
|
||||
#include <iterator>
|
||||
#include <functional>
|
||||
#include <string_view>
|
||||
#include <vector>
|
||||
|
||||
namespace data::cassandra {
|
||||
@@ -42,8 +40,7 @@ namespace data::cassandra {
|
||||
/**
|
||||
* @brief Represents a handle to the cassandra database cluster
|
||||
*/
|
||||
class Handle
|
||||
{
|
||||
class Handle {
|
||||
detail::Cluster cluster_;
|
||||
detail::Session session_;
|
||||
|
||||
@@ -19,16 +19,19 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <data/cassandra/Concepts.h>
|
||||
#include <data/cassandra/Handle.h>
|
||||
#include <data/cassandra/SettingsProvider.h>
|
||||
#include <data/cassandra/Types.h>
|
||||
#include <util/Expected.h>
|
||||
#include <util/config/Config.h>
|
||||
#include <util/log/Logger.h>
|
||||
#include "data/cassandra/Concepts.hpp"
|
||||
#include "data/cassandra/Handle.hpp"
|
||||
#include "data/cassandra/Types.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
|
||||
#include <fmt/compile.h>
|
||||
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
#include <vector>
|
||||
|
||||
namespace data::cassandra {
|
||||
|
||||
template <SomeSettingsProvider SettingsProviderType>
|
||||
@@ -41,8 +44,7 @@ template <SomeSettingsProvider SettingsProviderType>
|
||||
* @brief Manages the DB schema and provides access to prepared statements.
|
||||
*/
|
||||
template <SomeSettingsProvider SettingsProviderType>
|
||||
class Schema
|
||||
{
|
||||
class Schema {
|
||||
util::Logger log_{"Backend"};
|
||||
std::reference_wrapper<SettingsProviderType const> settingsProvider_;
|
||||
|
||||
@@ -62,7 +64,8 @@ public:
|
||||
AND durable_writes = true
|
||||
)",
|
||||
settingsProvider_.get().getKeyspace(),
|
||||
settingsProvider_.get().getReplicationFactor());
|
||||
settingsProvider_.get().getReplicationFactor()
|
||||
);
|
||||
}();
|
||||
|
||||
// =======================
|
||||
@@ -85,7 +88,8 @@ public:
|
||||
AND default_time_to_live = {}
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "objects"),
|
||||
settingsProvider_.get().getTtl()));
|
||||
settingsProvider_.get().getTtl()
|
||||
));
|
||||
|
||||
statements.emplace_back(fmt::format(
|
||||
R"(
|
||||
@@ -100,7 +104,8 @@ public:
|
||||
WITH default_time_to_live = {}
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "transactions"),
|
||||
settingsProvider_.get().getTtl()));
|
||||
settingsProvider_.get().getTtl()
|
||||
));
|
||||
|
||||
statements.emplace_back(fmt::format(
|
||||
R"(
|
||||
@@ -113,7 +118,8 @@ public:
|
||||
WITH default_time_to_live = {}
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "ledger_transactions"),
|
||||
settingsProvider_.get().getTtl()));
|
||||
settingsProvider_.get().getTtl()
|
||||
));
|
||||
|
||||
statements.emplace_back(fmt::format(
|
||||
R"(
|
||||
@@ -127,7 +133,8 @@ public:
|
||||
WITH default_time_to_live = {}
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "successor"),
|
||||
settingsProvider_.get().getTtl()));
|
||||
settingsProvider_.get().getTtl()
|
||||
));
|
||||
|
||||
statements.emplace_back(fmt::format(
|
||||
R"(
|
||||
@@ -140,7 +147,8 @@ public:
|
||||
WITH default_time_to_live = {}
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "diff"),
|
||||
settingsProvider_.get().getTtl()));
|
||||
settingsProvider_.get().getTtl()
|
||||
));
|
||||
|
||||
statements.emplace_back(fmt::format(
|
||||
R"(
|
||||
@@ -155,7 +163,8 @@ public:
|
||||
AND default_time_to_live = {}
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "account_tx"),
|
||||
settingsProvider_.get().getTtl()));
|
||||
settingsProvider_.get().getTtl()
|
||||
));
|
||||
|
||||
statements.emplace_back(fmt::format(
|
||||
R"(
|
||||
@@ -167,7 +176,8 @@ public:
|
||||
WITH default_time_to_live = {}
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "ledgers"),
|
||||
settingsProvider_.get().getTtl()));
|
||||
settingsProvider_.get().getTtl()
|
||||
));
|
||||
|
||||
statements.emplace_back(fmt::format(
|
||||
R"(
|
||||
@@ -179,7 +189,8 @@ public:
|
||||
WITH default_time_to_live = {}
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "ledger_hashes"),
|
||||
settingsProvider_.get().getTtl()));
|
||||
settingsProvider_.get().getTtl()
|
||||
));
|
||||
|
||||
statements.emplace_back(fmt::format(
|
||||
R"(
|
||||
@@ -189,7 +200,8 @@ public:
|
||||
sequence bigint
|
||||
)
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "ledger_range")));
|
||||
qualifiedTableName(settingsProvider_.get(), "ledger_range")
|
||||
));
|
||||
|
||||
statements.emplace_back(fmt::format(
|
||||
R"(
|
||||
@@ -205,7 +217,8 @@ public:
|
||||
AND default_time_to_live = {}
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "nf_tokens"),
|
||||
settingsProvider_.get().getTtl()));
|
||||
settingsProvider_.get().getTtl()
|
||||
));
|
||||
|
||||
statements.emplace_back(fmt::format(
|
||||
R"(
|
||||
@@ -220,7 +233,8 @@ public:
|
||||
AND default_time_to_live = {}
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2"),
|
||||
settingsProvider_.get().getTtl()));
|
||||
settingsProvider_.get().getTtl()
|
||||
));
|
||||
|
||||
statements.emplace_back(fmt::format(
|
||||
R"(
|
||||
@@ -235,7 +249,8 @@ public:
|
||||
AND default_time_to_live = {}
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "nf_token_uris"),
|
||||
settingsProvider_.get().getTtl()));
|
||||
settingsProvider_.get().getTtl()
|
||||
));
|
||||
|
||||
statements.emplace_back(fmt::format(
|
||||
R"(
|
||||
@@ -250,7 +265,8 @@ public:
|
||||
AND default_time_to_live = {}
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions"),
|
||||
settingsProvider_.get().getTtl()));
|
||||
settingsProvider_.get().getTtl()
|
||||
));
|
||||
|
||||
return statements;
|
||||
}();
|
||||
@@ -258,8 +274,7 @@ public:
/**
* @brief Prepared statements holder.
*/
class Statements
{
class Statements {
std::reference_wrapper<SettingsProviderType const> settingsProvider_;
std::reference_wrapper<Handle const> handle_;

@@ -280,7 +295,8 @@ public:
(key, sequence, object)
VALUES (?, ?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "objects")));
qualifiedTableName(settingsProvider_.get(), "objects")
));
}();

PreparedStatement insertTransaction = [this]() {
@@ -290,7 +306,8 @@ public:
(hash, ledger_sequence, date, transaction, metadata)
VALUES (?, ?, ?, ?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "transactions")));
qualifiedTableName(settingsProvider_.get(), "transactions")
));
}();

PreparedStatement insertLedgerTransaction = [this]() {
@@ -300,7 +317,8 @@ public:
(ledger_sequence, hash)
VALUES (?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "ledger_transactions")));
qualifiedTableName(settingsProvider_.get(), "ledger_transactions")
));
}();

PreparedStatement insertSuccessor = [this]() {
@@ -310,7 +328,8 @@ public:
(key, seq, next)
VALUES (?, ?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "successor")));
qualifiedTableName(settingsProvider_.get(), "successor")
));
}();

PreparedStatement insertDiff = [this]() {
@@ -320,7 +339,8 @@ public:
(seq, key)
VALUES (?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "diff")));
qualifiedTableName(settingsProvider_.get(), "diff")
));
}();

PreparedStatement insertAccountTx = [this]() {
@@ -330,7 +350,8 @@ public:
(account, seq_idx, hash)
VALUES (?, ?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "account_tx")));
qualifiedTableName(settingsProvider_.get(), "account_tx")
));
}();

PreparedStatement insertNFT = [this]() {
@@ -340,7 +361,8 @@ public:
(token_id, sequence, owner, is_burned)
VALUES (?, ?, ?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "nf_tokens")));
qualifiedTableName(settingsProvider_.get(), "nf_tokens")
));
}();

PreparedStatement insertIssuerNFT = [this]() {
@@ -350,7 +372,8 @@ public:
(issuer, taxon, token_id)
VALUES (?, ?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")));
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")
));
}();

PreparedStatement insertNFTURI = [this]() {
@@ -360,7 +383,8 @@ public:
(token_id, sequence, uri)
VALUES (?, ?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "nf_token_uris")));
qualifiedTableName(settingsProvider_.get(), "nf_token_uris")
));
}();

PreparedStatement insertNFTTx = [this]() {
@@ -370,7 +394,8 @@ public:
(token_id, seq_idx, hash)
VALUES (?, ?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions")));
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions")
));
}();

PreparedStatement insertLedgerHeader = [this]() {
@@ -380,7 +405,8 @@ public:
(sequence, header)
VALUES (?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "ledgers")));
qualifiedTableName(settingsProvider_.get(), "ledgers")
));
}();

PreparedStatement insertLedgerHash = [this]() {
@@ -390,7 +416,8 @@ public:
(hash, sequence)
VALUES (?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "ledger_hashes")));
qualifiedTableName(settingsProvider_.get(), "ledger_hashes")
));
}();

//
@@ -405,7 +432,8 @@ public:
WHERE is_latest = ?
IF sequence IN (?, null)
)",
qualifiedTableName(settingsProvider_.get(), "ledger_range")));
qualifiedTableName(settingsProvider_.get(), "ledger_range")
));
}();

PreparedStatement deleteLedgerRange = [this]() {
@@ -415,7 +443,8 @@ public:
SET sequence = ?
WHERE is_latest = false
)",
qualifiedTableName(settingsProvider_.get(), "ledger_range")));
qualifiedTableName(settingsProvider_.get(), "ledger_range")
));
}();

//
@@ -432,7 +461,8 @@ public:
ORDER BY seq DESC
LIMIT 1
)",
qualifiedTableName(settingsProvider_.get(), "successor")));
qualifiedTableName(settingsProvider_.get(), "successor")
));
}();

PreparedStatement selectDiff = [this]() {
@@ -442,7 +472,8 @@ public:
FROM {}
WHERE seq = ?
)",
qualifiedTableName(settingsProvider_.get(), "diff")));
qualifiedTableName(settingsProvider_.get(), "diff")
));
}();

PreparedStatement selectObject = [this]() {
@@ -455,7 +486,8 @@ public:
ORDER BY sequence DESC
LIMIT 1
)",
qualifiedTableName(settingsProvider_.get(), "objects")));
qualifiedTableName(settingsProvider_.get(), "objects")
));
}();

PreparedStatement selectTransaction = [this]() {
@@ -465,7 +497,8 @@ public:
FROM {}
WHERE hash = ?
)",
qualifiedTableName(settingsProvider_.get(), "transactions")));
qualifiedTableName(settingsProvider_.get(), "transactions")
));
}();

PreparedStatement selectAllTransactionHashesInLedger = [this]() {
@@ -475,7 +508,8 @@ public:
FROM {}
WHERE ledger_sequence = ?
)",
qualifiedTableName(settingsProvider_.get(), "ledger_transactions")));
qualifiedTableName(settingsProvider_.get(), "ledger_transactions")
));
}();

PreparedStatement selectLedgerPageKeys = [this]() {
@@ -489,7 +523,8 @@ public:
LIMIT ?
ALLOW FILTERING
)",
qualifiedTableName(settingsProvider_.get(), "objects")));
qualifiedTableName(settingsProvider_.get(), "objects")
));
}();

PreparedStatement selectLedgerPage = [this]() {
@@ -503,7 +538,8 @@ public:
LIMIT ?
ALLOW FILTERING
)",
qualifiedTableName(settingsProvider_.get(), "objects")));
qualifiedTableName(settingsProvider_.get(), "objects")
));
}();

PreparedStatement getToken = [this]() {
@@ -514,7 +550,8 @@ public:
WHERE key = ?
LIMIT 1
)",
qualifiedTableName(settingsProvider_.get(), "objects")));
qualifiedTableName(settingsProvider_.get(), "objects")
));
}();

PreparedStatement selectAccountTx = [this]() {
@@ -526,7 +563,8 @@ public:
AND seq_idx < ?
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "account_tx")));
qualifiedTableName(settingsProvider_.get(), "account_tx")
));
}();

PreparedStatement selectAccountTxForward = [this]() {
@@ -539,7 +577,8 @@ public:
ORDER BY seq_idx ASC
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "account_tx")));
qualifiedTableName(settingsProvider_.get(), "account_tx")
));
}();

PreparedStatement selectNFT = [this]() {
@@ -552,7 +591,8 @@ public:
ORDER BY sequence DESC
LIMIT 1
)",
qualifiedTableName(settingsProvider_.get(), "nf_tokens")));
qualifiedTableName(settingsProvider_.get(), "nf_tokens")
));
}();

PreparedStatement selectNFTURI = [this]() {
@@ -565,7 +605,8 @@ public:
ORDER BY sequence DESC
LIMIT 1
)",
qualifiedTableName(settingsProvider_.get(), "nf_token_uris")));
qualifiedTableName(settingsProvider_.get(), "nf_token_uris")
));
}();

PreparedStatement selectNFTTx = [this]() {
@@ -578,7 +619,8 @@ public:
ORDER BY seq_idx DESC
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions")));
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions")
));
}();

PreparedStatement selectNFTTxForward = [this]() {
@@ -591,7 +633,37 @@ public:
ORDER BY seq_idx ASC
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions")));
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions")
));
}();

PreparedStatement selectNFTIDsByIssuer = [this]() {
return handle_.get().prepare(fmt::format(
R"(
SELECT token_id
FROM {}
WHERE issuer = ?
AND (taxon, token_id) > ?
ORDER BY taxon ASC, token_id ASC
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")
));
}();

PreparedStatement selectNFTIDsByIssuerTaxon = [this]() {
return handle_.get().prepare(fmt::format(
R"(
SELECT token_id
FROM {}
WHERE issuer = ?
AND taxon = ?
AND token_id > ?
ORDER BY taxon ASC, token_id ASC
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")
));
}();

PreparedStatement selectLedgerByHash = [this]() {
@@ -602,7 +674,8 @@ public:
WHERE hash = ?
LIMIT 1
)",
qualifiedTableName(settingsProvider_.get(), "ledger_hashes")));
qualifiedTableName(settingsProvider_.get(), "ledger_hashes")
));
}();

PreparedStatement selectLedgerBySeq = [this]() {
@@ -612,7 +685,8 @@ public:
FROM {}
WHERE sequence = ?
)",
qualifiedTableName(settingsProvider_.get(), "ledgers")));
qualifiedTableName(settingsProvider_.get(), "ledgers")
));
}();

PreparedStatement selectLatestLedger = [this]() {
@@ -622,7 +696,8 @@ public:
FROM {}
WHERE is_latest = true
)",
qualifiedTableName(settingsProvider_.get(), "ledger_range")));
qualifiedTableName(settingsProvider_.get(), "ledger_range")
));
}();

PreparedStatement selectLedgerRange = [this]() {
@@ -631,7 +706,8 @@ public:
SELECT sequence
FROM {}
)",
qualifiedTableName(settingsProvider_.get(), "ledger_range")));
qualifiedTableName(settingsProvider_.get(), "ledger_range")
));
}();
};

@@ -17,16 +17,28 @@
*/
//==============================================================================

#include <data/cassandra/SettingsProvider.h>
#include <data/cassandra/impl/Cluster.h>
#include <data/cassandra/impl/Statement.h>
#include <util/config/Config.h>
#include "data/cassandra/SettingsProvider.hpp"

#include <boost/json.hpp>
#include "data/cassandra/Types.hpp"
#include "data/cassandra/impl/Cluster.hpp"
#include "util/Constants.hpp"
#include "util/config/Config.hpp"

#include <boost/json/conversion.hpp>
#include <boost/json/value.hpp>

#include <cerrno>
#include <chrono>
#include <cstddef>
#include <cstdint>
#include <filesystem>
#include <fstream>
#include <ios>
#include <iterator>
#include <optional>
#include <stdexcept>
#include <string>
#include <thread>
#include <system_error>

namespace data::cassandra {

@@ -34,12 +46,11 @@ namespace detail {
inline Settings::ContactPoints
tag_invoke(boost::json::value_to_tag<Settings::ContactPoints>, boost::json::value const& value)
{
if (not value.is_object())
throw std::runtime_error(
"Feed entire Cassandra section to parse "
"Settings::ContactPoints instead");
if (not value.is_object()) {
throw std::runtime_error("Feed entire Cassandra section to parse Settings::ContactPoints instead");
}

util::Config obj{value};
util::Config const obj{value};
Settings::ContactPoints out;

out.contactPoints = obj.valueOrThrow<std::string>("contact_points", "`contact_points` must be a string");
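The tag_invoke overload above is the boost::json customization point: feeding the whole Cassandra config section through boost::json::value_to yields a populated ContactPoints. A minimal sketch of the call shape (the JSON values here are illustrative, not taken from the diff):

    auto const section = boost::json::parse(R"({"contact_points": "127.0.0.1", "port": 9042})");
    auto const points = boost::json::value_to<Settings::ContactPoints>(section);
    // points.contactPoints == "127.0.0.1", points.port == 9042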
@@ -76,18 +87,15 @@ SettingsProvider::getSettings() const
std::optional<std::string>
SettingsProvider::parseOptionalCertificate() const
{
if (auto const certPath = config_.maybeValue<std::string>("certfile"); certPath)
{
if (auto const certPath = config_.maybeValue<std::string>("certfile"); certPath) {
auto const path = std::filesystem::path(*certPath);
std::ifstream fileStream(path.string(), std::ios::in);
if (!fileStream)
{
if (!fileStream) {
throw std::system_error(errno, std::generic_category(), "Opening certificate " + path.string());
}

std::string contents(std::istreambuf_iterator<char>{fileStream}, std::istreambuf_iterator<char>{});
if (fileStream.bad())
{
if (fileStream.bad()) {
throw std::system_error(errno, std::generic_category(), "Reading certificate " + path.string());
}

@@ -101,12 +109,9 @@ Settings
SettingsProvider::parseSettings() const
{
auto settings = Settings::defaultSettings();
if (auto const bundle = config_.maybeValue<Settings::SecureConnectionBundle>("secure_connect_bundle"); bundle)
{
if (auto const bundle = config_.maybeValue<Settings::SecureConnectionBundle>("secure_connect_bundle"); bundle) {
settings.connectionInfo = *bundle;
}
else
{
} else {
settings.connectionInfo =
config_.valueOrThrow<Settings::ContactPoints>("Missing contact_points in Cassandra config");
}
@@ -118,16 +123,16 @@ SettingsProvider::parseSettings() const
config_.valueOr<uint32_t>("max_read_requests_outstanding", settings.maxReadRequestsOutstanding);
settings.coreConnectionsPerHost =
config_.valueOr<uint32_t>("core_connections_per_host", settings.coreConnectionsPerHost);

settings.queueSizeIO = config_.maybeValue<uint32_t>("queue_size_io");
settings.writeBatchSize = config_.valueOr<std::size_t>("write_batch_size", settings.writeBatchSize);

auto const connectTimeoutSecond = config_.maybeValue<uint32_t>("connect_timeout");
if (connectTimeoutSecond)
settings.connectionTimeout = std::chrono::milliseconds{*connectTimeoutSecond * 1000};
settings.connectionTimeout = std::chrono::milliseconds{*connectTimeoutSecond * util::MILLISECONDS_PER_SECOND};

auto const requestTimeoutSecond = config_.maybeValue<uint32_t>("request_timeout");
if (requestTimeoutSecond)
settings.requestTimeout = std::chrono::milliseconds{*requestTimeoutSecond * 1000};
settings.requestTimeout = std::chrono::milliseconds{*requestTimeoutSecond * util::MILLISECONDS_PER_SECOND};

settings.certificate = parseOptionalCertificate();
settings.username = config_.maybeValue<std::string>("username");

@@ -19,19 +19,21 @@

#pragma once

#include <data/cassandra/Handle.h>
#include <data/cassandra/Types.h>
#include <util/Expected.h>
#include <util/config/Config.h>
#include <util/log/Logger.h>
#include "data/cassandra/Handle.hpp"
#include "data/cassandra/Types.hpp"
#include "util/config/Config.hpp"
#include "util/log/Logger.hpp"

#include <cstdint>
#include <optional>
#include <string>

namespace data::cassandra {

/**
* @brief Provides settings for @ref BasicCassandraBackend.
*/
class SettingsProvider
{
class SettingsProvider {
util::Config config_;

std::string keyspace_;
@@ -19,9 +19,9 @@

#pragma once

#include <util/Expected.h>
#include "util/Expected.hpp"

#include <string>
#include <cstdint>

namespace data::cassandra {

@@ -52,8 +52,7 @@ using Batch = detail::Batch;
* because clio uses bigint (int64) everywhere except for when one needs
* to specify LIMIT, which needs an int32 :-/
*/
struct Limit
{
struct Limit {
int32_t limit;
};
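Limit exists so the bind overloads have a distinct type to dispatch on: LIMIT parameters must go over the wire as int32 while every other numeric column is bigint. A hypothetical call site (statement and variable names are illustrative, not from the diff):

    int32_t const pageSize = 200;
    auto const bound = selectAccountTx.bind(account, seqIdx, Limit{pageSize});  // LIMIT ? binds as int32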

@@ -19,17 +19,20 @@

#pragma once

#include <data/cassandra/Concepts.h>
#include <data/cassandra/Handle.h>
#include <data/cassandra/Types.h>
#include <data/cassandra/impl/RetryPolicy.h>
#include <util/Expected.h>
#include <util/log/Logger.h>
#include "data/cassandra/Concepts.hpp"
#include "data/cassandra/Handle.hpp"
#include "data/cassandra/Types.hpp"
#include "data/cassandra/impl/RetryPolicy.hpp"
#include "util/log/Logger.hpp"

#include <boost/asio.hpp>
#include <boost/asio/io_context.hpp>

#include <functional>
#include <memory>
#include <mutex>
#include <optional>
#include <utility>

namespace data::cassandra::detail {

@@ -48,16 +51,17 @@ template <
typename StatementType,
typename HandleType = Handle,
SomeRetryPolicy RetryPolicyType = ExponentialBackoffRetryPolicy>
class AsyncExecutor : public std::enable_shared_from_this<AsyncExecutor<StatementType, HandleType, RetryPolicyType>>
{
class AsyncExecutor : public std::enable_shared_from_this<AsyncExecutor<StatementType, HandleType, RetryPolicyType>> {
using FutureWithCallbackType = typename HandleType::FutureWithCallbackType;
using CallbackType = std::function<void(typename HandleType::ResultOrErrorType)>;
using RetryCallbackType = std::function<void()>;

util::Logger log_{"Backend"};

StatementType data_;
RetryPolicyType retryPolicy_;
CallbackType onComplete_;
RetryCallbackType onRetry_;

// does not exist during initial construction, hence optional
std::optional<FutureWithCallbackType> future_;
@@ -68,24 +72,37 @@ public:
* @brief Create a new instance of the AsyncExecutor and execute it.
*/
static void
run(boost::asio::io_context& ioc, HandleType const& handle, StatementType&& data, CallbackType&& onComplete)
run(boost::asio::io_context& ioc,
HandleType const& handle,
StatementType&& data,
CallbackType&& onComplete,
RetryCallbackType&& onRetry)
{
// this is a helper that allows us to use std::make_shared below
struct EnableMakeShared : public AsyncExecutor<StatementType, HandleType, RetryPolicyType>
{
EnableMakeShared(boost::asio::io_context& ioc, StatementType&& data, CallbackType&& onComplete)
: AsyncExecutor(ioc, std::move(data), std::move(onComplete))
struct EnableMakeShared : public AsyncExecutor<StatementType, HandleType, RetryPolicyType> {
EnableMakeShared(
boost::asio::io_context& ioc,
StatementType&& data,
CallbackType&& onComplete,
RetryCallbackType&& onRetry
)
: AsyncExecutor(ioc, std::move(data), std::move(onComplete), std::move(onRetry))
{
}
};

auto ptr = std::make_shared<EnableMakeShared>(ioc, std::move(data), std::move(onComplete));
auto ptr = std::make_shared<EnableMakeShared>(ioc, std::move(data), std::move(onComplete), std::move(onRetry));
ptr->execute(handle);
}
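The EnableMakeShared trick above is needed because std::make_shared cannot reach a private constructor; a local struct derived from the class re-exposes it. The idiom in miniature (names are illustrative, not from this diff):

    #include <memory>

    class Widget {
        explicit Widget(int v) : value_{v} {}  // private: creation only via create()
        int value_;

    public:
        static std::shared_ptr<Widget>
        create(int v)
        {
            struct EnableMakeShared : Widget {
                explicit EnableMakeShared(int v) : Widget(v) {}
            };
            return std::make_shared<EnableMakeShared>(v);  // sees the public derived ctor
        }
    };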

private:
AsyncExecutor(boost::asio::io_context& ioc, StatementType&& data, CallbackType&& onComplete)
: data_{std::move(data)}, retryPolicy_{ioc}, onComplete_{std::move(onComplete)}
AsyncExecutor(
boost::asio::io_context& ioc,
StatementType&& data,
CallbackType&& onComplete,
RetryCallbackType&& onRetry
)
: data_{std::move(data)}, retryPolicy_{ioc}, onComplete_{std::move(onComplete)}, onRetry_{std::move(onRetry)}
{
}

@@ -96,22 +113,21 @@ private:

// lifetime is extended by capturing self ptr
auto handler = [this, &handle, self](auto&& res) mutable {
if (res)
{
onComplete_(std::move(res));
}
else
{
if (retryPolicy_.shouldRetry(res.error()))
if (res) {
onComplete_(std::forward<decltype(res)>(res));
} else {
if (retryPolicy_.shouldRetry(res.error())) {
onRetry_();
retryPolicy_.retry([self, &handle]() { self->execute(handle); });
else
onComplete_(std::move(res)); // report error
} else {
onComplete_(std::forward<decltype(res)>(res)); // report error
}
}

self = nullptr; // explicitly decrement refcount
};

std::scoped_lock lck{mtx_};
std::scoped_lock const lck{mtx_};
future_.emplace(handle.asyncExecute(data_, std::move(handler)));
}
};
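The handler above defers all retry timing to RetryPolicyType and now reports each attempt through onRetry_ before rescheduling. For orientation, a minimal exponential-backoff policy with the same shouldRetry/retry surface might look like this (an illustrative sketch; the real ExponentialBackoffRetryPolicy lives in data/cassandra/impl/RetryPolicy.hpp and may differ):

    #include <boost/asio/io_context.hpp>
    #include <boost/asio/steady_timer.hpp>

    #include <algorithm>
    #include <chrono>
    #include <cstdint>
    #include <functional>

    class SimpleBackoffPolicy {
        boost::asio::steady_timer timer_;
        uint32_t attempt_ = 0u;

    public:
        explicit SimpleBackoffPolicy(boost::asio::io_context& ioc) : timer_{ioc} {}

        template <typename ErrorType>
        bool
        shouldRetry(ErrorType const&)
        {
            return true;  // a real policy can inspect the error code here
        }

        void
        retry(std::function<void()> fn)
        {
            // delay doubles each attempt: 1ms, 2ms, 4ms, ... capped at ~1s
            timer_.expires_after(std::chrono::milliseconds{1u << std::min(attempt_++, 10u)});
            timer_.async_wait([fn = std::move(fn)](auto const&) { fn(); });
        }
    };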
@@ -17,17 +17,22 @@
*/
//==============================================================================

#include <data/cassandra/Error.h>
#include <data/cassandra/impl/Batch.h>
#include <data/cassandra/impl/Statement.h>
#include <util/Expected.h>
#include "data/cassandra/impl/Batch.hpp"

#include <exception>
#include "data/cassandra/Error.hpp"
#include "data/cassandra/Types.hpp"
#include "data/cassandra/impl/ManagedObject.hpp"
#include "data/cassandra/impl/Statement.hpp"
#include "util/Expected.hpp"

#include <cassandra.h>

#include <stdexcept>
#include <vector>

namespace {
static constexpr auto batchDeleter = [](CassBatch* ptr) { cass_batch_free(ptr); };
};
constexpr auto batchDeleter = [](CassBatch* ptr) { cass_batch_free(ptr); };
} // namespace

namespace data::cassandra::detail {

@@ -37,16 +42,16 @@ Batch::Batch(std::vector<Statement> const& statements)
{
cass_batch_set_is_idempotent(*this, cass_true);

for (auto const& statement : statements)
for (auto const& statement : statements) {
if (auto const res = add(statement); not res)
throw std::runtime_error("Failed to add statement to batch: " + res.error());
}
}

MaybeError
Batch::add(Statement const& statement)
{
if (auto const rc = cass_batch_add_statement(*this, statement); rc != CASS_OK)
{
if (auto const rc = cass_batch_add_statement(*this, statement); rc != CASS_OK) {
return Error{CassandraError{cass_error_desc(rc), rc}};
}
return {};

@@ -19,15 +19,16 @@

#pragma once

#include <data/cassandra/Types.h>
#include <data/cassandra/impl/ManagedObject.h>
#include "data/cassandra/Types.hpp"
#include "data/cassandra/impl/ManagedObject.hpp"

#include <cassandra.h>

#include <vector>

namespace data::cassandra::detail {

struct Batch : public ManagedObject<CassBatch>
{
struct Batch : public ManagedObject<CassBatch> {
Batch(std::vector<Statement> const& statements);

MaybeError
@@ -17,22 +17,24 @@
*/
//==============================================================================

#include <data/cassandra/impl/Cluster.h>
#include <data/cassandra/impl/SslContext.h>
#include <data/cassandra/impl/Statement.h>
#include <util/Expected.h>
#include "data/cassandra/impl/Cluster.hpp"

#include "data/cassandra/impl/ManagedObject.hpp"
#include "data/cassandra/impl/SslContext.hpp"
#include "util/log/Logger.hpp"

#include <cassandra.h>
#include <fmt/core.h>

#include <exception>
#include <vector>
#include <stdexcept>
#include <string>
#include <variant>

namespace {
static constexpr auto clusterDeleter = [](CassCluster* ptr) { cass_cluster_free(ptr); };
constexpr auto clusterDeleter = [](CassCluster* ptr) { cass_cluster_free(ptr); };

template <class... Ts>
struct overloadSet : Ts...
{
struct overloadSet : Ts... {
using Ts::operator()...;
};
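overloadSet is the standard overload-set idiom: inherit a pack of lambdas and pull all their operator()s into one type so std::visit can dispatch a variant, exactly as setupConnection does below. A self-contained illustration (the variant and values are illustrative):

    #include <iostream>
    #include <string>
    #include <variant>

    template <class... Ts>
    struct overloadSet : Ts... {
        using Ts::operator()...;
    };
    template <class... Ts>
    overloadSet(Ts...) -> overloadSet<Ts...>;  // deduction guide, implicit since C++20

    int main()
    {
        std::variant<int, std::string> const v = std::string{"secure bundle"};
        std::visit(
            overloadSet{
                [](int i) { std::cout << "int: " << i << '\n'; },
                [](std::string const& s) { std::cout << "string: " << s << '\n'; }
            },
            v
        );
    }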

@@ -48,16 +50,15 @@ Cluster::Cluster(Settings const& settings) : ManagedObject{cass_cluster_new(), c
using std::to_string;

cass_cluster_set_token_aware_routing(*this, cass_true);
if (auto const rc = cass_cluster_set_protocol_version(*this, CASS_PROTOCOL_VERSION_V4); rc != CASS_OK)
{
throw std::runtime_error(
fmt::format("Error setting cassandra protocol version to v4: {}", cass_error_desc(rc)));
if (auto const rc = cass_cluster_set_protocol_version(*this, CASS_PROTOCOL_VERSION_V4); rc != CASS_OK) {
throw std::runtime_error(fmt::format("Error setting cassandra protocol version to v4: {}", cass_error_desc(rc))
);
}

if (auto const rc = cass_cluster_set_num_threads_io(*this, settings.threads); rc != CASS_OK)
{
if (auto const rc = cass_cluster_set_num_threads_io(*this, settings.threads); rc != CASS_OK) {
throw std::runtime_error(
fmt::format("Error setting cassandra io threads to {}: {}", settings.threads, cass_error_desc(rc)));
fmt::format("Error setting cassandra io threads to {}: {}", settings.threads, cass_error_desc(rc))
);
}

cass_log_set_level(settings.enableLog ? CASS_LOG_TRACE : CASS_LOG_DISABLED);
@@ -65,15 +66,13 @@ Cluster::Cluster(Settings const& settings) : ManagedObject{cass_cluster_new(), c
cass_cluster_set_request_timeout(*this, settings.requestTimeout.count());

if (auto const rc = cass_cluster_set_core_connections_per_host(*this, settings.coreConnectionsPerHost);
rc != CASS_OK)
{
rc != CASS_OK) {
throw std::runtime_error(fmt::format("Could not set core connections per host: {}", cass_error_desc(rc)));
}

auto const queueSize =
settings.queueSizeIO.value_or(settings.maxWriteRequestsOutstanding + settings.maxReadRequestsOutstanding);
if (auto const rc = cass_cluster_set_queue_size_io(*this, queueSize); rc != CASS_OK)
{
if (auto const rc = cass_cluster_set_queue_size_io(*this, queueSize); rc != CASS_OK) {
throw std::runtime_error(fmt::format("Could not set queue size for IO per host: {}", cass_error_desc(rc)));
}

@@ -84,6 +83,7 @@ Cluster::Cluster(Settings const& settings) : ManagedObject{cass_cluster_new(), c
LOG(log_.info()) << "Threads: " << settings.threads;
LOG(log_.info()) << "Core connections per host: " << settings.coreConnectionsPerHost;
LOG(log_.info()) << "IO queue size: " << queueSize;
LOG(log_.info()) << "Batched writes auto-chunk size: " << settings.writeBatchSize;
}

void
@@ -92,8 +92,10 @@ Cluster::setupConnection(Settings const& settings)
std::visit(
overloadSet{
[this](Settings::ContactPoints const& points) { setupContactPoints(points); },
[this](Settings::SecureConnectionBundle const& bundle) { setupSecureBundle(bundle); }},
settings.connectionInfo);
[this](Settings::SecureConnectionBundle const& bundle) { setupSecureBundle(bundle); }
},
settings.connectionInfo
);
}

void
@@ -101,9 +103,11 @@ Cluster::setupContactPoints(Settings::ContactPoints const& points)
{
using std::to_string;
auto throwErrorIfNeeded = [](CassError rc, std::string const& label, std::string const& value) {
if (rc != CASS_OK)
if (rc != CASS_OK) {
throw std::runtime_error(
fmt::format("Cassandra: Error setting {} [{}]: {}", label, value, cass_error_desc(rc)));
fmt::format("Cassandra: Error setting {} [{}]: {}", label, value, cass_error_desc(rc))
);
}
};

{
@@ -112,8 +116,7 @@ Cluster::setupContactPoints(Settings::ContactPoints const& points)
throwErrorIfNeeded(rc, "contact_points", points.contactPoints);
}

if (points.port)
{
if (points.port) {
auto const rc = cass_cluster_set_port(*this, points.port.value());
throwErrorIfNeeded(rc, "port", to_string(points.port.value()));
}
@@ -123,8 +126,7 @@ void
Cluster::setupSecureBundle(Settings::SecureConnectionBundle const& bundle)
{
LOG(log_.debug()) << "Attempt connection using secure bundle";
if (auto const rc = cass_cluster_set_cloud_secure_connection_bundle(*this, bundle.bundle.data()); rc != CASS_OK)
{
if (auto const rc = cass_cluster_set_cloud_secure_connection_bundle(*this, bundle.bundle.data()); rc != CASS_OK) {
throw std::runtime_error("Failed to connect using secure connection bundle " + bundle.bundle);
}
}
@@ -136,7 +138,7 @@ Cluster::setupCertificate(Settings const& settings)
return;

LOG(log_.debug()) << "Configure SSL context";
SslContext context = SslContext(*settings.certificate);
SslContext const context = SslContext(*settings.certificate);
cass_cluster_set_ssl(*this, context);
}


@@ -19,8 +19,8 @@

#pragma once

#include <data/cassandra/impl/ManagedObject.h>
#include <util/log/Logger.h>
#include "data/cassandra/impl/ManagedObject.hpp"
#include "util/log/Logger.hpp"

#include <cassandra.h>

@@ -38,13 +38,16 @@ namespace data::cassandra::detail {
/**
* @brief Bundles all cassandra settings in one place.
*/
struct Settings
{
struct Settings {
static constexpr std::size_t DEFAULT_CONNECTION_TIMEOUT = 10000;
static constexpr uint32_t DEFAULT_MAX_WRITE_REQUESTS_OUTSTANDING = 10'000;
static constexpr uint32_t DEFAULT_MAX_READ_REQUESTS_OUTSTANDING = 100'000;
static constexpr std::size_t DEFAULT_BATCH_SIZE = 20;

/**
* @brief Represents the configuration of contact points for cassandra.
*/
struct ContactPoints
{
struct ContactPoints {
std::string contactPoints = "127.0.0.1"; // defaults to localhost
std::optional<uint16_t> port = {};
};
@@ -52,8 +55,7 @@ struct Settings
/**
* @brief Represents the configuration of a secure connection bundle.
*/
struct SecureConnectionBundle
{
struct SecureConnectionBundle {
std::string bundle; // no meaningful default
};

@@ -61,7 +63,7 @@ struct Settings
bool enableLog = false;

/** @brief Connect timeout specified in milliseconds */
std::chrono::milliseconds connectionTimeout = std::chrono::milliseconds{10000};
std::chrono::milliseconds connectionTimeout = std::chrono::milliseconds{DEFAULT_CONNECTION_TIMEOUT};

/** @brief Request timeout specified in milliseconds */
std::chrono::milliseconds requestTimeout = std::chrono::milliseconds{0}; // no timeout at all
@@ -73,25 +75,28 @@ struct Settings
uint32_t threads = std::thread::hardware_concurrency();

/** @brief The maximum number of outstanding write requests at any given moment */
uint32_t maxWriteRequestsOutstanding = 10'000u;
uint32_t maxWriteRequestsOutstanding = DEFAULT_MAX_WRITE_REQUESTS_OUTSTANDING;

/** @brief The maximum number of outstanding read requests at any given moment */
uint32_t maxReadRequestsOutstanding = 100'000u;
uint32_t maxReadRequestsOutstanding = DEFAULT_MAX_READ_REQUESTS_OUTSTANDING;

/** @brief The number of connections per host to always have active */
uint32_t coreConnectionsPerHost = 1u;

/** @brief Size of batches when writing */
std::size_t writeBatchSize = DEFAULT_BATCH_SIZE;

/** @brief Size of the IO queue */
std::optional<uint32_t> queueSizeIO;
std::optional<uint32_t> queueSizeIO{};

/** @brief SSL certificate */
std::optional<std::string> certificate; // ssl context
std::optional<std::string> certificate{}; // ssl context

/** @brief Username/login */
std::optional<std::string> username;
std::optional<std::string> username{};

/** @brief Password to match the `username` */
std::optional<std::string> password;
std::optional<std::string> password{};

/**
* @brief Creates a new Settings object as a copy of the current one with overridden contact points.
@@ -114,8 +119,7 @@ struct Settings
}
};

class Cluster : public ManagedObject<CassCluster>
{
class Cluster : public ManagedObject<CassCluster> {
util::Logger log_{"Backend"};

public:
90 src/data/cassandra/impl/Collection.hpp Normal file
@@ -0,0 +1,90 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include "data/cassandra/impl/ManagedObject.hpp"

#include <cassandra.h>
#include <ripple/basics/base_uint.h>

#include <cstdint>
#include <stdexcept>
#include <string>
#include <string_view>
#include <vector>

namespace data::cassandra::detail {

class Collection : public ManagedObject<CassCollection> {
static constexpr auto deleter = [](CassCollection* ptr) { cass_collection_free(ptr); };

static void
throwErrorIfNeeded(CassError const rc, std::string_view const label)
{
if (rc == CASS_OK)
return;
auto const tag = '[' + std::string{label} + ']';
throw std::logic_error(tag + ": " + cass_error_desc(rc));
}

public:
/* implicit */ Collection(CassCollection* ptr);

template <typename Type>
explicit Collection(std::vector<Type> const& value)
: ManagedObject{cass_collection_new(CASS_COLLECTION_TYPE_LIST, value.size()), deleter}
{
bind(value);
}

template <typename Type>
void
bind(std::vector<Type> const& values) const
{
for (auto const& value : values)
append(value);
}

void
append(bool const value) const
{
auto const rc = cass_collection_append_bool(*this, value ? cass_true : cass_false);
throwErrorIfNeeded(rc, "Bind bool");
}

void
append(int64_t const value) const
{
auto const rc = cass_collection_append_int64(*this, value);
throwErrorIfNeeded(rc, "Bind int64");
}

void
append(ripple::uint256 const& value) const
{
auto const rc = cass_collection_append_bytes(
*this,
static_cast<cass_byte_t const*>(static_cast<unsigned char const*>(value.data())),
ripple::uint256::size()
);
throwErrorIfNeeded(rc, "Bind ripple::uint256");
}
};
} // namespace data::cassandra::detail
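A hypothetical call site for the new Collection wrapper: the templated constructor sizes a CQL list and bind() fans each element through the matching append() overload (the statement-side call is assumed, not part of this file):

    std::vector<int64_t> const sequences{100, 101, 102};
    data::cassandra::detail::Collection const list{sequences};  // builds a CASS_COLLECTION_TYPE_LIST
    // statement.bindAt(0, list);  // hypothetical: pass wherever a CQL list parameter is expected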
@@ -19,22 +19,35 @@

#pragma once

#include <data/cassandra/Handle.h>
#include <data/cassandra/Types.h>
#include <data/cassandra/impl/AsyncExecutor.h>
#include <util/Expected.h>
#include <util/log/Logger.h>
#include "data/BackendCounters.hpp"
#include "data/BackendInterface.hpp"
#include "data/cassandra/Handle.hpp"
#include "data/cassandra/Types.hpp"
#include "data/cassandra/impl/AsyncExecutor.hpp"
#include "util/Assert.hpp"
#include "util/Batching.hpp"
#include "util/log/Logger.hpp"

#include <boost/asio.hpp>
#include <boost/asio/associated_executor.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/io_service.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/json/object.hpp>

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <mutex>
#include <optional>
#include <stdexcept>
#include <thread>
#include <type_traits>
#include <vector>

namespace data::cassandra::detail {

@@ -46,9 +59,8 @@ namespace data::cassandra::detail {
* Note: A lot of the code that uses yield is repeated below.
* This is ok for now because we are hopefully going to be getting rid of it entirely later on.
*/
template <typename HandleType = Handle>
class DefaultExecutionStrategy
{
template <typename HandleType = Handle, SomeBackendCounters BackendCountersType = BackendCounters>
class DefaultExecutionStrategy {
util::Logger log_{"Backend"};

std::uint32_t maxWriteRequestsOutstanding_;
@@ -57,6 +69,8 @@ class DefaultExecutionStrategy
std::uint32_t maxReadRequestsOutstanding_;
std::atomic_uint32_t numReadRequestsOutstanding_ = 0;

std::size_t writeBatchSize_;

std::mutex throttleMutex_;
std::condition_variable throttleCv_;

@@ -69,6 +83,8 @@ class DefaultExecutionStrategy
std::reference_wrapper<HandleType const> handle_;
std::thread thread_;

typename BackendCountersType::PtrType counters_;

public:
using ResultOrErrorType = typename HandleType::ResultOrErrorType;
using StatementType = typename HandleType::StatementType;
@@ -82,12 +98,18 @@ public:
* @param settings The settings to use
* @param handle A handle to the cassandra database
*/
DefaultExecutionStrategy(Settings const& settings, HandleType const& handle)
DefaultExecutionStrategy(
Settings const& settings,
HandleType const& handle,
typename BackendCountersType::PtrType counters = BackendCountersType::make()
)
: maxWriteRequestsOutstanding_{settings.maxWriteRequestsOutstanding}
, maxReadRequestsOutstanding_{settings.maxReadRequestsOutstanding}
, writeBatchSize_{settings.writeBatchSize}
, work_{ioc_}
, handle_{std::cref(handle)}
, thread_{[this]() { ioc_.run(); }}
, counters_{std::move(counters)}
{
LOG(log_.info()) << "Max write requests outstanding is " << maxWriteRequestsOutstanding_
<< "; Max read requests outstanding is " << maxReadRequestsOutstanding_;
@@ -118,7 +140,10 @@ public:
bool
isTooBusy() const
{
return numReadRequestsOutstanding_ >= maxReadRequestsOutstanding_;
bool const result = numReadRequestsOutstanding_ >= maxReadRequestsOutstanding_;
if (result)
counters_->registerTooBusy();
return result;
}

/**
@@ -129,17 +154,17 @@ public:
ResultOrErrorType
writeSync(StatementType const& statement)
{
while (true)
{
if (auto res = handle_.get().execute(statement); res)
{
auto const startTime = std::chrono::steady_clock::now();
while (true) {
auto res = handle_.get().execute(statement);
if (res) {
counters_->registerWriteSync(startTime);
return res;
}
else
{
LOG(log_.warn()) << "Cassandra sync write error, retrying: " << res.error();
std::this_thread::sleep_for(std::chrono::milliseconds(5));
}

counters_->registerWriteSyncRetry();
LOG(log_.warn()) << "Cassandra sync write error, retrying: " << res.error();
std::this_thread::sleep_for(std::chrono::milliseconds(5));
}
}

@@ -168,12 +193,24 @@ public:
void
write(PreparedStatementType const& preparedStatement, Args&&... args)
{
auto const startTime = std::chrono::steady_clock::now();

auto statement = preparedStatement.bind(std::forward<Args>(args)...);
incrementOutstandingRequestCount();

counters_->registerWriteStarted();
// Note: lifetime is controlled by std::shared_from_this internally
AsyncExecutor<std::decay_t<decltype(statement)>, HandleType>::run(
ioc_, handle_, std::move(statement), [this](auto const&) { decrementOutstandingRequestCount(); });
ioc_,
handle_,
std::move(statement),
[this, startTime](auto const&) {
decrementOutstandingRequestCount();

counters_->registerWriteFinished(startTime);
},
[this]() { counters_->registerWriteRetry(); }
);
}

/**
@@ -190,11 +227,28 @@ public:
if (statements.empty())
return;

incrementOutstandingRequestCount();
util::forEachBatch(std::move(statements), writeBatchSize_, [this](auto begin, auto end) {
auto const startTime = std::chrono::steady_clock::now();
auto chunk = std::vector<StatementType>{};

// Note: lifetime is controlled by std::shared_from_this internally
AsyncExecutor<std::decay_t<decltype(statements)>, HandleType>::run(
ioc_, handle_, std::move(statements), [this](auto const&) { decrementOutstandingRequestCount(); });
chunk.reserve(std::distance(begin, end));
std::move(begin, end, std::back_inserter(chunk));

incrementOutstandingRequestCount();
counters_->registerWriteStarted();

// Note: lifetime is controlled by std::shared_from_this internally
AsyncExecutor<std::decay_t<decltype(chunk)>, HandleType>::run(
ioc_,
handle_,
std::move(chunk),
[this, startTime](auto const&) {
decrementOutstandingRequestCount();
counters_->registerWriteFinished(startTime);
},
[this]() { counters_->registerWriteRetry(); }
);
});
}

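util::forEachBatch (from the newly included util/Batching.hpp) drives the chunking above. Its definition is not shown in this diff; a minimal sketch matching the call shape forEachBatch(range, batchSize, fn(begin, end)) could be:

    #include <algorithm>
    #include <cstddef>
    #include <iterator>

    template <typename Range, typename Fn>
    void
    forEachBatch(Range&& range, std::size_t batchSize, Fn&& fn)
    {
        auto it = std::begin(range);
        auto const end = std::end(range);
        while (it != end) {
            auto const chunkSize =
                std::min<std::size_t>(batchSize, static_cast<std::size_t>(std::distance(it, end)));
            auto const next = std::next(it, static_cast<std::ptrdiff_t>(chunkSize));
            fn(it, next);  // caller consumes [it, next), e.g. by moving elements out
            it = next;
        }
    }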
/**
@@ -228,12 +282,14 @@ public:
[[maybe_unused]] ResultOrErrorType
read(CompletionTokenType token, std::vector<StatementType> const& statements)
{
auto const startTime = std::chrono::steady_clock::now();

auto const numStatements = statements.size();
std::optional<FutureWithCallbackType> future;
counters_->registerReadStarted(numStatements);

// todo: perhaps use policy instead
while (true)
{
while (true) {
numReadRequestsOutstanding_ += numStatements;

auto init = [this, &statements, &future]<typename Self>(Self& self) {
@@ -242,23 +298,29 @@ public:
future.emplace(handle_.get().asyncExecute(statements, [sself](auto&& res) mutable {
boost::asio::post(
boost::asio::get_associated_executor(*sself),
[sself, res = std::move(res)]() mutable { sself->complete(std::move(res)); });
[sself, res = std::forward<decltype(res)>(res)]() mutable { sself->complete(std::move(res)); }
);
}));
};

auto res = boost::asio::async_compose<CompletionTokenType, void(ResultOrErrorType)>(
init, token, boost::asio::get_associated_executor(token));
init, token, boost::asio::get_associated_executor(token)
);
numReadRequestsOutstanding_ -= numStatements;

if (res)
{
if (res) {
counters_->registerReadFinished(startTime, numStatements);
return res;
}
else
{
LOG(log_.error()) << "Failed batch read in coroutine: " << res.error();

LOG(log_.error()) << "Failed batch read in coroutine: " << res.error();
try {
throwErrorIfNeeded(res.error());
} catch (...) {
counters_->registerReadError(numStatements);
throw;
}
counters_->registerReadRetry(numStatements);
}
}

@@ -275,11 +337,13 @@ public:
[[maybe_unused]] ResultOrErrorType
read(CompletionTokenType token, StatementType const& statement)
{
auto const startTime = std::chrono::steady_clock::now();

std::optional<FutureWithCallbackType> future;
counters_->registerReadStarted();

// todo: perhaps use policy instead
while (true)
{
while (true) {
++numReadRequestsOutstanding_;
auto init = [this, &statement, &future]<typename Self>(Self& self) {
auto sself = std::make_shared<Self>(std::move(self));
@@ -287,23 +351,29 @@ public:
future.emplace(handle_.get().asyncExecute(statement, [sself](auto&& res) mutable {
boost::asio::post(
boost::asio::get_associated_executor(*sself),
[sself, res = std::move(res)]() mutable { sself->complete(std::move(res)); });
[sself, res = std::forward<decltype(res)>(res)]() mutable { sself->complete(std::move(res)); }
);
}));
};

auto res = boost::asio::async_compose<CompletionTokenType, void(ResultOrErrorType)>(
init, token, boost::asio::get_associated_executor(token));
init, token, boost::asio::get_associated_executor(token)
);
--numReadRequestsOutstanding_;

if (res)
{
if (res) {
counters_->registerReadFinished(startTime);
return res;
}
else
{
LOG(log_.error()) << "Failed read in coroutine: " << res.error();

LOG(log_.error()) << "Failed read in coroutine: " << res.error();
try {
throwErrorIfNeeded(res.error());
} catch (...) {
counters_->registerReadError();
throw;
}
counters_->registerReadRetry();
}
}

@@ -321,23 +391,28 @@ public:
std::vector<ResultType>
readEach(CompletionTokenType token, std::vector<StatementType> const& statements)
{
std::atomic_bool hadError = false;
auto const startTime = std::chrono::steady_clock::now();

std::atomic_uint64_t errorsCount = 0u;
std::atomic_int numOutstanding = statements.size();
numReadRequestsOutstanding_ += statements.size();

auto futures = std::vector<FutureWithCallbackType>{};
futures.reserve(numOutstanding);
counters_->registerReadStarted(statements.size());

auto init = [this, &statements, &futures, &hadError, &numOutstanding]<typename Self>(Self& self) {
auto init = [this, &statements, &futures, &errorsCount, &numOutstanding]<typename Self>(Self& self) {
auto sself = std::make_shared<Self>(std::move(self));
auto executionHandler = [&hadError, &numOutstanding, sself](auto const& res) mutable {
auto executionHandler = [&errorsCount, &numOutstanding, sself](auto const& res) mutable {
if (not res)
hadError = true;
++errorsCount;

// when all async operations complete unblock the result
if (--numOutstanding == 0)
boost::asio::post(
boost::asio::get_associated_executor(*sself), [sself]() mutable { sself->complete(); });
if (--numOutstanding == 0) {
boost::asio::post(boost::asio::get_associated_executor(*sself), [sself]() mutable {
sself->complete();
});
}
};

std::transform(
@@ -346,15 +421,22 @@ public:
std::back_inserter(futures),
[this, &executionHandler](auto const& statement) {
return handle_.get().asyncExecute(statement, executionHandler);
});
}
);
};

boost::asio::async_compose<CompletionTokenType, void()>(
init, token, boost::asio::get_associated_executor(token));
init, token, boost::asio::get_associated_executor(token)
);
numReadRequestsOutstanding_ -= statements.size();

if (hadError)
if (errorsCount > 0) {
ASSERT(errorsCount <= statements.size(), "Errors number cannot exceed statements number");
counters_->registerReadError(errorsCount);
counters_->registerReadFinished(startTime, statements.size() - errorsCount);
throw DatabaseTimeout{};
}
counters_->registerReadFinished(startTime, statements.size());

std::vector<ResultType> results;
results.reserve(futures.size());
@@ -368,21 +450,40 @@ public:
auto entry = future.get();
auto&& res = entry.value();
return std::move(res);
});
}
);

assert(futures.size() == statements.size());
assert(results.size() == statements.size());
ASSERT(
futures.size() == statements.size(),
"Futures size must be equal to statements size. Got {} and {}",
futures.size(),
statements.size()
);
ASSERT(
results.size() == statements.size(),
"Results size must be equal to statements size. Got {} and {}",
results.size(),
statements.size()
);
return results;
}

/**
* @brief Get statistics about the backend.
*/
boost::json::object
stats() const
{
return counters_->report();
}

private:
void
incrementOutstandingRequestCount()
{
{
std::unique_lock<std::mutex> lck(throttleMutex_);
if (!canAddWriteRequest())
{
if (!canAddWriteRequest()) {
LOG(log_.trace()) << "Max outstanding requests reached. "
<< "Waiting for other requests to finish";
throttleCv_.wait(lck, [this]() { return canAddWriteRequest(); });
@@ -395,23 +496,18 @@ private:
decrementOutstandingRequestCount()
{
// sanity check
if (numWriteRequestsOutstanding_ == 0)
{
assert(false);
throw std::runtime_error("decrementing num outstanding below 0");
}
size_t cur = (--numWriteRequestsOutstanding_);
ASSERT(numWriteRequestsOutstanding_ > 0, "Decrementing num outstanding below 0");
size_t const cur = (--numWriteRequestsOutstanding_);
{
// mutex lock required to prevent race condition around spurious
// wakeup
std::lock_guard lck(throttleMutex_);
std::lock_guard const lck(throttleMutex_);
throttleCv_.notify_one();
}
if (cur == 0)
{
if (cur == 0) {
// mutex lock required to prevent race condition around spurious
// wakeup
std::lock_guard lck(syncMutex_);
std::lock_guard const lck(syncMutex_);
syncCv_.notify_one();
}
}
@@ -17,15 +17,22 @@
*/
//==============================================================================

#include <data/cassandra/Error.h>
#include <data/cassandra/impl/Future.h>
#include <data/cassandra/impl/Result.h>
#include "data/cassandra/impl/Future.hpp"

#include <exception>
#include <vector>
#include "data/cassandra/Error.hpp"
#include "data/cassandra/Types.hpp"
#include "data/cassandra/impl/ManagedObject.hpp"
#include "data/cassandra/impl/Result.hpp"

#include <cassandra.h>

#include <cstddef>
#include <memory>
#include <string>
#include <utility>

namespace {
static constexpr auto futureDeleter = [](CassFuture* ptr) { cass_future_free(ptr); };
constexpr auto futureDeleter = [](CassFuture* ptr) { cass_future_free(ptr); };
} // namespace

namespace data::cassandra::detail {
@@ -37,11 +44,10 @@ namespace data::cassandra::detail {
MaybeError
Future::await() const
{
if (auto const rc = cass_future_error_code(*this); rc)
{
if (auto const rc = cass_future_error_code(*this); rc) {
auto errMsg = [this](std::string const& label) {
char const* message;
std::size_t len;
char const* message = nullptr;
std::size_t len = 0;
cass_future_error_message(*this, &message, &len);
return label + ": " + std::string{message, len};
}(cass_error_desc(rc));
@@ -53,20 +59,17 @@ Future::await() const
ResultOrError
Future::get() const
{
if (auto const rc = cass_future_error_code(*this); rc)
{
if (auto const rc = cass_future_error_code(*this); rc) {
auto const errMsg = [this](std::string const& label) {
char const* message;
std::size_t len;
char const* message = nullptr;
std::size_t len = 0;
cass_future_error_message(*this, &message, &len);
return label + ": " + std::string{message, len};
}("future::get()");
return Error{CassandraError{errMsg, rc}};
}
else
{
return Result{cass_future_get_result(*this)};
}

return Result{cass_future_get_result(*this)};
}

void
@@ -77,18 +80,15 @@ invokeHelper(CassFuture* ptr, void* cbPtr)
// stackoverflow.com/questions/77004137/boost-asio-async-compose-gets-stuck-under-load
auto* cb = static_cast<FutureWithCallback::FnType*>(cbPtr);
auto local = std::make_unique<FutureWithCallback::FnType>(std::move(*cb));
if (auto const rc = cass_future_error_code(ptr); rc)
{
if (auto const rc = cass_future_error_code(ptr); rc) {
auto const errMsg = [&ptr](std::string const& label) {
char const* message;
std::size_t len;
char const* message = nullptr;
std::size_t len = 0;
cass_future_error_message(ptr, &message, &len);
return label + ": " + std::string{message, len};
}("invokeHelper");
(*local)(Error{CassandraError{errMsg, rc}});
}
else
{
} else {
(*local)(Result{cass_future_get_result(ptr)});
}
}

@@ -19,15 +19,17 @@

#pragma once

#include <data/cassandra/Types.h>
#include <data/cassandra/impl/ManagedObject.h>
#include "data/cassandra/Types.hpp"
#include "data/cassandra/impl/ManagedObject.hpp"

#include <cassandra.h>

#include <functional>
#include <memory>

namespace data::cassandra::detail {

struct Future : public ManagedObject<CassFuture>
{
struct Future : public ManagedObject<CassFuture> {
/* implicit */ Future(CassFuture* ptr);

MaybeError
@@ -38,10 +40,9 @@ struct Future : public ManagedObject<CassFuture>
};

void
invokeHelper(CassFuture* ptr, void* self);
invokeHelper(CassFuture* ptr, void* cbPtr);

class FutureWithCallback : public Future
{
class FutureWithCallback : public Future {
public:
using FnType = std::function<void(ResultOrError)>;
using FnPtrType = std::unique_ptr<FnType>;
@@ -20,12 +20,12 @@
#pragma once

#include <memory>
#include <stdexcept>

namespace data::cassandra::detail {

template <typename Managed>
class ManagedObject
{
class ManagedObject {
protected:
std::unique_ptr<Managed, void (*)(Managed*)> ptr_;

@@ -36,7 +36,6 @@ public:
if (rawPtr == nullptr)
throw std::runtime_error("Could not create DB object - got nullptr");
}
ManagedObject(ManagedObject&&) = default;

operator Managed*() const
{
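The ManagedObject hunk above shows the wrapper's whole trick: own the C handle in a unique_ptr with a function-pointer deleter and convert implicitly back to the raw pointer for C API calls. A runnable miniature with a stand-in C API (everything below is illustrative, not from the diff):

    #include <cstdlib>
    #include <memory>
    #include <stdexcept>

    struct CassThing;  // opaque C-style handle (stand-in for CassBatch, CassCluster, ...)
    inline CassThing* thing_new() { return static_cast<CassThing*>(std::malloc(1)); }
    inline void thing_free(CassThing* ptr) { std::free(ptr); }

    template <typename Managed>
    class ManagedObject {
    protected:
        std::unique_ptr<Managed, void (*)(Managed*)> ptr_;

    public:
        template <typename DeleterType>
        ManagedObject(Managed* rawPtr, DeleterType deleter) : ptr_{rawPtr, deleter}
        {
            if (rawPtr == nullptr)
                throw std::runtime_error("Could not create DB object - got nullptr");
        }
        ManagedObject(ManagedObject&&) = default;

        operator Managed*() const
        {
            return ptr_.get();
        }
    };

    int main()
    {
        ManagedObject<CassThing> const obj{thing_new(), thing_free};
        CassThing* raw = obj;  // implicit conversion, as used by the cass_* calls above
        (void)raw;
    }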
@@ -17,11 +17,17 @@
*/
//==============================================================================

#include <data/cassandra/impl/Result.h>
#include "data/cassandra/impl/Result.hpp"

#include "data/cassandra/impl/ManagedObject.hpp"

#include <cassandra.h>

#include <cstddef>

namespace {
static constexpr auto resultDeleter = [](CassResult const* ptr) { cass_result_free(ptr); };
static constexpr auto resultIteratorDeleter = [](CassIterator* ptr) { cass_iterator_free(ptr); };
constexpr auto resultDeleter = [](CassResult const* ptr) { cass_result_free(ptr); };
constexpr auto resultIteratorDeleter = [](CassIterator* ptr) { cass_iterator_free(ptr); };
} // namespace

namespace data::cassandra::detail {
@@ -43,7 +49,7 @@ Result::hasRows() const
}

/* implicit */ ResultIterator::ResultIterator(CassIterator* ptr)
: ManagedObject{ptr, resultIteratorDeleter}, hasMore_{cass_iterator_next(ptr)}
: ManagedObject{ptr, resultIteratorDeleter}, hasMore_{cass_iterator_next(ptr) != 0u}
{
}

@@ -56,7 +62,7 @@ ResultIterator::fromResult(Result const& result)
[[maybe_unused]] bool
ResultIterator::moveForward()
{
hasMore_ = cass_iterator_next(*this);
hasMore_ = (cass_iterator_next(*this) != 0u);
return hasMore_;
}
@@ -19,17 +19,25 @@

#pragma once

#include <data/cassandra/impl/ManagedObject.h>
#include <data/cassandra/impl/Tuple.h>
#include <util/Expected.h>
#include "data/cassandra/impl/ManagedObject.hpp"
#include "data/cassandra/impl/Tuple.hpp"

#include <cassandra.h>
#include <ripple/basics/base_uint.h>
#include <ripple/protocol/AccountID.h>
#include <cassandra.h>

#include <compare>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iterator>
#include <optional>
#include <stdexcept>
#include <string>
#include <string_view>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>

namespace data::cassandra::detail {

@@ -44,8 +52,7 @@ extractColumn(CassRow const* row, std::size_t idx)
Type output;

auto throwErrorIfNeeded = [](CassError rc, std::string_view label) {
if (rc != CASS_OK)
{
if (rc != CASS_OK) {
auto const tag = '[' + std::string{label} + ']';
throw std::logic_error(tag + ": " + cass_error_desc(rc));
}
@@ -55,60 +62,46 @@ extractColumn(CassRow const* row, std::size_t idx)
using UintTupleType = std::tuple<uint32_t, uint32_t>;
using UCharVectorType = std::vector<unsigned char>;

if constexpr (std::is_same_v<DecayedType, ripple::uint256>)
{
cass_byte_t const* buf;
std::size_t bufSize;
if constexpr (std::is_same_v<DecayedType, ripple::uint256>) {
cass_byte_t const* buf = nullptr;
std::size_t bufSize = 0;
auto const rc = cass_value_get_bytes(cass_row_get_column(row, idx), &buf, &bufSize);
throwErrorIfNeeded(rc, "Extract ripple::uint256");
output = ripple::uint256::fromVoid(buf);
}
else if constexpr (std::is_same_v<DecayedType, ripple::AccountID>)
{
cass_byte_t const* buf;
std::size_t bufSize;
} else if constexpr (std::is_same_v<DecayedType, ripple::AccountID>) {
cass_byte_t const* buf = nullptr;
std::size_t bufSize = 0;
auto const rc = cass_value_get_bytes(cass_row_get_column(row, idx), &buf, &bufSize);
throwErrorIfNeeded(rc, "Extract ripple::AccountID");
output = ripple::AccountID::fromVoid(buf);
}
else if constexpr (std::is_same_v<DecayedType, UCharVectorType>)
{
cass_byte_t const* buf;
std::size_t bufSize;
} else if constexpr (std::is_same_v<DecayedType, UCharVectorType>) {
cass_byte_t const* buf = nullptr;
std::size_t bufSize = 0;
auto const rc = cass_value_get_bytes(cass_row_get_column(row, idx), &buf, &bufSize);
throwErrorIfNeeded(rc, "Extract vector<unsigned char>");
output = UCharVectorType{buf, buf + bufSize};
}
else if constexpr (std::is_same_v<DecayedType, UintTupleType>)
{
} else if constexpr (std::is_same_v<DecayedType, UintTupleType>) {
auto const* tuple = cass_row_get_column(row, idx);
output = TupleIterator::fromTuple(tuple).extract<uint32_t, uint32_t>();
}
else if constexpr (std::is_convertible_v<DecayedType, std::string>)
{
char const* value;
std::size_t len;
} else if constexpr (std::is_convertible_v<DecayedType, std::string>) {
char const* value = nullptr;
std::size_t len = 0;
auto const rc = cass_value_get_string(cass_row_get_column(row, idx), &value, &len);
throwErrorIfNeeded(rc, "Extract string");
output = std::string{value, len};
}
else if constexpr (std::is_same_v<DecayedType, bool>)
{
cass_bool_t flag;
} else if constexpr (std::is_same_v<DecayedType, bool>) {
cass_bool_t flag = cass_bool_t::cass_false;
auto const rc = cass_value_get_bool(cass_row_get_column(row, idx), &flag);
throwErrorIfNeeded(rc, "Extract bool");
output = flag ? true : false;
output = flag != cass_bool_t::cass_false;
}
// clio only uses bigint (int64_t) so we convert any incoming type
else if constexpr (std::is_convertible_v<DecayedType, int64_t>)
{
int64_t out;
else if constexpr (std::is_convertible_v<DecayedType, int64_t>) {
int64_t out = 0;
auto const rc = cass_value_get_int64(cass_row_get_column(row, idx), &out);
throwErrorIfNeeded(rc, "Extract int64");
output = static_cast<DecayedType>(out);
}
else
{
} else {
// type not supported for extraction
static_assert(unsupported_v<DecayedType>);
}
@@ -116,8 +109,7 @@ extractColumn(CassRow const* row, std::size_t idx)
return output;
}

struct Result : public ManagedObject<CassResult const>
{
struct Result : public ManagedObject<CassResult const> {
/* implicit */ Result(CassResult const* ptr);

[[nodiscard]] std::size_t
@@ -128,7 +120,8 @@ struct Result : public ManagedObject<CassResult const>

template <typename... RowTypes>
std::optional<std::tuple<RowTypes...>>
get() const requires(std::tuple_size<std::tuple<RowTypes...>>{} > 1)
get() const
requires(std::tuple_size<std::tuple<RowTypes...>>{} > 1)
{
// row managed internally by cassandra driver, hence no ManagedObject.
auto const* row = cass_result_first_row(*this);
@@ -153,8 +146,7 @@ struct Result : public ManagedObject<CassResult const>
}
};

class ResultIterator : public ManagedObject<CassIterator>
{
class ResultIterator : public ManagedObject<CassIterator> {
bool hasMore_ = false;

public:
@@ -185,17 +177,13 @@ public:
};

template <typename... Types>
class ResultExtractor
{
class ResultExtractor {
std::reference_wrapper<Result const> ref_;

public:
struct Sentinel
{
};
struct Sentinel {};

struct Iterator
{
struct Iterator {
using iterator_category = std::input_iterator_tag;
using difference_type = std::size_t; // rows count
using value_type = std::tuple<Types...>;
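extractColumn above dispatches on the requested type entirely at compile time; unsupported types fail the build through a dependent-false static_assert rather than at runtime. A reduced sketch of that shape (GenericValue and its fields are invented placeholders for the driver getters):

    #include <cstdint>
    #include <string>
    #include <type_traits>

    template <typename>
    static constexpr bool unsupported_v = false;

    struct GenericValue {  // placeholder for a driver cell
        int64_t num;
        std::string text;
    };

    template <typename Type>
    Type
    extract(GenericValue const& value)
    {
        using DecayedType = std::decay_t<Type>;
        DecayedType output;

        if constexpr (std::is_same_v<DecayedType, std::string>) {
            output = value.text;
        } else if constexpr (std::is_convertible_v<DecayedType, int64_t>) {
            // all integral widths funnel through one getter, then narrow
            output = static_cast<DecayedType>(value.num);
        } else {
            static_assert(unsupported_v<DecayedType>);  // compile-time rejection
        }
        return output;
    }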
@@ -19,10 +19,10 @@

#pragma once

#include <data/cassandra/Handle.h>
#include <data/cassandra/Types.h>
#include <util/Expected.h>
#include <util/log/Logger.h>
#include "data/cassandra/Handle.hpp"
#include "data/cassandra/Types.hpp"
#include "util/Expected.hpp"
#include "util/log/Logger.hpp"

#include <boost/asio.hpp>

@@ -35,8 +35,7 @@ namespace data::cassandra::detail {
/**
* @brief A retry policy that employs exponential backoff
*/
class ExponentialBackoffRetryPolicy
{
class ExponentialBackoffRetryPolicy {
util::Logger log_{"Backend"};

boost::asio::steady_timer timer_;
@@ -75,7 +74,7 @@ public:
retry(Fn&& fn)
{
timer_.expires_after(calculateDelay(attempt_++));
timer_.async_wait([fn = std::forward<Fn>(fn)]([[maybe_unused]] const auto& err) {
timer_.async_wait([fn = std::forward<Fn>(fn)]([[maybe_unused]] auto const& err) {
// todo: deal with cancellation (thru err)
fn();
});
@@ -84,7 +83,7 @@ public:
/**
* @brief Calculates the wait time before attempting another retry
*/
std::chrono::milliseconds
static std::chrono::milliseconds
calculateDelay(uint32_t attempt)
{
return std::chrono::milliseconds{lround(std::pow(2, std::min(10u, attempt)))};
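calculateDelay caps the exponent at 10, so the wait grows 1, 2, 4, ... milliseconds and saturates at 2^10 = 1024 ms. A quick standalone check of the same formula:

    #include <algorithm>
    #include <chrono>
    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    std::chrono::milliseconds
    calculateDelay(uint32_t attempt)
    {
        // same expression as the diff above
        return std::chrono::milliseconds{std::lround(std::pow(2, std::min(10u, attempt)))};
    }

    int main()
    {
        for (uint32_t attempt : {0u, 1u, 5u, 10u, 42u}) {
            std::printf(
                "attempt %u -> %lld ms\n", attempt, static_cast<long long>(calculateDelay(attempt).count())
            );
        }
        // prints 1, 2, 32, 1024, 1024
    }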
@@ -19,14 +19,13 @@

#pragma once

#include <data/cassandra/impl/ManagedObject.h>
#include "data/cassandra/impl/ManagedObject.hpp"

#include <cassandra.h>

namespace data::cassandra::detail {

class Session : public ManagedObject<CassSession>
{
class Session : public ManagedObject<CassSession> {
static constexpr auto deleter = [](CassSession* ptr) { cass_session_free(ptr); };

public:
@@ -17,10 +17,17 @@
*/
//==============================================================================

#include <data/cassandra/impl/SslContext.h>
#include "data/cassandra/impl/SslContext.hpp"

#include "data/cassandra/impl/ManagedObject.hpp"

#include <cassandra.h>

#include <stdexcept>
#include <string>

namespace {
static constexpr auto contextDeleter = [](CassSsl* ptr) { cass_ssl_free(ptr); };
constexpr auto contextDeleter = [](CassSsl* ptr) { cass_ssl_free(ptr); };
} // namespace

namespace data::cassandra::detail {
@@ -28,8 +35,7 @@ namespace data::cassandra::detail {
SslContext::SslContext(std::string const& certificate) : ManagedObject{cass_ssl_new(), contextDeleter}
{
cass_ssl_set_verify_flags(*this, CASS_SSL_VERIFY_NONE);
if (auto const rc = cass_ssl_add_trusted_cert(*this, certificate.c_str()); rc != CASS_OK)
{
if (auto const rc = cass_ssl_add_trusted_cert(*this, certificate.c_str()); rc != CASS_OK) {
throw std::runtime_error(std::string{"Error setting Cassandra SSL Context: "} + cass_error_desc(rc));
}
}
@@ -19,7 +19,7 @@

#pragma once

#include <data/cassandra/impl/ManagedObject.h>
#include "data/cassandra/impl/ManagedObject.hpp"

#include <cassandra.h>

@@ -27,8 +27,7 @@

namespace data::cassandra::detail {

struct SslContext : public ManagedObject<CassSsl>
{
struct SslContext : public ManagedObject<CassSsl> {
explicit SslContext(std::string const& certificate);
};
@@ -19,24 +19,29 @@

#pragma once

#include <data/cassandra/Types.h>
#include <data/cassandra/impl/ManagedObject.h>
#include <data/cassandra/impl/Tuple.h>
#include <util/Expected.h>
#include "data/cassandra/Types.hpp"
#include "data/cassandra/impl/Collection.hpp"
#include "data/cassandra/impl/ManagedObject.hpp"
#include "data/cassandra/impl/Tuple.hpp"

#include <ripple/basics/base_uint.h>
#include <ripple/protocol/STAccount.h>
#include <cassandra.h>
#include <fmt/core.h>
#include <ripple/basics/base_uint.h>
#include <ripple/protocol/AccountID.h>
#include <ripple/protocol/STAccount.h>

#include <chrono>
#include <compare>
#include <iterator>
#include <cstddef>
#include <cstdint>
#include <stdexcept>
#include <string>
#include <string_view>
#include <tuple>
#include <type_traits>
#include <vector>

namespace data::cassandra::detail {

class Statement : public ManagedObject<CassStatement>
{
class Statement : public ManagedObject<CassStatement> {
static constexpr auto deleter = [](CassStatement* ptr) { cass_statement_free(ptr); };

template <typename>
@@ -64,8 +69,6 @@ public:
cass_statement_set_is_idempotent(*this, cass_true);
}

Statement(Statement&&) = default;

/**
* @brief Binds the given arguments to the statement.
*
@@ -75,7 +78,7 @@ public:
void
bind(Args&&... args) const
{
std::size_t idx = 0;
std::size_t idx = 0; // NOLINT(misc-const-correctness)
(this->bindAt<Args>(idx++, std::forward<Args>(args)), ...);
}

@@ -102,51 +105,40 @@ public:
using DecayedType = std::decay_t<Type>;
using UCharVectorType = std::vector<unsigned char>;
using UintTupleType = std::tuple<uint32_t, uint32_t>;
using UintByteTupleType = std::tuple<uint32_t, ripple::uint256>;
using ByteVectorType = std::vector<ripple::uint256>;

if constexpr (std::is_same_v<DecayedType, ripple::uint256>)
{
if constexpr (std::is_same_v<DecayedType, ripple::uint256>) {
auto const rc = bindBytes(value.data(), value.size());
throwErrorIfNeeded(rc, "Bind ripple::uint256");
}
else if constexpr (std::is_same_v<DecayedType, ripple::AccountID>)
{
} else if constexpr (std::is_same_v<DecayedType, ripple::AccountID>) {
auto const rc = bindBytes(value.data(), value.size());
throwErrorIfNeeded(rc, "Bind ripple::AccountID");
}
else if constexpr (std::is_same_v<DecayedType, UCharVectorType>)
{
} else if constexpr (std::is_same_v<DecayedType, UCharVectorType>) {
auto const rc = bindBytes(value.data(), value.size());
throwErrorIfNeeded(rc, "Bind vector<unsigned char>");
}
else if constexpr (std::is_convertible_v<DecayedType, std::string>)
{
} else if constexpr (std::is_convertible_v<DecayedType, std::string>) {
// reinterpret_cast is needed here :'(
auto const rc = bindBytes(reinterpret_cast<unsigned char const*>(value.data()), value.size());
throwErrorIfNeeded(rc, "Bind string (as bytes)");
}
else if constexpr (std::is_same_v<DecayedType, UintTupleType>)
{
auto const rc = cass_statement_bind_tuple(*this, idx, Tuple{std::move(value)});
throwErrorIfNeeded(rc, "Bind tuple<uint32, uint32>");
}
else if constexpr (std::is_same_v<DecayedType, bool>)
{
} else if constexpr (std::is_same_v<DecayedType, UintTupleType> || std::is_same_v<DecayedType, UintByteTupleType>) {
auto const rc = cass_statement_bind_tuple(*this, idx, Tuple{std::forward<Type>(value)});
throwErrorIfNeeded(rc, "Bind tuple<uint32, uint32> or <uint32_t, ripple::uint256>");
} else if constexpr (std::is_same_v<DecayedType, ByteVectorType>) {
auto const rc = cass_statement_bind_collection(*this, idx, Collection{std::forward<Type>(value)});
throwErrorIfNeeded(rc, "Bind collection");
} else if constexpr (std::is_same_v<DecayedType, bool>) {
auto const rc = cass_statement_bind_bool(*this, idx, value ? cass_true : cass_false);
throwErrorIfNeeded(rc, "Bind bool");
}
else if constexpr (std::is_same_v<DecayedType, Limit>)
{
} else if constexpr (std::is_same_v<DecayedType, Limit>) {
auto const rc = cass_statement_bind_int32(*this, idx, value.limit);
throwErrorIfNeeded(rc, "Bind limit (int32)");
}
// clio only uses bigint (int64_t) so we convert any incoming type
else if constexpr (std::is_convertible_v<DecayedType, int64_t>)
{
else if constexpr (std::is_convertible_v<DecayedType, int64_t>) {
auto const rc = cass_statement_bind_int64(*this, idx, value);
throwErrorIfNeeded(rc, "Bind int64");
}
else
{
} else {
// type not supported for binding
static_assert(unsupported_v<DecayedType>);
}
@@ -158,8 +150,7 @@ public:
*
* This is used to produce Statement objects that can be executed.
*/
class PreparedStatement : public ManagedObject<CassPrepared const>
{
class PreparedStatement : public ManagedObject<CassPrepared const> {
static constexpr auto deleter = [](CassPrepared const* ptr) { cass_prepared_free(ptr); };

public:
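bind() above walks the argument pack with a fold over the comma operator, handing each argument the next column index in order. A stripped-down illustration of the same fold (bindOne here just prints instead of calling the driver):

    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <utility>

    template <typename Type>
    void
    bindOne(std::size_t idx, Type const& value)
    {
        std::cout << "bind column " << idx << " = " << value << '\n';
    }

    template <typename... Args>
    void
    bind(Args&&... args)
    {
        std::size_t idx = 0;
        // comma-operator fold: the calls are sequenced left to right,
        // so idx++ hands out 0, 1, 2, ... in argument order
        (bindOne(idx++, std::forward<Args>(args)), ...);
    }

    int main()
    {
        bind(42, std::string{"hello"}, true);  // columns 0, 1, 2
    }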
@@ -17,11 +17,15 @@
*/
//==============================================================================

#include <data/cassandra/impl/Tuple.h>
#include "data/cassandra/impl/Tuple.hpp"

#include "data/cassandra/impl/ManagedObject.hpp"

#include <cassandra.h>

namespace {
static constexpr auto tupleDeleter = [](CassTuple* ptr) { cass_tuple_free(ptr); };
static constexpr auto tupleIteratorDeleter = [](CassIterator* ptr) { cass_iterator_free(ptr); };
constexpr auto tupleDeleter = [](CassTuple* ptr) { cass_tuple_free(ptr); };
constexpr auto tupleIteratorDeleter = [](CassIterator* ptr) { cass_iterator_free(ptr); };
} // namespace

namespace data::cassandra::detail {
@@ -19,19 +19,23 @@

#pragma once

#include <data/cassandra/impl/ManagedObject.h>
#include "data/cassandra/impl/ManagedObject.hpp"

#include <cassandra.h>
#include <ripple/basics/base_uint.h>

#include <cstddef>
#include <cstdint>
#include <functional>
#include <stdexcept>
#include <string>
#include <string_view>
#include <tuple>
#include <type_traits>

namespace data::cassandra::detail {

class Tuple : public ManagedObject<CassTuple>
{
class Tuple : public ManagedObject<CassTuple> {
static constexpr auto deleter = [](CassTuple* ptr) { cass_tuple_free(ptr); };

template <typename>
@@ -61,8 +65,7 @@ public:
{
using std::to_string;
auto throwErrorIfNeeded = [idx](CassError rc, std::string_view label) {
if (rc != CASS_OK)
{
if (rc != CASS_OK) {
auto const tag = '[' + std::string{label} + ']';
throw std::logic_error(tag + " at idx " + to_string(idx) + ": " + cass_error_desc(rc));
}
@@ -70,27 +73,30 @@ public:

using DecayedType = std::decay_t<Type>;

if constexpr (std::is_same_v<DecayedType, bool>)
{
if constexpr (std::is_same_v<DecayedType, bool>) {
auto const rc = cass_tuple_set_bool(*this, idx, value ? cass_true : cass_false);
throwErrorIfNeeded(rc, "Bind bool");
}
// clio only uses bigint (int64_t) so we convert any incoming type
else if constexpr (std::is_convertible_v<DecayedType, int64_t>)
{
auto const rc = cass_tuple_set_int64(*this, idx, value);
else if constexpr (std::is_convertible_v<DecayedType, int64_t>) {
auto const rc = cass_tuple_set_int64(*this, idx, std::forward<Type>(value));
throwErrorIfNeeded(rc, "Bind int64");
}
else
{
} else if constexpr (std::is_same_v<DecayedType, ripple::uint256>) {
auto const rc = cass_tuple_set_bytes(
*this,
idx,
static_cast<cass_byte_t const*>(static_cast<unsigned char const*>(value.data())),
value.size()
);
throwErrorIfNeeded(rc, "Bind ripple::uint256");
} else {
// type not supported for binding
static_assert(unsupported_v<DecayedType>);
}
}
};

class TupleIterator : public ManagedObject<CassIterator>
{
class TupleIterator : public ManagedObject<CassIterator> {
template <typename>
static constexpr bool unsupported_v = false;

@@ -119,8 +125,7 @@ private:
throw std::logic_error("Could not extract next value from tuple iterator");

auto throwErrorIfNeeded = [](CassError rc, std::string_view label) {
if (rc != CASS_OK)
{
if (rc != CASS_OK) {
auto const tag = '[' + std::string{label} + ']';
throw std::logic_error(tag + ": " + cass_error_desc(rc));
}
@@ -129,15 +134,12 @@ private:
using DecayedType = std::decay_t<Type>;

// clio only uses bigint (int64_t) so we convert any incoming type
if constexpr (std::is_convertible_v<DecayedType, int64_t>)
{
int64_t out;
if constexpr (std::is_convertible_v<DecayedType, int64_t>) {
int64_t out = 0;
auto const rc = cass_value_get_int64(cass_iterator_get_value(*this), &out);
throwErrorIfNeeded(rc, "Extract int64 from tuple");
output = static_cast<DecayedType>(out);
}
else
{
} else {
// type not supported for extraction
static_assert(unsupported_v<DecayedType>);
}
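TupleIterator::extract (seen earlier as extract<uint32_t, uint32_t>()) materializes a std::tuple by pulling one value per element type. A reduced sketch of that variadic shape, with readNext as a placeholder for the driver-backed cass_iterator_next/get calls:

    #include <cstddef>
    #include <cstdint>
    #include <tuple>
    #include <utility>
    #include <vector>

    class FakeTupleIterator {
        std::vector<int64_t> values_;
        std::size_t pos_ = 0;

        int64_t readNext() { return values_[pos_++]; }  // stands in for the driver call

    public:
        explicit FakeTupleIterator(std::vector<int64_t> values) : values_(std::move(values)) {}

        template <typename... Types>
        std::tuple<Types...>
        extract()
        {
            // elements of a braced-init-list are evaluated left to right,
            // so the values come out in declaration order
            return {static_cast<Types>(readNext())...};
        }
    };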
@@ -20,12 +20,19 @@
/** @file */
#pragma once

#include "util/Assert.hpp"

#include <ripple/basics/base_uint.h>

#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <mutex>
#include <optional>
#include <queue>
#include <sstream>
#include <vector>

namespace etl {
/**
@@ -36,8 +43,7 @@ namespace etl {
* Any later calls to methods of this datastructure will not wait. Once the datastructure is stopped, the datastructure
* remains stopped for the rest of its lifetime.
*/
class NetworkValidatedLedgers
{
class NetworkValidatedLedgers {
// max sequence validated by network
std::optional<uint32_t> max_;

@@ -62,7 +68,7 @@ public:
void
push(uint32_t idx)
{
std::lock_guard lck(m_);
std::lock_guard const lck(m_);
if (!max_ || idx > *max_)
max_ = idx;
cv_.notify_all();
@@ -95,10 +101,11 @@ public:
{
std::unique_lock lck(m_);
auto pred = [sequence, this]() -> bool { return (max_ && sequence <= *max_); };
if (maxWaitMs)
if (maxWaitMs) {
cv_.wait_for(lck, std::chrono::milliseconds(*maxWaitMs));
else
} else {
cv_.wait(lck, pred);
}
return pred();
}
};
@@ -111,8 +118,7 @@ public:
* added or removed from the queue. These waits are blocking calls.
*/
template <class T>
class ThreadSafeQueue
{
class ThreadSafeQueue {
std::queue<T> queue_;

mutable std::mutex m_;
@@ -190,7 +196,7 @@ public:
std::optional<T>
tryPop()
{
std::scoped_lock lck(m_);
std::scoped_lock const lck(m_);
if (queue_.empty())
return {};

@@ -210,18 +216,17 @@ public:
inline std::vector<ripple::uint256>
getMarkers(size_t numMarkers)
{
assert(numMarkers <= 256);
ASSERT(numMarkers <= 256, "Number of markers must be <= 256. Got: {}", numMarkers);

unsigned char incr = 256 / numMarkers;
unsigned char const incr = 256 / numMarkers;

std::vector<ripple::uint256> markers;
markers.reserve(numMarkers);
ripple::uint256 base{0};
for (size_t i = 0; i < numMarkers; ++i)
{
for (size_t i = 0; i < numMarkers; ++i) {
markers.push_back(base);
base.data()[0] += incr;
}
return markers;
}
} // namespace etl
} // namespace etl
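getMarkers splits the 256 possible values of a key's first byte evenly, so for example numMarkers = 4 yields markers whose leading bytes are 0x00, 0x40, 0x80, 0xC0. A sketch of the same arithmetic over plain bytes (uint256 reduced to just its first byte for brevity):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Returns the first byte of each marker, mirroring getMarkers above.
    std::vector<unsigned char>
    markerFirstBytes(std::size_t numMarkers)
    {
        unsigned char const incr = static_cast<unsigned char>(256 / numMarkers);

        std::vector<unsigned char> firstBytes;
        firstBytes.reserve(numMarkers);

        unsigned char base = 0;
        for (std::size_t i = 0; i < numMarkers; ++i) {
            firstBytes.push_back(base);
            base += incr;  // wraps back to 0 only after the last marker
        }
        return firstBytes;
    }

    int main()
    {
        for (auto b : markerFirstBytes(4))
            std::printf("0x%02X\n", b);  // 0x00 0x40 0x80 0xC0
    }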
@@ -17,10 +17,28 @@
*/
//==============================================================================

#include <etl/ETLService.h>
#include "etl/ETLService.hpp"

#include "data/BackendInterface.hpp"
#include "util/Assert.hpp"
#include "util/Constants.hpp"
#include "util/config/Config.hpp"
#include "util/log/Logger.hpp"

#include <boost/asio/io_context.hpp>
#include <ripple/beast/core/CurrentThreadName.h>
#include <ripple/protocol/LedgerHeader.h>

#include <chrono>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <stdexcept>
#include <thread>
#include <utility>
#include <vector>

namespace etl {
// Database must be populated when this starts
std::optional<uint32_t>
@@ -29,23 +47,31 @@ ETLService::runETLPipeline(uint32_t startSequence, uint32_t numExtractors)
if (finishSequence_ && startSequence > *finishSequence_)
return {};

LOG(log_.debug()) << "Wait for cache containing seq " << startSequence - 1
<< " current cache last seq =" << backend_->cache().latestLedgerSequence();
backend_->cache().waitUntilCacheContainsSeq(startSequence - 1);

LOG(log_.debug()) << "Starting etl pipeline";
state_.isWriting = true;

auto rng = backend_->hardFetchLedgerRangeNoThrow();
if (!rng || rng->maxSequence < startSequence - 1)
{
assert(false);
throw std::runtime_error("runETLPipeline: parent ledger is null");
}
auto const rng = backend_->hardFetchLedgerRangeNoThrow();
ASSERT(rng.has_value(), "Parent ledger range can't be null");
ASSERT(
rng->maxSequence >= startSequence - 1,
"Got not parent ledger. rnd->maxSequence = {}, startSequence = {}",
rng->maxSequence,
startSequence
);

auto const begin = std::chrono::system_clock::now();
auto extractors = std::vector<std::unique_ptr<ExtractorType>>{};
auto pipe = DataPipeType{numExtractors, startSequence};

for (auto i = 0u; i < numExtractors; ++i)
for (auto i = 0u; i < numExtractors; ++i) {
extractors.push_back(std::make_unique<ExtractorType>(
pipe, networkValidatedLedgers_, ledgerFetcher_, startSequence + i, finishSequence_, state_));
pipe, networkValidatedLedgers_, ledgerFetcher_, startSequence + i, finishSequence_, state_
));
}

auto transformer =
TransformerType{pipe, backend_, ledgerLoader_, ledgerPublisher_, amendmentBlockHandler_, startSequence, state_};
@@ -58,8 +84,9 @@ ETLService::runETLPipeline(uint32_t startSequence, uint32_t numExtractors)

auto const end = std::chrono::system_clock::now();
auto const lastPublishedSeq = ledgerPublisher_.getLastPublishedSequence();
static constexpr auto NANOSECONDS_PER_SECOND = 1'000'000'000.0;
LOG(log_.debug()) << "Extracted and wrote " << lastPublishedSeq.value_or(startSequence) - startSequence << " in "
<< ((end - begin).count()) / 1000000000.0;
<< ((end - begin).count()) / NANOSECONDS_PER_SECOND;

state_.isWriting = false;

@@ -79,54 +106,40 @@ void
ETLService::monitor()
{
auto rng = backend_->hardFetchLedgerRangeNoThrow();
if (!rng)
{
if (!rng) {
LOG(log_.info()) << "Database is empty. Will download a ledger from the network.";
std::optional<ripple::LedgerHeader> ledger;

try
{
if (startSequence_)
{
try {
if (startSequence_) {
LOG(log_.info()) << "ledger sequence specified in config. "
<< "Will begin ETL process starting with ledger " << *startSequence_;
ledger = ledgerLoader_.loadInitialLedger(*startSequence_);
}
else
{
} else {
LOG(log_.info()) << "Waiting for next ledger to be validated by network...";
std::optional<uint32_t> mostRecentValidated = networkValidatedLedgers_->getMostRecent();

if (mostRecentValidated)
{
if (mostRecentValidated) {
LOG(log_.info()) << "Ledger " << *mostRecentValidated << " has been validated. Downloading...";
ledger = ledgerLoader_.loadInitialLedger(*mostRecentValidated);
}
else
{
LOG(log_.info()) << "The wait for the next validated ledger has been aborted. Exiting monitor loop";
} else {
LOG(log_.info()) << "The wait for the next validated ledger has been aborted. "
"Exiting monitor loop";
return;
}
}
}
catch (std::runtime_error const& e)
{
} catch (std::runtime_error const& e) {
LOG(log_.fatal()) << "Failed to load initial ledger: " << e.what();
return amendmentBlockHandler_.onAmendmentBlock();
}

if (ledger)
{
if (ledger) {
rng = backend_->hardFetchLedgerRangeNoThrow();
}
else
{
} else {
LOG(log_.error()) << "Failed to load initial ledger. Exiting monitor loop";
return;
}
}
else
{
} else {
if (startSequence_)
LOG(log_.warn()) << "start sequence specified but db is already populated";

@@ -134,14 +147,13 @@ ETLService::monitor()
cacheLoader_.load(rng->maxSequence);
}

assert(rng);
ASSERT(rng.has_value(), "Ledger range can't be null");
uint32_t nextSequence = rng->maxSequence + 1;

LOG(log_.debug()) << "Database is populated. "
<< "Starting monitor loop. sequence = " << nextSequence;

while (true)
{
while (not isStopping()) {
nextSequence = publishNextSequence(nextSequence);
}
}
@@ -149,13 +161,10 @@ ETLService::monitor()
uint32_t
ETLService::publishNextSequence(uint32_t nextSequence)
{
if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); rng && rng->maxSequence >= nextSequence)
{
if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); rng && rng->maxSequence >= nextSequence) {
ledgerPublisher_.publish(nextSequence, {});
++nextSequence;
}
else if (networkValidatedLedgers_->waitUntilValidatedByNetwork(nextSequence, 1000))
{
} else if (networkValidatedLedgers_->waitUntilValidatedByNetwork(nextSequence, util::MILLISECONDS_PER_SECOND)) {
LOG(log_.info()) << "Ledger with sequence = " << nextSequence << " has been validated by the network. "
<< "Attempting to find in database and publish";

@@ -166,10 +175,9 @@ ETLService::publishNextSequence(uint32_t nextSequence)
// waits one second between each attempt to read the ledger from the
// database
constexpr size_t timeoutSeconds = 10;
bool success = ledgerPublisher_.publish(nextSequence, timeoutSeconds);
bool const success = ledgerPublisher_.publish(nextSequence, timeoutSeconds);

if (!success)
{
if (!success) {
LOG(log_.warn()) << "Failed to publish ledger with sequence = " << nextSequence << " . Beginning ETL";

// returns the most recent sequence published empty optional if no sequence was published
@@ -179,9 +187,7 @@ ETLService::publishNextSequence(uint32_t nextSequence)
// if no ledger was published, don't increment nextSequence
if (lastPublished)
nextSequence = *lastPublished + 1;
}
else
{
} else {
++nextSequence;
}
}
@@ -193,24 +199,20 @@ ETLService::monitorReadOnly()
{
LOG(log_.debug()) << "Starting reporting in strict read only mode";

const auto latestSequenceOpt = [this]() -> std::optional<uint32_t> {
auto const latestSequenceOpt = [this]() -> std::optional<uint32_t> {
auto rng = backend_->hardFetchLedgerRangeNoThrow();

if (!rng)
{
if (auto net = networkValidatedLedgers_->getMostRecent())
if (!rng) {
if (auto net = networkValidatedLedgers_->getMostRecent()) {
return *net;
else
return std::nullopt;
}
else
{
return rng->maxSequence;
}
return std::nullopt;
}

return rng->maxSequence;
}();

if (!latestSequenceOpt.has_value())
{
if (!latestSequenceOpt.has_value()) {
return;
}

@@ -219,18 +221,15 @@ ETLService::monitorReadOnly()
cacheLoader_.load(latestSequence);
latestSequence++;

while (true)
{
if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); rng && rng->maxSequence >= latestSequence)
{
while (not isStopping()) {
if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); rng && rng->maxSequence >= latestSequence) {
ledgerPublisher_.publish(latestSequence, {});
latestSequence = latestSequence + 1;
}
else
{
// if we can't, wait until it's validated by the network, or 1 second passes, whichever occurs first.
// Even if we don't hear from rippled, if ledgers are being written to the db, we publish them.
networkValidatedLedgers_->waitUntilValidatedByNetwork(latestSequence, 1000);
} else {
// if we can't, wait until it's validated by the network, or 1 second passes, whichever occurs
// first. Even if we don't hear from rippled, if ledgers are being written to the db, we publish
// them.
networkValidatedLedgers_->waitUntilValidatedByNetwork(latestSequence, util::MILLISECONDS_PER_SECOND);
}
}
}
@@ -250,10 +249,11 @@ ETLService::doWork()
worker_ = std::thread([this]() {
beast::setCurrentThreadName("ETLService worker");

if (state_.isReadOnly)
if (state_.isReadOnly) {
monitorReadOnly();
else
} else {
monitor();
}
});
}

@@ -263,14 +263,15 @@ ETLService::ETLService(
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManagerType> subscriptions,
std::shared_ptr<LoadBalancerType> balancer,
std::shared_ptr<NetworkValidatedLedgersType> ledgers)
std::shared_ptr<NetworkValidatedLedgersType> ledgers
)
: backend_(backend)
, loadBalancer_(balancer)
, networkValidatedLedgers_(ledgers)
, networkValidatedLedgers_(std::move(ledgers))
, cacheLoader_(config, ioc, backend, backend->cache())
, ledgerFetcher_(backend, balancer)
, ledgerLoader_(backend, balancer, ledgerFetcher_, state_)
, ledgerPublisher_(ioc, backend, subscriptions, state_)
, ledgerPublisher_(ioc, backend, backend->cache(), subscriptions, state_)
, amendmentBlockHandler_(ioc, state_)
{
startSequence_ = config.maybeValue<uint32_t>("start_sequence");
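Several hunks above swap a plain assert for clio's ASSERT, which carries an fmt-style formatted message and fires in release builds too. The real macro lives in util/Assert.hpp; the following is a rough stand-in showing the intent, not the actual clio implementation:

    #include <cstdio>
    #include <cstdlib>
    #include <fmt/core.h>

    // Illustrative only: unlike <cassert>'s assert, the condition is always
    // evaluated, and failures abort with a formatted diagnostic.
    #define ASSERT_SKETCH(condition, ...)                                \
        do {                                                             \
            if (!(condition)) {                                          \
                fmt::print(stderr, "Assertion failed: {}\n",             \
                           fmt::format(__VA_ARGS__));                    \
                std::abort();                                            \
            }                                                            \
        } while (false)

    int main()
    {
        int const numMarkers = 300;
        ASSERT_SKETCH(numMarkers <= 256, "Number of markers must be <= 256. Got: {}", numMarkers);
    }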
@@ -19,34 +19,44 @@

#pragma once

#include <data/BackendInterface.h>
#include <data/LedgerCache.h>
#include <etl/LoadBalancer.h>
#include <etl/Source.h>
#include <etl/SystemState.h>
#include <etl/impl/AmendmentBlock.h>
#include <etl/impl/CacheLoader.h>
#include <etl/impl/ExtractionDataPipe.h>
#include <etl/impl/Extractor.h>
#include <etl/impl/LedgerFetcher.h>
#include <etl/impl/LedgerLoader.h>
#include <etl/impl/LedgerPublisher.h>
#include <etl/impl/Transformer.h>
#include <feed/SubscriptionManager.h>
#include <util/log/Logger.h>
#include "data/BackendInterface.hpp"
#include "data/LedgerCache.hpp"
#include "etl/ETLHelpers.hpp"
#include "etl/ETLState.hpp"
#include "etl/LoadBalancer.hpp"
#include "etl/Source.hpp"
#include "etl/SystemState.hpp"
#include "etl/impl/AmendmentBlock.hpp"
#include "etl/impl/CacheLoader.hpp"
#include "etl/impl/ExtractionDataPipe.hpp"
#include "etl/impl/Extractor.hpp"
#include "etl/impl/LedgerFetcher.hpp"
#include "etl/impl/LedgerLoader.hpp"
#include "etl/impl/LedgerPublisher.hpp"
#include "etl/impl/Transformer.hpp"
#include "feed/SubscriptionManager.hpp"
#include "util/log/Logger.hpp"

#include <ripple/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>
#include <boost/asio/io_context.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/json/object.hpp>
#include <grpcpp/grpcpp.h>
#include <org/xrpl/rpc/v1/get_ledger.pb.h>
#include <ripple/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>

#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <thread>

struct AccountTransactionsData;
struct NFTTransactionsData;
struct NFTsData;
namespace feed {
class SubscriptionManager;
}
} // namespace feed

/**
* @brief This namespace contains everything to do with the ETL and ETL sources.
@@ -66,18 +76,18 @@ namespace etl {
* the others will fall back to monitoring/publishing. In this sense, this class dynamically transitions from monitoring
* to writing and from writing to monitoring, based on the activity of other processes running on different machines.
*/
class ETLService
{
class ETLService {
// TODO: make these template parameters in ETLService
using SubscriptionManagerType = feed::SubscriptionManager;
using LoadBalancerType = LoadBalancer;
using NetworkValidatedLedgersType = NetworkValidatedLedgers;
using DataPipeType = etl::detail::ExtractionDataPipe<org::xrpl::rpc::v1::GetLedgerResponse>;
using CacheLoaderType = etl::detail::CacheLoader<data::LedgerCache>;
using CacheType = data::LedgerCache;
using CacheLoaderType = etl::detail::CacheLoader<CacheType>;
using LedgerFetcherType = etl::detail::LedgerFetcher<LoadBalancerType>;
using ExtractorType = etl::detail::Extractor<DataPipeType, NetworkValidatedLedgersType, LedgerFetcherType>;
using LedgerLoaderType = etl::detail::LedgerLoader<LoadBalancerType, LedgerFetcherType>;
using LedgerPublisherType = etl::detail::LedgerPublisher<SubscriptionManagerType>;
using LedgerPublisherType = etl::detail::LedgerPublisher<SubscriptionManagerType, CacheType>;
using AmendmentBlockHandlerType = etl::detail::AmendmentBlockHandler<>;
using TransformerType =
etl::detail::Transformer<DataPipeType, LedgerLoaderType, LedgerPublisherType, AmendmentBlockHandlerType>;
@@ -121,7 +131,8 @@ public:
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManagerType> subscriptions,
std::shared_ptr<LoadBalancerType> balancer,
std::shared_ptr<NetworkValidatedLedgersType> ledgers);
std::shared_ptr<NetworkValidatedLedgersType> ledgers
);

/**
* @brief A factory function to spawn new ETLService instances.
@@ -142,7 +153,8 @@ public:
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManagerType> subscriptions,
std::shared_ptr<LoadBalancerType> balancer,
std::shared_ptr<NetworkValidatedLedgersType> ledgers)
std::shared_ptr<NetworkValidatedLedgersType> ledgers
)
{
auto etl = std::make_shared<ETLService>(config, ioc, backend, subscriptions, balancer, ledgers);
etl->run();
@@ -204,6 +216,16 @@ public:
return result;
}

/**
* @brief Get the etl nodes' state
* @return the etl nodes' state, nullopt if etl nodes are not connected
*/
std::optional<etl::ETLState>
getETLState() const noexcept
{
return loadBalancer_->getETLState();
}

private:
/**
* @brief Run the ETL pipeline.
@@ -252,7 +274,7 @@ private:
* @return true if stopping; false otherwise
*/
bool
isStopping()
isStopping() const
{
return state_.isStopping;
}
@@ -265,7 +287,7 @@ private:
* @return the number of markers
*/
std::uint32_t
getNumMarkers()
getNumMarkers() const
{
return numMarkers_;
}
src/etl/ETLState.cpp (new file, 50 lines)
@@ -0,0 +1,50 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include "etl/ETLState.hpp"

#include "rpc/JS.hpp"

#include <boost/json/conversion.hpp>
#include <boost/json/value.hpp>
#include <boost/json/value_to.hpp>
#include <ripple/protocol/jss.h>

#include <cstdint>

namespace etl {

ETLState
tag_invoke(boost::json::value_to_tag<ETLState>, boost::json::value const& jv)
{
ETLState state;
auto const& jsonObject = jv.as_object();

if (!jsonObject.contains(JS(error))) {
if (jsonObject.contains(JS(result)) && jsonObject.at(JS(result)).as_object().contains(JS(info))) {
auto const rippledInfo = jsonObject.at(JS(result)).as_object().at(JS(info)).as_object();
if (rippledInfo.contains(JS(network_id)))
state.networkID.emplace(boost::json::value_to<int64_t>(rippledInfo.at(JS(network_id))));
}
}

return state;
}

} // namespace etl
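tag_invoke here is the customization hook that boost::json finds via argument-dependent lookup, which is what lets boost::json::value_to<ETLState>(...) work in ETLState.hpp below. A minimal standalone example of the same mechanism (Settings and the "port" field are invented for illustration; the program must link against Boost.JSON):

    #include <boost/json.hpp>
    #include <cstdint>
    #include <iostream>

    namespace demo {

    struct Settings {
        std::int64_t port = 0;
    };

    // Found by ADL when boost::json::value_to<Settings>(jv) is called.
    Settings
    tag_invoke(boost::json::value_to_tag<Settings>, boost::json::value const& jv)
    {
        Settings s;
        if (auto const* obj = jv.if_object(); obj != nullptr && obj->contains("port"))
            s.port = obj->at("port").as_int64();
        return s;
    }

    }  // namespace demo

    int main()
    {
        auto const jv = boost::json::parse(R"({"port": 51233})");
        auto const settings = boost::json::value_to<demo::Settings>(jv);
        std::cout << settings.port << '\n';  // 51233
    }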
src/etl/ETLState.hpp (new file, 63 lines)
@@ -0,0 +1,63 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include "data/BackendInterface.hpp"

#include <boost/json.hpp>
#include <boost/json/conversion.hpp>
#include <boost/json/value.hpp>
#include <boost/json/value_to.hpp>

#include <cstdint>
#include <optional>

namespace etl {

/**
* @brief This class is responsible for fetching and storing the state of the ETL information, such as the network id
*/
struct ETLState {
std::optional<uint32_t> networkID;

/**
* @brief Fetch the ETL state from the rippled server
* @param source The source to fetch the state from
* @return The ETL state, nullopt if source not available
*/
template <typename Forward>
static std::optional<ETLState>
fetchETLStateFromSource(Forward const& source) noexcept
{
auto const serverInfoRippled = data::synchronous([&source](auto yield) {
return source.forwardToRippled({{"command", "server_info"}}, std::nullopt, yield);
});

if (serverInfoRippled)
return boost::json::value_to<ETLState>(boost::json::value(*serverInfoRippled));

return std::nullopt;
}
};

ETLState
tag_invoke(boost::json::value_to_tag<ETLState>, boost::json::value const& jv);

} // namespace etl
@@ -17,24 +17,36 @@
*/
//==============================================================================

#include <data/DBHelpers.h>
#include <etl/ETLService.h>
#include <etl/NFTHelpers.h>
#include <etl/ProbingSource.h>
#include <etl/Source.h>
#include <rpc/RPCHelpers.h>
#include <util/Profiler.h>
#include <util/log/Logger.h>
#include "etl/LoadBalancer.hpp"

#include <ripple/beast/net/IPEndpoint.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <boost/asio/strand.hpp>
#include <boost/beast/http.hpp>
#include <boost/beast/ssl.hpp>
#include <boost/json.hpp>
#include <boost/json/src.hpp>
#include "data/BackendInterface.hpp"
#include "etl/ETLHelpers.hpp"
#include "etl/ETLService.hpp"
#include "etl/ETLState.hpp"
#include "etl/ProbingSource.hpp"
#include "etl/Source.hpp"
#include "util/Assert.hpp"
#include "util/Random.hpp"
#include "util/log/Logger.hpp"

#include <boost/asio/io_context.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/json/array.hpp>
#include <boost/json/object.hpp>
#include <boost/json/value.hpp>
#include <fmt/core.h>

#include <algorithm>
#include <chrono>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <stdexcept>
#include <string>
#include <thread>
#include <utility>
#include <vector>

using namespace util;

@@ -47,7 +59,8 @@ LoadBalancer::make_Source(
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<feed::SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers,
LoadBalancer& balancer)
LoadBalancer& balancer
)
{
auto src = std::make_unique<ProbingSource>(config, ioc, backend, subscriptions, validatedLedgers, balancer);
src->run();
@@ -61,7 +74,8 @@ LoadBalancer::make_LoadBalancer(
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<feed::SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers)
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers
)
{
return std::make_shared<LoadBalancer>(config, ioc, backend, subscriptions, validatedLedgers);
}
@@ -71,20 +85,54 @@ LoadBalancer::LoadBalancer(
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<feed::SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers)
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers
)
{
if (auto value = config.maybeValue<uint32_t>("num_markers"); value)
downloadRanges_ = std::clamp(*value, 1u, 256u);
else if (backend->fetchLedgerRange())
static constexpr std::uint32_t MAX_DOWNLOAD = 256;
if (auto value = config.maybeValue<uint32_t>("num_markers"); value) {
downloadRanges_ = std::clamp(*value, 1u, MAX_DOWNLOAD);
} else if (backend->fetchLedgerRange()) {
downloadRanges_ = 4;
}

for (auto const& entry : config.array("etl_sources"))
{
auto const allowNoEtl = config.valueOr("allow_no_etl", false);

auto const checkOnETLFailure = [this, allowNoEtl](std::string const& log) {
LOG(log_.error()) << log;

if (!allowNoEtl) {
LOG(log_.error()) << "Set allow_no_etl as true in config to allow clio run without valid ETL sources.";
throw std::logic_error("ETL configuration error.");
}
};

for (auto const& entry : config.array("etl_sources")) {
std::unique_ptr<Source> source = make_Source(entry, ioc, backend, subscriptions, validatedLedgers, *this);

// checking etl node validity
auto const stateOpt = ETLState::fetchETLStateFromSource(*source);

if (!stateOpt) {
checkOnETLFailure(fmt::format(
"Failed to fetch ETL state from source = {} Please check the configuration and network",
source->toString()
));
} else if (etlState_ && etlState_->networkID && stateOpt->networkID && etlState_->networkID != stateOpt->networkID) {
checkOnETLFailure(fmt::format(
"ETL sources must be on the same network. Source network id = {} does not match others network id = {}",
*(stateOpt->networkID),
*(etlState_->networkID)
));
} else {
etlState_ = stateOpt;
}

sources_.push_back(std::move(source));
LOG(log_.info()) << "Added etl source - " << sources_.back()->toString();
}

if (sources_.empty())
checkOnETLFailure("No ETL sources configured. Please check the configuration");
}

LoadBalancer::~LoadBalancer()
@@ -100,15 +148,17 @@ LoadBalancer::loadInitialLedger(uint32_t sequence, bool cacheOnly)
[this, &response, &sequence, cacheOnly](auto& source) {
auto [data, res] = source->loadInitialLedger(sequence, downloadRanges_, cacheOnly);

if (!res)
if (!res) {
LOG(log_.error()) << "Failed to download initial ledger."
<< " Sequence = " << sequence << " source = " << source->toString();
else
} else {
response = std::move(data);
}

return res;
},
sequence);
sequence
);
return {std::move(response), success};
}

@@ -116,43 +166,43 @@ LoadBalancer::OptionalGetLedgerResponseType
LoadBalancer::fetchLedger(uint32_t ledgerSequence, bool getObjects, bool getObjectNeighbors)
{
GetLedgerResponseType response;
bool success = execute(
bool const success = execute(
[&response, ledgerSequence, getObjects, getObjectNeighbors, log = log_](auto& source) {
auto [status, data] = source->fetchLedger(ledgerSequence, getObjects, getObjectNeighbors);
response = std::move(data);
if (status.ok() && response.validated())
{
if (status.ok() && response.validated()) {
LOG(log.info()) << "Successfully fetched ledger = " << ledgerSequence
<< " from source = " << source->toString();
return true;
}
else
{
LOG(log.warn()) << "Could not fetch ledger " << ledgerSequence << ", Reply: " << response.DebugString()
<< ", error_code: " << status.error_code() << ", error_msg: " << status.error_message()
<< ", source = " << source->toString();
return false;
}

LOG(log.warn()) << "Could not fetch ledger " << ledgerSequence << ", Reply: " << response.DebugString()
<< ", error_code: " << status.error_code() << ", error_msg: " << status.error_message()
<< ", source = " << source->toString();
return false;
},
ledgerSequence);
if (success)
ledgerSequence
);
if (success) {
return response;
else
return {};
}
return {};
}

std::optional<boost::json::object>
LoadBalancer::forwardToRippled(
boost::json::object const& request,
std::string const& clientIp,
boost::asio::yield_context yield) const
std::optional<std::string> const& clientIp,
boost::asio::yield_context yield
) const
{
srand(static_cast<unsigned>(time(0)));
auto sourceIdx = rand() % sources_.size();
std::size_t sourceIdx = 0;
if (!sources_.empty())
sourceIdx = util::Random::uniform(0ul, sources_.size() - 1);

auto numAttempts = 0u;

while (numAttempts < sources_.size())
{
while (numAttempts < sources_.size()) {
if (auto res = sources_[sourceIdx]->forwardToRippled(request, clientIp, yield))
return res;

@@ -166,9 +216,8 @@ LoadBalancer::forwardToRippled(
bool
LoadBalancer::shouldPropagateTxnStream(Source* in) const
{
for (auto& src : sources_)
{
assert(src);
for (auto& src : sources_) {
ASSERT(src != nullptr, "Source is nullptr");

// We pick the first Source encountered that is connected
if (src->isConnected())
@@ -193,12 +242,13 @@ template <class Func>
bool
LoadBalancer::execute(Func f, uint32_t ledgerSequence)
{
srand(static_cast<unsigned>(time(0)));
auto sourceIdx = rand() % sources_.size();
std::size_t sourceIdx = 0;
if (!sources_.empty())
sourceIdx = util::Random::uniform(0ul, sources_.size() - 1);

auto numAttempts = 0;

while (true)
{
while (true) {
auto& source = sources_[sourceIdx];

LOG(log_.debug()) << "Attempting to execute func. ledger sequence = " << ledgerSequence
@@ -207,30 +257,23 @@ LoadBalancer::execute(Func f, uint32_t ledgerSequence)
/* Sometimes rippled has ledger but doesn't actually know. However,
but this does NOT happen in the normal case and is safe to remove
This || true is only needed when loading full history standalone */
if (source->hasLedger(ledgerSequence))
{
bool res = f(source);
if (res)
{
if (source->hasLedger(ledgerSequence)) {
bool const res = f(source);
if (res) {
LOG(log_.debug()) << "Successfully executed func at source = " << source->toString()
<< " - ledger sequence = " << ledgerSequence;
break;
}
else
{
LOG(log_.warn()) << "Failed to execute func at source = " << source->toString()
<< " - ledger sequence = " << ledgerSequence;
}
}
else
{

LOG(log_.warn()) << "Failed to execute func at source = " << source->toString()
<< " - ledger sequence = " << ledgerSequence;
} else {
LOG(log_.warn()) << "Ledger not present at source = " << source->toString()
<< " - ledger sequence = " << ledgerSequence;
}
sourceIdx = (sourceIdx + 1) % sources_.size();
numAttempts++;
if (numAttempts % sources_.size() == 0)
{
if (numAttempts % sources_.size() == 0) {
LOG(log_.info()) << "Ledger sequence " << ledgerSequence
<< " is not yet available from any configured sources. "
<< "Sleeping and trying again";
@@ -239,4 +282,15 @@ LoadBalancer::execute(Func f, uint32_t ledgerSequence)
}
return true;
}

std::optional<ETLState>
LoadBalancer::getETLState() noexcept
{
if (!etlState_) {
// retry ETLState fetch
etlState_ = ETLState::fetchETLStateFromSource(*this);
}
return etlState_;
}

} // namespace etl
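The hunks above replace srand/rand with util::Random::uniform for picking the starting source, then rotate round-robin through the remaining sources. A sketch of that selection-and-retry shape against a plain vector (Source reduced to a callable that may fail, and <random> standing in for clio's util::Random); unlike LoadBalancer::execute, which keeps retrying with a sleep, this sketch gives up after one full round:

    #include <cstddef>
    #include <functional>
    #include <random>
    #include <vector>

    using Source = std::function<bool()>;  // returns true on success

    bool
    executeOnAnySource(std::vector<Source> const& sources)
    {
        if (sources.empty())
            return false;

        // random starting point so load spreads across sources
        static std::mt19937 gen{std::random_device{}()};
        std::size_t idx = std::uniform_int_distribution<std::size_t>{0, sources.size() - 1}(gen);

        for (std::size_t attempt = 0; attempt < sources.size(); ++attempt) {
            if (sources[idx]())
                return true;
            idx = (idx + 1) % sources.size();  // rotate to the next source
        }
        return false;  // every source failed one full round
    }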
@@ -19,15 +19,29 @@

#pragma once

#include <data/BackendInterface.h>
#include <etl/ETLHelpers.h>
#include <feed/SubscriptionManager.h>
#include <util/config/Config.h>
#include <util/log/Logger.h>
#include "data/BackendInterface.hpp"
#include "etl/ETLHelpers.hpp"
#include "etl/ETLState.hpp"
#include "feed/SubscriptionManager.hpp"
#include "util/config/Config.hpp"
#include "util/log/Logger.hpp"

#include <ripple/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>
#include <boost/asio.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/json/object.hpp>
#include <boost/json/value.hpp>
#include <grpcpp/grpcpp.h>
#include <org/xrpl/rpc/v1/get_ledger.pb.h>
#include <org/xrpl/rpc/v1/ledger.pb.h>
#include <ripple/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>

#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>

namespace etl {
class Source;
@@ -47,17 +61,20 @@ namespace etl {
 * which ledgers have been validated by the network, and the range of ledgers each etl source has). This class also
 * allows requests for ledger data to be load balanced across all possible ETL sources.
 */
class LoadBalancer
{
class LoadBalancer {
public:
    using RawLedgerObjectType = org::xrpl::rpc::v1::RawLedgerObject;
    using GetLedgerResponseType = org::xrpl::rpc::v1::GetLedgerResponse;
    using OptionalGetLedgerResponseType = std::optional<GetLedgerResponseType>;

private:
    static constexpr std::uint32_t DEFAULT_DOWNLOAD_RANGES = 16;

    util::Logger log_{"ETL"};
    std::vector<std::unique_ptr<Source>> sources_;
    std::uint32_t downloadRanges_ = 16; /*< The number of markers to use when downloading initial ledger */
    std::optional<ETLState> etlState_;
    std::uint32_t downloadRanges_ =
        DEFAULT_DOWNLOAD_RANGES; /*< The number of markers to use when downloading initial ledger */

public:
    /**
@@ -74,7 +91,8 @@ public:
        boost::asio::io_context& ioc,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<feed::SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> validatedLedgers);
        std::shared_ptr<NetworkValidatedLedgers> validatedLedgers
    );

    /**
     * @brief A factory function for the load balancer.
@@ -91,26 +109,8 @@ public:
        boost::asio::io_context& ioc,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<feed::SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> validatedLedgers);

    /**
     * @brief A factory function for the ETL source.
     *
     * @param config The configuration to use
     * @param ioc The io_context to run on
     * @param backend BackendInterface implementation
     * @param subscriptions Subscription manager
     * @param validatedLedgers The network validated ledgers datastructure
     * @param balancer The load balancer
     */
    static std::unique_ptr<Source>
    make_Source(
        util::Config const& config,
        boost::asio::io_context& ioc,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<feed::SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> validatedLedgers,
        LoadBalancer& balancer);
        std::shared_ptr<NetworkValidatedLedgers> validatedLedgers
    );

    ~LoadBalancer();

@@ -161,15 +161,45 @@ public:
     * @brief Forward a JSON RPC request to a randomly selected rippled node.
     *
     * @param request JSON-RPC request to forward
     * @param clientIp The IP address of the peer
     * @param clientIp The IP address of the peer, if known
     * @param yield The coroutine context
     * @return Response received from rippled node as JSON object on success; nullopt on failure
     */
    std::optional<boost::json::object>
    forwardToRippled(boost::json::object const& request, std::string const& clientIp, boost::asio::yield_context yield)
        const;
    forwardToRippled(
        boost::json::object const& request,
        std::optional<std::string> const& clientIp,
        boost::asio::yield_context yield
    ) const;

    /**
     * @brief Return state of ETL nodes.
     * @return ETL state, nullopt if etl nodes not available
     */
    std::optional<ETLState>
    getETLState() noexcept;

private:
    /**
     * @brief A factory function for the ETL source.
     *
     * @param config The configuration to use
     * @param ioc The io_context to run on
     * @param backend BackendInterface implementation
     * @param subscriptions Subscription manager
     * @param validatedLedgers The network validated ledgers datastructure
     * @param balancer The load balancer
     */
    static std::unique_ptr<Source>
    make_Source(
        util::Config const& config,
        boost::asio::io_context& ioc,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<feed::SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> validatedLedgers,
        LoadBalancer& balancer
    );

    /**
     * @brief Execute a function on a randomly selected source.
     *
@@ -17,15 +17,33 @@
 */
//==============================================================================

#include <ripple/protocol/STBase.h>
#include <ripple/protocol/STTx.h>
#include <ripple/protocol/TxMeta.h>
#include <vector>
#include "data/DBHelpers.hpp"

#include <data/BackendInterface.h>
#include <data/DBHelpers.h>
#include <data/Types.h>
#include <fmt/core.h>
#include <ripple/basics/base_uint.h>
#include <ripple/basics/strHex.h>
#include <ripple/protocol/AccountID.h>
#include <ripple/protocol/LedgerFormats.h>
#include <ripple/protocol/SField.h>
#include <ripple/protocol/STArray.h>
#include <ripple/protocol/STBase.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <ripple/protocol/STObject.h>
#include <ripple/protocol/STTx.h>
#include <ripple/protocol/Serializer.h>
#include <ripple/protocol/TER.h>
#include <ripple/protocol/TxFormats.h>
#include <ripple/protocol/TxMeta.h>

#include <algorithm>
#include <cstdint>
#include <iterator>
#include <optional>
#include <sstream>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

namespace etl {

@@ -45,27 +63,26 @@ getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
    // that were changed.
    std::optional<ripple::AccountID> owner;

    for (ripple::STObject const& node : txMeta.getNodes())
    {
    for (ripple::STObject const& node : txMeta.getNodes()) {
        if (node.getFieldU16(ripple::sfLedgerEntryType) != ripple::ltNFTOKEN_PAGE)
            continue;

        if (!owner)
            owner = ripple::AccountID::fromVoid(node.getFieldH256(ripple::sfLedgerIndex).data());

        if (node.getFName() == ripple::sfCreatedNode)
        {
        if (node.getFName() == ripple::sfCreatedNode) {
            ripple::STArray const& toAddNFTs =
                node.peekAtField(ripple::sfNewFields).downcast<ripple::STObject>().getFieldArray(ripple::sfNFTokens);
            std::transform(
                toAddNFTs.begin(), toAddNFTs.end(), std::back_inserter(finalIDs), [](ripple::STObject const& nft) {
                    return nft.getFieldH256(ripple::sfNFTokenID);
                });
                toAddNFTs.begin(),
                toAddNFTs.end(),
                std::back_inserter(finalIDs),
                [](ripple::STObject const& nft) { return nft.getFieldH256(ripple::sfNFTokenID); }
            );
        }
        // Else it's modified, as there should never be a deleted NFToken page
        // as a result of a mint.
        else
        {
        else {
            // When a mint results in splitting an existing page,
            // it results in a created page and a modified node. Sometimes,
            // the created node needs to be linked to a third page, resulting
@@ -82,9 +99,11 @@ getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)

            ripple::STArray const& toAddNFTs = previousFields.getFieldArray(ripple::sfNFTokens);
            std::transform(
                toAddNFTs.begin(), toAddNFTs.end(), std::back_inserter(prevIDs), [](ripple::STObject const& nft) {
                    return nft.getFieldH256(ripple::sfNFTokenID);
                });
                toAddNFTs.begin(),
                toAddNFTs.end(),
                std::back_inserter(prevIDs),
                [](ripple::STObject const& nft) { return nft.getFieldH256(ripple::sfNFTokenID); }
            );

            ripple::STArray const& toAddFinalNFTs =
                node.peekAtField(ripple::sfFinalFields).downcast<ripple::STObject>().getFieldArray(ripple::sfNFTokens);
@@ -92,7 +111,8 @@ getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
                toAddFinalNFTs.begin(),
                toAddFinalNFTs.end(),
                std::back_inserter(finalIDs),
                [](ripple::STObject const& nft) { return nft.getFieldH256(ripple::sfNFTokenID); });
                [](ripple::STObject const& nft) { return nft.getFieldH256(ripple::sfNFTokenID); }
            );
        }
    }

@@ -105,13 +125,15 @@ getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)

    // There should always be a difference so the returned finalIDs
    // iterator should never be end(). But better safe than sorry.
    if (finalIDs.size() != prevIDs.size() + 1 || diff.first == finalIDs.end() || !owner)
        throw std::runtime_error(
            fmt::format(" - unexpected NFTokenMint data in tx {}", strHex(sttx.getTransactionID())));
    if (finalIDs.size() != prevIDs.size() + 1 || diff.first == finalIDs.end() || !owner) {
        throw std::runtime_error(fmt::format(" - unexpected NFTokenMint data in tx {}", strHex(sttx.getTransactionID()))
        );
    }

    return {
        {NFTTransactionsData(*diff.first, txMeta, sttx.getTransactionID())},
        NFTsData(*diff.first, *owner, sttx.getFieldVL(ripple::sfURI), txMeta)};
        NFTsData(*diff.first, *owner, sttx.getFieldVL(ripple::sfURI), txMeta)
    };
}

std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
@@ -123,8 +145,7 @@ getNFTokenBurnData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
    // Determine who owned the token when it was burned by finding an
    // NFTokenPage that was deleted or modified that contains this
    // tokenID.
    for (ripple::STObject const& node : txMeta.getNodes())
    {
    for (ripple::STObject const& node : txMeta.getNodes()) {
        if (node.getFieldU16(ripple::sfLedgerEntryType) != ripple::ltNFTOKEN_PAGE ||
            node.getFName() == ripple::sfCreatedNode)
            continue;
@@ -139,16 +160,15 @@ getNFTokenBurnData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
        // need to look in the FinalFields.
        std::optional<ripple::STArray> prevNFTs;

        if (node.isFieldPresent(ripple::sfPreviousFields))
        {
        if (node.isFieldPresent(ripple::sfPreviousFields)) {
            ripple::STObject const& previousFields =
                node.peekAtField(ripple::sfPreviousFields).downcast<ripple::STObject>();
            if (previousFields.isFieldPresent(ripple::sfNFTokens))
                prevNFTs = previousFields.getFieldArray(ripple::sfNFTokens);
        }
        else if (!prevNFTs && node.getFName() == ripple::sfDeletedNode)
        } else if (!prevNFTs && node.getFName() == ripple::sfDeletedNode) {
            prevNFTs =
                node.peekAtField(ripple::sfFinalFields).downcast<ripple::STObject>().getFieldArray(ripple::sfNFTokens);
        }

        if (!prevNFTs)
            continue;
@@ -157,14 +177,14 @@ getNFTokenBurnData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
            std::find_if(prevNFTs->begin(), prevNFTs->end(), [&tokenID](ripple::STObject const& candidate) {
                return candidate.getFieldH256(ripple::sfNFTokenID) == tokenID;
            });
        if (nft != prevNFTs->end())
        if (nft != prevNFTs->end()) {
            return std::make_pair(
                txs,
                NFTsData(
                    tokenID,
                    ripple::AccountID::fromVoid(node.getFieldH256(ripple::sfLedgerIndex).data()),
                    txMeta,
                    true));
                    tokenID, ripple::AccountID::fromVoid(node.getFieldH256(ripple::sfLedgerIndex).data()), txMeta, true
                )
            );
        }
    }

    std::stringstream msg;
@@ -178,14 +198,12 @@ getNFTokenAcceptOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx
    // If we have the buy offer from this tx, we can determine the owner
    // more easily by just looking at the owner of the accepted NFTokenOffer
    // object.
    if (sttx.isFieldPresent(ripple::sfNFTokenBuyOffer))
    {
    if (sttx.isFieldPresent(ripple::sfNFTokenBuyOffer)) {
        auto const affectedBuyOffer =
            std::find_if(txMeta.getNodes().begin(), txMeta.getNodes().end(), [&sttx](ripple::STObject const& node) {
                return node.getFieldH256(ripple::sfLedgerIndex) == sttx.getFieldH256(ripple::sfNFTokenBuyOffer);
            });
        if (affectedBuyOffer == txMeta.getNodes().end())
        {
        if (affectedBuyOffer == txMeta.getNodes().end()) {
            std::stringstream msg;
            msg << " - unexpected NFTokenAcceptOffer data in tx " << sttx.getTransactionID();
            throw std::runtime_error(msg.str());
@@ -199,7 +217,8 @@ getNFTokenAcceptOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx
                              .downcast<ripple::STObject>()
                              .getAccountID(ripple::sfOwner);
        return {
            {NFTTransactionsData(tokenID, txMeta, sttx.getTransactionID())}, NFTsData(tokenID, owner, txMeta, false)};
            {NFTTransactionsData(tokenID, txMeta, sttx.getTransactionID())}, NFTsData(tokenID, owner, txMeta, false)
        };
    }

    // Otherwise we have to infer the new owner from the affected nodes.
@@ -207,8 +226,7 @@ getNFTokenAcceptOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx
        std::find_if(txMeta.getNodes().begin(), txMeta.getNodes().end(), [&sttx](ripple::STObject const& node) {
            return node.getFieldH256(ripple::sfLedgerIndex) == sttx.getFieldH256(ripple::sfNFTokenSellOffer);
        });
    if (affectedSellOffer == txMeta.getNodes().end())
    {
    if (affectedSellOffer == txMeta.getNodes().end()) {
        std::stringstream msg;
        msg << " - unexpected NFTokenAcceptOffer data in tx " << sttx.getTransactionID();
        throw std::runtime_error(msg.str());
@@ -222,8 +240,7 @@ getNFTokenAcceptOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx
                             .downcast<ripple::STObject>()
                             .getAccountID(ripple::sfOwner);

    for (ripple::STObject const& node : txMeta.getNodes())
    {
    for (ripple::STObject const& node : txMeta.getNodes()) {
        if (node.getFieldU16(ripple::sfLedgerEntryType) != ripple::ltNFTOKEN_PAGE ||
            node.getFName() == ripple::sfDeletedNode)
            continue;
@@ -234,10 +251,11 @@ getNFTokenAcceptOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx
            continue;

        ripple::STArray const& nfts = [&node] {
            if (node.getFName() == ripple::sfCreatedNode)
            if (node.getFName() == ripple::sfCreatedNode) {
                return node.peekAtField(ripple::sfNewFields)
                    .downcast<ripple::STObject>()
                    .getFieldArray(ripple::sfNFTokens);
            }
            return node.peekAtField(ripple::sfFinalFields)
                .downcast<ripple::STObject>()
                .getFieldArray(ripple::sfNFTokens);
@@ -246,10 +264,12 @@ getNFTokenAcceptOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx
        auto const nft = std::find_if(nfts.begin(), nfts.end(), [&tokenID](ripple::STObject const& candidate) {
            return candidate.getFieldH256(ripple::sfNFTokenID) == tokenID;
        });
        if (nft != nfts.end())
        if (nft != nfts.end()) {
            return {
                {NFTTransactionsData(tokenID, txMeta, sttx.getTransactionID())},
                NFTsData(tokenID, nodeOwner, txMeta, false)};
                NFTsData(tokenID, nodeOwner, txMeta, false)
            };
        }
    }

    std::stringstream msg;
@@ -265,8 +285,7 @@ std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
getNFTokenCancelOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
{
    std::vector<NFTTransactionsData> txs;
    for (ripple::STObject const& node : txMeta.getNodes())
    {
    for (ripple::STObject const& node : txMeta.getNodes()) {
        if (node.getFieldU16(ripple::sfLedgerEntryType) != ripple::ltNFTOKEN_OFFER)
            continue;

@@ -302,8 +321,7 @@ getNFTDataFromTx(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
    if (txMeta.getResultTER() != ripple::tesSUCCESS)
        return {{}, {}};

    switch (sttx.getTxnType())
    {
    switch (sttx.getTxnType()) {
        case ripple::TxType::ttNFTOKEN_MINT:
            return getNFTokenMintData(txMeta, sttx);

@@ -340,4 +358,4 @@ getNFTDataFromObj(std::uint32_t const seq, std::string const& key, std::string c

    return nfts;
}
} // namespace etl
} // namespace etl

@@ -20,11 +20,17 @@
/** @file */
#pragma once

#include <data/DBHelpers.h>
#include "data/DBHelpers.hpp"

#include <ripple/protocol/STTx.h>
#include <ripple/protocol/TxMeta.h>

#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <vector>

namespace etl {

/**
@@ -46,6 +52,6 @@ getNFTDataFromTx(ripple::TxMeta const& txMeta, ripple::STTx const& sttx);
 * @return The NFT data as a vector
 */
std::vector<NFTsData>
getNFTDataFromObj(std::uint32_t const seq, std::string const& key, std::string const& blob);
getNFTDataFromObj(std::uint32_t seq, std::string const& key, std::string const& blob);

} // namespace etl
} // namespace etl
@@ -17,7 +17,32 @@
 */
//==============================================================================

#include <etl/ProbingSource.h>
#include "etl/ProbingSource.hpp"

#include "data/BackendInterface.hpp"
#include "etl/ETLHelpers.hpp"
#include "etl/LoadBalancer.hpp"
#include "etl/Source.hpp"
#include "feed/SubscriptionManager.hpp"
#include "util/config/Config.hpp"
#include "util/log/Logger.hpp"

#include <boost/asio/io_context.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/ssl/context.hpp>
#include <boost/json/object.hpp>
#include <boost/uuid/nil_generator.hpp>
#include <boost/uuid/uuid.hpp>
#include <grpcpp/support/status.h>

#include <cstdint>
#include <functional>
#include <memory>
#include <mutex>
#include <optional>
#include <string>
#include <utility>
#include <vector>

namespace etl {

@@ -28,7 +53,8 @@ ProbingSource::ProbingSource(
    std::shared_ptr<feed::SubscriptionManager> subscriptions,
    std::shared_ptr<NetworkValidatedLedgers> nwvl,
    LoadBalancer& balancer,
    boost::asio::ssl::context sslCtx)
    boost::asio::ssl::context sslCtx
)
    : sslCtx_{std::move(sslCtx)}
    , sslSrc_{make_shared<
          SslSource>(config, ioc, std::ref(sslCtx_), backend, subscriptions, nwvl, balancer, make_SSLHooks())}
@@ -74,8 +100,7 @@ ProbingSource::hasLedger(uint32_t sequence) const
boost::json::object
ProbingSource::toJson() const
{
    if (!currentSrc_)
    {
    if (!currentSrc_) {
        boost::json::object sourcesJson = {
            {"ws", plainSrc_->toJson()},
            {"wss", sslSrc_->toJson()},
@@ -123,19 +148,26 @@ ProbingSource::fetchLedger(uint32_t sequence, bool getObjects, bool getObjectNei
std::optional<boost::json::object>
ProbingSource::forwardToRippled(
    boost::json::object const& request,
    std::string const& clientIp,
    boost::asio::yield_context yield) const
    std::optional<std::string> const& clientIp,
    boost::asio::yield_context yield
) const
{
    if (!currentSrc_)
        return {};
    if (!currentSrc_) // Source may connect to rippled before the connection is established, to check validity
    {
        if (auto res = plainSrc_->forwardToRippled(request, clientIp, yield))
            return res;

        return sslSrc_->forwardToRippled(request, clientIp, yield);
    }
    return currentSrc_->forwardToRippled(request, clientIp, yield);
}

std::optional<boost::json::object>
ProbingSource::requestFromRippled(
    boost::json::object const& request,
    std::string const& clientIp,
    boost::asio::yield_context yield) const
    std::optional<std::string> const& clientIp,
    boost::asio::yield_context yield
) const
{
    if (!currentSrc_)
        return {};
@@ -147,12 +179,11 @@ ProbingSource::make_SSLHooks() noexcept
{
    return {// onConnected
            [this](auto ec) {
                std::lock_guard lck(mtx_);
                std::lock_guard const lck(mtx_);
                if (currentSrc_)
                    return SourceHooks::Action::STOP;

                if (!ec)
                {
                if (!ec) {
                    plainSrc_->pause();
                    currentSrc_ = sslSrc_;
                    LOG(log_.info()) << "Selected WSS as the main source: " << currentSrc_->toString();
@@ -161,14 +192,14 @@ ProbingSource::make_SSLHooks() noexcept
            },
            // onDisconnected
            [this](auto /* ec */) {
                std::lock_guard lck(mtx_);
                if (currentSrc_)
                {
                std::lock_guard const lck(mtx_);
                if (currentSrc_) {
                    currentSrc_ = nullptr;
                    plainSrc_->resume();
                }
                return SourceHooks::Action::STOP;
            }};
}
            };
}

SourceHooks
@@ -176,12 +207,11 @@ ProbingSource::make_PlainHooks() noexcept
{
    return {// onConnected
            [this](auto ec) {
                std::lock_guard lck(mtx_);
                std::lock_guard const lck(mtx_);
                if (currentSrc_)
                    return SourceHooks::Action::STOP;

                if (!ec)
                {
                if (!ec) {
                    sslSrc_->pause();
                    currentSrc_ = plainSrc_;
                    LOG(log_.info()) << "Selected Plain WS as the main source: " << currentSrc_->toString();
@@ -190,13 +220,13 @@ ProbingSource::make_PlainHooks() noexcept
            },
            // onDisconnected
            [this](auto /* ec */) {
                std::lock_guard lck(mtx_);
                if (currentSrc_)
                {
                std::lock_guard const lck(mtx_);
                if (currentSrc_) {
                    currentSrc_ = nullptr;
                    sslSrc_->resume();
                }
                return SourceHooks::Action::STOP;
            }};
}
            };
};
} // namespace etl

@@ -19,17 +19,33 @@

#pragma once

#include <etl/Source.h>
#include <util/config/Config.h>
#include <util/log/Logger.h>
#include "data/BackendInterface.hpp"
#include "etl/ETLHelpers.hpp"
#include "etl/LoadBalancer.hpp"
#include "etl/Source.hpp"
#include "util/config/Config.hpp"
#include "util/log/Logger.hpp"

#include <boost/asio.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/ssl/context.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/core/string.hpp>
#include <boost/beast/ssl.hpp>
#include <boost/beast/websocket.hpp>
#include <boost/json/object.hpp>
#include <boost/uuid/uuid.hpp>
#include <grpcpp/support/status.h>
#include <org/xrpl/rpc/v1/get_ledger.pb.h>

#include <cstdint>
#include <memory>
#include <mutex>
#include <optional>
#include <string>
#include <utility>
#include <vector>

namespace etl {

@@ -39,8 +55,7 @@ namespace etl {
 * First to connect pauses the other and the probing is considered done at this point.
 * If however the connected source loses connection the probing is kickstarted again.
 */
class ProbingSource : public Source
{
class ProbingSource : public Source {
public:
    // TODO: inject when unit tests are written for ProbingSource
    using GetLedgerResponseType = org::xrpl::rpc::v1::GetLedgerResponse;
@@ -73,9 +88,10 @@ public:
        std::shared_ptr<feed::SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> nwvl,
        LoadBalancer& balancer,
        boost::asio::ssl::context sslCtx = boost::asio::ssl::context{boost::asio::ssl::context::tlsv12});
        boost::asio::ssl::context sslCtx = boost::asio::ssl::context{boost::asio::ssl::context::tlsv12}
    );

    ~ProbingSource() = default;
    ~ProbingSource() override = default;

    void
    run() override;
@@ -105,8 +121,11 @@ public:
    fetchLedger(uint32_t sequence, bool getObjects = true, bool getObjectNeighbors = false) override;

    std::optional<boost::json::object>
    forwardToRippled(boost::json::object const& request, std::string const& clientIp, boost::asio::yield_context yield)
        const override;
    forwardToRippled(
        boost::json::object const& request,
        std::optional<std::string> const& clientIp,
        boost::asio::yield_context yield
    ) const override;

    boost::uuids::uuid
    token() const override;
@@ -115,8 +134,9 @@ private:
    std::optional<boost::json::object>
    requestFromRippled(
        boost::json::object const& request,
        std::string const& clientIp,
        boost::asio::yield_context yield) const override;
        std::optional<std::string> const& clientIp,
        boost::asio::yield_context yield
    ) const override;

    SourceHooks
    make_SSLHooks() noexcept;
@@ -124,4 +144,4 @@ private:
    SourceHooks
    make_PlainHooks() noexcept;
};
} // namespace etl
} // namespace etl
@@ -17,22 +17,22 @@
 */
//==============================================================================

#include <data/DBHelpers.h>
#include <etl/ETLService.h>
#include <etl/LoadBalancer.h>
#include <etl/ProbingSource.h>
#include <etl/Source.h>
#include <rpc/RPCHelpers.h>
#include <util/Profiler.h>
#include "etl/Source.hpp"

#include <ripple/beast/net/IPEndpoint.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <boost/asio/strand.hpp>
#include <boost/beast/http.hpp>
#include <boost/beast/ssl.hpp>
#include <boost/json.hpp>
#include "util/log/Logger.hpp"

#include <thread>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/post.hpp>
#include <boost/asio/ssl/stream_base.hpp>
#include <boost/beast/core/error.hpp>
#include <boost/beast/core/role.hpp>
#include <boost/beast/core/stream_traits.hpp>
#include <boost/beast/http/field.hpp>
#include <boost/beast/websocket/rfc6455.hpp>
#include <boost/beast/websocket/stream_base.hpp>

#include <memory>
#include <string>

namespace etl {

@@ -50,27 +50,22 @@ PlainSource::close(bool startAgain)
    if (closing_)
        return;

    if (derived().ws().is_open())
    {
    if (derived().ws().is_open()) {
        // onStop() also calls close(). If the async_close is called twice,
        // an assertion fails. Using closing_ makes sure async_close is only
        // called once
        closing_ = true;
        derived().ws().async_close(boost::beast::websocket::close_code::normal, [this, startAgain](auto ec) {
            if (ec)
            {
            if (ec) {
                LOG(log_.error()) << "async_close: error code = " << ec << " - " << toString();
            }
            closing_ = false;
            if (startAgain)
            {
            if (startAgain) {
                ws_ = std::make_unique<StreamType>(strand_);
                run();
            }
        });
    }
    else if (startAgain)
    {
    } else if (startAgain) {
        ws_ = std::make_unique<StreamType>(strand_);
        run();
    }
@@ -85,26 +80,21 @@ SslSource::close(bool startAgain)
    if (closing_)
        return;

    if (derived().ws().is_open())
    {
        // onStop() also calls close(). If the async_close is called twice, an assertion fails. Using closing_ makes
        // sure async_close is only called once
    if (derived().ws().is_open()) {
        // onStop() also calls close(). If the async_close is called twice, an assertion fails. Using closing_
        // makes sure async_close is only called once
        closing_ = true;
        derived().ws().async_close(boost::beast::websocket::close_code::normal, [this, startAgain](auto ec) {
            if (ec)
            {
            if (ec) {
                LOG(log_.error()) << "async_close: error code = " << ec << " - " << toString();
            }
            closing_ = false;
            if (startAgain)
            {
            if (startAgain) {
                ws_ = std::make_unique<StreamType>(strand_, *sslCtx_);
                run();
            }
        });
    }
    else if (startAgain)
    {
    } else if (startAgain) {
        ws_ = std::make_unique<StreamType>(strand_, *sslCtx_);
        run();
    }
@@ -114,15 +104,13 @@ SslSource::close(bool startAgain)
void
PlainSource::onConnect(
    boost::beast::error_code ec,
    boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint)
    boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint
)
{
    if (ec)
    {
    if (ec) {
        // start over
        reconnect(ec);
    }
    else
    {
    } else {
        connected_ = true;
        numFailures_ = 0;

@@ -134,7 +122,8 @@ PlainSource::onConnect(
            boost::beast::websocket::stream_base::decorator([](boost::beast::websocket::request_type& req) {
                req.set(boost::beast::http::field::user_agent, "clio-client");
                req.set("X-User", "clio-client");
            }));
            })
        );

        // Update the host_ string. This will provide the value of the
        // Host HTTP header during the WebSocket handshake.
@@ -147,13 +136,10 @@ PlainSource::onConnect(
void
SslSource::onConnect(boost::beast::error_code ec, boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint)
{
    if (ec)
    {
    if (ec) {
        // start over
        reconnect(ec);
    }
    else
    {
    } else {
        connected_ = true;
        numFailures_ = 0;

@@ -165,28 +151,28 @@ SslSource::onConnect(boost::beast::error_code ec, boost::asio::ip::tcp::resolver
            boost::beast::websocket::stream_base::decorator([](boost::beast::websocket::request_type& req) {
                req.set(boost::beast::http::field::user_agent, "clio-client");
                req.set("X-User", "clio-client");
            }));
            })
        );

        // Update the host_ string. This will provide the value of the
        // Host HTTP header during the WebSocket handshake.
        // See https://tools.ietf.org/html/rfc7230#section-5.4
        auto host = ip_ + ':' + std::to_string(endpoint.port());
        ws().next_layer().async_handshake(
            boost::asio::ssl::stream_base::client, [this, endpoint](auto ec) { onSslHandshake(ec, endpoint); });
        ws().next_layer().async_handshake(boost::asio::ssl::stream_base::client, [this, endpoint](auto ec) {
            onSslHandshake(ec, endpoint);
        });
    }
}

void
SslSource::onSslHandshake(
    boost::beast::error_code ec,
    boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint)
    boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint
)
{
    if (ec)
    {
    if (ec) {
        reconnect(ec);
    }
    else
    {
    } else {
        auto host = ip_ + ':' + std::to_string(endpoint.port());
        ws().async_handshake(host, "/", [this](auto ec) { onHandshake(ec); });
    }

@@ -19,30 +19,81 @@

#pragma once

#include <data/BackendInterface.h>
#include <etl/ETLHelpers.h>
#include <etl/LoadBalancer.h>
#include <etl/impl/AsyncData.h>
#include <etl/impl/ForwardCache.h>
#include <feed/SubscriptionManager.h>
#include <util/config/Config.h>
#include <util/log/Logger.h>
#include "data/BackendInterface.hpp"
#include "etl/ETLHelpers.hpp"
#include "etl/LoadBalancer.hpp"
#include "etl/impl/AsyncData.hpp"
#include "etl/impl/ForwardCache.hpp"
#include "feed/SubscriptionManager.hpp"
#include "util/Assert.hpp"
#include "util/config/Config.hpp"
#include "util/log/Logger.hpp"

#include <ripple/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>
#include <boost/algorithm/string.hpp>
#include <boost/algorithm/string/classification.hpp>
#include <boost/algorithm/string/split.hpp>
#include <boost/asio.hpp>
#include <boost/asio/associated_executor.hpp>
#include <boost/asio/buffer.hpp>
#include <boost/asio/error.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/ip/address.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/ssl/context.hpp>
#include <boost/asio/ssl/error.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/strand.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/core/buffers_to_string.hpp>
#include <boost/beast/core/error.hpp>
#include <boost/beast/core/flat_buffer.hpp>
#include <boost/beast/core/string.hpp>
#include <boost/beast/core/tcp_stream.hpp>
#include <boost/beast/http/field.hpp>
#include <boost/beast/ssl.hpp>
#include <boost/beast/ssl/ssl_stream.hpp>
#include <boost/beast/version.hpp>
#include <boost/beast/websocket.hpp>
#include <boost/beast/websocket/rfc6455.hpp>
#include <boost/beast/websocket/stream.hpp>
#include <boost/beast/websocket/stream_base.hpp>
#include <boost/json/object.hpp>
#include <boost/json/parse.hpp>
#include <boost/json/serialize.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/uuid/random_generator.hpp>
#include <boost/uuid/uuid.hpp>
#include <boost/uuid/uuid_generators.hpp>
#include <grpcpp/client_context.h>
#include <grpcpp/grpcpp.h>
#include <grpcpp/security/credentials.h>
#include <grpcpp/support/channel_arguments.h>
#include <grpcpp/support/status.h>
#include <openssl/err.h>
#include <org/xrpl/rpc/v1/get_ledger.pb.h>
#include <ripple/basics/base_uint.h>
#include <ripple/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>

#include <algorithm>
#include <atomic>
#include <chrono>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <exception>
#include <functional>
#include <memory>
#include <mutex>
#include <optional>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

class ProbingSource;
namespace feed {
class SubscriptionManager;
}
} // namespace feed

// TODO: we use Source so that we can store a vector of Sources
// but we also use CRTP for implementation of the common logic - this is a bit strange because CRTP as used here is
@@ -52,14 +103,15 @@ class SubscriptionManager;

namespace etl {

class ProbingSource;

/**
 * @brief Base class for all ETL sources.
 *
 * Note: Since sources below are implemented via CRTP, it sort of makes no sense to have a virtual base class.
 * We should consider using a vector of ProbingSources instead of vector of unique ptrs to this virtual base.
 */
class Source
{
class Source {
public:
    /** @return true if source is connected; false otherwise */
    virtual bool
@@ -123,13 +175,16 @@ public:
     * @brief Forward a request to rippled.
     *
     * @param request The request to forward
     * @param clientIp IP of the client forwarding this request
     * @param clientIp IP of the client forwarding this request, if known
     * @param yield The coroutine context
     * @return Response wrapped in an optional on success; nullopt otherwise
     */
    virtual std::optional<boost::json::object>
    forwardToRippled(boost::json::object const& request, std::string const& clientIp, boost::asio::yield_context yield)
        const = 0;
    forwardToRippled(
        boost::json::object const& request,
        std::optional<std::string> const& clientIp,
        boost::asio::yield_context yield
    ) const = 0;

    /**
     * @return A token that uniquely identifies this source instance.
@@ -161,15 +216,15 @@ private:
    virtual std::optional<boost::json::object>
    requestFromRippled(
        boost::json::object const& request,
        std::string const& clientIp,
        boost::asio::yield_context yield) const = 0;
        std::optional<std::string> const& clientIp,
        boost::asio::yield_context yield
    ) const = 0;
};

/**
 * @brief Hooks for source events such as connects and disconnects.
 */
struct SourceHooks
{
struct SourceHooks {
    enum class Action { STOP, PROCEED };

    std::function<Action(boost::beast::error_code)> onConnected;
@@ -182,8 +237,7 @@ struct SourceHooks
 * @tparam Derived The derived class for CRTP
 */
template <class Derived>
class SourceImpl : public Source
{
class SourceImpl : public Source {
    std::string wsPort_;
    std::string grpcPort_;

@@ -206,7 +260,7 @@ class SourceImpl : public Source
    LoadBalancer& balancer_;

    etl::detail::ForwardCache forwardCache_;
    boost::uuids::uuid uuid_;
    boost::uuids::uuid uuid_{};

protected:
    std::string ip_;
@@ -244,16 +298,17 @@ public:
        std::shared_ptr<feed::SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> validatedLedgers,
        LoadBalancer& balancer,
        SourceHooks hooks)
        : networkValidatedLedgers_(validatedLedgers)
        , backend_(backend)
        , subscriptions_(subscriptions)
        SourceHooks hooks
    )
        : networkValidatedLedgers_(std::move(validatedLedgers))
        , backend_(std::move(backend))
        , subscriptions_(std::move(subscriptions))
        , balancer_(balancer)
        , forwardCache_(config, ioc, *this)
        , strand_(boost::asio::make_strand(ioc))
        , timer_(strand_)
        , resolver_(strand_)
        , hooks_(hooks)
        , hooks_(std::move(hooks))
    {
        static boost::uuids::random_generator uuidGenerator;
        uuid_ = uuidGenerator();
@@ -261,28 +316,25 @@ public:
        ip_ = config.valueOr<std::string>("ip", {});
        wsPort_ = config.valueOr<std::string>("ws_port", {});

        if (auto value = config.maybeValue<std::string>("grpc_port"); value)
        {
        if (auto value = config.maybeValue<std::string>("grpc_port"); value) {
            grpcPort_ = *value;
            try
            {
                boost::asio::ip::tcp::endpoint endpoint{boost::asio::ip::make_address(ip_), std::stoi(grpcPort_)};
            try {
                boost::asio::ip::tcp::endpoint const endpoint{boost::asio::ip::make_address(ip_), std::stoi(grpcPort_)};
                std::stringstream ss;
                ss << endpoint;
                grpc::ChannelArguments chArgs;
                chArgs.SetMaxReceiveMessageSize(-1);
                stub_ = org::xrpl::rpc::v1::XRPLedgerAPIService::NewStub(
                    grpc::CreateCustomChannel(ss.str(), grpc::InsecureChannelCredentials(), chArgs));
                    grpc::CreateCustomChannel(ss.str(), grpc::InsecureChannelCredentials(), chArgs)
                );
                LOG(log_.debug()) << "Made stub for remote = " << toString();
            }
            catch (std::exception const& e)
            {
            } catch (std::exception const& e) {
                LOG(log_.debug()) << "Exception while creating stub = " << e.what() << " . Remote = " << toString();
            }
        }
    }

    ~SourceImpl()
    ~SourceImpl() override
    {
        derived().close(false);
    }
@@ -302,18 +354,13 @@ public:
    std::optional<boost::json::object>
    requestFromRippled(
        boost::json::object const& request,
        std::string const& clientIp,
        boost::asio::yield_context yield) const override
        std::optional<std::string> const& clientIp,
        boost::asio::yield_context yield
    ) const override
    {
        LOG(log_.trace()) << "Attempting to forward request to tx. "
                          << "request = " << boost::json::serialize(request);
        LOG(log_.trace()) << "Attempting to forward request to tx. Request = " << boost::json::serialize(request);

        boost::json::object response;
        if (!isConnected())
        {
            LOG(log_.error()) << "Attempted to proxy but failed to connect to tx";
            return {};
        }

        namespace beast = boost::beast;
        namespace http = beast::http;
@@ -321,10 +368,9 @@ public:
        namespace net = boost::asio;
        using tcp = boost::asio::ip::tcp;

        try
        {
        try {
            auto executor = boost::asio::get_associated_executor(yield);
            boost::beast::error_code ec;
            beast::error_code ec;
            tcp::resolver resolver{executor};

            auto ws = std::make_unique<websocket::stream<beast::tcp_stream>>(executor);
@@ -338,12 +384,15 @@ public:
            if (ec)
                return {};

            // Set a decorator to change the User-Agent of the handshake and to tell rippled to charge the client IP for
            // RPC resources. See "secure_gateway" in
            // if client ip is known, change the User-Agent of the handshake and tell rippled to charge the client
            // IP for RPC resources. See "secure_gateway" in
            // https://github.com/ripple/rippled/blob/develop/cfg/rippled-example.cfg

            // TODO: user-agent can be clio-[version]
            ws->set_option(websocket::stream_base::decorator([&clientIp](websocket::request_type& req) {
                req.set(http::field::user_agent, std::string(BOOST_BEAST_VERSION_STRING) + " websocket-client-coro");
                req.set(http::field::forwarded, "for=" + clientIp);
                if (clientIp)
                    req.set(http::field::forwarded, "for=" + *clientIp);
            }));

            ws->async_handshake(ip_, "/", yield[ec]);
@@ -363,8 +412,7 @@ public:
            auto end = begin + buffer.data().size();
            auto parsed = boost::json::parse(std::string(begin, end));

            if (!parsed.is_object())
            {
            if (!parsed.is_object()) {
                LOG(log_.error()) << "Error parsing response: " << std::string{begin, end};
                return {};
            }
@@ -373,9 +421,7 @@ public:
            response["forwarded"] = true;

            return response;
        }
        catch (std::exception const& e)
        {
        } catch (std::exception const& e) {
            LOG(log_.error()) << "Encountered exception : " << e.what();
            return {};
        }
@@ -384,15 +430,12 @@ public:
    bool
    hasLedger(uint32_t sequence) const override
    {
        std::lock_guard lck(mtx_);
        for (auto& pair : validatedLedgers_)
        {
            if (sequence >= pair.first && sequence <= pair.second)
            {
        std::lock_guard const lck(mtx_);
        for (auto& pair : validatedLedgers_) {
            if (sequence >= pair.first && sequence <= pair.second) {
                return true;
            }
            else if (sequence < pair.first)
            {
            if (sequence < pair.first) {
                // validatedLedgers_ is a sorted list of disjoint ranges
                // if the sequence comes before this range, the sequence will
                // come before all subsequent ranges
@@ -420,13 +463,12 @@ public:
        request.set_get_object_neighbors(getObjectNeighbors);
        request.set_user("ETL");

        grpc::Status status = stub_->GetLedger(&context, request, &response);
        grpc::Status const status = stub_->GetLedger(&context, request, &response);

        if (status.ok() && !response.is_unlimited())
        {
            log_.warn()
                << "is_unlimited is false. Make sure secure_gateway is set correctly on the ETL source. source = "
                << toString() << "; status = " << status.error_message();
        if (status.ok() && !response.is_unlimited()) {
            log_.warn(
            ) << "is_unlimited is false. Make sure secure_gateway is set correctly on the ETL source. source = "
              << toString() << "; status = " << status.error_message();
        }

        return {status, std::move(response)};
@@ -451,10 +493,12 @@ public:
        res["grpc_port"] = grpcPort_;

        auto last = getLastMsgTime();
        if (last.time_since_epoch().count() != 0)
        if (last.time_since_epoch().count() != 0) {
            res["last_msg_age_seconds"] = std::to_string(
                std::chrono::duration_cast<std::chrono::seconds>(std::chrono::system_clock::now() - getLastMsgTime())
                    .count());
                    .count()
            );
        }

        return res;
    }
@@ -466,13 +510,12 @@ public:
            return {{}, false};

        grpc::CompletionQueue cq;
        void* tag;
        void* tag = nullptr;
        bool ok = false;
        std::vector<etl::detail::AsyncCallData> calls;
        auto markers = getMarkers(numMarkers);

        for (size_t i = 0; i < markers.size(); ++i)
        {
        for (size_t i = 0; i < markers.size(); ++i) {
            std::optional<ripple::uint256> nextMarker;

            if (i + 1 < markers.size())
@@ -488,45 +531,39 @@ public:

        size_t numFinished = 0;
        bool abort = false;
        size_t incr = 500000;
        size_t const incr = 500000;
        size_t progress = incr;
        std::vector<std::string> edgeKeys;

        while (numFinished < calls.size() && cq.Next(&tag, &ok))
        {
            assert(tag);
        while (numFinished < calls.size() && cq.Next(&tag, &ok)) {
            ASSERT(tag != nullptr, "Tag can't be null.");
            auto ptr = static_cast<etl::detail::AsyncCallData*>(tag);

            if (!ok)
            {
            if (!ok) {
                LOG(log_.error()) << "loadInitialLedger - ok is false";
                return {{}, false}; // handle cancelled
            }
            else
            {
                LOG(log_.trace()) << "Marker prefix = " << ptr->getMarkerPrefix();

                auto result = ptr->process(stub_, cq, *backend_, abort, cacheOnly);
                if (result != etl::detail::AsyncCallData::CallStatus::MORE)
                {
                    ++numFinished;
                    LOG(log_.debug()) << "Finished a marker. "
                                      << "Current number of finished = " << numFinished;
            LOG(log_.trace()) << "Marker prefix = " << ptr->getMarkerPrefix();

                    std::string lastKey = ptr->getLastKey();
            auto result = ptr->process(stub_, cq, *backend_, abort, cacheOnly);
            if (result != etl::detail::AsyncCallData::CallStatus::MORE) {
                ++numFinished;
                LOG(log_.debug()) << "Finished a marker. "
                                  << "Current number of finished = " << numFinished;

                    if (lastKey.size())
                        edgeKeys.push_back(ptr->getLastKey());
                }
                std::string const lastKey = ptr->getLastKey();

                if (result == etl::detail::AsyncCallData::CallStatus::ERRORED)
                    abort = true;
                if (!lastKey.empty())
                    edgeKeys.push_back(ptr->getLastKey());
            }

                if (backend_->cache().size() > progress)
                {
                    LOG(log_.info()) << "Downloaded " << backend_->cache().size() << " records from rippled";
                    progress += incr;
                }
            if (result == etl::detail::AsyncCallData::CallStatus::ERRORED)
                abort = true;

            if (backend_->cache().size() > progress) {
                LOG(log_.info()) << "Downloaded " << backend_->cache().size() << " records from rippled";
                progress += incr;
            }
        }

@@ -535,11 +572,13 @@ public:
    }

    std::optional<boost::json::object>
    forwardToRippled(boost::json::object const& request, std::string const& clientIp, boost::asio::yield_context yield)
        const override
    forwardToRippled(
        boost::json::object const& request,
        std::optional<std::string> const& clientIp,
        boost::asio::yield_context yield
    ) const override
    {
        if (auto resp = forwardCache_.get(request); resp)
        {
        if (auto resp = forwardCache_.get(request); resp) {
            LOG(log_.debug()) << "request hit forwardCache";
            return resp;
        }
@@ -570,14 +609,13 @@ public:
    void
    onResolve(boost::beast::error_code ec, boost::asio::ip::tcp::resolver::results_type results)
    {
        if (ec)
        {
        if (ec) {
            // try again
            reconnect(ec);
        }
        else
        {
            boost::beast::get_lowest_layer(derived().ws()).expires_after(std::chrono::seconds(30));
        } else {
            static constexpr std::size_t LOWEST_LAYER_TIMEOUT_SECONDS = 30;
            boost::beast::get_lowest_layer(derived().ws())
                .expires_after(std::chrono::seconds(LOWEST_LAYER_TIMEOUT_SECONDS));
            boost::beast::get_lowest_layer(derived().ws()).async_connect(results, [this](auto ec, auto ep) {
                derived().onConnect(ec, ep);
            });
@@ -595,14 +633,11 @@ public:
        if (auto action = hooks_.onConnected(ec); action == SourceHooks::Action::STOP)
            return;

        if (ec)
        {
        if (ec) {
            // start over
            reconnect(ec);
        }
        else
        {
            boost::json::object jv{
        } else {
            boost::json::object const jv{
                {"command", "subscribe"},
                {"streams", {"ledger", "manifests", "validations", "transactions_proposed"}},
            };
@@ -612,10 +647,11 @@ public:
            derived().ws().set_option(
                boost::beast::websocket::stream_base::decorator([](boost::beast::websocket::request_type& req) {
                    req.set(
                        boost::beast::http::field::user_agent,
                        std::string(BOOST_BEAST_VERSION_STRING) + " clio-client");
                        boost::beast::http::field::user_agent, std::string(BOOST_BEAST_VERSION_STRING) + " clio-client"
                    );
                    req.set("X-User", "coro-client");
                }));
                })
            );

            // Send subscription message
            derived().ws().async_write(boost::asio::buffer(s), [this](auto ec, size_t size) { onWrite(ec, size); });
@@ -631,10 +667,11 @@ public:
    void
    onWrite(boost::beast::error_code ec, [[maybe_unused]] size_t size)
    {
        if (ec)
        if (ec) {
            reconnect(ec);
        else
        } else {
            derived().ws().async_read(readBuffer_, [this](auto ec, size_t size) { onRead(ec, size); });
        }
    }

    /**
@@ -646,12 +683,9 @@ public:
    void
    onRead(boost::beast::error_code ec, size_t size)
    {
        if (ec)
        {
        if (ec) {
            reconnect(ec);
        }
        else
        {
        } else {
            handleMessage(size);
            derived().ws().async_read(readBuffer_, [this](auto ec, size_t size) { onRead(ec, size); });
        }
@@ -668,8 +702,7 @@ public:
    {
        setLastMsgTime();

        try
        {
        try {
            auto const msg = boost::beast::buffers_to_string(readBuffer_.data());
            readBuffer_.consume(size);

@@ -677,65 +710,48 @@ public:
            auto const response = raw.as_object();
            uint32_t ledgerIndex = 0;

            if (response.contains("result"))
            {
            if (response.contains("result")) {
                auto const& result = response.at("result").as_object();
                if (result.contains("ledger_index"))
                    ledgerIndex = result.at("ledger_index").as_int64();

                if (result.contains("validated_ledgers"))
                {
                if (result.contains("validated_ledgers")) {
                    auto const& validatedLedgers = result.at("validated_ledgers").as_string();
                    setValidatedRange({validatedLedgers.data(), validatedLedgers.size()});
                }

                LOG(log_.info()) << "Received a message on ledger "
                                 << " subscription stream. Message : " << response << " - " << toString();
            }
            else if (response.contains("type") && response.at("type") == "ledgerClosed")
            {
            } else if (response.contains("type") && response.at("type") == "ledgerClosed") {
                LOG(log_.info()) << "Received a message on ledger "
                                 << " subscription stream. Message : " << response << " - " << toString();
                if (response.contains("ledger_index"))
                {
                if (response.contains("ledger_index")) {
                    ledgerIndex = response.at("ledger_index").as_int64();
                }
                if (response.contains("validated_ledgers"))
                {
                if (response.contains("validated_ledgers")) {
                    auto const& validatedLedgers = response.at("validated_ledgers").as_string();
                    setValidatedRange({validatedLedgers.data(), validatedLedgers.size()});
                }
            }
            else
            {
                if (balancer_.shouldPropagateTxnStream(this))
                {
                    if (response.contains("transaction"))
                    {
            } else {
                if (balancer_.shouldPropagateTxnStream(this)) {
                    if (response.contains("transaction")) {
                        forwardCache_.freshen();
                        subscriptions_->forwardProposedTransaction(response);
                    }
                    else if (response.contains("type") && response.at("type") == "validationReceived")
                    {
                    } else if (response.contains("type") && response.at("type") == "validationReceived") {
                        subscriptions_->forwardValidation(response);
                    }
                    else if (response.contains("type") && response.at("type") == "manifestReceived")
                    {
                    } else if (response.contains("type") && response.at("type") == "manifestReceived") {
                        subscriptions_->forwardManifest(response);
                    }
                }
            }

            if (ledgerIndex != 0)
            {
            if (ledgerIndex != 0) {
                LOG(log_.trace()) << "Pushing ledger sequence = " << ledgerIndex << " - " << toString();
                networkValidatedLedgers_->push(ledgerIndex);
            }

            return true;
        }
        catch (std::exception const& e)
        {
        } catch (std::exception const& e) {
            LOG(log_.error()) << "Exception in handleMessage : " << e.what();
            return false;
        }
@@ -757,6 +773,7 @@ protected:
|
||||
void
|
||||
reconnect(boost::beast::error_code ec)
|
||||
{
|
||||
static constexpr std::size_t BUFFER_SIZE = 128;
|
||||
if (paused_)
|
||||
return;
|
||||
|
||||
@@ -770,34 +787,30 @@ protected:
|
||||
// when the timer is cancelled. connection_refused will occur repeatedly
|
||||
std::string err = ec.message();
|
||||
// if we cannot connect to the transaction processing process
|
||||
if (ec.category() == boost::asio::error::get_ssl_category())
|
||||
{
|
||||
if (ec.category() == boost::asio::error::get_ssl_category()) {
|
||||
err = std::string(" (") + boost::lexical_cast<std::string>(ERR_GET_LIB(ec.value())) + "," +
|
||||
boost::lexical_cast<std::string>(ERR_GET_REASON(ec.value())) + ") ";
|
||||
|
||||
// ERR_PACK /* crypto/err/err.h */
|
||||
char buf[128];
|
||||
char buf[BUFFER_SIZE];
|
||||
::ERR_error_string_n(ec.value(), buf, sizeof(buf));
|
||||
err += buf;
|
||||
|
||||
LOG(log_.error()) << err;
|
||||
}
|
||||
|
||||
if (ec != boost::asio::error::operation_aborted && ec != boost::asio::error::connection_refused)
|
||||
{
|
||||
if (ec != boost::asio::error::operation_aborted && ec != boost::asio::error::connection_refused) {
|
||||
LOG(log_.error()) << "error code = " << ec << " - " << toString();
|
||||
}
|
||||
else
|
||||
{
|
||||
} else {
|
||||
LOG(log_.warn()) << "error code = " << ec << " - " << toString();
|
||||
}
|
||||
|
||||
// exponentially increasing timeouts, with a max of 30 seconds
|
||||
size_t waitTime = std::min(pow(2, numFailures_), 30.0);
|
||||
size_t const waitTime = std::min(pow(2, numFailures_), 30.0);
|
||||
numFailures_++;
|
||||
timer_.expires_after(boost::asio::chrono::seconds(waitTime));
|
||||
timer_.async_wait([this](auto ec) {
|
||||
bool startAgain = (ec != boost::asio::error::operation_aborted);
|
||||
bool const startAgain = (ec != boost::asio::error::operation_aborted);
|
||||
derived().close(startAgain);
|
||||
});
|
||||
}
|
||||
@@ -806,14 +819,14 @@ private:
     void
     setLastMsgTime()
     {
-        std::lock_guard lck(lastMsgTimeMtx_);
+        std::lock_guard const lck(lastMsgTimeMtx_);
         lastMsgTime_ = std::chrono::system_clock::now();
     }

     std::chrono::system_clock::time_point
     getLastMsgTime() const
     {
-        std::lock_guard lck(lastMsgTimeMtx_);
+        std::lock_guard const lck(lastMsgTimeMtx_);
         return lastMsgTime_;
     }

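This hunk only marks the lock_guard locals const; behavior is identical, since std::lock_guard locks in its constructor and unlocks in its destructor regardless. The same mutex-protected timestamp pattern in isolation:

    #include <chrono>
    #include <mutex>

    class LastMessageClock {
        mutable std::mutex mtx_;
        std::chrono::system_clock::time_point last_;

    public:
        void touch()
        {
            std::lock_guard const lck(mtx_);  // const is fine: lock/unlock happen in ctor/dtor
            last_ = std::chrono::system_clock::now();
        }

        std::chrono::system_clock::time_point get() const
        {
            std::lock_guard const lck(mtx_);
            return last_;
        }
    };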
@@ -823,29 +836,25 @@ private:
         std::vector<std::pair<uint32_t, uint32_t>> pairs;
         std::vector<std::string> ranges;
         boost::split(ranges, range, boost::is_any_of(","));
-        for (auto& pair : ranges)
-        {
+        for (auto& pair : ranges) {
             std::vector<std::string> minAndMax;

             boost::split(minAndMax, pair, boost::is_any_of("-"));

-            if (minAndMax.size() == 1)
-            {
-                uint32_t sequence = std::stoll(minAndMax[0]);
-                pairs.push_back(std::make_pair(sequence, sequence));
-            }
-            else
-            {
-                assert(minAndMax.size() == 2);
-                uint32_t min = std::stoll(minAndMax[0]);
-                uint32_t max = std::stoll(minAndMax[1]);
-                pairs.push_back(std::make_pair(min, max));
+            if (minAndMax.size() == 1) {
+                uint32_t const sequence = std::stoll(minAndMax[0]);
+                pairs.emplace_back(sequence, sequence);
+            } else {
+                ASSERT(minAndMax.size() == 2, "Min and max should be of size 2. Got size = {}", minAndMax.size());
+                uint32_t const min = std::stoll(minAndMax[0]);
+                uint32_t const max = std::stoll(minAndMax[1]);
+                pairs.emplace_back(min, max);
             }
         }
         std::sort(pairs.begin(), pairs.end(), [](auto left, auto right) { return left.first < right.first; });

         // we only hold the lock here, to avoid blocking while string processing
-        std::lock_guard lck(mtx_);
+        std::lock_guard const lck(mtx_);
         validatedLedgers_ = std::move(pairs);
         validatedLedgersRaw_ = range;
     }
@@ -853,7 +862,7 @@ private:
     std::string
     getValidatedRange() const
     {
-        std::lock_guard lck(mtx_);
+        std::lock_guard const lck(mtx_);
         return validatedLedgersRaw_;
     }
 };
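The hunk above parses a rippled complete_ledgers-style string such as "100-200,250,300-400" into sorted (min, max) pairs. A self-contained version of that parse, minus the ASSERT and locking details:

    #include <algorithm>
    #include <boost/algorithm/string.hpp>
    #include <cstdint>
    #include <string>
    #include <utility>
    #include <vector>

    // Parse "100-200,250,300-400" into sorted {min, max} pairs, as the hunk does.
    std::vector<std::pair<uint32_t, uint32_t>> parseLedgerRange(std::string const& range)
    {
        std::vector<std::pair<uint32_t, uint32_t>> pairs;
        std::vector<std::string> ranges;
        boost::split(ranges, range, boost::is_any_of(","));

        for (auto& pair : ranges) {
            std::vector<std::string> minAndMax;
            boost::split(minAndMax, pair, boost::is_any_of("-"));

            if (minAndMax.size() == 1) {
                uint32_t const sequence = std::stoll(minAndMax[0]);
                pairs.emplace_back(sequence, sequence);  // a single ledger is a one-element range
            } else {
                uint32_t const min = std::stoll(minAndMax[0]);
                uint32_t const max = std::stoll(minAndMax[1]);
                pairs.emplace_back(min, max);
            }
        }
        std::sort(pairs.begin(), pairs.end(), [](auto left, auto right) { return left.first < right.first; });
        return pairs;
    }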
@@ -861,8 +870,7 @@ private:
 /**
  * @brief Implementation of a source that uses a regular, non-secure websocket connection.
  */
-class PlainSource : public SourceImpl<PlainSource>
-{
+class PlainSource : public SourceImpl<PlainSource> {
     using StreamType = boost::beast::websocket::stream<boost::beast::tcp_stream>;
     std::unique_ptr<StreamType> ws_;

@@ -885,7 +893,8 @@ public:
         std::shared_ptr<feed::SubscriptionManager> subscriptions,
         std::shared_ptr<NetworkValidatedLedgers> validatedLedgers,
         LoadBalancer& balancer,
-        SourceHooks hooks)
+        SourceHooks hooks
+    )
         : SourceImpl(config, ioc, backend, subscriptions, validatedLedgers, balancer, std::move(hooks))
         , ws_(std::make_unique<StreamType>(strand_))
     {
@@ -919,8 +928,7 @@ public:
 /**
  * @brief Implementation of a source that uses a secure websocket connection.
  */
-class SslSource : public SourceImpl<SslSource>
-{
+class SslSource : public SourceImpl<SslSource> {
     using StreamType = boost::beast::websocket::stream<boost::beast::ssl_stream<boost::beast::tcp_stream>>;
     std::optional<std::reference_wrapper<boost::asio::ssl::context>> sslCtx_;
     std::unique_ptr<StreamType> ws_;
@@ -946,7 +954,8 @@ public:
         std::shared_ptr<feed::SubscriptionManager> subscriptions,
         std::shared_ptr<NetworkValidatedLedgers> validatedLedgers,
         LoadBalancer& balancer,
-        SourceHooks hooks)
+        SourceHooks hooks
+    )
         : SourceImpl(config, ioc, backend, subscriptions, validatedLedgers, balancer, std::move(hooks))
         , sslCtx_(sslCtx)
         , ws_(std::make_unique<StreamType>(strand_, *sslCtx_))
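PlainSource and SslSource differ only in their websocket stream type; SourceImpl is a CRTP base, which is why shared code such as reconnect above can call derived().close() with no virtual dispatch. A stripped-down sketch of the pattern (class bodies here are illustrative, not Clio's real interfaces):

    #include <iostream>

    // CRTP base: shared logic calls into the concrete source without virtuals.
    template <typename Derived>
    class SourceBase {
    protected:
        Derived& derived() { return static_cast<Derived&>(*this); }

    public:
        void reconnect()
        {
            // ... backoff and bookkeeping shared by all sources ...
            derived().close(/* startAgain = */ true);
        }
    };

    class PlainSource : public SourceBase<PlainSource> {
    public:
        void close(bool startAgain) { std::cout << "plain close, restart=" << startAgain << "\n"; }
    };

    class SslSource : public SourceBase<SslSource> {
    public:
        void close(bool startAgain) { std::cout << "ssl close, restart=" << startAgain << "\n"; }
    };

    int main()
    {
        PlainSource p;
        p.reconnect();  // prints "plain close, restart=1"
    }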
@@ -26,8 +26,7 @@ namespace etl {
 /**
  * @brief Represents the state of the ETL subsystem.
  */
-struct SystemState
-{
+struct SystemState {
     /**
      * @brief Whether the process is in strict read-only mode.
      *
@@ -19,10 +19,11 @@

 #pragma once

-#include <etl/SystemState.h>
-#include <util/log/Logger.h>
+#include "etl/SystemState.hpp"
+#include "util/log/Logger.hpp"

 #include <boost/asio/io_context.hpp>
+#include <boost/asio/post.hpp>
 #include <boost/asio/steady_timer.hpp>

 #include <chrono>
@@ -30,21 +31,18 @@

 namespace etl::detail {

-struct AmendmentBlockAction
-{
+struct AmendmentBlockAction {
     void
     operator()()
     {
-        static util::Logger log{"ETL"};
-        LOG(log.fatal())
-            << "Can't process new ledgers: The current ETL source is not compatible with the version of the "
-               "libxrpl Clio is currently using. Please upgrade Clio to a newer version.";
+        static util::Logger const log{"ETL"};
+        LOG(log.fatal()) << "Can't process new ledgers: The current ETL source is not compatible with the version of "
+                         << "the libxrpl Clio is currently using. Please upgrade Clio to a newer version.";
     }
 };

 template <typename ActionCallableType = AmendmentBlockAction>
-class AmendmentBlockHandler
-{
+class AmendmentBlockHandler {
     std::reference_wrapper<boost::asio::io_context> ctx_;
     std::reference_wrapper<SystemState> state_;
     boost::asio::steady_timer timer_;
@@ -58,7 +56,8 @@ public:
         boost::asio::io_context& ioc,
         SystemState& state,
         DurationType interval = DurationType{1},
-        ActionCallableType&& action = ActionCallableType())
+        ActionCallableType&& action = ActionCallableType()
+    )
         : ctx_{std::ref(ioc)}
         , state_{std::ref(state)}
         , timer_{ioc}
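AmendmentBlockHandler's job is to re-arm a steady_timer so its action (the fatal log above) keeps firing on an interval while the server is amendment blocked. A minimal self-rescheduling timer along those lines, assuming a one-second interval like the DurationType{1} default above:

    #include <boost/asio/io_context.hpp>
    #include <boost/asio/steady_timer.hpp>
    #include <chrono>
    #include <functional>
    #include <iostream>

    // Re-arm a steady_timer so `action` keeps firing every `interval`.
    void scheduleRepeating(boost::asio::steady_timer& timer, std::chrono::seconds interval, std::function<void()> action)
    {
        timer.expires_after(interval);
        timer.async_wait([&timer, interval, action](auto const& ec) {
            if (ec)
                return;  // timer cancelled; stop rescheduling
            action();
            scheduleRepeating(timer, interval, action);
        });
    }

    int main()
    {
        boost::asio::io_context ioc;
        boost::asio::steady_timer timer{ioc};
        scheduleRepeating(timer, std::chrono::seconds{1}, [] { std::cout << "still amendment blocked\n"; });
        ioc.run_for(std::chrono::seconds{3});  // let it fire a few times, then exit
    }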
@@ -19,16 +19,30 @@

 #pragma once

-#include <etl/NFTHelpers.h>
-#include <util/log/Logger.h>
+#include "data/BackendInterface.hpp"
+#include "data/Types.hpp"
+#include "etl/NFTHelpers.hpp"
+#include "util/Assert.hpp"
+#include "util/log/Logger.hpp"

-#include <ripple/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>
+#include <grpcpp/client_context.h>
 #include <grpcpp/grpcpp.h>
+#include <grpcpp/support/status.h>
+#include <org/xrpl/rpc/v1/get_ledger_data.pb.h>
+#include <ripple/basics/base_uint.h>
+#include <ripple/basics/strHex.h>
+#include <ripple/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>

+#include <cstdint>
+#include <memory>
+#include <optional>
+#include <string>
+#include <utility>
+#include <vector>

 namespace etl::detail {

-class AsyncCallData
-{
+class AsyncCallData {
     util::Logger log_{"ETL"};

     std::unique_ptr<org::xrpl::rpc::v1::GetLedgerDataResponse> cur_;
@@ -46,22 +60,26 @@ public:
     AsyncCallData(uint32_t seq, ripple::uint256 const& marker, std::optional<ripple::uint256> const& nextMarker)
     {
         request_.mutable_ledger()->set_sequence(seq);
-        if (marker.isNonZero())
-        {
-            request_.set_marker(marker.data(), marker.size());
+        if (marker.isNonZero()) {
+            request_.set_marker(marker.data(), ripple::uint256::size());
         }
         request_.set_user("ETL");
         nextPrefix_ = 0x00;
         if (nextMarker)
             nextPrefix_ = nextMarker->data()[0];

-        unsigned char prefix = marker.data()[0];
+        unsigned char const prefix = marker.data()[0];

         LOG(log_.debug()) << "Setting up AsyncCallData. marker = " << ripple::strHex(marker)
                           << " . prefix = " << ripple::strHex(std::string(1, prefix))
                           << " . nextPrefix_ = " << ripple::strHex(std::string(1, nextPrefix_));

-        assert(nextPrefix_ > prefix || nextPrefix_ == 0x00);
+        ASSERT(
+            nextPrefix_ > prefix || nextPrefix_ == 0x00,
+            "Next prefix must be greater than current prefix. Got: nextPrefix_ = {}, prefix = {}",
+            nextPrefix_,
+            prefix
+        );

         cur_ = std::make_unique<org::xrpl::rpc::v1::GetLedgerDataResponse>();
         next_ = std::make_unique<org::xrpl::rpc::v1::GetLedgerDataResponse>();
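AsyncCallData downloads one slice of the ledger keyspace: it starts at marker and stops once returned keys reach nextMarker's first byte (nextPrefix_, with 0x00 meaning "to the end"), which is how the 256 possible first-byte prefixes get split across parallel gRPC calls. One plausible way such prefix slices could be produced, shown for illustration only:

    #include <cstdint>
    #include <iostream>
    #include <utility>
    #include <vector>

    // Split the 256 possible first-byte prefixes into `numMarkers` contiguous slices.
    // Each slice is [startPrefix, nextPrefix), with nextPrefix == 0 meaning "to the end",
    // matching the nextPrefix_ == 0x00 sentinel in the hunk above.
    std::vector<std::pair<unsigned char, unsigned char>> splitPrefixSpace(unsigned numMarkers)
    {
        std::vector<std::pair<unsigned char, unsigned char>> slices;
        unsigned const step = 256 / numMarkers;
        for (unsigned i = 0; i < numMarkers; ++i) {
            unsigned char const start = static_cast<unsigned char>(i * step);
            unsigned char const next = (i + 1 == numMarkers) ? 0 : static_cast<unsigned char>((i + 1) * step);
            slices.emplace_back(start, next);
        }
        return slices;
    }

    int main()
    {
        for (auto const& [start, next] : splitPrefixSpace(4))
            std::cout << "slice: [" << int(start) << ", " << int(next) << ")\n";
        // slice: [0, 64) ... [192, 0)  -- 0 meaning "until the last key"
    }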
@@ -76,25 +94,23 @@ public:
         grpc::CompletionQueue& cq,
         BackendInterface& backend,
         bool abort,
-        bool cacheOnly = false)
+        bool cacheOnly = false
+    )
     {
         LOG(log_.trace()) << "Processing response. "
                           << "Marker prefix = " << getMarkerPrefix();
-        if (abort)
-        {
+        if (abort) {
             LOG(log_.error()) << "AsyncCallData aborted";
             return CallStatus::ERRORED;
         }
-        if (!status_.ok())
-        {
+        if (!status_.ok()) {
             LOG(log_.error()) << "AsyncCallData status_ not ok: "
                               << " code = " << status_.error_code() << " message = " << status_.error_message();
             return CallStatus::ERRORED;
         }
-        if (!next_->is_unlimited())
-        {
-            LOG(log_.warn()) << "AsyncCallData is_unlimited is false. Make sure "
-                                "secure_gateway is set correctly at the ETL source";
+        if (!next_->is_unlimited()) {
+            LOG(log_.warn()) << "AsyncCallData is_unlimited is false. "
+                             << "Make sure secure_gateway is set correctly at the ETL source";
         }

         std::swap(cur_, next_);
@@ -102,18 +118,17 @@ public:
         bool more = true;

         // if no marker returned, we are done
-        if (cur_->marker().size() == 0)
+        if (cur_->marker().empty())
             more = false;

         // if returned marker is greater than our end, we are done
-        unsigned char prefix = cur_->marker()[0];
+        unsigned char const prefix = cur_->marker()[0];
         if (nextPrefix_ != 0x00 && prefix >= nextPrefix_)
             more = false;

         // if we are not done, make the next async call
-        if (more)
-        {
-            request_.set_marker(std::move(cur_->marker()));
+        if (more) {
+            request_.set_marker(cur_->marker());
             call(stub, cq);
         }

@@ -123,25 +138,23 @@ public:
         std::vector<data::LedgerObject> cacheUpdates;
         cacheUpdates.reserve(numObjects);

-        for (int i = 0; i < numObjects; ++i)
-        {
+        for (int i = 0; i < numObjects; ++i) {
             auto& obj = *(cur_->mutable_ledger_objects()->mutable_objects(i));
-            if (!more && nextPrefix_ != 0x00)
-            {
+            if (!more && nextPrefix_ != 0x00) {
                 if (static_cast<unsigned char>(obj.key()[0]) >= nextPrefix_)
                     continue;
             }
             cacheUpdates.push_back(
-                {*ripple::uint256::fromVoidChecked(obj.key()),
-                 {obj.mutable_data()->begin(), obj.mutable_data()->end()}});
-            if (!cacheOnly)
-            {
-                if (lastKey_.size())
+                {*ripple::uint256::fromVoidChecked(obj.key()), {obj.mutable_data()->begin(), obj.mutable_data()->end()}}
+            );
+            if (!cacheOnly) {
+                if (!lastKey_.empty())
                     backend.writeSuccessor(std::move(lastKey_), request_.ledger().sequence(), std::string{obj.key()});
                 lastKey_ = obj.key();
                 backend.writeNFTs(getNFTDataFromObj(request_.ledger().sequence(), obj.key(), obj.data()));
                 backend.writeLedgerObject(
-                    std::move(*obj.mutable_key()), request_.ledger().sequence(), std::move(*obj.mutable_data()));
+                    std::move(*obj.mutable_key()), request_.ledger().sequence(), std::move(*obj.mutable_data())
+                );
             }
         }
         backend.cache().update(cacheUpdates, request_.ledger().sequence(), cacheOnly);
@@ -156,7 +169,8 @@ public:
         context_ = std::make_unique<grpc::ClientContext>();

         std::unique_ptr<grpc::ClientAsyncResponseReader<org::xrpl::rpc::v1::GetLedgerDataResponse>> rpc(
-            stub->PrepareAsyncGetLedgerData(context_.get(), request_, &cq));
+            stub->PrepareAsyncGetLedgerData(context_.get(), request_, &cq)
+        );

         rpc->StartCall();

@@ -166,10 +180,10 @@ public:
     std::string
     getMarkerPrefix()
     {
-        if (next_->marker().size() == 0)
+        if (next_->marker().empty()) {
             return "";
-        else
-            return ripple::strHex(std::string{next_->marker().data()[0]});
+        }
+        return ripple::strHex(std::string{next_->marker().data()[0]});
     }

     std::string
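The loop above threads a successor chain through the written objects: each new key is recorded as the successor of the previous one (lastKey_), which is what lets Clio later iterate ledger state in key order. The chaining logic in isolation, with a hypothetical writeSuccessor sink:

    #include <iostream>
    #include <string>
    #include <vector>

    // Hypothetical sink standing in for backend.writeSuccessor in the hunk above.
    void writeSuccessor(std::string const& key, std::string const& successor)
    {
        std::cout << key << " -> " << successor << "\n";
    }

    // Link each key to the one seen before it, as the streaming loop does with lastKey_.
    void chainSuccessors(std::vector<std::string> const& keys)
    {
        std::string lastKey;
        for (auto const& key : keys) {
            if (!lastKey.empty())
                writeSuccessor(lastKey, key);
            lastKey = key;
        }
    }

    int main()
    {
        chainSuccessors({"A", "B", "C"});  // prints A -> B, B -> C
    }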
@@ -19,20 +19,48 @@

 #pragma once

-#include <data/BackendInterface.h>
-#include <util/log/Logger.h>
+#include "data/BackendInterface.hpp"
+#include "data/Types.hpp"
+#include "util/Assert.hpp"
+#include "util/log/Logger.hpp"

-#include <ripple/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>
+#include <boost/algorithm/hex.hpp>
 #include <boost/algorithm/string.hpp>
+#include <boost/algorithm/string/predicate.hpp>
 #include <boost/asio/io_context.hpp>
+#include <boost/asio/ip/tcp.hpp>
 #include <boost/asio/spawn.hpp>
 #include <boost/beast/core.hpp>
+#include <boost/beast/core/buffers_to_string.hpp>
+#include <boost/beast/core/error.hpp>
+#include <boost/beast/core/flat_buffer.hpp>
+#include <boost/beast/core/string.hpp>
+#include <boost/beast/core/tcp_stream.hpp>
 #include <boost/beast/websocket.hpp>
+#include <boost/beast/websocket/stream.hpp>
+#include <boost/json/object.hpp>
+#include <boost/json/parse.hpp>
+#include <boost/json/value.hpp>
 #include <grpcpp/grpcpp.h>
+#include <ripple/basics/base_uint.h>
+#include <ripple/basics/strHex.h>
+#include <ripple/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>

 #include <atomic>
 #include <chrono>
-#include <mutex>
+#include <cstddef>
+#include <cstdint>
+#include <exception>
+#include <functional>
+#include <iterator>
+#include <memory>
+#include <optional>
+#include <random>
+#include <sstream>
+#include <string>
+#include <thread>
+#include <utility>
+#include <vector>

 namespace etl::detail {

@@ -40,8 +68,11 @@ namespace etl::detail {
  * @brief Cache loading interface
  */
 template <typename CacheType>
-class CacheLoader
-{
+class CacheLoader {
+    static constexpr size_t DEFAULT_NUM_CACHE_DIFFS = 32;
+    static constexpr size_t DEFAULT_NUM_CACHE_MARKERS = 48;
+    static constexpr size_t DEFAULT_CACHE_PAGE_FETCH_SIZE = 512;
+
     enum class LoadStyle { ASYNC, SYNC, NOT_AT_ALL };

     util::Logger log_{"ETL"};
@@ -52,18 +83,17 @@ class CacheLoader
     LoadStyle cacheLoadStyle_ = LoadStyle::ASYNC;

     // number of diffs to use to generate cursors to traverse the ledger in parallel during initial cache download
-    size_t numCacheDiffs_ = 32;
+    size_t numCacheDiffs_ = DEFAULT_NUM_CACHE_DIFFS;

     // number of markers to use at one time to traverse the ledger in parallel during initial cache download
-    size_t numCacheMarkers_ = 48;
+    size_t numCacheMarkers_ = DEFAULT_NUM_CACHE_MARKERS;

     // number of ledger objects to fetch concurrently per marker during cache download
-    size_t cachePageFetchSize_ = 512;
+    size_t cachePageFetchSize_ = DEFAULT_CACHE_PAGE_FETCH_SIZE;

-    struct ClioPeer
-    {
+    struct ClioPeer {
         std::string ip;
-        int port;
+        int port{};
     };

     std::vector<ClioPeer> clioPeers_;
@@ -76,14 +106,13 @@ public:
         util::Config const& config,
         boost::asio::io_context& ioc,
         std::shared_ptr<BackendInterface> const& backend,
-        CacheType& ledgerCache)
+        CacheType& ledgerCache
+    )
         : ioContext_{std::ref(ioc)}, backend_{backend}, cache_{ledgerCache}
     {
-        if (config.contains("cache"))
-        {
+        if (config.contains("cache")) {
             auto const cache = config.section("cache");
-            if (auto entry = cache.maybeValue<std::string>("load"); entry)
-            {
+            if (auto entry = cache.maybeValue<std::string>("load"); entry) {
                 if (boost::iequals(*entry, "sync"))
                     cacheLoadStyle_ = LoadStyle::SYNC;
                 if (boost::iequals(*entry, "async"))
@@ -96,10 +125,8 @@ public:
             numCacheMarkers_ = cache.valueOr<size_t>("num_markers", numCacheMarkers_);
             cachePageFetchSize_ = cache.valueOr<size_t>("page_fetch_size", cachePageFetchSize_);

-            if (auto peers = cache.maybeArray("peers"); peers)
-            {
-                for (auto const& peer : *peers)
-                {
+            if (auto peers = cache.maybeArray("peers"); peers) {
+                for (auto const& peer : *peers) {
                     auto ip = peer.value<std::string>("ip");
                     auto port = peer.value<uint32_t>("port");

@@ -107,7 +134,7 @@ public:
                     clioPeers_.push_back({ip, port});
                 }

-                unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
+                unsigned const seed = std::chrono::system_clock::now().time_since_epoch().count();
                 std::shuffle(std::begin(clioPeers_), std::end(clioPeers_), std::default_random_engine(seed));
             }
         }
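The constructor hunk reads optional cache settings (load style, num_diffs, num_markers, page_fetch_size, peers) and shuffles the configured peer list so that not every node hammers the same peer first. util::Config is Clio's own wrapper; a rough equivalent over plain boost::json, with the same defaulting behavior, might look like:

    #include <boost/json.hpp>
    #include <algorithm>
    #include <chrono>
    #include <cstddef>
    #include <random>
    #include <string>
    #include <vector>

    struct CacheSettings {
        std::string loadStyle = "async";
        std::size_t numDiffs = 32;  // same default as DEFAULT_NUM_CACHE_DIFFS above
        std::vector<std::string> peers;
    };

    // Rough stand-in for the util::Config reads in the hunk above.
    CacheSettings parseCacheSection(boost::json::object const& config)
    {
        CacheSettings out;
        if (!config.contains("cache"))
            return out;

        auto const& cache = config.at("cache").as_object();
        if (cache.contains("load"))
            out.loadStyle = std::string{cache.at("load").as_string()};
        if (cache.contains("num_diffs"))
            out.numDiffs = cache.at("num_diffs").to_number<std::size_t>();
        if (cache.contains("peers")) {
            for (auto const& peer : cache.at("peers").as_array())
                out.peers.push_back(std::string{peer.at("ip").as_string()});
            // Shuffle so different Clio nodes spread their load across peers.
            unsigned const seed = std::chrono::system_clock::now().time_since_epoch().count();
            std::shuffle(out.peers.begin(), out.peers.end(), std::default_random_engine(seed));
        }
        return out;
    }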
@@ -129,24 +156,17 @@ public:
     void
     load(uint32_t seq)
     {
-        if (cacheLoadStyle_ == LoadStyle::NOT_AT_ALL)
-        {
+        if (cacheLoadStyle_ == LoadStyle::NOT_AT_ALL) {
             cache_.get().setDisabled();
             LOG(log_.warn()) << "Cache is disabled. Not loading";
             return;
         }

-        if (cache_.get().isFull())
-        {
-            assert(false);
-            return;
-        }
+        ASSERT(!cache_.get().isFull(), "Cache must not be full. seq = {}", seq);

-        if (clioPeers_.size() > 0)
-        {
+        if (!clioPeers_.empty()) {
             boost::asio::spawn(ioContext_.get(), [this, seq](boost::asio::yield_context yield) {
-                for (auto const& peer : clioPeers_)
-                {
+                for (auto const& peer : clioPeers_) {
                     // returns true on success
                     if (loadCacheFromClioPeer(seq, peer.ip, std::to_string(peer.port), yield))
                         return;
@@ -157,16 +177,14 @@ public:
             });
             return;
         }
-        else
-        {
-            loadCacheFromDb(seq);
-        }

+        loadCacheFromDb(seq);

         // If loading synchronously, poll cache until full
-        while (cacheLoadStyle_ == LoadStyle::SYNC && not cache_.get().isFull())
-        {
+        static constexpr size_t SLEEP_TIME_SECONDS = 10;
+        while (cacheLoadStyle_ == LoadStyle::SYNC && not cache_.get().isFull()) {
             LOG(log_.debug()) << "Cache not full. Cache size = " << cache_.get().size() << ". Sleeping ...";
-            std::this_thread::sleep_for(std::chrono::seconds(10));
+            std::this_thread::sleep_for(std::chrono::seconds(SLEEP_TIME_SECONDS));
             if (cache_.get().isFull())
                 LOG(log_.info()) << "Cache is full. Cache size = " << cache_.get().size();
         }
@@ -184,17 +202,16 @@ private:
         uint32_t ledgerIndex,
         std::string const& ip,
         std::string const& port,
-        boost::asio::yield_context yield)
+        boost::asio::yield_context yield
+    )
     {
         LOG(log_.info()) << "Loading cache from peer. ip = " << ip << " . port = " << port;
         namespace beast = boost::beast;          // from <boost/beast.hpp>
         namespace http = beast::http;            // from <boost/beast/http.hpp>
         namespace websocket = beast::websocket;  // from
         namespace net = boost::asio;             // from
         using tcp = boost::asio::ip::tcp;        // from
-        try
-        {
-            boost::beast::error_code ec;
+        try {
+            beast::error_code ec;
             // These objects perform our I/O
             tcp::resolver resolver{ioContext_.get()};

@@ -221,13 +238,15 @@ private:
             std::optional<boost::json::value> marker;

             LOG(log_.trace()) << "Sending request";
+            static constexpr int LIMIT = 2048;
             auto getRequest = [&](auto marker) {
                 boost::json::object request = {
                     {"command", "ledger_data"},
                     {"ledger_index", ledgerIndex},
                     {"binary", true},
                     {"out_of_order", true},
-                    {"limit", 2048}};
+                    {"limit", LIMIT}
+                };

                 if (marker)
                     request["marker"] = *marker;
@@ -236,20 +255,17 @@ private:

             bool started = false;
             size_t numAttempts = 0;
-            do
-            {
+            do {
                 // Send the message
                 ws->async_write(net::buffer(boost::json::serialize(getRequest(marker))), yield[ec]);
-                if (ec)
-                {
+                if (ec) {
                     LOG(log_.error()) << "error writing = " << ec.message();
                     return false;
                 }

                 beast::flat_buffer buffer;
                 ws->async_read(buffer, yield[ec]);
-                if (ec)
-                {
+                if (ec) {
                     LOG(log_.error()) << "error reading = " << ec.message();
                     return false;
                 }
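Each page request above re-attaches the marker returned by the previous ledger_data response. Just the request builder, mirroring the JSON shape in the hunk:

    #include <boost/json.hpp>
    #include <cstdint>
    #include <iostream>
    #include <optional>

    // Build one "ledger_data" page request; pass back the marker from the previous page.
    boost::json::object makeLedgerDataRequest(uint32_t ledgerIndex, std::optional<boost::json::value> const& marker)
    {
        boost::json::object request = {
            {"command", "ledger_data"},
            {"ledger_index", ledgerIndex},
            {"binary", true},
            {"out_of_order", true},
            {"limit", 2048}
        };
        if (marker)
            request["marker"] = *marker;
        return request;
    }

    int main()
    {
        auto const first = makeLedgerDataRequest(80000000, std::nullopt);  // ledger index is illustrative
        std::cout << boost::json::serialize(first) << "\n";
    }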
@@ -257,26 +273,22 @@ private:
                 auto raw = beast::buffers_to_string(buffer.data());
                 auto parsed = boost::json::parse(raw);

-                if (!parsed.is_object())
-                {
+                if (!parsed.is_object()) {
                     LOG(log_.error()) << "Error parsing response: " << raw;
                     return false;
                 }
                 LOG(log_.trace()) << "Successfully parsed response " << parsed;

-                if (auto const& response = parsed.as_object(); response.contains("error"))
-                {
+                if (auto const& response = parsed.as_object(); response.contains("error")) {
                     LOG(log_.error()) << "Response contains error: " << response;
                     auto const& err = response.at("error");
-                    if (err.is_string() && err.as_string() == "lgrNotFound")
-                    {
+                    if (err.is_string() && err.as_string() == "lgrNotFound") {
+                        static constexpr size_t MAX_ATTEMPTS = 5;
                         ++numAttempts;
-                        if (numAttempts >= 5)
-                        {
-                            LOG(log_.error()) << " ledger not found at peer after 5 attempts. "
-                                                 "peer = "
-                                              << ip << " ledger = " << ledgerIndex
-                                              << ". Check your config and the health of the peer";
+                        if (numAttempts >= MAX_ATTEMPTS) {
+                            LOG(log_.error())
+                                << " ledger not found at peer after 5 attempts. peer = " << ip
+                                << " ledger = " << ledgerIndex << ". Check your config and the health of the peer";
                             return false;
                         }
                         LOG(log_.warn()) << "Ledger not found. ledger = " << ledgerIndex
@@ -289,28 +301,26 @@ private:
                 started = true;
                 auto const& response = parsed.as_object()["result"].as_object();

-                if (!response.contains("cache_full") || !response.at("cache_full").as_bool())
-                {
+                if (!response.contains("cache_full") || !response.at("cache_full").as_bool()) {
                     LOG(log_.error()) << "cache not full for clio node. ip = " << ip;
                     return false;
                 }
-                if (response.contains("marker"))
+                if (response.contains("marker")) {
                     marker = response.at("marker");
-                else
+                } else {
                     marker = {};
+                }

                 auto const& state = response.at("state").as_array();

                 std::vector<data::LedgerObject> objects;
                 objects.reserve(state.size());
-                for (auto const& ledgerObject : state)
-                {
+                for (auto const& ledgerObject : state) {
                     auto const& obj = ledgerObject.as_object();

                     data::LedgerObject stateObject = {};

-                    if (!stateObject.key.parseHex(obj.at("index").as_string().c_str()))
-                    {
+                    if (!stateObject.key.parseHex(obj.at("index").as_string().c_str())) {
                         LOG(log_.error()) << "failed to parse object id";
                         return false;
                     }
@@ -327,9 +337,7 @@ private:

             cache_.get().setFull();
             return true;
-        }
-        catch (std::exception const& e)
-        {
+        } catch (std::exception const& e) {
             LOG(log_.error()) << "Encountered exception : " << e.what() << " - ip = " << ip;
             return false;
         }
@@ -343,8 +351,7 @@ private:

         auto append = [](auto&& a, auto&& b) { a.insert(std::end(a), std::begin(b), std::end(b)); };

-        for (size_t i = 0; i < numCacheDiffs_; ++i)
-        {
+        for (size_t i = 0; i < numCacheDiffs_; ++i) {
             append(diff, data::synchronousAndRetryOnTimeout([&](auto yield) {
                 return backend_->fetchLedgerDiff(seq - i, yield);
             }));
@@ -356,16 +363,18 @@ private:

         diff.erase(std::unique(diff.begin(), diff.end(), [](auto a, auto b) { return a.key == b.key; }), diff.end());

-        cursors.push_back({});
-        for (auto const& obj : diff)
-            if (obj.blob.size())
-                cursors.push_back({obj.key});
-        cursors.push_back({});
+        cursors.emplace_back();
+        for (auto const& obj : diff) {
+            if (!obj.blob.empty())
+                cursors.emplace_back(obj.key);
+        }
+        cursors.emplace_back();

         std::stringstream cursorStr;
-        for (auto const& c : cursors)
+        for (auto const& c : cursors) {
             if (c)
                 cursorStr << ripple::strHex(*c) << ", ";
+        }

         LOG(log_.info()) << "Loading cache. num cursors = " << cursors.size() - 1;
         LOG(log_.trace()) << "cursors = " << cursorStr.str();
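loadCacheFromDb turns keys from recent ledger diffs into cursor boundaries: one leading and one trailing open cursor plus one per surviving key, so N+1 boundaries define N ranges that can be downloaded in parallel. The construction in isolation (keys simplified to integers; the real code uses ripple::uint256 and checks obj.blob):

    #include <cstdint>
    #include <iostream>
    #include <optional>
    #include <vector>

    struct LedgerObject {
        uint32_t key;  // simplified: real keys are ripple::uint256
        bool deleted;  // the real code checks obj.blob.empty() instead
    };

    // Build cursor boundaries from diff keys; nullopt at both ends means
    // "from the first key" / "to the last key", as in the hunk above.
    std::vector<std::optional<uint32_t>> makeCursors(std::vector<LedgerObject> const& diff)
    {
        std::vector<std::optional<uint32_t>> cursors;
        cursors.emplace_back();
        for (auto const& obj : diff) {
            if (!obj.deleted)
                cursors.emplace_back(obj.key);
        }
        cursors.emplace_back();
        return cursors;
    }

    int main()
    {
        auto const cursors = makeCursors({{10, false}, {20, true}, {30, false}});
        std::cout << cursors.size() - 1 << " ranges\n";  // 3 ranges from 4 boundaries
    }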
@@ -375,8 +384,7 @@ private:
         auto markers = std::make_shared<std::atomic_int>(0);
         auto numRemaining = std::make_shared<std::atomic_int>(cursors.size() - 1);

-        for (size_t i = 0; i < cursors.size() - 1; ++i)
-        {
+        for (size_t i = 0; i < cursors.size() - 1; ++i) {
             auto const start = cursors.at(i);
             auto const end = cursors.at(i + 1);

@@ -391,8 +399,7 @@ private:
                     cursor.has_value() ? ripple::strHex(cursor.value()) : ripple::strHex(data::firstKey);
                 LOG(log_.debug()) << "Starting a cursor: " << cursorStr << " markers = " << *markers;

-                while (not stopping_)
-                {
+                while (not stopping_) {
                     auto res = data::retryOnTimeout([this, seq, &cursor, yield]() {
                         return backend_->fetchLedgerPage(cursor, seq, cachePageFetchSize_, false, yield);
                     });
@@ -412,21 +419,19 @@ private:
                 --(*markers);
                 markers->notify_one();

-                if (--(*numRemaining) == 0)
-                {
+                if (--(*numRemaining) == 0) {
                     auto endTime = std::chrono::system_clock::now();
                     auto duration = std::chrono::duration_cast<std::chrono::seconds>(endTime - startTime);

                     LOG(log_.info()) << "Finished loading cache. cache size = " << cache_.get().size()
                                      << ". Took " << duration.count() << " seconds";
                     cache_.get().setFull();
-                }
-                else
-                {
+                } else {
                     LOG(log_.info()) << "Finished a cursor. num remaining = " << *numRemaining
                                      << " start = " << cursorStr << " markers = " << *markers;
                 }
-            });
+            }
+            );
         }
     }};
 }
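The shared atomic counters above let concurrent cursor workers throttle themselves (markers) and detect overall completion (numRemaining) without a mutex; whichever worker decrements numRemaining to zero performs the setFull()-style finalization. The countdown idiom with plain threads:

    #include <atomic>
    #include <iostream>
    #include <memory>
    #include <thread>
    #include <vector>

    int main()
    {
        constexpr int numWorkers = 4;
        auto numRemaining = std::make_shared<std::atomic_int>(numWorkers);

        std::vector<std::thread> workers;
        for (int i = 0; i < numWorkers; ++i) {
            workers.emplace_back([numRemaining, i] {
                // ... do a slice of the work ...
                if (--(*numRemaining) == 0) {
                    // exactly one worker observes zero: a safe place to finalize
                    std::cout << "worker " << i << " finished last\n";
                }
            });
        }
        for (auto& w : workers)
            w.join();
    }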
@@ -19,10 +19,13 @@

 #pragma once

-#include <etl/ETLHelpers.h>
-#include <util/log/Logger.h>
+#include "etl/ETLHelpers.hpp"
+#include "util/log/Logger.hpp"

+#include <cstddef>
+#include <cstdint>
 #include <memory>
 #include <optional>
 #include <vector>

 namespace etl::detail {
@@ -31,8 +34,7 @@ namespace etl::detail {
 * @brief A collection of thread safe async queues used by Extractor and Transformer to communicate
  */
 template <typename RawDataType>
-class ExtractionDataPipe
-{
+class ExtractionDataPipe {
 public:
     using DataType = std::optional<RawDataType>;
     using QueueType = ThreadSafeQueue<DataType>;  // TODO: probably should use boost::lockfree::queue instead?
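ThreadSafeQueue comes from etl/ETLHelpers.hpp; the pipe wraps several of them so the Extractor can hand std::optional<RawDataType> batches to the Transformer, with std::nullopt signalling end-of-stream. A minimal mutex-and-condvar queue in that spirit (Clio's real queue also bounds its size, and the TODO above suggests boost::lockfree::queue as an alternative):

    #include <condition_variable>
    #include <deque>
    #include <mutex>
    #include <utility>

    // Minimal sketch of a ThreadSafeQueue-like channel, not Clio's real implementation.
    template <typename T>
    class SimpleThreadSafeQueue {
        std::mutex mtx_;
        std::condition_variable cv_;
        std::deque<T> items_;

    public:
        void push(T item)
        {
            {
                std::lock_guard const lck(mtx_);
                items_.push_back(std::move(item));
            }
            cv_.notify_one();
        }

        T pop()  // blocks until an item is available
        {
            std::unique_lock lck(mtx_);
            cv_.wait(lck, [this] { return !items_.empty(); });
            T item = std::move(items_.front());
            items_.pop_front();
            return item;
        }
    };

    // Usage mirrors the pipe: push std::optional<Data>{...} batches, then std::nullopt to finish.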
@@ -19,15 +19,20 @@

 #pragma once

-#include <etl/SystemState.h>
-#include <util/Profiler.h>
-#include <util/log/Logger.h>
+#include "etl/SystemState.hpp"
+#include "util/Assert.hpp"
+#include "util/Profiler.hpp"
+#include "util/log/Logger.hpp"

 #include <ripple/beast/core/CurrentThreadName.h>

 #include <chrono>
-#include <mutex>
+#include <cstdint>
+#include <functional>
+#include <memory>
+#include <optional>
 #include <thread>
 #include <utility>

 namespace etl::detail {

@@ -35,8 +40,7 @@ namespace etl::detail {
 * @brief Extractor thread that is fetching GRPC data and enqueue it on the DataPipeType
  */
 template <typename DataPipeType, typename NetworkValidatedLedgersType, typename LedgerFetcherType>
-class Extractor
-{
+class Extractor {
     util::Logger log_{"ETL"};

     std::reference_wrapper<DataPipeType> pipe_;
@@ -55,9 +59,10 @@ public:
         LedgerFetcherType& ledgerFetcher,
         uint32_t startSequence,
         std::optional<uint32_t> finishSequence,
-        SystemState const& state)
+        SystemState const& state
+    )
         : pipe_(std::ref(pipe))
-        , networkValidatedLedgers_{networkValidatedLedgers}
+        , networkValidatedLedgers_{std::move(networkValidatedLedgers)}
         , ledgerFetcher_{std::ref(ledgerFetcher)}
         , startSequence_{startSequence}
         , finishSequence_{finishSequence}
@@ -75,7 +80,7 @@ public:
     void
     waitTillFinished()
    {
-        assert(thread_.joinable());
+        ASSERT(thread_.joinable(), "Extractor thread must be joinable");
         thread_.join();
     }

@@ -88,16 +93,17 @@ private:
         double totalTime = 0.0;
         auto currentSequence = startSequence_;

-        while (!shouldFinish(currentSequence) && networkValidatedLedgers_->waitUntilValidatedByNetwork(currentSequence))
-        {
-            auto [fetchResponse, time] = ::util::timed<std::chrono::duration<double>>(
-                [this, currentSequence]() { return ledgerFetcher_.get().fetchDataAndDiff(currentSequence); });
+        while (!shouldFinish(currentSequence) && networkValidatedLedgers_->waitUntilValidatedByNetwork(currentSequence)
+        ) {
+            auto [fetchResponse, time] = ::util::timed<std::chrono::duration<double>>([this, currentSequence]() {
+                return ledgerFetcher_.get().fetchDataAndDiff(currentSequence);
+            });
             totalTime += time;

-            // if the fetch is unsuccessful, stop. fetchLedger only returns false if the server is shutting down, or if
-            // the ledger was found in the database (which means another process already wrote the ledger that this
-            // process was trying to extract; this is a form of a write conflict).
-            // Otherwise, fetchDataAndDiff will keep trying to fetch the specified ledger until successful.
+            // if the fetch is unsuccessful, stop. fetchLedger only returns false if the server is shutting down, or
+            // if the ledger was found in the database (which means another process already wrote the ledger that
+            // this process was trying to extract; this is a form of a write conflict). Otherwise, fetchDataAndDiff
+            // will keep trying to fetch the specified ledger until successful.
             if (!fetchResponse)
                 break;

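The extraction loop times each fetchDataAndDiff call and accumulates totalTime. ::util::timed is Clio's helper; an equivalent wrapper over std::chrono, for illustration:

    #include <chrono>
    #include <iostream>
    #include <thread>
    #include <utility>

    // Rough equivalent of Clio's ::util::timed: run `f`, return {result, elapsed}.
    template <typename Duration = std::chrono::duration<double>, typename F>
    auto timed(F&& f)
    {
        auto const start = std::chrono::steady_clock::now();
        auto result = std::forward<F>(f)();
        auto const elapsed = std::chrono::duration_cast<Duration>(std::chrono::steady_clock::now() - start);
        return std::make_pair(std::move(result), elapsed.count());
    }

    int main()
    {
        double totalTime = 0.0;
        auto [value, time] = timed([] {
            std::this_thread::sleep_for(std::chrono::milliseconds(50));
            return 42;  // stand-in for a fetched ledger
        });
        totalTime += time;
        std::cout << "fetched " << value << " in " << time << "s, total " << totalTime << "s\n";
    }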
Some files were not shown because too many files have changed in this diff.