Mirror of https://github.com/XRPLF/clio.git
Synced 2025-11-04 11:55:51 +00:00
Compare commits
65 Commits
SHA1:
1bacad9e49, ca16858878, feae85782c, b016c1d7ba, 0597a9d685, 05bea6a971, fa660ef400, 25d9e3cc36,
58f13e1660, a16b680a7a, 320ebaa5d2, 058df4d12a, 5145d07693, 5e9e5f6f65, 1ce7bcbc28, 243858df12,
b363cc93af, 200d97f0de, 1ec5d3e5a3, e062121917, 1aab2b94b1, 5de87b9ef8, 398db13f4d, 5e8ffb66b4,
939740494b, ff3d2b5600, 7080b4d549, 8d783ecd6a, 5e6682ddc7, fca29694a0, a541e6d00e, 9bd38dd290,
f683b25f76, 91ad1ffc3b, 64b4a908da, ac752c656e, 4fe868aaeb, 59eb40a1f2, 0b5f667e4a, fa42c5c900,
0818b6ce5b, e2cc56d25a, caaa01bf0f, 4b53bef1f5, 69f5025a29, d1c41a8bb7, 207ba51461, ebe7688ccb,
6d9f8a7ead, 6ca777ea96, 963685dd31, e36545058d, 44527140f0, 0eaaa1fb31, 1846f629a5, 83af5af3c6,
418a0ddbf2, 6cfbfda014, 91648f98ad, 71e1637c5f, 59cd2ce5aa, d783edd57a, 1ce8a58167, 92e5c4792b,
d7f36733bc
.clang-format
@@ -1,7 +1,7 @@
---
Language: Cpp
AccessModifierOffset: -4
AlignAfterOpenBracket: AlwaysBreak
AlignAfterOpenBracket: BlockIndent
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
AlignEscapedNewlinesLeft: true
@@ -18,20 +18,8 @@ AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: true
BinPackArguments: false
BinPackParameters: false
BraceWrapping:
  AfterClass: true
  AfterControlStatement: true
  AfterEnum: false
  AfterFunction: true
  AfterNamespace: false
  AfterObjCDeclaration: true
  AfterStruct: true
  AfterUnion: true
  BeforeCatch: true
  BeforeElse: true
  IndentBraces: false
BreakBeforeBinaryOperators: false
BreakBeforeBraces: Custom
BreakBeforeBraces: WebKit
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: true
ColumnLimit: 120
@@ -43,6 +31,7 @@ Cpp11BracedListStyle: true
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
FixNamespaceComments: true
ForEachMacros: [ Q_FOREACH, BOOST_FOREACH ]
IncludeCategories:
  - Regex: '^<(BeastConfig)'
@@ -58,6 +47,8 @@ IndentCaseLabels: true
IndentFunctionDeclarationAfterType: false
IndentWidth: 4
IndentWrappedFunctionNames: false
IndentRequiresClause: true
RequiresClausePosition: OwnLine
KeepEmptyLinesAtTheStartOfBlocks: false
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
@@ -70,6 +61,7 @@ PenaltyBreakString: 1000
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 200
PointerAlignment: Left
QualifierAlignment: Right
ReflowComments: true
SortIncludes: true
SpaceAfterCStyleCast: false
.clang-tidy (new file, 118 lines)
@@ -0,0 +1,118 @@
---
Checks: '-*,
    bugprone-argument-comment,
    bugprone-assert-side-effect,
    bugprone-bad-signal-to-kill-thread,
    bugprone-bool-pointer-implicit-conversion,
    bugprone-copy-constructor-init,
    bugprone-dangling-handle,
    bugprone-dynamic-static-initializers,
    bugprone-fold-init-type,
    bugprone-forward-declaration-namespace,
    bugprone-inaccurate-erase,
    bugprone-incorrect-roundings,
    bugprone-infinite-loop,
    bugprone-integer-division,
    bugprone-lambda-function-name,
    bugprone-macro-parentheses,
    bugprone-macro-repeated-side-effects,
    bugprone-misplaced-operator-in-strlen-in-alloc,
    bugprone-misplaced-pointer-arithmetic-in-alloc,
    bugprone-misplaced-widening-cast,
    bugprone-move-forwarding-reference,
    bugprone-multiple-statement-macro,
    bugprone-no-escape,
    bugprone-parent-virtual-call,
    bugprone-posix-return,
    bugprone-redundant-branch-condition,
    bugprone-shared-ptr-array-mismatch,
    bugprone-signal-handler,
    bugprone-signed-char-misuse,
    bugprone-sizeof-container,
    bugprone-sizeof-expression,
    bugprone-spuriously-wake-up-functions,
    bugprone-standalone-empty,
    bugprone-string-constructor,
    bugprone-string-integer-assignment,
    bugprone-string-literal-with-embedded-nul,
    bugprone-stringview-nullptr,
    bugprone-suspicious-enum-usage,
    bugprone-suspicious-include,
    bugprone-suspicious-memory-comparison,
    bugprone-suspicious-memset-usage,
    bugprone-suspicious-missing-comma,
    bugprone-suspicious-realloc-usage,
    bugprone-suspicious-semicolon,
    bugprone-suspicious-string-compare,
    bugprone-swapped-arguments,
    bugprone-terminating-continue,
    bugprone-throw-keyword-missing,
    bugprone-too-small-loop-variable,
    bugprone-undefined-memory-manipulation,
    bugprone-undelegated-constructor,
    bugprone-unhandled-exception-at-new,
    bugprone-unhandled-self-assignment,
    bugprone-unused-raii,
    bugprone-unused-return-value,
    bugprone-use-after-move,
    bugprone-virtual-near-miss,
    cppcoreguidelines-init-variables,
    cppcoreguidelines-prefer-member-initializer,
    cppcoreguidelines-pro-type-member-init,
    cppcoreguidelines-pro-type-static-cast-downcast,
    cppcoreguidelines-virtual-class-destructor,
    llvm-namespace-comment,
    misc-const-correctness,
    misc-definitions-in-headers,
    misc-misplaced-const,
    misc-redundant-expression,
    misc-static-assert,
    misc-throw-by-value-catch-by-reference,
    misc-unused-alias-decls,
    misc-unused-using-decls,
    modernize-concat-nested-namespaces,
    modernize-deprecated-headers,
    modernize-make-shared,
    modernize-make-unique,
    modernize-pass-by-value,
    modernize-use-emplace,
    modernize-use-equals-default,
    modernize-use-equals-delete,
    modernize-use-override,
    modernize-use-using,
    performance-faster-string-find,
    performance-for-range-copy,
    performance-implicit-conversion-in-loop,
    performance-inefficient-vector-operation,
    performance-move-const-arg,
    performance-move-constructor-init,
    performance-no-automatic-move,
    performance-trivially-destructible,
    readability-avoid-const-params-in-decls,
    readability-braces-around-statements,
    readability-const-return-type,
    readability-container-contains,
    readability-container-size-empty,
    readability-convert-member-functions-to-static,
    readability-duplicate-include,
    readability-else-after-return,
    readability-implicit-bool-conversion,
    readability-inconsistent-declaration-parameter-name,
    readability-make-member-function-const,
    readability-misleading-indentation,
    readability-non-const-parameter,
    readability-redundant-declaration,
    readability-redundant-member-init,
    readability-redundant-string-init,
    readability-simplify-boolean-expr,
    readability-static-accessed-through-instance,
    readability-static-definition-in-anonymous-namespace,
    readability-suspicious-call-argument
'

CheckOptions:
    readability-braces-around-statements.ShortStatementLines: 2

HeaderFilterRegex: '^.*/(src|unittests)/.*\.(h|hpp)$'
WarningsAsErrors: '*'
@@ -4,7 +4,22 @@ exec 1>&2

# paths to check and re-format
sources="src unittests"
formatter="clang-format-11 -i"
formatter="clang-format -i"
version=$($formatter --version | grep -o '[0-9\.]*')

if [[ "16.0.0" > "$version" ]]; then
    cat <<EOF

                                    ERROR
-----------------------------------------------------------------------------
A minimum of version 16 of `clang-format` is required.
Your version is $version.
Please fix paths and run again.
-----------------------------------------------------------------------------

EOF
    exit 2
fi

first=$(git diff $sources)
find $sources -type f \( -name '*.cpp' -o -name '*.h' -o -name '*.ipp' \) -print0 | xargs -0 $formatter
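To make this check run automatically before each commit, the script can be installed as a local git hook. A minimal sketch, assuming the script shown above is saved as `pre-commit` in the repository (its actual path is not captured in this diff):

```sh
# Install the format check as a local pre-commit hook (hypothetical source path).
cp pre-commit .git/hooks/pre-commit
chmod +x .git/hooks/pre-commit
```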
.github/actions/build_clio/action.yml (new file, 37 lines)
@@ -0,0 +1,37 @@
name: Build clio
description: Build clio in build directory
inputs:
  conan_profile:
    description: Conan profile name
    required: true
    default: default
  conan_cache_hit:
    description: Whether conan cache has been downloaded
    required: true
runs:
  using: composite
  steps:
    - name: Get number of threads on mac
      id: mac_threads
      if: ${{ runner.os == 'macOS' }}
      shell: bash
      run: echo "num=$(($(sysctl -n hw.logicalcpu) - 2))" >> $GITHUB_OUTPUT

    - name: Get number of threads on Linux
      id: linux_threads
      if: ${{ runner.os == 'Linux' }}
      shell: bash
      run: echo "num=$(($(nproc) - 2))" >> $GITHUB_OUTPUT

    - name: Build Clio
      shell: bash
      env:
        BUILD_OPTION: "${{ inputs.conan_cache_hit == 'true' && 'missing' || '' }}"
        LINT: "${{ runner.os == 'Linux' && 'True' || 'False' }}"
      run: |
        mkdir -p build
        cd build
        threads_num=${{ steps.mac_threads.outputs.num || steps.linux_threads.outputs.num }}
        conan install .. -of . -b $BUILD_OPTION -s build_type=Release -o clio:tests=True -o clio:lint=$LINT --profile ${{ inputs.conan_profile }}
        cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release .. -G Ninja
        cmake --build . --parallel $threads_num
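For reference, this composite action is consumed from a workflow the same way the updated `build.yml` further down does it; a minimal sketch:

```yaml
# Minimal usage sketch of the composite action defined above.
- name: Build Clio
  uses: ./.github/actions/build_clio
  with:
    conan_profile: default
    # 'true' makes the action pass '-b missing' to conan install (see BUILD_OPTION above)
    conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
```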
.github/actions/clang_format/action.yml (new file, 27 lines)
@@ -0,0 +1,27 @@
name: Check format
description: Check format using clang-format-16
runs:
  using: composite
  steps:
    - name: Add llvm repo
      run: |
        echo 'deb http://apt.llvm.org/focal/ llvm-toolchain-focal-16 main' | sudo tee -a /etc/apt/sources.list
        wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
      shell: bash

    - name: Install packages
      run: |
        sudo apt update -qq
        sudo apt install -y jq clang-format-16
      shell: bash

    - name: Run formatter
      run: |
        find src unittests -type f \( -name '*.cpp' -o -name '*.h' -o -name '*.ipp' \) -print0 | xargs -0 clang-format-16 -i
      shell: bash

    - name: Check for differences
      id: assert
      shell: bash
      run: |
        git diff --color --exit-code | tee "clang-format.patch"
.github/actions/git_common_ancestor/action.yml (new file, 14 lines)
@@ -0,0 +1,14 @@
name: Git common ancestor
description: Find the closest common commit
outputs:
  commit:
    description: Hash of commit
    value: ${{ steps.find_common_ancestor.outputs.commit }}
runs:
  using: composite
  steps:
    - name: Find common git ancestor
      id: find_common_ancestor
      shell: bash
      run: |
        echo "commit=$(git merge-base --fork-point origin/develop)" >> $GITHUB_OUTPUT
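The action is a thin wrapper around `git merge-base`; a minimal sketch of what the step runs, assuming `origin/develop` has been fetched (e.g. via a checkout with `fetch-depth: 0`):

```sh
# Prints the hash of the closest common ancestor between HEAD and origin/develop.
git merge-base --fork-point origin/develop
```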
.github/actions/lint/action.yml (deleted, 13 lines)
@@ -1,13 +0,0 @@
runs:
  using: composite
  steps:
    # Github's ubuntu-20.04 image already has clang-format-11 installed
    - run: |
        find src unittests -type f \( -name '*.cpp' -o -name '*.h' -o -name '*.ipp' \) -print0 | xargs -0 clang-format-11 -i
      shell: bash

    - name: Check for differences
      id: assert
      shell: bash
      run: |
        git diff --color --exit-code | tee "clang-format.patch"
.github/actions/linux_build/build.sh (deleted, 20 lines)
@@ -1,20 +0,0 @@
#!/usr/bin/env bash

conan profile new default --detect
conan profile update settings.compiler.cppstd=20 default
conan profile update settings.compiler.libcxx=libstdc++11 default
conan remote add --insert 0 conan-non-prod http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod

cd rippled
conan export external/snappy snappy/1.1.10@
conan export external/soci soci/4.0.3@
conan export .
conan install --output-folder build_rippled -install-folder build_rippled --build missing --settings build_type=Release
cmake -B build_rippled -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release
cmake --build build_rippled --target xrpl_core --parallel $(($(nproc) - 2))
cd ..

conan export external/cassandra
conan install . -if build_clio -of build_clio --build missing --settings build_type=Release -o tests=True
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release -B build_clio
cmake --build build_clio --parallel $(($(nproc) - 2))
.github/actions/restore_cache/action.yml (new file, 50 lines)
@@ -0,0 +1,50 @@
name: Restore cache
description: Find and restore conan and ccache cache
inputs:
  conan_dir:
    description: Path to .conan directory
    required: true
  ccache_dir:
    description: Path to .ccache directory
    required: true
outputs:
  conan_hash:
    description: Hash to use as a part of conan cache key
    value: ${{ steps.conan_hash.outputs.hash }}
  conan_cache_hit:
    description: True if conan cache has been downloaded
    value: ${{ steps.conan_cache.outputs.cache-hit }}
  ccache_cache_hit:
    description: True if ccache cache has been downloaded
    value: ${{ steps.ccache_cache.outputs.cache-hit }}
runs:
  using: composite
  steps:
    - name: Find common commit
      id: git_common_ancestor
      uses: ./.github/actions/git_common_ancestor

    - name: Calculate conan hash
      id: conan_hash
      shell: bash
      run: |
        conan info . -j info.json -o clio:tests=True
        packages_info=$(cat info.json | jq '.[] | "\(.display_name): \(.id)"' | grep -v 'clio')
        echo "$packages_info"
        hash=$(echo "$packages_info" | shasum -a 256 | cut -d ' ' -f 1)
        rm info.json
        echo "hash=$hash" >> $GITHUB_OUTPUT

    - name: Restore conan cache
      uses: actions/cache/restore@v3
      id: conan_cache
      with:
        path: ${{ inputs.conan_dir }}/data
        key: clio-conan_data-${{ runner.os }}-develop-${{ steps.conan_hash.outputs.hash }}

    - name: Restore ccache cache
      uses: actions/cache/restore@v3
      id: ccache_cache
      with:
        path: ${{ inputs.ccache_dir }}
        key: clio-ccache-${{ runner.os }}-develop-${{ steps.git_common_ancestor.outputs.commit }}
.github/actions/save_cache/action.yml (new file, 46 lines)
@@ -0,0 +1,46 @@
name: Save cache
description: Save conan and ccache cache for develop branch
inputs:
  conan_dir:
    description: Path to .conan directory
    required: true
  conan_hash:
    description: Hash to use as a part of conan cache key
    required: true
  conan_cache_hit:
    description: Whether conan cache has been downloaded
    required: true
  ccache_dir:
    description: Path to .ccache directory
    required: true
  ccache_cache_hit:
    description: Whether ccache cache has been downloaded
    required: true
runs:
  using: composite
  steps:
    - name: Find common commit
      id: git_common_ancestor
      uses: ./.github/actions/git_common_ancestor

    - name: Cleanup conan directory from extra data
      if: ${{ inputs.conan_cache_hit != 'true' }}
      shell: bash
      run: |
        conan remove "*" -s -b -f

    - name: Save conan cache
      if: ${{ inputs.conan_cache_hit != 'true' }}
      uses: actions/cache/save@v3
      with:
        path: ${{ inputs.conan_dir }}/data
        key: clio-conan_data-${{ runner.os }}-develop-${{ inputs.conan_hash }}

    - name: Save ccache cache
      if: ${{ inputs.ccache_cache_hit != 'true' }}
      uses: actions/cache/save@v3
      with:
        path: ${{ inputs.ccache_dir }}
        key: clio-ccache-${{ runner.os }}-develop-${{ steps.git_common_ancestor.outputs.commit }}
.github/actions/setup_conan/action.yml (new file, 55 lines)
@@ -0,0 +1,55 @@
name: Setup conan
description: Setup conan profile and artifactory
outputs:
  conan_profile:
    description: Created conan profile name
    value: ${{ steps.conan_export_output.outputs.conan_profile }}
runs:
  using: composite
  steps:
    - name: On mac
      if: ${{ runner.os == 'macOS' }}
      shell: bash
      env:
        CONAN_PROFILE: clio_clang_14
      id: conan_setup_mac
      run: |
        echo "Creating $CONAN_PROFILE conan profile";
        clang_path="$(brew --prefix llvm@14)/bin/clang"
        clang_cxx_path="$(brew --prefix llvm@14)/bin/clang++"
        conan profile new $CONAN_PROFILE --detect --force
        conan profile update settings.compiler=clang $CONAN_PROFILE
        conan profile update settings.compiler.version=14 $CONAN_PROFILE
        conan profile update settings.compiler.cppstd=20 $CONAN_PROFILE
        conan profile update "conf.tools.build:compiler_executables={\"c\": \"$clang_path\", \"cpp\": \"$clang_cxx_path\"}" $CONAN_PROFILE
        conan profile update env.CC="$clang_path" $CONAN_PROFILE
        conan profile update env.CXX="$clang_cxx_path" $CONAN_PROFILE
        echo "created_conan_profile=$CONAN_PROFILE" >> $GITHUB_OUTPUT

    - name: On linux
      if: ${{ runner.os == 'Linux' }}
      shell: bash
      id: conan_setup_linux
      run: |
        conan profile new default --detect
        conan profile update settings.compiler.cppstd=20 default
        conan profile update settings.compiler.libcxx=libstdc++11 default
        echo "created_conan_profile=default" >> $GITHUB_OUTPUT

    - name: Export output variable
      shell: bash
      id: conan_export_output
      run: |
        echo "conan_profile=${{ steps.conan_setup_mac.outputs.created_conan_profile || steps.conan_setup_linux.outputs.created_conan_profile }}" >> $GITHUB_OUTPUT

    - name: Add conan-non-prod artifactory
      shell: bash
      run: |
        if [[ -z $(conan remote list | grep conan-non-prod) ]]; then
          echo "Adding conan-non-prod"
          conan remote add --insert 0 conan-non-prod http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
        else
          echo "Conan-non-prod is available"
        fi
.github/workflows/build.yml (150 changed lines)
@@ -13,103 +13,147 @@ jobs:
    steps:
      - uses: actions/checkout@v3
      - name: Run clang-format
        uses: ./.github/actions/lint
        uses: ./.github/actions/clang_format

  build_mac:
    name: Build macOS
    needs: lint
    continue-on-error: true
    runs-on: [self-hosted, macOS]
    env:
      CCACHE_DIR: ${{ github.workspace }}/.ccache
      CONAN_USER_HOME: ${{ github.workspace }}
    steps:
      - uses: actions/checkout@v3
        with:
          path: clio
          fetch-depth: 0

      - name: List conan artifactory
      - name: Install packages
        run: |
          conan search
          conan remote list
          if [[ $(conan remote list |grep conan-non-prod| wc -c) -ne 0 ]]; then
            echo "conan-non-prod is available"
          else
            echo "adding conan-non-prod"
            conan remote add conan-non-prod http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
          fi
          brew install llvm@14 pkg-config ninja bison cmake ccache jq

      - name: Install dependencies
        run: |
          brew install llvm@14 pkg-config ninja bison cmake
      - name: Setup conan
        uses: ./.github/actions/setup_conan
        id: conan

      - name: Setup environment for llvm-14
        run: |
          export PATH="/usr/local/opt/llvm@14/bin:$PATH"
          export LDFLAGS="-L/usr/local/opt/llvm@14/lib -L/usr/local/opt/llvm@14/lib/c++ -Wl,-rpath,/usr/local/opt/llvm@14/lib/c++"
          export CPPFLAGS="-I/usr/local/opt/llvm@14/include"
      - name: Restore cache
        uses: ./.github/actions/restore_cache
        id: restore_cache
        with:
          conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
          ccache_dir: ${{ env.CCACHE_DIR }}

      - name: Build Clio
        run: |
          cd clio
          mkdir -p build
          cd build
          conan install .. -of . -b missing -s build_type=Release -o clio:tests=True
          cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release ..
          cmake --build . --parallel $(($(sysctl -n hw.logicalcpu) - 2))
        uses: ./.github/actions/build_clio
        with:
          conan_profile: ${{ steps.conan.outputs.conan_profile }}
          conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}

      - name: Strip tests
        run: strip build/clio_tests

      - name: Upload clio_tests
        uses: actions/upload-artifact@v3
        with:
          name: clio_tests_mac
          path: ./clio/build/clio_tests
          path: build/clio_tests

      - name: Save cache
        uses: ./.github/actions/save_cache
        with:
          conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
          conan_hash: ${{ steps.restore_cache.outputs.conan_hash }}
          conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
          ccache_dir: ${{ env.CCACHE_DIR }}
          ccache_cache_hit: ${{ steps.restore_cache.outputs.ccache_cache_hit }}

  build_linux:
    name: Build linux
    needs: lint
    continue-on-error: true
    runs-on: [self-hosted, Linux]
    container:
      image: conanio/gcc11:1.60.2
      image: conanio/gcc11:1.61.0
      options: --user root
    env:
      CCACHE_DIR: /root/.ccache
      CONAN_USER_HOME: /root/
    steps:
      - name: Get Clio
        uses: actions/checkout@v3

      - name: Get rippled
        uses: actions/checkout@v3
        with:
          repository: thejohnfreeman/rippled
          ref: clio
          path: rippled
          fetch-depth: 0

      - name: Add llvm repo
        run: |
          echo 'deb http://apt.llvm.org/focal/ llvm-toolchain-focal-16 main' >> /etc/apt/sources.list
          wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add -

      - name: Install packages
        run: |
          apt update -qq
          apt install -y jq clang-tidy-16

      - name: Install ccache
        run: |
          wget https://github.com/ccache/ccache/releases/download/v4.8.3/ccache-4.8.3-linux-x86_64.tar.xz
          tar xf ./ccache-4.8.3-linux-x86_64.tar.xz
          mv ./ccache-4.8.3-linux-x86_64/ccache /usr/bin/ccache

      - name: Fix git permissions
        run: git config --global --add safe.directory $PWD

      - name: Setup conan
        uses: ./.github/actions/setup_conan

      - name: Restore cache
        uses: ./.github/actions/restore_cache
        id: restore_cache
        with:
          conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
          ccache_dir: ${{ env.CCACHE_DIR }}

      - name: Build Clio
        run: |
          ./.github/actions/linux_build/build.sh
        uses: ./.github/actions/build_clio
        with:
          conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}

      - name: Strip tests
        run: strip build/clio_tests

      - name: Upload clio_tests
        uses: actions/upload-artifact@v3
        with:
          name: clio_tests_linux
          path: ./build_clio/clio_tests
          path: build/clio_tests

      - name: Save cache
        uses: ./.github/actions/save_cache
        with:
          conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
          conan_hash: ${{ steps.restore_cache.outputs.conan_hash }}
          conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
          ccache_dir: ${{ env.CCACHE_DIR }}
          ccache_cache_hit: ${{ steps.restore_cache.outputs.ccache_cache_hit }}

  test_mac:
    needs: build_mac
    runs-on: [self-hosted, macOS]
    steps:
      - uses: actions/download-artifact@v3
        with:
          name: clio_tests_mac
      - name: Run clio_tests
        run: |
          chmod +x ./clio_tests
          ./clio_tests --gtest_filter="-BackendCassandraBaseTest*:BackendCassandraTest*:BackendCassandraFactoryTestWithDB*"

  test_linux:
    needs: build_linux
    runs-on: [self-hosted, x-heavy]
    steps:
      - uses: actions/download-artifact@v3
        with:
          name: clio_tests_linux
      - name: Run clio_tests
        run: |
          chmod +x ./clio_tests
          ./clio_tests --gtest_filter="-BackendCassandraBaseTest*:BackendCassandraTest*:BackendCassandraFactoryTestWithDB*"
.gitignore (3 changed lines)
@@ -1,6 +1,7 @@
*clio*.log
build*/
/build*/
.build
.cache
.vscode
.python-version
CMakeUserPresets.json
CMake/Ccache.cmake (new file, 5 lines)
@@ -0,0 +1,5 @@
find_program (CCACHE_PATH "ccache")
if (CCACHE_PATH)
  set (CMAKE_CXX_COMPILER_LAUNCHER "${CCACHE_PATH}")
  message (STATUS "Using ccache: ${CCACHE_PATH}")
endif ()
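With this module included, C++ compiles are transparently routed through ccache whenever the binary is found. A quick way to verify the cache is actually being used (a sketch using standard ccache flags):

```sh
ccache --zero-stats                           # reset statistics
cmake -B build -G Ninja && cmake --build build
ccache --show-stats                           # hits should be non-zero on a rebuild
```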
CMake/ClangTidy.cmake (new file, 31 lines)
@@ -0,0 +1,31 @@
if (lint)

  # Find clang-tidy binary
  if (DEFINED ENV{CLIO_CLANG_TIDY_BIN})
    set (_CLANG_TIDY_BIN $ENV{CLIO_CLANG_TIDY_BIN})
    if ((NOT EXISTS ${_CLANG_TIDY_BIN}) OR IS_DIRECTORY ${_CLANG_TIDY_BIN})
      message (FATAL_ERROR "$ENV{CLIO_CLANG_TIDY_BIN} no such file. Check CLIO_CLANG_TIDY_BIN env variable")
    endif ()
    message (STATUS "Using clang-tidy from CLIO_CLANG_TIDY_BIN")
  else ()
    find_program (_CLANG_TIDY_BIN NAMES "clang-tidy-16" "clang-tidy" REQUIRED)
  endif ()

  if (NOT _CLANG_TIDY_BIN)
    message (FATAL_ERROR
      "clang-tidy binary not found. Please set the CLIO_CLANG_TIDY_BIN environment variable or install clang-tidy.")
  endif ()

  # Support for https://github.com/matus-chochlik/ctcache
  find_program (CLANG_TIDY_CACHE_PATH NAMES "clang-tidy-cache")
  if (CLANG_TIDY_CACHE_PATH)
    set (_CLANG_TIDY_CMD
      "${CLANG_TIDY_CACHE_PATH};${_CLANG_TIDY_BIN}"
      CACHE STRING "A combined command to run clang-tidy with caching wrapper")
  else ()
    set (_CLANG_TIDY_CMD "${_CLANG_TIDY_BIN}")
  endif ()

  set (CMAKE_CXX_CLANG_TIDY "${_CLANG_TIDY_CMD};--quiet")
  message (STATUS "Using clang-tidy: ${CMAKE_CXX_CLANG_TIDY}")
endif ()
@@ -5,21 +5,27 @@
find_package (Git REQUIRED)

set (GIT_COMMAND rev-parse --short HEAD)
execute_process (COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} OUTPUT_VARIABLE REV OUTPUT_STRIP_TRAILING_WHITESPACE)
execute_process (COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND}
                 WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
                 OUTPUT_VARIABLE REV OUTPUT_STRIP_TRAILING_WHITESPACE)

set (GIT_COMMAND branch --show-current)
execute_process (COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} OUTPUT_VARIABLE BRANCH OUTPUT_STRIP_TRAILING_WHITESPACE)
execute_process (COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND}
                 WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
                 OUTPUT_VARIABLE BRANCH OUTPUT_STRIP_TRAILING_WHITESPACE)

if (BRANCH STREQUAL "")
  set (BRANCH "dev")
endif ()

if (NOT (BRANCH MATCHES master OR BRANCH MATCHES release/*)) # for develop and any other branch name YYYYMMDDHMS-<branch>-<git-ref>
if (NOT (BRANCH MATCHES master OR BRANCH MATCHES release/*)) # for develop and any other branch name YYYYMMDDHMS-<branch>-<git-rev>
  execute_process (COMMAND date +%Y%m%d%H%M%S OUTPUT_VARIABLE DATE OUTPUT_STRIP_TRAILING_WHITESPACE)
  set (VERSION "${DATE}-${BRANCH}-${REV}")
else ()
  set (GIT_COMMAND describe --tags)
  execute_process (COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} OUTPUT_VARIABLE TAG_VERSION OUTPUT_STRIP_TRAILING_WHITESPACE)
  execute_process (COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND}
                   WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
                   OUTPUT_VARIABLE TAG_VERSION OUTPUT_STRIP_TRAILING_WHITESPACE)
  set (VERSION "${TAG_VERSION}-${REV}")
endif ()
@@ -1,7 +1,45 @@
target_compile_options (clio PUBLIC
set(COMPILER_FLAGS
  -Wall
  -Wcast-align
  -Wdouble-promotion
  -Wextra
  -Werror
  -Wformat=2
  -Wimplicit-fallthrough
  -Wmisleading-indentation
  -Wno-narrowing
  -Wno-deprecated-declarations
  -Wno-dangling-else
  -Wno-unused-but-set-variable)
  -Wno-unused-but-set-variable
  -Wnon-virtual-dtor
  -Wnull-dereference
  -Wold-style-cast
  -pedantic
  -Wpedantic
  -Wunused
)

if (is_gcc AND NOT lint)
  list(APPEND COMPILER_FLAGS
    -Wduplicated-branches
    -Wduplicated-cond
    -Wlogical-op
    -Wuseless-cast
  )
endif ()

if (is_clang)
  list(APPEND COMPILER_FLAGS
    -Wshadow # gcc is too aggressive with shadowing https://gcc.gnu.org/bugzilla/show_bug.cgi?id=78147
  )
endif ()

if (is_appleclang)
  list(APPEND COMPILER_FLAGS
    -Wreorder-init-list
  )
endif ()

# See https://github.com/cpp-best-practices/cppbestpractices/blob/master/02-Use_the_Tools_Available.md#gcc--clang for the flags description

target_compile_options (clio PUBLIC ${COMPILER_FLAGS})
CMakeLists.txt
@@ -4,18 +4,22 @@ project(clio)
# ========================================================================== #
# Options #
# ========================================================================== #
option (verbose "Verbose build" FALSE)
option (tests "Build tests" FALSE)
option (docs "Generate doxygen docs" FALSE)
option (coverage "Build test coverage report" FALSE)
option (packaging "Create distribution packages" FALSE)
option (lint "Run clang-tidy checks during compilation" FALSE)
# ========================================================================== #
set (san "" CACHE STRING "Add sanitizer instrumentation")
set (CMAKE_EXPORT_COMPILE_COMMANDS TRUE)
set_property (CACHE san PROPERTY STRINGS ";undefined;memory;address;thread")
# ========================================================================== #

# Include required modules
include (CMake/Ccache.cmake)
include (CheckCXXCompilerFlag)
include (CMake/ClangTidy.cmake)

if (verbose)
  set (CMAKE_VERBOSE_MAKEFILE TRUE)
@@ -70,6 +74,7 @@ target_sources (clio PRIVATE
  ## Main
  src/main/impl/Build.cpp
  ## Backend
  src/data/BackendCounters.cpp
  src/data/BackendInterface.cpp
  src/data/LedgerCache.cpp
  src/data/cassandra/impl/Future.cpp
@@ -85,11 +90,13 @@ target_sources (clio PRIVATE
  src/etl/ProbingSource.cpp
  src/etl/NFTHelpers.cpp
  src/etl/ETLService.cpp
  src/etl/ETLState.cpp
  src/etl/LoadBalancer.cpp
  src/etl/impl/ForwardCache.cpp
  ## Feed
  src/feed/SubscriptionManager.cpp
  ## Web
  src/web/impl/AdminVerificationStrategy.cpp
  src/web/IntervalSweepHandler.cpp
  ## RPC
  src/rpc/Errors.cpp
@@ -119,6 +126,7 @@ target_sources (clio PRIVATE
  src/rpc/handlers/LedgerData.cpp
  src/rpc/handlers/LedgerEntry.cpp
  src/rpc/handlers/LedgerRange.cpp
  src/rpc/handlers/NFTsByIssuer.cpp
  src/rpc/handlers/NFTBuyOffers.cpp
  src/rpc/handlers/NFTHistory.cpp
  src/rpc/handlers/NFTInfo.cpp
@@ -127,15 +135,23 @@ target_sources (clio PRIVATE
  src/rpc/handlers/NoRippleCheck.cpp
  src/rpc/handlers/Random.cpp
  src/rpc/handlers/TransactionEntry.cpp
  src/rpc/handlers/Tx.cpp
  ## Util
  src/util/config/Config.cpp
  src/util/log/Logger.cpp
  src/util/prometheus/Http.cpp
  src/util/prometheus/Label.cpp
  src/util/prometheus/Metrics.cpp
  src/util/prometheus/Prometheus.cpp
  src/util/Random.cpp
  src/util/Taggable.cpp)

# Clio server
add_executable (clio_server src/main/Main.cpp)
target_link_libraries (clio_server PUBLIC clio)
target_link_libraries (clio_server PRIVATE clio)
target_link_options(clio_server
  PRIVATE
    $<$<AND:$<NOT:$<BOOL:${APPLE}>>,$<NOT:$<BOOL:${san}>>>:-static-libstdc++ -static-libgcc>
)

# Unittesting
if (tests)
@@ -153,21 +169,29 @@ if (tests)
  unittests/SubscriptionManagerTests.cpp
  unittests/util/TestObject.cpp
  unittests/util/StringUtils.cpp
  unittests/util/prometheus/CounterTests.cpp
  unittests/util/prometheus/GaugeTests.cpp
  unittests/util/prometheus/HttpTests.cpp
  unittests/util/prometheus/LabelTests.cpp
  unittests/util/prometheus/MetricsTests.cpp
  # ETL
  unittests/etl/ExtractionDataPipeTests.cpp
  unittests/etl/ExtractorTests.cpp
  unittests/etl/TransformerTests.cpp
  unittests/etl/CacheLoaderTests.cpp
  unittests/etl/AmendmentBlockHandlerTests.cpp
  unittests/etl/LedgerPublisherTests.cpp
  unittests/etl/ETLStateTests.cpp
  # RPC
  unittests/rpc/ErrorTests.cpp
  unittests/rpc/BaseTests.cpp
  unittests/rpc/RPCHelpersTests.cpp
  unittests/rpc/CountersTests.cpp
  unittests/rpc/AdminVerificationTests.cpp
  unittests/rpc/APIVersionTests.cpp
  unittests/rpc/ForwardingProxyTests.cpp
  unittests/rpc/WorkQueueTests.cpp
  unittests/rpc/AmendmentsTests.cpp
  unittests/rpc/JsonBoolTests.cpp
  ## RPC handlers
  unittests/rpc/handlers/DefaultProcessorTests.cpp
  unittests/rpc/handlers/TestHandlerTests.cpp
@@ -191,6 +215,7 @@ if (tests)
  unittests/rpc/handlers/RandomTests.cpp
  unittests/rpc/handlers/NFTInfoTests.cpp
  unittests/rpc/handlers/NFTBuyOffersTests.cpp
  unittests/rpc/handlers/NFTsByIssuerTest.cpp
  unittests/rpc/handlers/NFTSellOffersTests.cpp
  unittests/rpc/handlers/NFTHistoryTests.cpp
  unittests/rpc/handlers/SubscribeTests.cpp
@@ -202,6 +227,7 @@ if (tests)
  unittests/rpc/handlers/VersionHandlerTests.cpp
  # Backend
  unittests/data/BackendFactoryTests.cpp
  unittests/data/BackendCountersTests.cpp
  unittests/data/cassandra/BaseTests.cpp
  unittests/data/cassandra/BackendTests.cpp
  unittests/data/cassandra/RetryPolicyTests.cpp
@@ -209,6 +235,7 @@ if (tests)
  unittests/data/cassandra/ExecutionStrategyTests.cpp
  unittests/data/cassandra/AsyncExecutorTests.cpp
  # Webserver
  unittests/web/AdminVerificationTests.cpp
  unittests/web/ServerTests.cpp
  unittests/web/RPCServerHandlerTests.cpp
  unittests/web/WhitelistHandlerTests.cpp
@@ -219,7 +246,7 @@ if (tests)
  # See https://github.com/google/googletest/issues/3475
  gtest_discover_tests (clio_tests DISCOVERY_TIMEOUT 10)

  # Fix for dwarf5 bug on ci
  target_compile_options (clio PUBLIC -gdwarf-4)

  target_compile_definitions (${TEST_TARGET} PUBLIC UNITTEST_BUILD)
@@ -229,6 +256,7 @@ if (tests)
  # Generate `clio_tests-ccov` if coverage is enabled
  # Note: use `make clio_tests-ccov` to generate report
  if (coverage)
    target_compile_definitions(${TEST_TARGET} PRIVATE COVERAGE_ENABLED)
    include (CMake/Coverage.cmake)
    add_coverage (${TEST_TARGET})
  endif ()
CONTRIBUTING.md
@@ -91,7 +91,7 @@ The button for that is near the bottom of the PR's page on GitHub.
This is a non-exhaustive list of recommended style guidelines. These are not always strictly enforced and serve as a way to keep the codebase coherent.

## Formatting
Code must conform to `clang-format` version 10, unless the result would be unreasonably difficult to read or maintain.
Code must conform to `clang-format` version 16, unless the result would be unreasonably difficult to read or maintain.
To change your code to conform use `clang-format -i <your changed files>`.

## Avoid
README.md (49 changed lines)
@@ -15,6 +15,10 @@ To access non-validated data for *any* request, simply add `ledger_index: "curre
Clio does not connect to the peer-to-peer network. Instead, Clio extracts data from a group of specified rippled nodes. Running Clio requires access to at least one rippled node
from which data can be extracted. The rippled node does not need to be running on the same machine as Clio.

## Help
Feel free to open an [issue](https://github.com/XRPLF/clio/issues) if you have a feature request or something doesn't work as expected.
If you have any questions about building, running, contributing to, or using clio, you can always start a new [discussion](https://github.com/XRPLF/clio/discussions).

## Requirements
1. Access to a Cassandra cluster or ScyllaDB cluster. Can be local or remote.
2. Access to one or more rippled nodes. Can be local or remote.
@@ -80,7 +84,6 @@ Now you should be able to download prebuilt `xrpl` package on some platforms.
2. Remove old packages you may have cached:
    ```sh
    conan remove -f xrpl
    conan remove -f cassandra-cpp-driver
    ```

## Building Clio
@@ -224,19 +227,55 @@ a database in each region, and the Clio nodes in each region use their region's
This is effectively two systems.

Clio supports API versioning as [described here](https://xrpl.org/request-formatting.html#api-versioning).
It's possible to configure `minimum`, `maximum` and `default` version like so:
```json
"api_version": {
    "min": 1,
    "max": 2,
    "default": 2
    "default": 1
}
```
All of the above are optional.
Clio will fall back to hardcoded defaults when they are not specified in the config file, or when the configured values are outside
of the minimum and maximum supported versions hardcoded in `src/rpc/common/APIVersion.h`.
> **Note:** See `example-config.json` for more details.

## Admin rights for requests

By default clio checks admin privileges by the IP address of the request (only `127.0.0.1` is considered to be an admin).
This is not very secure because the IP could be spoofed.
For better security, an `admin_password` can be provided in the `server` section of clio's config:
```json
"server": {
    "admin_password": "secret"
}
```
If the password is present in the config, clio will check the Authorization header (if any) in each request for the password.
The Authorization header should contain type `Password` and the password from the config, e.g. `Password secret`.
Only an exactly matching password grants admin rights for the request or a websocket connection.
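For illustration, an admin request could then look like this; a minimal sketch assuming Clio listens on the port from the example config (51233) and that `admin_password` is set to `secret` as above:

```sh
# Send an admin-authorized JSON-RPC request to Clio (hypothetical local setup).
curl -s \
  -H 'Authorization: Password secret' \
  -d '{"method": "server_info", "params": [{}]}' \
  http://localhost:51233/
```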

## Prometheus metrics collection

Clio natively supports Prometheus metrics collection. It accepts Prometheus requests on the port configured in the `server` section of the config.
Prometheus metrics are enabled by default. To disable them, add `"prometheus_enabled": false` to the config.
It is important to know that clio responds to Prometheus requests only if they are admin requests, so Prometheus should be configured to send the admin password in a header.
There is an example of a docker-compose file, Prometheus and Grafana configs in [examples/infrastructure](examples/infrastructure).
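A quick manual check of the metrics endpoint could look like this; a sketch assuming the standard Prometheus `/metrics` path, the example port 51233, and the admin password configured as described in the previous section:

```sh
curl -H 'Authorization: Password secret' http://localhost:51233/metrics
```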

## Using clang-tidy for static analysis

The minimum clang-tidy version required is 16.0.
Clang-tidy can be run by cmake during the build.
To do that, provide the option `-o lint=True` to the `conan install` command:
```sh
conan install .. --output-folder . --build missing --settings build_type=Release -o tests=True -o lint=True
```
By default cmake will try to find clang-tidy automatically on your system.
To force cmake to use a specific binary, set the `CLIO_CLANG_TIDY_BIN` environment variable to the path of the clang-tidy binary.
E.g.:
```sh
export CLIO_CLANG_TIDY_BIN=/opt/homebrew/opt/llvm@16/bin/clang-tidy
```

## Developing against `rippled` in standalone mode

If you wish to develop against a `rippled` instance running in standalone
conanfile.py (12 changed lines)
@@ -16,15 +16,17 @@ class Clio(ConanFile):
        'docs': [True, False],  # doxygen API docs; create custom target 'docs'
        'packaging': [True, False],  # create distribution packages
        'coverage': [True, False],  # build for test coverage report; create custom target `clio_tests-ccov`
        'lint': [True, False],  # run clang-tidy checks during compilation
    }

    requires = [
        'boost/1.82.0',
        'cassandra-cpp-driver/2.16.2',
        'fmt/10.0.0',
        'cassandra-cpp-driver/2.17.0',
        'fmt/10.1.1',
        'protobuf/3.21.12',
        'grpc/1.50.1',
        'openssl/1.1.1u',
        'xrpl/1.12.0-b2',
        'xrpl/2.0.0-rc1',
    ]

    default_options = {
@@ -33,6 +35,7 @@ class Clio(ConanFile):
        'tests': False,
        'packaging': False,
        'coverage': False,
        'lint': False,
        'docs': False,

        'xrpl/*:tests': False,
@@ -55,7 +58,7 @@ class Clio(ConanFile):

    def requirements(self):
        if self.options.tests:
            self.requires('gtest/1.13.0')
            self.requires('gtest/1.14.0')

    def configure(self):
        if self.settings.compiler == 'apple-clang':
@@ -73,6 +76,7 @@ class Clio(ConanFile):
        tc.variables['verbose'] = self.options.verbose
        tc.variables['tests'] = self.options.tests
        tc.variables['coverage'] = self.options.coverage
        tc.variables['lint'] = self.options.lint
        tc.variables['docs'] = self.options.docs
        tc.variables['packaging'] = self.options.packaging
        tc.generate()
example-config.json
@@ -16,25 +16,17 @@
            //
            // Advanced options. USE AT OWN RISK:
            // ---
            "max_connections_per_host": 1, // Defaults to 2
            "core_connections_per_host": 1, // Defaults to 2
            "max_concurrent_requests_threshold": 55000 // Defaults to ((max_read + max_write) / core_connections_per_host)
            "core_connections_per_host": 1 // Defaults to 1
            //
            // Below options will use defaults from cassandra driver if left unspecified.
            // See https://docs.datastax.com/en/developer/cpp-driver/2.0/api/struct.CassCluster/ for details.
            // See https://docs.datastax.com/en/developer/cpp-driver/2.17/api/struct.CassCluster/ for details.
            //
            // "queue_size_event": 1,
            // "queue_size_io": 2,
            // "write_bytes_high_water_mark": 3,
            // "write_bytes_low_water_mark": 4,
            // "pending_requests_high_water_mark": 5,
            // "pending_requests_low_water_mark": 6,
            // "max_requests_per_flush": 7,
            // "max_concurrent_creation": 8
            // "queue_size_io": 2
            //
            // ---
        }
    },
    "allow_no_etl": false, // Allow Clio to run without valid ETL source, otherwise Clio will stop if ETL check fails
    "etl_sources": [
        {
            "ip": "127.0.0.1",
@@ -73,7 +65,13 @@
        "port": 51233,
        // Max number of requests to queue up before rejecting further requests.
        // Defaults to 0, which disables the limit.
        "max_queue_size": 500
        "max_queue_size": 500,
        // If the request contains an authorization header, Clio will check if it matches the prefix 'Password ' + this value's sha256 hash
        // If it matches, the request will be considered an admin request
        "admin_password": "xrp",
        // If local_admin is true, Clio will consider requests coming from 127.0.0.1 as admin requests
        // It's true by default unless admin_password is set; 'local_admin': true and 'admin_password' cannot be set at the same time
        "local_admin": false
    },
    // Overrides log level on a per logging channel.
    // Defaults to global "log_level" for each unspecified channel.
@@ -103,6 +101,7 @@
            "log_level": "trace"
        }
    ],
    "prometheus_enabled": true,
    "log_level": "info",
    // Log format (this is the default format)
    "log_format": "%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% %Message%",
@@ -120,8 +119,8 @@
    // "ssl_cert_file" : "/full/path/to/cert.file",
    // "ssl_key_file" : "/full/path/to/key.file"
    "api_version": {
        "min": 2,
        "max": 2,
        "default": 2 // Clio only supports API v2 and newer
        "min": 1, // Minimum API version supported (could be 1 or 2)
        "max": 2, // Maximum API version supported (could be 1 or 2, but >= min)
        "default": 1 // Clio behaves the same as rippled by default
    }
}
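The sha256 hash referenced in the `admin_password` comment above can be computed with the same recipe that `examples/infrastructure/prometheus.yaml` documents further down:

```sh
# sha256 of the example password 'xrp' from the config above
echo -n 'xrp' | shasum -a 256
# 0e1dcf1ff020cceabf8f4a60a32e814b5b46ee0bb8cd4af5c814e4071bd86a18  -
```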
examples/infrastructure/README.md (new file, 25 lines)
@@ -0,0 +1,25 @@
# Example of clio monitoring infrastructure

This directory contains an example of docker based infrastructure to collect and visualise metrics from clio.

The structure of the directory:
- `compose.yaml`
  Docker-compose file with Prometheus and Grafana set up.
- `prometheus.yaml`
  Defines metrics collection from Clio and Prometheus itself.
  Demonstrates how to set up the Clio target and Clio's admin authorisation in Prometheus.
- `grafana/clio_dashboard.json`
  JSON file containing a preconfigured dashboard in Grafana format.
- `grafana/dashboard_local.yaml`
  Grafana configuration file defining the directory to search for dashboard JSON files.
- `grafana/datasources.yaml`
  Grafana configuration file defining Prometheus as a data source for Grafana.

## How to try

1. Make sure you have `docker` and `docker-compose` installed.
2. Run `docker-compose up -d` from this directory. It will start docker containers with Prometheus and Grafana.
3. Open [http://localhost:3000/dashboards](http://localhost:3000/dashboards). Grafana login `admin`, password `grafana`.
   There will be a preconfigured Clio dashboard.

If Clio is not running yet, launch it to see metrics. Some of the metrics may appear only after requests to Clio.
examples/infrastructure/compose.yaml (new file, 20 lines)
@@ -0,0 +1,20 @@
services:
  prometheus:
    image: prom/prometheus
    ports:
      - 9090:9090
    volumes:
      - ./prometheus.yaml:/etc/prometheus/prometheus.yml
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
  grafana:
    image: grafana/grafana
    ports:
      - 3000:3000
    environment:
      - GF_SECURITY_ADMIN_USER=admin
      - GF_SECURITY_ADMIN_PASSWORD=grafana
    volumes:
      - ./grafana/datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml
      - ./grafana/dashboard_local.yaml:/etc/grafana/provisioning/dashboards/local.yaml
      - ./grafana/clio_dashboard.json:/var/lib/grafana/dashboards/clio_dashboard.json
examples/infrastructure/grafana/clio_dashboard.json (new file, 1102 lines): diff suppressed because it is too large.
examples/infrastructure/grafana/dashboard_local.yaml (new file, 23 lines)
@@ -0,0 +1,23 @@
apiVersion: 1

providers:
  - name: 'Clio dashboard'
    # <int> Org id. Default to 1
    orgId: 1
    # <string> name of the dashboard folder.
    folder: ''
    # <string> folder UID. will be automatically generated if not specified
    folderUid: ''
    # <string> provider type. Default to 'file'
    type: file
    # <bool> disable dashboard deletion
    disableDeletion: false
    # <int> how often Grafana will scan for changed dashboards
    updateIntervalSeconds: 10
    # <bool> allow updating provisioned dashboards from the UI
    allowUiUpdates: false
    options:
      # <string, required> path to dashboard files on disk. Required when using the 'file' type
      path: /var/lib/grafana/dashboards
      # <bool> use folder names from filesystem to create folders in Grafana
      foldersFromFilesStructure: true
examples/infrastructure/grafana/datasources.yaml (new file, 8 lines)
@@ -0,0 +1,8 @@
apiVersion: 1

datasources:
  - name: Prometheus
    type: prometheus
    url: http://prometheus:9090
    isDefault: true
    access: proxy
examples/infrastructure/prometheus.yaml (new file, 19 lines)
@@ -0,0 +1,19 @@
scrape_configs:
  - job_name: clio
    scrape_interval: 5s
    scrape_timeout: 5s
    authorization:
      type: Password
      # sha256sum from password `xrp`
      # use echo -n 'your_password' | shasum -a 256 to get hash
      credentials: 0e1dcf1ff020cceabf8f4a60a32e814b5b46ee0bb8cd4af5c814e4071bd86a18
    static_configs:
      - targets:
          - host.docker.internal:51233
  - job_name: prometheus
    honor_timestamps: true
    scrape_interval: 15s
    scrape_timeout: 10s
    static_configs:
      - targets:
          - localhost:9090
external/cassandra/conanfile.py (deleted, 87 lines)
@@ -1,87 +0,0 @@
from conan import ConanFile, tools
from conan.tools.cmake import CMake, CMakeToolchain

class Cassandra(ConanFile):
    name = 'cassandra-cpp-driver'
    version = '2.16.2'
    license = 'Apache-2.0'
    url = 'https://github.com/conan-io/conan-center-index'
    homepage = 'https://docs.datastax.com/en/developer/cpp-driver/'
    description = 'Cassandra C++ Driver'
    topics = ('conan', 'cassandra', 'driver')

    settings = 'os', 'arch', 'compiler', 'build_type'
    options = {
        'shared': [True, False],
        'fPIC': [True, False],
        'install_header_in_subdir': [True, False],
        'use_atomic': [None, 'boost', 'std'],
        'with_openssl': [True, False],
        'with_zlib': [True, False],
        'with_kerberos': [True, False],
        'use_timerfd': [True, False],
    }
    default_options = {
        'shared': False,
        'fPIC': True,
        'install_header_in_subdir': False,
        'use_atomic': None,
        'with_openssl': True,
        'with_zlib': True,
        'with_kerberos': False,
        'use_timerfd': True,
    }

    def requirements(self):
        self.requires('libuv/1.44.1')
        self.requires('http_parser/2.9.4')
        if self.options.with_openssl:
            self.requires('openssl/1.1.1q')
        if self.options.with_zlib:
            self.requires('minizip/1.2.12')
            self.requires('zlib/1.2.13')
        if self.options.use_atomic == 'boost':
            self.requires('boost/1.79.0')

    exports_sources = ['CMakeLists.txt']

    def config_options(self):
        if self.settings.os == 'Windows':
            del self.options.fPIC

    def configure(self):
        self.options['libuv'].shared = self.options.shared

    def generate(self):
        tc = CMakeToolchain(self)
        if self.settings.get_safe('compiler.cppstd') == '20':
            tc.blocks['cppstd'].values['cppstd'] = 17
        tc.variables['CASS_BUILD_STATIC'] = not self.options.shared
        tc.variables['CASS_USE_STATIC_LIBS'] = not self.options.shared
        tc.variables['CASS_BUILD_SHARED'] = self.options.shared
        tc.variables['LIBUV_ROOT_DIR'] = self.deps_cpp_info['libuv'].rootpath
        if self.options.with_openssl:
            tc.variables['OPENSSL_ROOT_DIR'] = self.deps_cpp_info['openssl'].rootpath
        tc.generate()

    def source(self):
        tools.files.get(self, f'https://github.com/datastax/cpp-driver/archive/refs/tags/{self.version}.tar.gz', strip_root=True)

    def build(self):
        cmake = CMake(self)
        cmake.configure()
        cmake.build()

    def package(self):
        cmake = CMake(self)
        cmake.install()

    def package_info(self):
        if self.options.shared:
            self.cpp_info.libs = ['cassandra']
        else:
            self.cpp_info.libs = ['cassandra_static']
        if self.settings.os == 'Windows':
            self.cpp_info.libs.extend(['iphlpapi', 'psapi', 'wsock32', 'crypt32', 'ws2_32', 'userenv'])
        if not self.options.shared:
            self.cpp_info.defines = ['CASS_STATIC']
193
src/data/BackendCounters.cpp
Normal file
193
src/data/BackendCounters.cpp
Normal file
@@ -0,0 +1,193 @@
|
||||
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <data/BackendCounters.h>

#include <util/prometheus/Prometheus.h>

namespace data {

using namespace util::prometheus;

BackendCounters::BackendCounters()
    : tooBusyCounter_(PrometheusService::counterInt(
          "backend_too_busy_total_number",
          Labels(),
          "The total number of times the backend was too busy to process a request"
      ))
    , writeSyncCounter_(PrometheusService::counterInt(
          "backend_operations_total_number",
          Labels({Label{"operation", "write_sync"}}),
          "The total number of times the backend had to write synchronously"
      ))
    , writeSyncRetryCounter_(PrometheusService::counterInt(
          "backend_operations_total_number",
          Labels({Label{"operation", "write_sync_retry"}}),
          "The total number of times the backend had to retry a synchronous write"
      ))
    , asyncWriteCounters_{"write_async"}
    , asyncReadCounters_{"read_async"}
{
}

BackendCounters::PtrType
BackendCounters::make()
{
    struct EnableMakeShared : public BackendCounters {};
    return std::make_shared<EnableMakeShared>();
}

void
BackendCounters::registerTooBusy()
{
    ++tooBusyCounter_.get();
}

void
BackendCounters::registerWriteSync()
{
    ++writeSyncCounter_.get();
}

void
BackendCounters::registerWriteSyncRetry()
{
    ++writeSyncRetryCounter_.get();
}

void
BackendCounters::registerWriteStarted()
{
    asyncWriteCounters_.registerStarted(1u);
}

void
BackendCounters::registerWriteFinished()
{
    asyncWriteCounters_.registerFinished(1u);
}

void
BackendCounters::registerWriteRetry()
{
    asyncWriteCounters_.registerRetry(1u);
}

void
BackendCounters::registerReadStarted(std::uint64_t const count)
{
    asyncReadCounters_.registerStarted(count);
}

void
BackendCounters::registerReadFinished(std::uint64_t const count)
{
    asyncReadCounters_.registerFinished(count);
}

void
BackendCounters::registerReadRetry(std::uint64_t const count)
{
    asyncReadCounters_.registerRetry(count);
}

void
BackendCounters::registerReadError(std::uint64_t const count)
{
    asyncReadCounters_.registerError(count);
}

boost::json::object
BackendCounters::report() const
{
    boost::json::object result;
    result["too_busy"] = tooBusyCounter_.get().value();
    result["write_sync"] = writeSyncCounter_.get().value();
    result["write_sync_retry"] = writeSyncRetryCounter_.get().value();
    for (auto const& [key, value] : asyncWriteCounters_.report())
        result[key] = value;
    for (auto const& [key, value] : asyncReadCounters_.report())
        result[key] = value;
    return result;
}

BackendCounters::AsyncOperationCounters::AsyncOperationCounters(std::string name)
    : name_(std::move(name))
    , pendingCounter_(PrometheusService::gaugeInt(
          "backend_operations_current_number",
          Labels({{"operation", name_}, {"status", "pending"}}),
          "The current number of pending " + name_ + " operations"
      ))
    , completedCounter_(PrometheusService::counterInt(
          "backend_operations_total_number",
          Labels({{"operation", name_}, {"status", "completed"}}),
          "The total number of completed " + name_ + " operations"
      ))
    , retryCounter_(PrometheusService::counterInt(
          "backend_operations_total_number",
          Labels({{"operation", name_}, {"status", "retry"}}),
          "The total number of retried " + name_ + " operations"
      ))
    , errorCounter_(PrometheusService::counterInt(
          "backend_operations_total_number",
          Labels({{"operation", name_}, {"status", "error"}}),
          "The total number of errored " + name_ + " operations"
      ))
{
}

void
BackendCounters::AsyncOperationCounters::registerStarted(std::uint64_t const count)
{
    pendingCounter_.get() += count;
}

void
BackendCounters::AsyncOperationCounters::registerFinished(std::uint64_t const count)
{
    assert(pendingCounter_.get().value() >= static_cast<std::int64_t>(count));
    pendingCounter_.get() -= count;
    completedCounter_.get() += count;
}

void
BackendCounters::AsyncOperationCounters::registerRetry(std::uint64_t count)
{
    retryCounter_.get() += count;
}

void
BackendCounters::AsyncOperationCounters::registerError(std::uint64_t count)
{
    assert(pendingCounter_.get().value() >= static_cast<std::int64_t>(count));
    pendingCounter_.get() -= count;
    errorCounter_.get() += count;
}

boost::json::object
BackendCounters::AsyncOperationCounters::report() const
{
    return boost::json::object{
        {name_ + "_pending", pendingCounter_.get().value()},
        {name_ + "_completed", completedCounter_.get().value()},
        {name_ + "_retry", retryCounter_.get().value()},
        {name_ + "_error", errorCounter_.get().value()}};
}

}  // namespace data
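BackendCounters::make() above uses a small trick worth calling out: std::make_shared cannot normally reach a private constructor, so a throwaway derived type is declared inside the factory. A standalone sketch of the same idiom follows; Widget is a hypothetical class, not part of this change.

// The constructor stays private, yet std::make_shared still works via a
// local derived type that inherits the factory's access rights.
#include <memory>

class Widget {
public:
    static std::shared_ptr<Widget>
    make()
    {
        // Local class inside a member function: may call the private ctor.
        struct EnableMakeShared : public Widget {};
        return std::make_shared<EnableMakeShared>();
    }

private:
    Widget() = default;  // callers cannot construct directly
};

int main() {
    auto w = Widget::make();                 // OK
    // auto bad = std::make_shared<Widget>();  // would not compile
}

The payoff over a naked `new Widget` wrapped in shared_ptr is that make_shared keeps the single-allocation layout while the type still controls how instances come into existence.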
src/data/BackendCounters.h (new file, 138 lines)
@@ -0,0 +1,138 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <util/prometheus/Prometheus.h>

#include <boost/json/object.hpp>

#include <atomic>
#include <functional>
#include <memory>
#include <utility>

namespace data {

/**
 * @brief A concept for a class that can be used to count backend operations.
 */
// clang-format off
template <typename T>
concept SomeBackendCounters = requires(T a) {
    typename T::PtrType;
    { a.registerTooBusy() } -> std::same_as<void>;
    { a.registerWriteSync() } -> std::same_as<void>;
    { a.registerWriteSyncRetry() } -> std::same_as<void>;
    { a.registerWriteStarted() } -> std::same_as<void>;
    { a.registerWriteFinished() } -> std::same_as<void>;
    { a.registerWriteRetry() } -> std::same_as<void>;
    { a.registerReadStarted(std::uint64_t{}) } -> std::same_as<void>;
    { a.registerReadFinished(std::uint64_t{}) } -> std::same_as<void>;
    { a.registerReadRetry(std::uint64_t{}) } -> std::same_as<void>;
    { a.registerReadError(std::uint64_t{}) } -> std::same_as<void>;
    { a.report() } -> std::same_as<boost::json::object>;
};
// clang-format on

/**
 * @brief Holds statistics about the backend.
 *
 * @note This class is thread-safe.
 */
class BackendCounters {
public:
    using PtrType = std::shared_ptr<BackendCounters>;

    static PtrType
    make();

    void
    registerTooBusy();

    void
    registerWriteSync();

    void
    registerWriteSyncRetry();

    void
    registerWriteStarted();

    void
    registerWriteFinished();

    void
    registerWriteRetry();

    void
    registerReadStarted(std::uint64_t count = 1u);

    void
    registerReadFinished(std::uint64_t count = 1u);

    void
    registerReadRetry(std::uint64_t count = 1u);

    void
    registerReadError(std::uint64_t count = 1u);

    boost::json::object
    report() const;

private:
    BackendCounters();

    class AsyncOperationCounters {
    public:
        AsyncOperationCounters(std::string name);

        void
        registerStarted(std::uint64_t count);

        void
        registerFinished(std::uint64_t count);

        void
        registerRetry(std::uint64_t count);

        void
        registerError(std::uint64_t count);

        boost::json::object
        report() const;

    private:
        std::string name_;
        std::reference_wrapper<util::prometheus::GaugeInt> pendingCounter_;
        std::reference_wrapper<util::prometheus::CounterInt> completedCounter_;
        std::reference_wrapper<util::prometheus::CounterInt> retryCounter_;
        std::reference_wrapper<util::prometheus::CounterInt> errorCounter_;
    };

    std::reference_wrapper<util::prometheus::CounterInt> tooBusyCounter_;

    std::reference_wrapper<util::prometheus::CounterInt> writeSyncCounter_;
    std::reference_wrapper<util::prometheus::CounterInt> writeSyncRetryCounter_;

    AsyncOperationCounters asyncWriteCounters_{"write_async"};
    AsyncOperationCounters asyncReadCounters_{"read_async"};
};

}  // namespace data
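The SomeBackendCounters concept lets backend templates accept any counters implementation (the real one in production, a stub in tests) with the contract checked at compile time. A sketch of how it might be exercised follows; InstrumentedBackend is hypothetical and the `make()` call is an assumption beyond what the concept itself requires.

// Compile-time check plus a minimal consumer of the concept.
static_assert(data::SomeBackendCounters<data::BackendCounters>);

template <data::SomeBackendCounters CountersType>
class InstrumentedBackend {
    typename CountersType::PtrType counters_ = CountersType::make();

public:
    void
    onReadError(std::uint64_t count)
    {
        counters_->registerReadError(count);  // guaranteed by the concept
    }
};

A mock that satisfies the same requires-expression slots in without any virtual dispatch, which is the usual motivation for concept-constrained rather than interface-based injection.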
@@ -34,10 +34,10 @@ namespace data {
  * @param config The clio config to use
  * @return A shared_ptr<BackendInterface> with the selected implementation
  */
-std::shared_ptr<BackendInterface>
+inline std::shared_ptr<BackendInterface>
 make_Backend(util::Config const& config)
 {
-    static util::Logger log{"Backend"};
+    static util::Logger const log{"Backend"};
     LOG(log.info()) << "Constructing BackendInterface";
 
     auto const readOnly = config.valueOr("read_only", false);
@@ -46,8 +46,7 @@ make_Backend(util::Config const& config)
     std::shared_ptr<BackendInterface> backend = nullptr;
 
     // TODO: retire `cassandra-new` by next release after 2.0
-    if (boost::iequals(type, "cassandra") or boost::iequals(type, "cassandra-new"))
-    {
+    if (boost::iequals(type, "cassandra") or boost::iequals(type, "cassandra-new")) {
         auto cfg = config.section("database." + type);
         backend = std::make_shared<data::cassandra::CassandraBackend>(data::cassandra::SettingsProvider{cfg}, readOnly);
     }
@@ -56,8 +55,7 @@ make_Backend(util::Config const& config)
     throw std::runtime_error("Invalid database type");
 
     auto const rng = backend->hardFetchLedgerRangeNoThrow();
-    if (rng)
-    {
+    if (rng) {
         backend->updateRange(rng->minSequence);
         backend->updateRange(rng->maxSequence);
     }
 
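Note the `inline` added to make_Backend above: the factory is defined in a header, so without `inline` every translation unit that includes it would emit its own strong definition and break the one-definition rule at link time. A minimal sketch of the same pattern; make_backend and Backend here are illustrative stand-ins, not clio types.

// factory.h — a free function defined in a header must be inline so that
// multiple translation units can include it without duplicate-symbol errors.
#pragma once
#include <memory>
#include <string>

struct Backend { std::string type; };

inline std::shared_ptr<Backend>
make_backend(std::string type)  // hypothetical, mirrors the change above
{
    return std::make_shared<Backend>(Backend{std::move(type)});
}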
@@ -34,8 +34,7 @@ BackendInterface::finishWrites(std::uint32_t const ledgerSequence)
 {
     LOG(gLog.debug()) << "Want finish writes for " << ledgerSequence;
     auto commitRes = doFinishWrites();
-    if (commitRes)
-    {
+    if (commitRes) {
         LOG(gLog.debug()) << "Successfully commited. Updating range now to " << ledgerSequence;
         updateRange(ledgerSequence);
     }
@@ -59,52 +58,49 @@ std::optional<Blob>
 BackendInterface::fetchLedgerObject(
     ripple::uint256 const& key,
     std::uint32_t const sequence,
-    boost::asio::yield_context yield) const
+    boost::asio::yield_context yield
+) const
 {
     auto obj = cache_.get(key, sequence);
-    if (obj)
-    {
+    if (obj) {
         LOG(gLog.trace()) << "Cache hit - " << ripple::strHex(key);
         return *obj;
     }
-    else
-    {
-        LOG(gLog.trace()) << "Cache miss - " << ripple::strHex(key);
-        auto dbObj = doFetchLedgerObject(key, sequence, yield);
-        if (!dbObj)
-            LOG(gLog.trace()) << "Missed cache and missed in db";
-        else
-            LOG(gLog.trace()) << "Missed cache but found in db";
-        return dbObj;
-    }
+
+    LOG(gLog.trace()) << "Cache miss - " << ripple::strHex(key);
+    auto dbObj = doFetchLedgerObject(key, sequence, yield);
+    if (!dbObj) {
+        LOG(gLog.trace()) << "Missed cache and missed in db";
+    } else {
+        LOG(gLog.trace()) << "Missed cache but found in db";
+    }
+    return dbObj;
 }
 
 std::vector<Blob>
 BackendInterface::fetchLedgerObjects(
     std::vector<ripple::uint256> const& keys,
     std::uint32_t const sequence,
-    boost::asio::yield_context yield) const
+    boost::asio::yield_context yield
+) const
 {
     std::vector<Blob> results;
     results.resize(keys.size());
     std::vector<ripple::uint256> misses;
-    for (size_t i = 0; i < keys.size(); ++i)
-    {
+    for (size_t i = 0; i < keys.size(); ++i) {
         auto obj = cache_.get(keys[i], sequence);
-        if (obj)
+        if (obj) {
             results[i] = *obj;
-        else
+        } else {
             misses.push_back(keys[i]);
+        }
     }
     LOG(gLog.trace()) << "Cache hits = " << keys.size() - misses.size() << " - cache misses = " << misses.size();
 
-    if (misses.size())
-    {
+    if (!misses.empty()) {
         auto objs = doFetchLedgerObjects(misses, sequence, yield);
-        for (size_t i = 0, j = 0; i < results.size(); ++i)
-        {
-            if (results[i].size() == 0)
-            {
+        for (size_t i = 0, j = 0; i < results.size(); ++i) {
+            if (results[i].empty()) {
                 results[i] = objs[j];
                 ++j;
             }
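fetchLedgerObject and fetchLedgerObjects above implement a read-through cache: consult the in-memory cache first and only go to the database on a miss (the vector overload batches the misses into one DB call). The generic shape of that pattern, as a sketch with hypothetical Cache and Db types whose get/fetch are assumed to return the same std::optional value type:

#include <optional>

// Read-through lookup: serve from cache when possible, otherwise fall back
// to the database.
template <typename Cache, typename Db, typename Key>
auto
readThrough(Cache& cache, Db& db, Key const& key)
{
    if (auto hit = cache.get(key); hit)
        return hit;        // cache hit: no database round trip
    return db.fetch(key);  // cache miss: hit the database
}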
@@ -118,13 +114,15 @@ std::optional<ripple::uint256>
 BackendInterface::fetchSuccessorKey(
     ripple::uint256 key,
     std::uint32_t const ledgerSequence,
-    boost::asio::yield_context yield) const
+    boost::asio::yield_context yield
+) const
 {
     auto succ = cache_.getSuccessor(key, ledgerSequence);
-    if (succ)
+    if (succ) {
         LOG(gLog.trace()) << "Cache hit - " << ripple::strHex(key);
-    else
+    } else {
         LOG(gLog.trace()) << "Cache miss - " << ripple::strHex(key);
+    }
     return succ ? succ->key : doFetchSuccessorKey(key, ledgerSequence, yield);
 }
 
@@ -132,11 +130,11 @@ std::optional<LedgerObject>
 BackendInterface::fetchSuccessorObject(
     ripple::uint256 key,
     std::uint32_t const ledgerSequence,
-    boost::asio::yield_context yield) const
+    boost::asio::yield_context yield
+) const
 {
     auto succ = fetchSuccessorKey(key, ledgerSequence, yield);
-    if (succ)
-    {
+    if (succ) {
         auto obj = fetchLedgerObject(*succ, ledgerSequence, yield);
         if (!obj)
             return {{*succ, {}}};
@@ -151,7 +149,8 @@ BackendInterface::fetchBookOffers(
     ripple::uint256 const& book,
     std::uint32_t const ledgerSequence,
     std::uint32_t const limit,
-    boost::asio::yield_context yield) const
+    boost::asio::yield_context yield
+) const
 {
     // TODO try to speed this up. This can take a few seconds. The goal is
     // to get it down to a few hundred milliseconds.
@@ -165,28 +164,25 @@ BackendInterface::fetchBookOffers(
     std::uint32_t numPages = 0;
     long succMillis = 0;
     long pageMillis = 0;
-    while (keys.size() < limit)
-    {
+    while (keys.size() < limit) {
         auto mid1 = std::chrono::system_clock::now();
         auto offerDir = fetchSuccessorObject(uTipIndex, ledgerSequence, yield);
         auto mid2 = std::chrono::system_clock::now();
         numSucc++;
         succMillis += getMillis(mid2 - mid1);
-        if (!offerDir || offerDir->key >= bookEnd)
-        {
+        if (!offerDir || offerDir->key >= bookEnd) {
             LOG(gLog.trace()) << "offerDir.has_value() " << offerDir.has_value() << " breaking";
             break;
         }
         uTipIndex = offerDir->key;
-        while (keys.size() < limit)
-        {
+        while (keys.size() < limit) {
             ++numPages;
-            ripple::STLedgerEntry sle{ripple::SerialIter{offerDir->blob.data(), offerDir->blob.size()}, offerDir->key};
+            ripple::STLedgerEntry const sle{
+                ripple::SerialIter{offerDir->blob.data(), offerDir->blob.size()}, offerDir->key};
             auto indexes = sle.getFieldV256(ripple::sfIndexes);
             keys.insert(keys.end(), indexes.begin(), indexes.end());
             auto next = sle.getFieldU64(ripple::sfIndexNext);
-            if (!next)
-            {
+            if (next == 0u) {
                 LOG(gLog.trace()) << "Next is empty. breaking";
                 break;
             }
@@ -201,11 +197,10 @@ BackendInterface::fetchBookOffers(
     }
     auto mid = std::chrono::system_clock::now();
     auto objs = fetchLedgerObjects(keys, ledgerSequence, yield);
-    for (size_t i = 0; i < keys.size() && i < limit; ++i)
-    {
+    for (size_t i = 0; i < keys.size() && i < limit; ++i) {
         LOG(gLog.trace()) << "Key = " << ripple::strHex(keys[i]) << " blob = " << ripple::strHex(objs[i])
                           << " ledgerSequence = " << ledgerSequence;
-        assert(objs[i].size());
+        assert(!objs[i].empty());
         page.offers.push_back({keys[i], objs[i]});
     }
     auto end = std::chrono::system_clock::now();
@@ -231,19 +226,20 @@ BackendInterface::hardFetchLedgerRange() const
 std::optional<LedgerRange>
 BackendInterface::fetchLedgerRange() const
 {
-    std::shared_lock lck(rngMtx_);
+    std::shared_lock const lck(rngMtx_);
     return range;
 }
 
 void
 BackendInterface::updateRange(uint32_t newMax)
 {
-    std::scoped_lock lck(rngMtx_);
+    std::scoped_lock const lck(rngMtx_);
     assert(!range || newMax >= range->maxSequence);
-    if (!range)
+    if (!range) {
         range = {newMax, newMax};
-    else
+    } else {
         range->maxSequence = newMax;
+    }
 }
 
 LedgerPage
@@ -252,41 +248,39 @@ BackendInterface::fetchLedgerPage(
     std::uint32_t const ledgerSequence,
     std::uint32_t const limit,
     bool outOfOrder,
-    boost::asio::yield_context yield) const
+    boost::asio::yield_context yield
+) const
 {
     LedgerPage page;
 
     std::vector<ripple::uint256> keys;
     bool reachedEnd = false;
-    while (keys.size() < limit && !reachedEnd)
-    {
-        ripple::uint256 const& curCursor = keys.size() ? keys.back() : cursor ? *cursor : firstKey;
+    while (keys.size() < limit && !reachedEnd) {
+        ripple::uint256 const& curCursor = !keys.empty() ? keys.back() : (cursor ? *cursor : firstKey);
         std::uint32_t const seq = outOfOrder ? range->maxSequence : ledgerSequence;
         auto succ = fetchSuccessorKey(curCursor, seq, yield);
-        if (!succ)
+        if (!succ) {
             reachedEnd = true;
-        else
-            keys.push_back(std::move(*succ));
+        } else {
+            keys.push_back(*succ);
+        }
     }
 
     auto objects = fetchLedgerObjects(keys, ledgerSequence, yield);
-    for (size_t i = 0; i < objects.size(); ++i)
-    {
-        if (objects[i].size())
-            page.objects.push_back({std::move(keys[i]), std::move(objects[i])});
-        else if (!outOfOrder)
-        {
+    for (size_t i = 0; i < objects.size(); ++i) {
+        if (!objects[i].empty()) {
+            page.objects.push_back({keys[i], std::move(objects[i])});
+        } else if (!outOfOrder) {
            LOG(gLog.error()) << "Deleted or non-existent object in successor table. key = " << ripple::strHex(keys[i])
                              << " - seq = " << ledgerSequence;
            std::stringstream msg;
-            for (size_t j = 0; j < objects.size(); ++j)
-            {
+            for (size_t j = 0; j < objects.size(); ++j) {
                msg << " - " << ripple::strHex(keys[j]);
            }
            LOG(gLog.error()) << msg.str();
        }
     }
-    if (keys.size() && !reachedEnd)
+    if (!keys.empty() && !reachedEnd)
         page.cursor = keys.back();
 
     return page;
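fetchLedgerPage walks the successor table one key at a time until it has `limit` keys or runs off the end, and hands the last key back as a cursor. A consumer-side paging loop might look like the sketch below; `backend`, `seq`, `yield` and `process` are assumed to exist in the caller and are not part of this change.

// Request pages until no cursor is returned.
std::optional<ripple::uint256> cursor;
do {
    auto const page = backend.fetchLedgerPage(cursor, seq, 256u, false, yield);
    for (auto const& obj : page.objects)
        process(obj);        // hypothetical per-object handler
    cursor = page.cursor;    // disengaged once the last page was served
} while (cursor);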
@@ -300,14 +294,13 @@ BackendInterface::fetchFees(std::uint32_t const seq, boost::asio::yield_context
     auto key = ripple::keylet::fees().key;
     auto bytes = fetchLedgerObject(key, seq, yield);
 
-    if (!bytes)
-    {
+    if (!bytes) {
         LOG(gLog.error()) << "Could not find fees";
         return {};
     }
 
     ripple::SerialIter it(bytes->data(), bytes->size());
-    ripple::SLE sle{it, key};
+    ripple::SLE const sle{it, key};
 
     if (sle.getFieldIndex(ripple::sfBaseFee) != -1)
         fees.base = sle.getFieldU64(ripple::sfBaseFee);
 
@@ -38,16 +38,16 @@ namespace data {
 /**
  * @brief Represents a database timeout error.
  */
-class DatabaseTimeout : public std::exception
-{
+class DatabaseTimeout : public std::exception {
 public:
-    const char*
+    char const*
     what() const throw() override
     {
         return "Database read timed out. Please retry the request";
     }
 };
 
+static constexpr std::size_t DEFAULT_WAIT_BETWEEN_RETRY = 500;
 /**
  * @brief A helper function that catches DatabaseTimout exceptions and retries indefinitely.
  *
@@ -58,18 +58,14 @@ public:
  */
 template <class FnType>
 auto
-retryOnTimeout(FnType func, size_t waitMs = 500)
+retryOnTimeout(FnType func, size_t waitMs = DEFAULT_WAIT_BETWEEN_RETRY)
 {
-    static util::Logger log{"Backend"};
+    static util::Logger const log{"Backend"};
 
-    while (true)
-    {
-        try
-        {
+    while (true) {
+        try {
             return func();
-        }
-        catch (DatabaseTimeout const&)
-        {
+        } catch (DatabaseTimeout const&) {
             LOG(log.error()) << "Database request timed out. Sleeping and retrying ... ";
             std::this_thread::sleep_for(std::chrono::milliseconds(waitMs));
         }
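retryOnTimeout is the standard wrapper for database calls in this codebase; anything that throws DatabaseTimeout is retried after the configured sleep, indefinitely. A usage sketch, assuming `backend`, `seq` and `yield` are in scope in the caller:

// Retry a coroutine-context read forever with the default backoff.
auto const header = data::retryOnTimeout([&]() {
    return backend.fetchLedgerBySequence(seq, yield);  // may throw DatabaseTimeout
});

Replacing the magic 500 with DEFAULT_WAIT_BETWEEN_RETRY also gives callers a named constant to reuse when they want the same backoff elsewhere.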
@@ -90,17 +86,16 @@ synchronous(FnType&& func)
     boost::asio::io_context ctx;
 
     using R = typename boost::result_of<FnType(boost::asio::yield_context)>::type;
-    if constexpr (!std::is_same<R, void>::value)
-    {
+    if constexpr (!std::is_same<R, void>::value) {
         R res;
-        boost::asio::spawn(ctx, [&func, &res](auto yield) { res = func(yield); });
+        boost::asio::spawn(ctx, [_ = boost::asio::make_work_guard(ctx), &func, &res](auto yield) {
+            res = func(yield);
+        });
 
         ctx.run();
         return res;
-    }
-    else
-    {
-        boost::asio::spawn(ctx, [&func](auto yield) { func(yield); });
+    } else {
+        boost::asio::spawn(ctx, [_ = boost::asio::make_work_guard(ctx), &func](auto yield) { func(yield); });
         ctx.run();
     }
 }
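The work guards added to synchronous() are the substantive part of this hunk: io_context::run() returns as soon as the context sees no outstanding work, which can happen while the spawned coroutine is suspended on an operation completed elsewhere. Capturing a work guard in the coroutine's lambda keeps run() alive until the lambda (and the guard with it) is destroyed. A self-contained sketch of the same pattern, using the same spawn overload as the code above:

#include <boost/asio.hpp>
#include <boost/asio/spawn.hpp>
#include <chrono>

int main() {
    boost::asio::io_context ctx;
    boost::asio::spawn(ctx, [guard = boost::asio::make_work_guard(ctx)](boost::asio::yield_context yield) {
        boost::asio::steady_timer timer{yield.get_executor()};
        timer.expires_after(std::chrono::milliseconds(10));
        timer.async_wait(yield);  // suspends; the guard keeps run() from returning early
        // the guard is released when this lambda is destroyed
    });
    ctx.run();  // returns only after the coroutine finishes
}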
@@ -122,8 +117,7 @@ synchronousAndRetryOnTimeout(FnType&& func)
 /**
  * @brief The interface to the database used by Clio.
  */
-class BackendInterface
-{
+class BackendInterface {
 protected:
     mutable std::shared_mutex rngMtx_;
     std::optional<LedgerRange> range;
@@ -160,7 +154,7 @@ public:
      * @return The ripple::LedgerHeader if found; nullopt otherwise
      */
     virtual std::optional<ripple::LedgerHeader>
-    fetchLedgerBySequence(std::uint32_t const sequence, boost::asio::yield_context yield) const = 0;
+    fetchLedgerBySequence(std::uint32_t sequence, boost::asio::yield_context yield) const = 0;
 
     /**
      * @brief Fetches a specific ledger by hash.
@@ -205,7 +199,7 @@ public:
      * @return ripple::Fees if fees are found; nullopt otherwise
      */
     std::optional<ripple::Fees>
-    fetchFees(std::uint32_t const seq, boost::asio::yield_context yield) const;
+    fetchFees(std::uint32_t seq, boost::asio::yield_context yield) const;
 
     /**
      * @brief Fetches a specific transaction.
@@ -240,10 +234,11 @@ public:
     virtual TransactionsAndCursor
     fetchAccountTransactions(
         ripple::AccountID const& account,
-        std::uint32_t const limit,
+        std::uint32_t limit,
         bool forward,
         std::optional<TransactionsCursor> const& cursor,
-        boost::asio::yield_context yield) const = 0;
+        boost::asio::yield_context yield
+    ) const = 0;
 
     /**
      * @brief Fetches all transactions from a specific ledger.
@@ -253,7 +248,7 @@ public:
      * @return Results as a vector of TransactionAndMetadata
      */
     virtual std::vector<TransactionAndMetadata>
-    fetchAllTransactionsInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const = 0;
+    fetchAllTransactionsInLedger(std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;
 
     /**
      * @brief Fetches all transaction hashes from a specific ledger.
@@ -263,7 +258,7 @@ public:
      * @return Hashes as ripple::uint256 in a vector
      */
     virtual std::vector<ripple::uint256>
-    fetchAllTransactionHashesInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const = 0;
+    fetchAllTransactionHashesInLedger(std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;
 
     /**
      * @brief Fetches a specific NFT.
@@ -274,8 +269,7 @@ public:
      * @return NFT object on success; nullopt otherwise
      */
     virtual std::optional<NFT>
-    fetchNFT(ripple::uint256 const& tokenID, std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
-        const = 0;
+    fetchNFT(ripple::uint256 const& tokenID, std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;
 
     /**
      * @brief Fetches all transactions for a specific NFT.
@@ -290,10 +284,33 @@ public:
     virtual TransactionsAndCursor
     fetchNFTTransactions(
         ripple::uint256 const& tokenID,
-        std::uint32_t const limit,
-        bool const forward,
+        std::uint32_t limit,
+        bool forward,
         std::optional<TransactionsCursor> const& cursorIn,
-        boost::asio::yield_context yield) const = 0;
+        boost::asio::yield_context yield
+    ) const = 0;
+
+    /**
+     * @brief Fetches all NFTs issued by a given address.
+     *
+     * @param issuer AccountID of issuer you wish you query.
+     * @param taxon Optional taxon of NFTs by which you wish to filter.
+     * @param limit Paging limit.
+     * @param cursorIn Optional cursor to allow us to pick up from where we
+     * last left off.
+     * @param yield Currently executing coroutine.
+     * @return std::vector<NFT> of NFTs issued by this account, or
+     * this issuer/taxon combination if taxon is passed and an optional marker
+     */
+    virtual NFTsAndCursor
+    fetchNFTsByIssuer(
+        ripple::AccountID const& issuer,
+        std::optional<std::uint32_t> const& taxon,
+        std::uint32_t ledgerSequence,
+        std::uint32_t limit,
+        std::optional<ripple::uint256> const& cursorIn,
+        boost::asio::yield_context yield
+    ) const = 0;
 
     /**
      * @brief Fetches a specific ledger object.
@@ -307,7 +324,7 @@ public:
      * @return The object as a Blob on success; nullopt otherwise
      */
     std::optional<Blob>
-    fetchLedgerObject(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context yield) const;
+    fetchLedgerObject(ripple::uint256 const& key, std::uint32_t sequence, boost::asio::yield_context yield) const;
 
     /**
      * @brief Fetches all ledger objects by their keys.
@@ -323,8 +340,9 @@ public:
     std::vector<Blob>
     fetchLedgerObjects(
         std::vector<ripple::uint256> const& keys,
-        std::uint32_t const sequence,
-        boost::asio::yield_context yield) const;
+        std::uint32_t sequence,
+        boost::asio::yield_context yield
+    ) const;
 
     /**
      * @brief The database-specific implementation for fetching a ledger object.
@@ -335,8 +353,7 @@ public:
      * @return The object as a Blob on success; nullopt otherwise
      */
     virtual std::optional<Blob>
-    doFetchLedgerObject(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context yield)
-        const = 0;
+    doFetchLedgerObject(ripple::uint256 const& key, std::uint32_t sequence, boost::asio::yield_context yield) const = 0;
 
     /**
      * @brief The database-specific implementation for fetching ledger objects.
@@ -349,8 +366,9 @@ public:
     virtual std::vector<Blob>
     doFetchLedgerObjects(
         std::vector<ripple::uint256> const& keys,
-        std::uint32_t const sequence,
-        boost::asio::yield_context yield) const = 0;
+        std::uint32_t sequence,
+        boost::asio::yield_context yield
+    ) const = 0;
 
     /**
      * @brief Returns the difference between ledgers.
@@ -360,7 +378,7 @@ public:
      * @return A vector of LedgerObject representing the diff
      */
     virtual std::vector<LedgerObject>
-    fetchLedgerDiff(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const = 0;
+    fetchLedgerDiff(std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;
 
     /**
      * @brief Fetches a page of ledger objects, ordered by key/index.
@@ -375,10 +393,11 @@ public:
     LedgerPage
     fetchLedgerPage(
         std::optional<ripple::uint256> const& cursor,
-        std::uint32_t const ledgerSequence,
-        std::uint32_t const limit,
+        std::uint32_t ledgerSequence,
+        std::uint32_t limit,
         bool outOfOrder,
-        boost::asio::yield_context yield) const;
+        boost::asio::yield_context yield
+    ) const;
 
     /**
      * @brief Fetches the successor object.
@@ -389,8 +408,7 @@ public:
      * @return The sucessor on success; nullopt otherwise
      */
     std::optional<LedgerObject>
-    fetchSuccessorObject(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
-        const;
+    fetchSuccessorObject(ripple::uint256 key, std::uint32_t ledgerSequence, boost::asio::yield_context yield) const;
 
     /**
      * @brief Fetches the successor key.
@@ -404,7 +422,7 @@ public:
      * @return The sucessor key on success; nullopt otherwise
      */
     std::optional<ripple::uint256>
-    fetchSuccessorKey(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const;
+    fetchSuccessorKey(ripple::uint256 key, std::uint32_t ledgerSequence, boost::asio::yield_context yield) const;
 
     /**
      * @brief Database-specific implementation of fetching the successor key
@@ -415,8 +433,7 @@ public:
      * @return The sucessor on success; nullopt otherwise
      */
     virtual std::optional<ripple::uint256>
-    doFetchSuccessorKey(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
-        const = 0;
+    doFetchSuccessorKey(ripple::uint256 key, std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;
 
     /**
      * @brief Fetches book offers.
@@ -430,9 +447,10 @@ public:
     BookOffersPage
     fetchBookOffers(
         ripple::uint256 const& book,
-        std::uint32_t const ledgerSequence,
-        std::uint32_t const limit,
-        boost::asio::yield_context yield) const;
+        std::uint32_t ledgerSequence,
+        std::uint32_t limit,
+        boost::asio::yield_context yield
+    ) const;
 
     /**
      * @brief Synchronously fetches the ledger range from DB.
@@ -477,7 +495,7 @@ public:
      * @param blob The data to write
      */
     virtual void
-    writeLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob);
+    writeLedgerObject(std::string&& key, std::uint32_t seq, std::string&& blob);
 
     /**
      * @brief Writes a new transaction.
@@ -491,10 +509,11 @@ public:
     virtual void
     writeTransaction(
         std::string&& hash,
-        std::uint32_t const seq,
-        std::uint32_t const date,
+        std::uint32_t seq,
+        std::uint32_t date,
         std::string&& transaction,
-        std::string&& metadata) = 0;
+        std::string&& metadata
+    ) = 0;
 
     /**
      * @brief Writes NFTs to the database.
@@ -528,7 +547,7 @@ public:
      * @param successor The successor data to write
      */
     virtual void
-    writeSuccessor(std::string&& key, std::uint32_t const seq, std::string&& successor) = 0;
+    writeSuccessor(std::string&& key, std::uint32_t seq, std::string&& successor) = 0;
 
     /**
      * @brief Starts a write transaction with the DB. No-op for cassandra.
@@ -547,7 +566,7 @@ public:
      * @return true on success; false otherwise
      */
     bool
-    finishWrites(std::uint32_t const ledgerSequence);
+    finishWrites(std::uint32_t ledgerSequence);
 
     /**
      * @return true if database is overwhelmed; false otherwise
@@ -555,9 +574,15 @@ public:
     virtual bool
     isTooBusy() const = 0;
 
+    /**
+     * @return json object containing backend usage statistics
+     */
+    virtual boost::json::object
+    stats() const = 0;
+
 private:
     virtual void
-    doWriteLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob) = 0;
+    doWriteLedgerObject(std::string&& key, std::uint32_t seq, std::string&& blob) = 0;
 
     virtual bool
     doFinishWrites() = 0;
 
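The new pure-virtual stats() gives every backend a uniform way to expose usage numbers. A concrete backend that owns a BackendCounters (as the Cassandra backend in this changeset does) could plausibly satisfy it in one line; this is a sketch only, assuming a counters_ member of type BackendCounters::PtrType created via BackendCounters::make().

// Hypothetical override in a concrete backend.
boost::json::object
stats() const override
{
    return counters_->report();  // counters_: BackendCounters::PtrType
}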
@@ -44,8 +44,7 @@ namespace data::cassandra {
  * @tparam ExecutionStrategyType The execution strategy type to use
  */
 template <SomeSettingsProvider SettingsProviderType, SomeExecutionStrategy ExecutionStrategyType>
-class BasicCassandraBackend : public BackendInterface
-{
+class BasicCassandraBackend : public BackendInterface {
     util::Logger log_{"Backend"};
 
     SettingsProviderType settingsProvider_;
@@ -73,12 +72,10 @@ public:
         if (auto const res = handle_.connect(); not res)
             throw std::runtime_error("Could not connect to Cassandra: " + res.error());
 
-        if (not readOnly)
-        {
-            if (auto const res = handle_.execute(schema_.createKeyspace); not res)
-            {
-                // on datastax, creation of keyspaces can be configured to only be done thru the admin interface.
-                // this does not mean that the keyspace does not already exist tho.
+        if (not readOnly) {
+            if (auto const res = handle_.execute(schema_.createKeyspace); not res) {
+                // on datastax, creation of keyspaces can be configured to only be done thru the admin
+                // interface. this does not mean that the keyspace does not already exist tho.
                 if (res.error().code() != CASS_ERROR_SERVER_UNAUTHORIZED)
                     throw std::runtime_error("Could not create keyspace: " + res.error());
             }
@@ -87,12 +84,9 @@ public:
             throw std::runtime_error("Could not create schema: " + res.error());
         }
 
-        try
-        {
+        try {
             schema_.prepareStatements(handle_);
-        }
-        catch (std::runtime_error const& ex)
-        {
+        } catch (std::runtime_error const& ex) {
             LOG(log_.error()) << "Failed to prepare the statements: " << ex.what() << "; readOnly: " << readOnly;
             throw;
         }
@@ -106,28 +100,26 @@ public:
         std::uint32_t const limit,
         bool forward,
         std::optional<TransactionsCursor> const& cursorIn,
-        boost::asio::yield_context yield) const override
+        boost::asio::yield_context yield
+    ) const override
     {
         auto rng = fetchLedgerRange();
         if (!rng)
             return {{}, {}};
 
-        Statement statement = [this, forward, &account]() {
+        Statement const statement = [this, forward, &account]() {
             if (forward)
                 return schema_->selectAccountTxForward.bind(account);
-            else
-                return schema_->selectAccountTx.bind(account);
+
+            return schema_->selectAccountTx.bind(account);
         }();
 
         auto cursor = cursorIn;
-        if (cursor)
-        {
+        if (cursor) {
             statement.bindAt(1, cursor->asTuple());
             LOG(log_.debug()) << "account = " << ripple::strHex(account) << " tuple = " << cursor->ledgerSequence
                               << cursor->transactionIndex;
-        }
-        else
-        {
+        } else {
             auto const seq = forward ? rng->minSequence : rng->maxSequence;
             auto const placeHolder = forward ? 0u : std::numeric_limits<std::uint32_t>::max();
 
@@ -142,8 +134,7 @@ public:
         statement.bindAt(2, Limit{limit});
         auto const res = executor_.read(yield, statement);
         auto const& results = res.value();
-        if (not results.hasRows())
-        {
+        if (not results.hasRows()) {
             LOG(log_.debug()) << "No rows returned";
             return {};
         }
@@ -152,11 +143,9 @@ public:
         auto numRows = results.numRows();
         LOG(log_.info()) << "num_rows = " << numRows;
 
-        for (auto [hash, data] : extract<ripple::uint256, std::tuple<uint32_t, uint32_t>>(results))
-        {
+        for (auto [hash, data] : extract<ripple::uint256, std::tuple<uint32_t, uint32_t>>(results)) {
             hashes.push_back(hash);
-            if (--numRows == 0)
-            {
+            if (--numRows == 0) {
                 LOG(log_.debug()) << "Setting cursor";
                 cursor = data;
 
@@ -170,8 +159,7 @@ public:
         auto const txns = fetchTransactions(hashes, yield);
         LOG(log_.debug()) << "Txns = " << txns.size();
 
-        if (txns.size() == limit)
-        {
+        if (txns.size() == limit) {
             LOG(log_.debug()) << "Returning cursor";
             return {txns, cursor};
         }
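The `Statement const statement = [..]() { ... }();` change above is the immediately-invoked lambda idiom: the branchy initialization runs once inside the lambda, and the result can then stay const for the rest of the scope. A standalone sketch of the idiom (pickQuery and its strings are illustrative, not clio code):

#include <string>

std::string
pickQuery(bool forward)
{
    std::string const query = [forward]() {
        if (forward)
            return std::string{"SELECT ... ORDER BY seq ASC"};
        return std::string{"SELECT ... ORDER BY seq DESC"};
    }();  // note the trailing (): the lambda runs immediately
    return query;
}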
@@ -185,13 +173,11 @@ public:
         // wait for other threads to finish their writes
         executor_.sync();
 
-        if (!range)
-        {
+        if (!range) {
             executor_.writeSync(schema_->updateLedgerRange, ledgerSequence_, false, ledgerSequence_);
         }
 
-        if (not executeSyncUpdate(schema_->updateLedgerRange.bind(ledgerSequence_, true, ledgerSequence_ - 1)))
-        {
+        if (not executeSyncUpdate(schema_->updateLedgerRange.bind(ledgerSequence_, true, ledgerSequence_ - 1))) {
             LOG(log_.warn()) << "Update failed for ledger " << ledgerSequence_;
             return false;
         }
@@ -213,10 +199,8 @@ public:
     std::optional<std::uint32_t>
     fetchLatestLedgerSequence(boost::asio::yield_context yield) const override
     {
-        if (auto const res = executor_.read(yield, schema_->selectLatestLedger); res)
-        {
-            if (auto const& result = res.value(); result)
-            {
+        if (auto const res = executor_.read(yield, schema_->selectLatestLedger); res) {
+            if (auto const& result = res.value(); result) {
                 if (auto const maybeValue = result.template get<uint32_t>(); maybeValue)
                     return maybeValue;
 
@@ -225,9 +209,7 @@ public:
             }
 
             LOG(log_.error()) << "Could not fetch latest ledger - no result";
-        }
-        else
-        {
+        } else {
             LOG(log_.error()) << "Could not fetch latest ledger: " << res.error();
         }
 
@@ -238,12 +220,9 @@ public:
     fetchLedgerBySequence(std::uint32_t const sequence, boost::asio::yield_context yield) const override
     {
         auto const res = executor_.read(yield, schema_->selectLedgerBySeq, sequence);
-        if (res)
-        {
-            if (auto const& result = res.value(); result)
-            {
-                if (auto const maybeValue = result.template get<std::vector<unsigned char>>(); maybeValue)
-                {
+        if (res) {
+            if (auto const& result = res.value(); result) {
+                if (auto const maybeValue = result.template get<std::vector<unsigned char>>(); maybeValue) {
                     return util::deserializeHeader(ripple::makeSlice(*maybeValue));
                 }
 
@@ -252,9 +231,7 @@ public:
             }
 
             LOG(log_.error()) << "Could not fetch ledger by sequence - no result";
-        }
-        else
-        {
+        } else {
             LOG(log_.error()) << "Could not fetch ledger by sequence: " << res.error();
         }
 
@@ -264,10 +241,8 @@ public:
     std::optional<ripple::LedgerHeader>
     fetchLedgerByHash(ripple::uint256 const& hash, boost::asio::yield_context yield) const override
     {
-        if (auto const res = executor_.read(yield, schema_->selectLedgerByHash, hash); res)
-        {
-            if (auto const& result = res.value(); result)
-            {
+        if (auto const res = executor_.read(yield, schema_->selectLedgerByHash, hash); res) {
+            if (auto const& result = res.value(); result) {
                 if (auto const maybeValue = result.template get<uint32_t>(); maybeValue)
                     return fetchLedgerBySequence(*maybeValue, yield);
 
@@ -276,9 +251,7 @@ public:
             }
 
             LOG(log_.error()) << "Could not fetch ledger by hash - no result";
-        }
-        else
-        {
+        } else {
             LOG(log_.error()) << "Could not fetch ledger by hash: " << res.error();
         }
 
@@ -288,11 +261,10 @@ public:
     std::optional<LedgerRange>
     hardFetchLedgerRange(boost::asio::yield_context yield) const override
     {
-        if (auto const res = executor_.read(yield, schema_->selectLedgerRange); res)
-        {
+        auto const res = executor_.read(yield, schema_->selectLedgerRange);
+        if (res) {
             auto const& results = res.value();
-            if (not results.hasRows())
-            {
+            if (not results.hasRows()) {
                 LOG(log_.debug()) << "Could not fetch ledger range - no rows";
                 return std::nullopt;
             }
@@ -302,12 +274,12 @@ public:
             // least use tuple<int, int>?
             LedgerRange range;
             std::size_t idx = 0;
-            for (auto [seq] : extract<uint32_t>(results))
-            {
-                if (idx == 0)
+            for (auto [seq] : extract<uint32_t>(results)) {
+                if (idx == 0) {
                     range.maxSequence = range.minSequence = seq;
-                else if (idx == 1)
+                } else if (idx == 1) {
                     range.maxSequence = seq;
+                }
 
                 ++idx;
             }
@@ -319,10 +291,7 @@ public:
                               << range.maxSequence;
             return range;
         }
-        else
-        {
-            LOG(log_.error()) << "Could not fetch ledger range: " << res.error();
-        }
+
+        LOG(log_.error()) << "Could not fetch ledger range: " << res.error();
 
         return std::nullopt;
     }
@@ -341,15 +310,13 @@ public:
         auto start = std::chrono::system_clock::now();
         auto const res = executor_.read(yield, schema_->selectAllTransactionHashesInLedger, ledgerSequence);
 
-        if (not res)
-        {
+        if (not res) {
             LOG(log_.error()) << "Could not fetch all transaction hashes: " << res.error();
             return {};
         }
 
         auto const& result = res.value();
-        if (not result.hasRows())
-        {
+        if (not result.hasRows()) {
             LOG(log_.error()) << "Could not fetch all transaction hashes - no rows; ledger = "
                               << std::to_string(ledgerSequence);
             return {};
@@ -375,8 +342,7 @@ public:
         if (not res)
             return std::nullopt;
 
-        if (auto const maybeRow = res->template get<uint32_t, ripple::AccountID, bool>(); maybeRow)
-        {
+        if (auto const maybeRow = res->template get<uint32_t, ripple::AccountID, bool>(); maybeRow) {
             auto [seq, owner, isBurned] = *maybeRow;
             auto result = std::make_optional<NFT>(tokenID, seq, owner, isBurned);
 
@@ -392,8 +358,7 @@ public:
         // even though we are returning a blank URI, the NFT might have had
         // one.
         auto uriRes = executor_.read(yield, schema_->selectNFTURI, tokenID, ledgerSequence);
-        if (uriRes)
-        {
+        if (uriRes) {
             if (auto const maybeUri = uriRes->template get<ripple::Blob>(); maybeUri)
                 result->uri = *maybeUri;
         }
@@ -411,28 +376,26 @@ public:
         std::uint32_t const limit,
         bool const forward,
         std::optional<TransactionsCursor> const& cursorIn,
-        boost::asio::yield_context yield) const override
+        boost::asio::yield_context yield
+    ) const override
     {
         auto rng = fetchLedgerRange();
         if (!rng)
             return {{}, {}};
 
-        Statement statement = [this, forward, &tokenID]() {
+        Statement const statement = [this, forward, &tokenID]() {
             if (forward)
                 return schema_->selectNFTTxForward.bind(tokenID);
-            else
-                return schema_->selectNFTTx.bind(tokenID);
+
+            return schema_->selectNFTTx.bind(tokenID);
         }();
 
         auto cursor = cursorIn;
-        if (cursor)
-        {
+        if (cursor) {
             statement.bindAt(1, cursor->asTuple());
             LOG(log_.debug()) << "token_id = " << ripple::strHex(tokenID) << " tuple = " << cursor->ledgerSequence
                               << cursor->transactionIndex;
-        }
-        else
-        {
+        } else {
             auto const seq = forward ? rng->minSequence : rng->maxSequence;
             auto const placeHolder = forward ? 0 : std::numeric_limits<std::uint32_t>::max();
 
@@ -445,8 +408,7 @@ public:
 
         auto const res = executor_.read(yield, statement);
         auto const& results = res.value();
-        if (not results.hasRows())
-        {
+        if (not results.hasRows()) {
             LOG(log_.debug()) << "No rows returned";
             return {};
         }
@@ -455,11 +417,9 @@ public:
         auto numRows = results.numRows();
         LOG(log_.info()) << "num_rows = " << numRows;
 
-        for (auto [hash, data] : extract<ripple::uint256, std::tuple<uint32_t, uint32_t>>(results))
-        {
+        for (auto [hash, data] : extract<ripple::uint256, std::tuple<uint32_t, uint32_t>>(results)) {
             hashes.push_back(hash);
-            if (--numRows == 0)
-            {
+            if (--numRows == 0) {
                 LOG(log_.debug()) << "Setting cursor";
                 cursor = data;
 
@@ -473,8 +433,7 @@ public:
         auto const txns = fetchTransactions(hashes, yield);
         LOG(log_.debug()) << "NFT Txns = " << txns.size();
 
-        if (txns.size() == limit)
-        {
+        if (txns.size() == limit) {
             LOG(log_.debug()) << "Returning cursor";
             return {txns, cursor};
         }
@@ -482,25 +441,109 @@ public:
         return {txns, {}};
     }
 
+    NFTsAndCursor
+    fetchNFTsByIssuer(
+        ripple::AccountID const& issuer,
+        std::optional<std::uint32_t> const& taxon,
+        std::uint32_t const ledgerSequence,
+        std::uint32_t const limit,
+        std::optional<ripple::uint256> const& cursorIn,
+        boost::asio::yield_context yield
+    ) const override
+    {
+        NFTsAndCursor ret;
+
+        Statement const idQueryStatement = [&taxon, &issuer, &cursorIn, &limit, this]() {
+            if (taxon.has_value()) {
+                auto r = schema_->selectNFTIDsByIssuerTaxon.bind(issuer);
+                r.bindAt(1, *taxon);
+                r.bindAt(2, cursorIn.value_or(ripple::uint256(0)));
+                r.bindAt(3, Limit{limit});
+                return r;
+            }
+
+            auto r = schema_->selectNFTIDsByIssuer.bind(issuer);
+            r.bindAt(
+                1,
+                std::make_tuple(
+                    cursorIn.has_value() ? ripple::nft::toUInt32(ripple::nft::getTaxon(*cursorIn)) : 0,
+                    cursorIn.value_or(ripple::uint256(0))
+                )
+            );
+            r.bindAt(2, Limit{limit});
+            return r;
+        }();
+
+        // Query for all the NFTs issued by the account, potentially filtered by the taxon
+        auto const res = executor_.read(yield, idQueryStatement);
+
+        auto const& idQueryResults = res.value();
+        if (not idQueryResults.hasRows()) {
+            LOG(log_.debug()) << "No rows returned";
+            return {};
+        }
+
+        std::vector<ripple::uint256> nftIDs;
+        for (auto const [nftID] : extract<ripple::uint256>(idQueryResults))
+            nftIDs.push_back(nftID);
+
+        if (nftIDs.empty())
+            return ret;
+
+        if (nftIDs.size() == limit)
+            ret.cursor = nftIDs.back();
+
+        auto const nftQueryStatement = schema_->selectNFTBulk.bind(nftIDs);
+        nftQueryStatement.bindAt(1, ledgerSequence);
+
+        // Fetch all the NFT data, meanwhile filtering out the NFTs that are not within the ledger range
+        auto const nftRes = executor_.read(yield, nftQueryStatement);
+        auto const& nftQueryResults = nftRes.value();
+
+        if (not nftQueryResults.hasRows()) {
+            LOG(log_.debug()) << "No rows returned";
+            return {};
+        }
+
+        auto const nftURIQueryStatement = schema_->selectNFTURIBulk.bind(nftIDs);
+        nftURIQueryStatement.bindAt(1, ledgerSequence);
+
+        // Get the URI for each NFT, but it's possible that URI doesn't exist
+        auto const uriRes = executor_.read(yield, nftURIQueryStatement);
+        auto const& nftURIQueryResults = uriRes.value();
+
+        std::unordered_map<std::string, Blob> nftURIMap;
+        for (auto const [nftID, uri] : extract<ripple::uint256, Blob>(nftURIQueryResults))
+            nftURIMap.insert({ripple::strHex(nftID), uri});
+
+        for (auto const [nftID, seq, owner, isBurned] :
+             extract<ripple::uint256, std::uint32_t, ripple::AccountID, bool>(nftQueryResults)) {
+            NFT nft;
+            nft.tokenID = nftID;
+            nft.ledgerSequence = seq;
+            nft.owner = owner;
+            nft.isBurned = isBurned;
+            if (nftURIMap.contains(ripple::strHex(nft.tokenID)))
+                nft.uri = nftURIMap.at(ripple::strHex(nft.tokenID));
+            ret.nfts.push_back(nft);
+        }
+
+        return ret;
+    }
+
     std::optional<Blob>
     doFetchLedgerObject(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context yield)
         const override
     {
         LOG(log_.debug()) << "Fetching ledger object for seq " << sequence << ", key = " << ripple::to_string(key);
-        if (auto const res = executor_.read(yield, schema_->selectObject, key, sequence); res)
-        {
-            if (auto const result = res->template get<Blob>(); result)
-            {
+        if (auto const res = executor_.read(yield, schema_->selectObject, key, sequence); res) {
+            if (auto const result = res->template get<Blob>(); result) {
                 if (result->size())
                     return *result;
-            }
-            else
-            {
+            } else {
                 LOG(log_.debug()) << "Could not fetch ledger object - no rows";
             }
-        }
-        else
-        {
+        } else {
             LOG(log_.error()) << "Could not fetch ledger object: " << res.error();
         }
 
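fetchNFTsByIssuer above is a two-phase query: first fetch up to `limit` NFT IDs (optionally taxon-filtered), then bulk-fetch the NFT rows and URIs for exactly those IDs; the cursor is simply the last ID of a full page. A consumer-side paging sketch, assuming `backend`, `issuer`, `ledgerSeq` and `yield` are in scope and that NFTsAndCursor exposes `nfts` and an optional `cursor` as used above:

// Collect every NFT for an issuer, page by page.
std::optional<ripple::uint256> cursor;
std::vector<NFT> all;
do {
    auto const page = backend.fetchNFTsByIssuer(issuer, std::nullopt, ledgerSeq, 100u, cursor, yield);
    all.insert(all.end(), page.nfts.begin(), page.nfts.end());
    cursor = page.cursor;  // set only when a full page came back
} while (cursor);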
@@ -510,20 +553,14 @@ public:
     std::optional<TransactionAndMetadata>
     fetchTransaction(ripple::uint256 const& hash, boost::asio::yield_context yield) const override
     {
-        if (auto const res = executor_.read(yield, schema_->selectTransaction, hash); res)
-        {
-            if (auto const maybeValue = res->template get<Blob, Blob, uint32_t, uint32_t>(); maybeValue)
-            {
+        if (auto const res = executor_.read(yield, schema_->selectTransaction, hash); res) {
+            if (auto const maybeValue = res->template get<Blob, Blob, uint32_t, uint32_t>(); maybeValue) {
                 auto [transaction, meta, seq, date] = *maybeValue;
                 return std::make_optional<TransactionAndMetadata>(transaction, meta, seq, date);
             }
-            else
-            {
-                LOG(log_.debug()) << "Could not fetch transaction - no rows";
-            }
-        }
-        else
-        {
+
+            LOG(log_.debug()) << "Could not fetch transaction - no rows";
+        } else {
             LOG(log_.error()) << "Could not fetch transaction: " << res.error();
         }
 
@@ -534,21 +571,15 @@ public:
     doFetchSuccessorKey(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
         const override
     {
-        if (auto const res = executor_.read(yield, schema_->selectSuccessor, key, ledgerSequence); res)
-        {
-            if (auto const result = res->template get<ripple::uint256>(); result)
-            {
+        if (auto const res = executor_.read(yield, schema_->selectSuccessor, key, ledgerSequence); res) {
+            if (auto const result = res->template get<ripple::uint256>(); result) {
                 if (*result == lastKey)
                     return std::nullopt;
                 return *result;
             }
-            else
-            {
-                LOG(log_.debug()) << "Could not fetch successor - no rows";
-            }
-        }
-        else
-        {
+
+            LOG(log_.debug()) << "Could not fetch successor - no rows";
+        } else {
             LOG(log_.error()) << "Could not fetch successor: " << res.error();
         }
 
@@ -558,7 +589,7 @@ public:
     std::vector<TransactionAndMetadata>
     fetchTransactions(std::vector<ripple::uint256> const& hashes, boost::asio::yield_context yield) const override
     {
-        if (hashes.size() == 0)
+        if (hashes.empty())
             return {};
 
         auto const numHashes = hashes.size();
@@ -571,9 +602,11 @@ public:
         auto const timeDiff = util::timed([this, yield, &results, &hashes, &statements]() {
             // TODO: seems like a job for "hash IN (list of hashes)" instead?
             std::transform(
-                std::cbegin(hashes), std::cend(hashes), std::back_inserter(statements), [this](auto const& hash) {
-                    return schema_->selectTransaction.bind(hash);
-                });
+                std::cbegin(hashes),
+                std::cend(hashes),
+                std::back_inserter(statements),
+                [this](auto const& hash) { return schema_->selectTransaction.bind(hash); }
+            );
 
             auto const entries = executor_.readEach(yield, statements);
             std::transform(
@@ -583,9 +616,10 @@ public:
                 [](auto const& res) -> TransactionAndMetadata {
                     if (auto const maybeRow = res.template get<Blob, Blob, uint32_t, uint32_t>(); maybeRow)
                         return *maybeRow;
-                    else
-                        return {};
-                });
+
+                    return {};
+                }
+            );
         });
 
         assert(numHashes == results.size());
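fetchTransactions and doFetchLedgerObjects share a fan-out shape: bind one prepared statement per key with std::transform, submit the whole batch via executor_.readEach, and map the rows back. A generic sketch of that shape follows; Executor, Schema, Statement, Key and Row are template stand-ins for the concrete types in this file, not real clio APIs.

#include <algorithm>
#include <iterator>
#include <vector>

template <typename Statement, typename Row, typename Executor, typename Schema, typename Key>
std::vector<Row>
readMany(Executor& executor, Schema& schema, std::vector<Key> const& keys, boost::asio::yield_context yield)
{
    // Phase 1: one bound statement per key.
    std::vector<Statement> statements;
    statements.reserve(keys.size());
    std::transform(std::cbegin(keys), std::cend(keys), std::back_inserter(statements), [&](auto const& key) {
        return schema->selectObject.bind(key);
    });

    // Phase 2: submit the whole batch at once, then unpack row by row.
    std::vector<Row> out;
    for (auto const& res : executor.readEach(yield, statements)) {
        if (auto const maybeValue = res.template get<Row>(); maybeValue)
            out.push_back(*maybeValue);
    }
    return out;
}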
@@ -598,9 +632,10 @@ public:
     doFetchLedgerObjects(
         std::vector<ripple::uint256> const& keys,
         std::uint32_t const sequence,
-        boost::asio::yield_context yield) const override
+        boost::asio::yield_context yield
+    ) const override
     {
-        if (keys.size() == 0)
+        if (keys.empty())
             return {};
 
         auto const numKeys = keys.size();
@@ -614,18 +649,24 @@ public:
 
         // TODO: seems like a job for "key IN (list of keys)" instead?
         std::transform(
-            std::cbegin(keys), std::cend(keys), std::back_inserter(statements), [this, &sequence](auto const& key) {
-                return schema_->selectObject.bind(key, sequence);
-            });
+            std::cbegin(keys),
+            std::cend(keys),
+            std::back_inserter(statements),
+            [this, &sequence](auto const& key) { return schema_->selectObject.bind(key, sequence); }
+        );
 
         auto const entries = executor_.readEach(yield, statements);
         std::transform(
-            std::cbegin(entries), std::cend(entries), std::back_inserter(results), [](auto const& res) -> Blob {
+            std::cbegin(entries),
+            std::cend(entries),
+            std::back_inserter(results),
+            [](auto const& res) -> Blob {
                 if (auto const maybeValue = res.template get<Blob>(); maybeValue)
                     return *maybeValue;
-                else
-                    return {};
-            });
+
+                return {};
+            }
+        );
 
         LOG(log_.trace()) << "Fetched " << numKeys << " objects";
         return results;
@@ -636,24 +677,22 @@ public:
     {
         auto const [keys, timeDiff] = util::timed([this, &ledgerSequence, yield]() -> std::vector<ripple::uint256> {
            auto const res = executor_.read(yield, schema_->selectDiff, ledgerSequence);
-            if (not res)
-            {
+            if (not res) {
                LOG(log_.error()) << "Could not fetch ledger diff: " << res.error() << "; ledger = " << ledgerSequence;
                return {};
            }
 
            auto const& results = res.value();
-            if (not results)
-            {
+            if (not results) {
                LOG(log_.error()) << "Could not fetch ledger diff - no rows; ledger = " << ledgerSequence;
                return {};
            }
 
-            std::vector<ripple::uint256> keys;
+            std::vector<ripple::uint256> resultKeys;
            for (auto [key] : extract<ripple::uint256>(results))
-                keys.push_back(key);
+                resultKeys.push_back(key);
 
-            return keys;
+            return resultKeys;
        });
 
        // one of the above errors must have happened
@@ -674,7 +713,8 @@ public:
             std::back_inserter(results),
             [](auto const& key, auto const& obj) {
                 return LedgerObject{key, obj};
-            });
+            }
+        );
 
         return results;
     }
@@ -695,8 +735,8 @@ public:
|
||||
{
|
||||
LOG(log_.trace()) << "Writing successor. key = " << key.size() << " bytes. "
|
||||
<< " seq = " << std::to_string(seq) << " successor = " << successor.size() << " bytes.";
|
||||
assert(key.size() != 0);
|
||||
assert(successor.size() != 0);
|
||||
assert(!key.empty());
|
||||
assert(!successor.empty());
|
||||
|
||||
executor_.write(schema_->insertSuccessor, std::move(key), seq, std::move(successor));
|
||||
}
|
||||
@@ -707,18 +747,19 @@ public:
|
||||
std::vector<Statement> statements;
|
||||
statements.reserve(data.size() * 10); // assume 10 transactions avg
|
||||
|
||||
for (auto& record : data)
|
||||
{
|
||||
for (auto& record : data) {
|
||||
std::transform(
|
||||
std::begin(record.accounts),
|
||||
std::end(record.accounts),
|
||||
std::back_inserter(statements),
|
||||
[this, &record](auto&& account) {
|
||||
return schema_->insertAccountTx.bind(
|
||||
std::move(account),
|
||||
std::forward<decltype(account)>(account),
|
||||
std::make_tuple(record.ledgerSequence, record.transactionIndex),
|
||||
record.txHash);
|
||||
});
|
||||
record.txHash
|
||||
);
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
executor_.write(std::move(statements));
|
||||
@@ -732,7 +773,8 @@ public:
|
||||
|
||||
std::transform(std::cbegin(data), std::cend(data), std::back_inserter(statements), [this](auto const& record) {
|
||||
return schema_->insertNFTTx.bind(
|
||||
record.tokenID, std::make_tuple(record.ledgerSequence, record.transactionIndex), record.txHash);
|
||||
record.tokenID, std::make_tuple(record.ledgerSequence, record.transactionIndex), record.txHash
|
||||
);
|
||||
});
|
||||
|
||||
executor_.write(std::move(statements));
|
||||
@@ -744,13 +786,15 @@ public:
|
||||
std::uint32_t const seq,
|
||||
std::uint32_t const date,
|
||||
std::string&& transaction,
|
||||
std::string&& metadata) override
|
||||
std::string&& metadata
|
||||
) override
|
||||
{
|
||||
LOG(log_.trace()) << "Writing txn to cassandra";
|
||||
|
||||
executor_.write(schema_->insertLedgerTransaction, seq, hash);
|
||||
executor_.write(
|
||||
schema_->insertTransaction, std::move(hash), seq, date, std::move(transaction), std::move(metadata));
|
||||
schema_->insertTransaction, std::move(hash), seq, date, std::move(transaction), std::move(metadata)
|
||||
);
|
||||
}
|
||||
|
||||
void
|
||||
@@ -759,24 +803,25 @@ public:
|
||||
std::vector<Statement> statements;
|
||||
statements.reserve(data.size() * 3);
|
||||
|
||||
for (NFTsData const& record : data)
|
||||
{
|
||||
for (NFTsData const& record : data) {
|
||||
statements.push_back(
|
||||
schema_->insertNFT.bind(record.tokenID, record.ledgerSequence, record.owner, record.isBurned));
|
||||
schema_->insertNFT.bind(record.tokenID, record.ledgerSequence, record.owner, record.isBurned)
|
||||
);
|
||||
|
||||
// If `uri` is set (and it can be set to an empty uri), we know this
|
||||
// is a net-new NFT. That is, this NFT has not been seen before by
|
||||
// us _OR_ it is in the extreme edge case of a re-minted NFT ID with
|
||||
// the same NFT ID as an already-burned token. In this case, we need
|
||||
// to record the URI and link to the issuer_nf_tokens table.
|
||||
if (record.uri)
|
||||
{
|
||||
if (record.uri) {
|
||||
statements.push_back(schema_->insertIssuerNFT.bind(
|
||||
ripple::nft::getIssuer(record.tokenID),
|
||||
static_cast<uint32_t>(ripple::nft::getTaxon(record.tokenID)),
|
||||
record.tokenID));
|
||||
record.tokenID
|
||||
));
|
||||
statements.push_back(
|
||||
schema_->insertNFTURI.bind(record.tokenID, record.ledgerSequence, record.uri.value()));
|
||||
schema_->insertNFTURI.bind(record.tokenID, record.ledgerSequence, record.uri.value())
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -796,20 +841,24 @@ public:
|
||||
return executor_.isTooBusy();
|
||||
}
|
||||
|
||||
boost::json::object
|
||||
stats() const override
|
||||
{
|
||||
return executor_.stats();
|
||||
}
|
||||
|
||||
private:
|
||||
bool
|
||||
executeSyncUpdate(Statement statement)
|
||||
{
|
||||
auto const res = executor_.writeSync(statement);
|
||||
auto maybeSuccess = res->template get<bool>();
|
||||
if (not maybeSuccess)
|
||||
{
|
||||
if (not maybeSuccess) {
|
||||
LOG(log_.error()) << "executeSyncUpdate - error getting result - no row";
|
||||
return false;
|
||||
}
|
||||
|
||||
if (not maybeSuccess.value())
|
||||
{
|
||||
if (not maybeSuccess.value()) {
|
||||
LOG(log_.warn()) << "Update failed. Checking if DB state is what we expect";
|
||||
|
||||
// error may indicate that another writer wrote something.
|
||||
|
||||
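A note on the pattern above: each hash or key is bound into its own prepared statement and executor_.readEach issues them all concurrently, suspending the coroutine until every result arrives. A minimal sketch of the same fan-out, assuming a `schema` and `executor` shaped like the ones in this diff:

    // Sketch only: `schema->selectTransaction` and `executor.readEach`
    // are assumed to behave as in the surrounding diff.
    std::vector<Statement> statements;
    statements.reserve(hashes.size());
    std::transform(
        std::cbegin(hashes),
        std::cend(hashes),
        std::back_inserter(statements),
        [&](auto const& hash) { return schema->selectTransaction.bind(hash); }
    );
    auto const entries = executor.readEach(yield, statements);  // one result per statement, in order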
@@ -33,14 +33,13 @@
/**
* @brief Struct used to keep track of what to write to account_transactions/account_tx tables.
*/
struct AccountTransactionsData
{
struct AccountTransactionsData {
boost::container::flat_set<ripple::AccountID> accounts;
std::uint32_t ledgerSequence;
std::uint32_t transactionIndex;
std::uint32_t ledgerSequence{};
std::uint32_t transactionIndex{};
ripple::uint256 txHash;

AccountTransactionsData(ripple::TxMeta& meta, ripple::uint256 const& txHash, beast::Journal& j)
AccountTransactionsData(ripple::TxMeta& meta, ripple::uint256 const& txHash)
: accounts(meta.getAffectedAccounts())
, ledgerSequence(meta.getLgrSeq())
, transactionIndex(meta.getIndex())
@@ -56,8 +55,7 @@ struct AccountTransactionsData
*
* Gets written to nf_token_transactions table and the like.
*/
struct NFTTransactionsData
{
struct NFTTransactionsData {
ripple::uint256 tokenID;
std::uint32_t ledgerSequence;
std::uint32_t transactionIndex;
@@ -74,8 +72,7 @@ struct NFTTransactionsData
*
* Gets written to nf_tokens table and the like.
*/
struct NFTsData
{
struct NFTsData {
ripple::uint256 tokenID;
std::uint32_t ledgerSequence;

@@ -107,7 +104,8 @@ struct NFTsData
ripple::uint256 const& tokenID,
ripple::AccountID const& owner,
ripple::Blob const& uri,
ripple::TxMeta const& meta)
ripple::TxMeta const& meta
)
: tokenID(tokenID), ledgerSequence(meta.getLgrSeq()), transactionIndex(meta.getIndex()), owner(owner), uri(uri)
{
}
@@ -133,7 +131,8 @@ struct NFTsData
ripple::uint256 const& tokenID,
std::uint32_t const ledgerSequence,
ripple::AccountID const& owner,
ripple::Blob const& uri)
ripple::Blob const& uri
)
: tokenID(tokenID), ledgerSequence(ledgerSequence), owner(owner), uri(uri)
{
}
@@ -149,8 +148,11 @@ template <class T>
inline bool
isOffer(T const& object)
{
short offer_bytes = (object[1] << 8) | object[2];
return offer_bytes == 0x006f;
static constexpr short OFFER_OFFSET = 0x006f;
static constexpr short SHIFT = 8;

short offer_bytes = (object[1] << SHIFT) | object[2];
return offer_bytes == OFFER_OFFSET;
}

/**
@@ -179,8 +181,9 @@ template <class T>
inline bool
isDirNode(T const& object)
{
short spaceKey = (object.data()[1] << 8) | object.data()[2];
return spaceKey == 0x0064;
static constexpr short DIR_NODE_SPACE_KEY = 0x0064;
short const spaceKey = (object.data()[1] << 8) | object.data()[2];
return spaceKey == DIR_NODE_SPACE_KEY;
}

/**
@@ -212,7 +215,7 @@ inline ripple::uint256
getBook(T const& offer)
{
ripple::SerialIter it{offer.data(), offer.size()};
ripple::SLE sle{it, {}};
ripple::SLE const sle{it, {}};
ripple::uint256 book = sle.getFieldH256(ripple::sfBookDirectory);

return book;
@@ -228,10 +231,12 @@ template <class T>
inline ripple::uint256
getBookBase(T const& key)
{
static constexpr size_t KEY_SIZE = 24;

assert(key.size() == ripple::uint256::size());

ripple::uint256 ret;
for (size_t i = 0; i < 24; ++i)
for (size_t i = 0; i < KEY_SIZE; ++i)
ret.data()[i] = key.data()[i];

return ret;
@@ -246,7 +251,7 @@ getBookBase(T const& key)
inline std::string
uint256ToString(ripple::uint256 const& input)
{
return {reinterpret_cast<const char*>(input.data()), input.size()};
return {reinterpret_cast<char const*>(input.data()), ripple::uint256::size()};
}

/** @brief The ripple epoch start timestamp. Midnight on 1st January 2000. */
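The space-key checks above read bytes 1 and 2 of the serialized object: 0x006f is the ledger entry type of an Offer (ASCII 'o') and 0x0064 that of a DirectoryNode ('d'). A quick illustration with a hand-built blob, assuming (as these helpers do) that only the two type bytes matter:

    std::vector<unsigned char> blob(3, 0);
    blob[2] = 0x6f;         // (blob[1] << 8) | blob[2] == 0x006f
    assert(isOffer(blob));  // matches OFFER_OFFSET
    blob[2] = 0x64;         // 0x0064 is the DirectoryNode space key
    assert(isDirNode(blob));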
@@ -24,7 +24,7 @@ namespace data {
uint32_t
LedgerCache::latestLedgerSequence() const
{
std::shared_lock lck{mtx_};
std::shared_lock const lck{mtx_};
return latestSeq_;
}

@@ -35,27 +35,21 @@ LedgerCache::update(std::vector<LedgerObject> const& objs, uint32_t seq, bool is
return;

{
std::scoped_lock lck{mtx_};
if (seq > latestSeq_)
{
std::scoped_lock const lck{mtx_};
if (seq > latestSeq_) {
assert(seq == latestSeq_ + 1 || latestSeq_ == 0);
latestSeq_ = seq;
}
for (auto const& obj : objs)
{
if (obj.blob.size())
{
if (isBackground && deletes_.count(obj.key))
for (auto const& obj : objs) {
if (!obj.blob.empty()) {
if (isBackground && deletes_.contains(obj.key))
continue;

auto& e = map_[obj.key];
if (seq > e.seq)
{
if (seq > e.seq) {
e = {seq, obj.blob};
}
}
else
{
} else {
map_.erase(obj.key);
if (!full_ && !isBackground)
deletes_.insert(obj.key);
@@ -69,14 +63,14 @@ LedgerCache::getSuccessor(ripple::uint256 const& key, uint32_t seq) const
{
if (!full_)
return {};
std::shared_lock{mtx_};
successorReqCounter_++;
std::shared_lock const lck{mtx_};
++successorReqCounter_.get();
if (seq != latestSeq_)
return {};
auto e = map_.upper_bound(key);
if (e == map_.end())
return {};
successorHitCounter_++;
++successorHitCounter_.get();
return {{e->first, e->second.blob}};
}

@@ -85,7 +79,7 @@ LedgerCache::getPredecessor(ripple::uint256 const& key, uint32_t seq) const
{
if (!full_)
return {};
std::shared_lock lck{mtx_};
std::shared_lock const lck{mtx_};
if (seq != latestSeq_)
return {};
auto e = map_.lower_bound(key);
@@ -98,16 +92,16 @@ LedgerCache::getPredecessor(ripple::uint256 const& key, uint32_t seq) const
std::optional<Blob>
LedgerCache::get(ripple::uint256 const& key, uint32_t seq) const
{
std::shared_lock lck{mtx_};
std::shared_lock const lck{mtx_};
if (seq > latestSeq_)
return {};
objectReqCounter_++;
++objectReqCounter_.get();
auto e = map_.find(key);
if (e == map_.end())
return {};
if (seq < e->second.seq)
return {};
objectHitCounter_++;
++objectHitCounter_.get();
return {e->second.blob};
}

@@ -124,7 +118,7 @@ LedgerCache::setFull()
return;

full_ = true;
std::scoped_lock lck{mtx_};
std::scoped_lock const lck{mtx_};
deletes_.clear();
}

@@ -137,24 +131,24 @@ LedgerCache::isFull() const
size_t
LedgerCache::size() const
{
std::shared_lock lck{mtx_};
std::shared_lock const lck{mtx_};
return map_.size();
}

float
LedgerCache::getObjectHitRate() const
{
if (!objectReqCounter_)
if (objectReqCounter_.get().value() == 0u)
return 1;
return ((float)objectHitCounter_) / objectReqCounter_;
return static_cast<float>(objectHitCounter_.get().value()) / objectReqCounter_.get().value();
}

float
LedgerCache::getSuccessorHitRate() const
{
if (!successorReqCounter_)
if (successorReqCounter_.get().value() == 0u)
return 1;
return ((float)successorHitCounter_) / successorReqCounter_;
return static_cast<float>(successorHitCounter_.get().value()) / successorReqCounter_.get().value();
}

} // namespace data
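The hit-rate getters divide hits by requests and report 1.0 when nothing has been requested yet, which both avoids a divide-by-zero and reads as "no misses so far". The same guard in isolation, with hypothetical counts:

    float hitRate(uint64_t hits, uint64_t requests)
    {
        if (requests == 0u)
            return 1.0f;  // no requests yet: report a perfect rate instead of dividing by zero
        return static_cast<float>(hits) / requests;
    }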
@@ -25,6 +25,7 @@
#include <map>
#include <mutex>
#include <shared_mutex>
#include <util/prometheus/Prometheus.h>
#include <utility>
#include <vector>

@@ -33,21 +34,33 @@ namespace data {
/**
* @brief Cache for an entire ledger.
*/
class LedgerCache
{
struct CacheEntry
{
class LedgerCache {
struct CacheEntry {
uint32_t seq = 0;
Blob blob;
};

// counters for fetchLedgerObject(s) hit rate
mutable std::atomic_uint32_t objectReqCounter_ = 0;
mutable std::atomic_uint32_t objectHitCounter_ = 0;
std::reference_wrapper<util::prometheus::CounterInt> objectReqCounter_{PrometheusService::counterInt(
"ledger_cache_counter_total_number",
util::prometheus::Labels({{"type", "request"}, {"fetch", "ledger_objects"}}),
"LedgerCache statistics"
)};
std::reference_wrapper<util::prometheus::CounterInt> objectHitCounter_{PrometheusService::counterInt(
"ledger_cache_counter_total_number",
util::prometheus::Labels({{"type", "cache_hit"}, {"fetch", "ledger_objects"}})
)};

// counters for fetchSuccessorKey hit rate
mutable std::atomic_uint32_t successorReqCounter_ = 0;
mutable std::atomic_uint32_t successorHitCounter_ = 0;
std::reference_wrapper<util::prometheus::CounterInt> successorReqCounter_{PrometheusService::counterInt(
"ledger_cache_counter_total_number",
util::prometheus::Labels({{"type", "request"}, {"fetch", "successor_key"}}),
"ledgerCache"
)};
std::reference_wrapper<util::prometheus::CounterInt> successorHitCounter_{PrometheusService::counterInt(
"ledger_cache_counter_total_number",
util::prometheus::Labels({{"type", "cache_hit"}, {"fetch", "successor_key"}})
)};

std::map<ripple::uint256, CacheEntry> map_;

@@ -68,7 +81,7 @@ public:
* @param isBackground Should be set to true when writing old data from a background thread
*/
void
update(std::vector<LedgerObject> const& blobs, uint32_t seq, bool isBackground = false);
update(std::vector<LedgerObject> const& objs, uint32_t seq, bool isBackground = false);

/**
* @brief Fetch a cached object by its key and sequence number.
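The atomics above are replaced by references to Prometheus counters that share one metric name and are distinguished only by labels; `.get()` unwraps the `std::reference_wrapper` before incrementing. A minimal usage sketch, assuming `PrometheusService::counterInt` registers and returns a counter as shown in this diff:

    auto& counter = PrometheusService::counterInt(
        "ledger_cache_counter_total_number",
        util::prometheus::Labels({{"type", "request"}, {"fetch", "ledger_objects"}})
    );
    // sketch: exported as ledger_cache_counter_total_number{type="request",fetch="ledger_objects"}
    ++counter;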
@@ -24,6 +24,7 @@

#include <optional>
#include <string>
#include <utility>
#include <vector>

namespace data {
@@ -33,13 +34,12 @@ using Blob = std::vector<unsigned char>;
/**
* @brief Represents an object in the ledger.
*/
struct LedgerObject
{
struct LedgerObject {
ripple::uint256 key;
Blob blob;

bool
operator==(const LedgerObject& other) const
operator==(LedgerObject const& other) const
{
return key == other.key && blob == other.blob;
}
@@ -48,8 +48,7 @@ struct LedgerObject
/**
* @brief Represents a page of LedgerObjects.
*/
struct LedgerPage
{
struct LedgerPage {
std::vector<LedgerObject> objects;
std::optional<ripple::uint256> cursor;
};
@@ -57,8 +56,7 @@ struct LedgerPage
/**
* @brief Represents a page of book offer objects.
*/
struct BookOffersPage
{
struct BookOffersPage {
std::vector<LedgerObject> offers;
std::optional<ripple::uint256> cursor;
};
@@ -66,20 +64,15 @@ struct BookOffersPage
/**
* @brief Represents a transaction and its metadata bundled together.
*/
struct TransactionAndMetadata
{
struct TransactionAndMetadata {
Blob transaction;
Blob metadata;
std::uint32_t ledgerSequence = 0;
std::uint32_t date = 0;

TransactionAndMetadata() = default;
TransactionAndMetadata(
Blob const& transaction,
Blob const& metadata,
std::uint32_t ledgerSequence,
std::uint32_t date)
: transaction{transaction}, metadata{metadata}, ledgerSequence{ledgerSequence}, date{date}
TransactionAndMetadata(Blob transaction, Blob metadata, std::uint32_t ledgerSequence, std::uint32_t date)
: transaction{std::move(transaction)}, metadata{std::move(metadata)}, ledgerSequence{ledgerSequence}, date{date}
{
}

@@ -92,7 +85,7 @@ struct TransactionAndMetadata
}

bool
operator==(const TransactionAndMetadata& other) const
operator==(TransactionAndMetadata const& other) const
{
return transaction == other.transaction && metadata == other.metadata &&
ledgerSequence == other.ledgerSequence && date == other.date;
@@ -102,8 +95,7 @@ struct TransactionAndMetadata
/**
* @brief Represents a cursor into the transactions table.
*/
struct TransactionsCursor
{
struct TransactionsCursor {
std::uint32_t ledgerSequence = 0;
std::uint32_t transactionIndex = 0;

@@ -118,9 +110,6 @@ struct TransactionsCursor
{
}

TransactionsCursor&
operator=(TransactionsCursor const&) = default;

bool
operator==(TransactionsCursor const& other) const = default;

@@ -134,8 +123,7 @@ struct TransactionsCursor
/**
* @brief Represents a bundle of transactions with metadata and a cursor to the next page.
*/
struct TransactionsAndCursor
{
struct TransactionsAndCursor {
std::vector<TransactionAndMetadata> txns;
std::optional<TransactionsCursor> cursor;
};
@@ -143,21 +131,20 @@ struct TransactionsAndCursor
/**
* @brief Represents a NFToken.
*/
struct NFT
{
struct NFT {
ripple::uint256 tokenID;
std::uint32_t ledgerSequence;
std::uint32_t ledgerSequence{};
ripple::AccountID owner;
Blob uri;
bool isBurned;
bool isBurned{};

NFT() = default;
NFT(ripple::uint256 const& tokenID,
std::uint32_t ledgerSequence,
ripple::AccountID const& owner,
Blob const& uri,
Blob uri,
bool isBurned)
: tokenID{tokenID}, ledgerSequence{ledgerSequence}, owner{owner}, uri{uri}, isBurned{isBurned}
: tokenID{tokenID}, ledgerSequence{ledgerSequence}, owner{owner}, uri{std::move(uri)}, isBurned{isBurned}
{
}

@@ -175,11 +162,15 @@ struct NFT
}
};

struct NFTsAndCursor {
std::vector<NFT> nfts;
std::optional<ripple::uint256> cursor;
};

/**
* @brief Stores a range of sequences as a min and max pair.
*/
struct LedgerRange
{
struct LedgerRange {
std::uint32_t minSequence = 0;
std::uint32_t maxSequence = 0;
};
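The `{}` initializers added to `ledgerSequence` and `isBurned` matter for the defaulted constructor: without them a default-constructed `NFT` would carry indeterminate values for those members. For instance:

    NFT nft;                          // uses NFT() = default;
    assert(nft.ledgerSequence == 0);  // guaranteed by ledgerSequence{}
    assert(nft.isBurned == false);    // guaranteed by isBurned{}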
@@ -22,6 +22,7 @@
#include <data/cassandra/Types.h>

#include <boost/asio/spawn.hpp>
#include <boost/json.hpp>

#include <chrono>
#include <concepts>
@@ -33,56 +34,93 @@ namespace data::cassandra {
/**
* @brief The requirements of a settings provider.
*/
// clang-format off
template <typename T>
concept SomeSettingsProvider = requires(T a) {
{ a.getSettings() } -> std::same_as<Settings>;
{ a.getKeyspace() } -> std::same_as<std::string>;
{ a.getTablePrefix() } -> std::same_as<std::optional<std::string>>;
{ a.getReplicationFactor() } -> std::same_as<uint16_t>;
{ a.getTtl() } -> std::same_as<uint16_t>;
{
a.getSettings()
} -> std::same_as<Settings>;
{
a.getKeyspace()
} -> std::same_as<std::string>;
{
a.getTablePrefix()
} -> std::same_as<std::optional<std::string>>;
{
a.getReplicationFactor()
} -> std::same_as<uint16_t>;
{
a.getTtl()
} -> std::same_as<uint16_t>;
};
// clang-format on

/**
* @brief The requirements of an execution strategy.
*/
// clang-format off
template <typename T>
concept SomeExecutionStrategy = requires(
T a,
Settings settings,
Handle handle,
Statement statement,
T a,
Settings settings,
Handle handle,
Statement statement,
std::vector<Statement> statements,
PreparedStatement prepared,
boost::asio::yield_context token
) {
{ T(settings, handle) };
{ a.sync() } -> std::same_as<void>;
{ a.isTooBusy() } -> std::same_as<bool>;
{ a.writeSync(statement) } -> std::same_as<ResultOrError>;
{ a.writeSync(prepared) } -> std::same_as<ResultOrError>;
{ a.write(prepared) } -> std::same_as<void>;
{ a.write(std::move(statements)) } -> std::same_as<void>;
{ a.read(token, prepared) } -> std::same_as<ResultOrError>;
{ a.read(token, statement) } -> std::same_as<ResultOrError>;
{ a.read(token, statements) } -> std::same_as<ResultOrError>;
{ a.readEach(token, statements) } -> std::same_as<std::vector<Result>>;
{
T(settings, handle)
};
{
a.sync()
} -> std::same_as<void>;
{
a.isTooBusy()
} -> std::same_as<bool>;
{
a.writeSync(statement)
} -> std::same_as<ResultOrError>;
{
a.writeSync(prepared)
} -> std::same_as<ResultOrError>;
{
a.write(prepared)
} -> std::same_as<void>;
{
a.write(std::move(statements))
} -> std::same_as<void>;
{
a.read(token, prepared)
} -> std::same_as<ResultOrError>;
{
a.read(token, statement)
} -> std::same_as<ResultOrError>;
{
a.read(token, statements)
} -> std::same_as<ResultOrError>;
{
a.readEach(token, statements)
} -> std::same_as<std::vector<Result>>;
{
a.stats()
} -> std::same_as<boost::json::object>;
};
// clang-format on

/**
* @brief The requirements of a retry policy.
*/
// clang-format off
template <typename T>
concept SomeRetryPolicy = requires(T a, boost::asio::io_context ioc, CassandraError err, uint32_t attempt) {
{ T(ioc) };
{ a.shouldRetry(err) } -> std::same_as<bool>;
{ a.retry([](){}) } -> std::same_as<void>;
{ a.calculateDelay(attempt) } -> std::same_as<std::chrono::milliseconds>;
{
T(ioc)
};
{
a.shouldRetry(err)
} -> std::same_as<bool>;
{
a.retry([]() {})
} -> std::same_as<void>;
{
a.calculateDelay(attempt)
} -> std::same_as<std::chrono::milliseconds>;
};
// clang-format on

} // namespace data::cassandra
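With the reformat, each requirement of these concepts gets its own braced expression. A minimal, hypothetical type that would satisfy `SomeSettingsProvider`, useful for example as a mock in tests:

    struct FakeSettingsProvider {
        Settings getSettings() const { return Settings::defaultSettings(); }
        std::string getKeyspace() const { return "clio"; }
        std::optional<std::string> getTablePrefix() const { return std::nullopt; }
        uint16_t getReplicationFactor() const { return 1; }
        uint16_t getTtl() const { return 0; }
    };
    static_assert(SomeSettingsProvider<FakeSettingsProvider>);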
@@ -22,33 +22,35 @@
#include <cassandra.h>

#include <string>
#include <utility>

namespace data::cassandra {

/**
* @brief A simple container for both error message and error code.
*/
class CassandraError
{
class CassandraError {
std::string message_;
uint32_t code_;
uint32_t code_{};

public:
CassandraError() = default; // default constructible required by Expected
CassandraError(std::string message, uint32_t code) : message_{message}, code_{code}
CassandraError(std::string message, uint32_t code) : message_{std::move(message)}, code_{code}
{
}

template <typename T>
friend std::string
operator+(T const& lhs, CassandraError const& rhs) requires std::is_convertible_v<T, std::string>
operator+(T const& lhs, CassandraError const& rhs)
requires std::is_convertible_v<T, std::string>
{
return lhs + rhs.message();
}

template <typename T>
friend bool
operator==(T const& lhs, CassandraError const& rhs) requires std::is_convertible_v<T, std::string>
operator==(T const& lhs, CassandraError const& rhs)
requires std::is_convertible_v<T, std::string>
{
return lhs == rhs.message();
}
@@ -91,11 +93,9 @@ public:
bool
isTimeout() const
{
if (code_ == CASS_ERROR_LIB_NO_HOSTS_AVAILABLE or code_ == CASS_ERROR_LIB_REQUEST_TIMED_OUT or
return code_ == CASS_ERROR_LIB_NO_HOSTS_AVAILABLE or code_ == CASS_ERROR_LIB_REQUEST_TIMED_OUT or
code_ == CASS_ERROR_SERVER_UNAVAILABLE or code_ == CASS_ERROR_SERVER_OVERLOADED or
code_ == CASS_ERROR_SERVER_READ_TIMEOUT)
return true;
return false;
code_ == CASS_ERROR_SERVER_READ_TIMEOUT;
}

/**
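The trailing `requires` clauses are moved onto their own line for readability. They constrain the friend operators to string-like left-hand sides, so error messages compose naturally:

    CassandraError const err{"timeout", CASS_ERROR_LIB_REQUEST_TIMED_OUT};
    std::string const log = "query failed: " + err;  // operator+(T const&, CassandraError const&)
    assert(std::string{"timeout"} == err);           // operator==(T const&, CassandraError const&)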
@@ -88,17 +88,17 @@ std::vector<Handle::FutureType>
Handle::asyncExecuteEach(std::vector<Statement> const& statements) const
{
std::vector<Handle::FutureType> futures;
futures.reserve(statements.size());
for (auto const& statement : statements)
futures.push_back(cass_session_execute(session_, statement));
futures.emplace_back(cass_session_execute(session_, statement));
return futures;
}

Handle::MaybeErrorType
Handle::executeEach(std::vector<Statement> const& statements) const
{
for (auto futures = asyncExecuteEach(statements); auto const& future : futures)
{
if (auto const rc = future.await(); not rc)
for (auto futures = asyncExecuteEach(statements); auto const& future : futures) {
if (auto rc = future.await(); not rc)
return rc;
}

@@ -145,11 +145,12 @@ Handle::asyncExecute(std::vector<Statement> const& statements, std::function<voi
Handle::PreparedStatementType
Handle::prepare(std::string_view query) const
{
Handle::FutureType future = cass_session_prepare(session_, query.data());
if (auto const rc = future.await(); rc)
Handle::FutureType const future = cass_session_prepare(session_, query.data());
auto const rc = future.await();
if (rc)
return cass_future_get_prepared(future);
else
throw std::runtime_error(rc.error().message());

throw std::runtime_error(rc.error().message());
}

} // namespace data::cassandra
@@ -42,8 +42,7 @@ namespace data::cassandra {
/**
* @brief Represents a handle to the cassandra database cluster
*/
class Handle
{
class Handle {
detail::Cluster cluster_;
detail::Session session_;

@@ -41,8 +41,7 @@ template <SomeSettingsProvider SettingsProviderType>
* @brief Manages the DB schema and provides access to prepared statements.
*/
template <SomeSettingsProvider SettingsProviderType>
class Schema
{
class Schema {
util::Logger log_{"Backend"};
std::reference_wrapper<SettingsProviderType const> settingsProvider_;

@@ -62,7 +61,8 @@ public:
AND durable_writes = true
)",
settingsProvider_.get().getKeyspace(),
settingsProvider_.get().getReplicationFactor());
settingsProvider_.get().getReplicationFactor()
);
}();

// =======================
@@ -85,7 +85,8 @@ public:
AND default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "objects"),
settingsProvider_.get().getTtl()));
settingsProvider_.get().getTtl()
));

statements.emplace_back(fmt::format(
R"(
@@ -100,7 +101,8 @@ public:
WITH default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "transactions"),
settingsProvider_.get().getTtl()));
settingsProvider_.get().getTtl()
));

statements.emplace_back(fmt::format(
R"(
@@ -113,7 +115,8 @@ public:
WITH default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "ledger_transactions"),
settingsProvider_.get().getTtl()));
settingsProvider_.get().getTtl()
));

statements.emplace_back(fmt::format(
R"(
@@ -127,7 +130,8 @@ public:
WITH default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "successor"),
settingsProvider_.get().getTtl()));
settingsProvider_.get().getTtl()
));

statements.emplace_back(fmt::format(
R"(
@@ -140,7 +144,8 @@ public:
WITH default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "diff"),
settingsProvider_.get().getTtl()));
settingsProvider_.get().getTtl()
));

statements.emplace_back(fmt::format(
R"(
@@ -155,7 +160,8 @@ public:
AND default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "account_tx"),
settingsProvider_.get().getTtl()));
settingsProvider_.get().getTtl()
));

statements.emplace_back(fmt::format(
R"(
@@ -167,7 +173,8 @@ public:
WITH default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "ledgers"),
settingsProvider_.get().getTtl()));
settingsProvider_.get().getTtl()
));

statements.emplace_back(fmt::format(
R"(
@@ -179,7 +186,8 @@ public:
WITH default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "ledger_hashes"),
settingsProvider_.get().getTtl()));
settingsProvider_.get().getTtl()
));

statements.emplace_back(fmt::format(
R"(
@@ -189,7 +197,8 @@ public:
sequence bigint
)
)",
qualifiedTableName(settingsProvider_.get(), "ledger_range")));
qualifiedTableName(settingsProvider_.get(), "ledger_range")
));

statements.emplace_back(fmt::format(
R"(
@@ -205,7 +214,8 @@ public:
AND default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "nf_tokens"),
settingsProvider_.get().getTtl()));
settingsProvider_.get().getTtl()
));

statements.emplace_back(fmt::format(
R"(
@@ -220,7 +230,8 @@ public:
AND default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2"),
settingsProvider_.get().getTtl()));
settingsProvider_.get().getTtl()
));

statements.emplace_back(fmt::format(
R"(
@@ -235,7 +246,8 @@ public:
AND default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "nf_token_uris"),
settingsProvider_.get().getTtl()));
settingsProvider_.get().getTtl()
));

statements.emplace_back(fmt::format(
R"(
@@ -250,7 +262,8 @@ public:
AND default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions"),
settingsProvider_.get().getTtl()));
settingsProvider_.get().getTtl()
));

return statements;
}();
@@ -258,8 +271,7 @@ public:
/**
* @brief Prepared statements holder.
*/
class Statements
{
class Statements {
std::reference_wrapper<SettingsProviderType const> settingsProvider_;
std::reference_wrapper<Handle const> handle_;

@@ -280,7 +292,8 @@ public:
(key, sequence, object)
VALUES (?, ?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "objects")));
qualifiedTableName(settingsProvider_.get(), "objects")
));
}();

PreparedStatement insertTransaction = [this]() {
@@ -290,7 +303,8 @@ public:
(hash, ledger_sequence, date, transaction, metadata)
VALUES (?, ?, ?, ?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "transactions")));
qualifiedTableName(settingsProvider_.get(), "transactions")
));
}();

PreparedStatement insertLedgerTransaction = [this]() {
@@ -300,7 +314,8 @@ public:
(ledger_sequence, hash)
VALUES (?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "ledger_transactions")));
qualifiedTableName(settingsProvider_.get(), "ledger_transactions")
));
}();

PreparedStatement insertSuccessor = [this]() {
@@ -310,7 +325,8 @@ public:
(key, seq, next)
VALUES (?, ?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "successor")));
qualifiedTableName(settingsProvider_.get(), "successor")
));
}();

PreparedStatement insertDiff = [this]() {
@@ -320,7 +336,8 @@ public:
(seq, key)
VALUES (?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "diff")));
qualifiedTableName(settingsProvider_.get(), "diff")
));
}();

PreparedStatement insertAccountTx = [this]() {
@@ -330,7 +347,8 @@ public:
(account, seq_idx, hash)
VALUES (?, ?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "account_tx")));
qualifiedTableName(settingsProvider_.get(), "account_tx")
));
}();

PreparedStatement insertNFT = [this]() {
@@ -340,7 +358,8 @@ public:
(token_id, sequence, owner, is_burned)
VALUES (?, ?, ?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "nf_tokens")));
qualifiedTableName(settingsProvider_.get(), "nf_tokens")
));
}();

PreparedStatement insertIssuerNFT = [this]() {
@@ -350,7 +369,8 @@ public:
(issuer, taxon, token_id)
VALUES (?, ?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")));
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")
));
}();

PreparedStatement insertNFTURI = [this]() {
@@ -360,7 +380,8 @@ public:
(token_id, sequence, uri)
VALUES (?, ?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "nf_token_uris")));
qualifiedTableName(settingsProvider_.get(), "nf_token_uris")
));
}();

PreparedStatement insertNFTTx = [this]() {
@@ -370,7 +391,8 @@ public:
(token_id, seq_idx, hash)
VALUES (?, ?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions")));
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions")
));
}();

PreparedStatement insertLedgerHeader = [this]() {
@@ -380,7 +402,8 @@ public:
(sequence, header)
VALUES (?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "ledgers")));
qualifiedTableName(settingsProvider_.get(), "ledgers")
));
}();

PreparedStatement insertLedgerHash = [this]() {
@@ -390,7 +413,8 @@ public:
(hash, sequence)
VALUES (?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "ledger_hashes")));
qualifiedTableName(settingsProvider_.get(), "ledger_hashes")
));
}();

//
@@ -405,7 +429,8 @@ public:
WHERE is_latest = ?
IF sequence IN (?, null)
)",
qualifiedTableName(settingsProvider_.get(), "ledger_range")));
qualifiedTableName(settingsProvider_.get(), "ledger_range")
));
}();

PreparedStatement deleteLedgerRange = [this]() {
@@ -415,7 +440,8 @@ public:
SET sequence = ?
WHERE is_latest = false
)",
qualifiedTableName(settingsProvider_.get(), "ledger_range")));
qualifiedTableName(settingsProvider_.get(), "ledger_range")
));
}();

//
@@ -432,7 +458,8 @@ public:
ORDER BY seq DESC
LIMIT 1
)",
qualifiedTableName(settingsProvider_.get(), "successor")));
qualifiedTableName(settingsProvider_.get(), "successor")
));
}();

PreparedStatement selectDiff = [this]() {
@@ -442,7 +469,8 @@ public:
FROM {}
WHERE seq = ?
)",
qualifiedTableName(settingsProvider_.get(), "diff")));
qualifiedTableName(settingsProvider_.get(), "diff")
));
}();

PreparedStatement selectObject = [this]() {
@@ -455,7 +483,8 @@ public:
ORDER BY sequence DESC
LIMIT 1
)",
qualifiedTableName(settingsProvider_.get(), "objects")));
qualifiedTableName(settingsProvider_.get(), "objects")
));
}();

PreparedStatement selectTransaction = [this]() {
@@ -465,7 +494,8 @@ public:
FROM {}
WHERE hash = ?
)",
qualifiedTableName(settingsProvider_.get(), "transactions")));
qualifiedTableName(settingsProvider_.get(), "transactions")
));
}();

PreparedStatement selectAllTransactionHashesInLedger = [this]() {
@@ -475,7 +505,8 @@ public:
FROM {}
WHERE ledger_sequence = ?
)",
qualifiedTableName(settingsProvider_.get(), "ledger_transactions")));
qualifiedTableName(settingsProvider_.get(), "ledger_transactions")
));
}();

PreparedStatement selectLedgerPageKeys = [this]() {
@@ -489,7 +520,8 @@ public:
LIMIT ?
ALLOW FILTERING
)",
qualifiedTableName(settingsProvider_.get(), "objects")));
qualifiedTableName(settingsProvider_.get(), "objects")
));
}();

PreparedStatement selectLedgerPage = [this]() {
@@ -503,7 +535,8 @@ public:
LIMIT ?
ALLOW FILTERING
)",
qualifiedTableName(settingsProvider_.get(), "objects")));
qualifiedTableName(settingsProvider_.get(), "objects")
));
}();

PreparedStatement getToken = [this]() {
@@ -514,7 +547,8 @@ public:
WHERE key = ?
LIMIT 1
)",
qualifiedTableName(settingsProvider_.get(), "objects")));
qualifiedTableName(settingsProvider_.get(), "objects")
));
}();

PreparedStatement selectAccountTx = [this]() {
@@ -526,7 +560,8 @@ public:
AND seq_idx < ?
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "account_tx")));
qualifiedTableName(settingsProvider_.get(), "account_tx")
));
}();

PreparedStatement selectAccountTxForward = [this]() {
@@ -539,7 +574,8 @@ public:
ORDER BY seq_idx ASC
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "account_tx")));
qualifiedTableName(settingsProvider_.get(), "account_tx")
));
}();

PreparedStatement selectNFT = [this]() {
@@ -552,7 +588,22 @@ public:
ORDER BY sequence DESC
LIMIT 1
)",
qualifiedTableName(settingsProvider_.get(), "nf_tokens")));
qualifiedTableName(settingsProvider_.get(), "nf_tokens")
));
}();

PreparedStatement selectNFTBulk = [this]() {
return handle_.get().prepare(fmt::format(
R"(
SELECT token_id, sequence, owner, is_burned
FROM {}
WHERE token_id IN ?
AND sequence <= ?
ORDER BY sequence DESC
PER PARTITION LIMIT 1
)",
qualifiedTableName(settingsProvider_.get(), "nf_tokens")
));
}();

PreparedStatement selectNFTURI = [this]() {
@@ -565,7 +616,22 @@ public:
ORDER BY sequence DESC
LIMIT 1
)",
qualifiedTableName(settingsProvider_.get(), "nf_token_uris")));
qualifiedTableName(settingsProvider_.get(), "nf_token_uris")
));
}();

PreparedStatement selectNFTURIBulk = [this]() {
return handle_.get().prepare(fmt::format(
R"(
SELECT token_id, uri
FROM {}
WHERE token_id IN ?
AND sequence <= ?
ORDER BY sequence DESC
PER PARTITION LIMIT 1
)",
qualifiedTableName(settingsProvider_.get(), "nf_token_uris")
));
}();

PreparedStatement selectNFTTx = [this]() {
@@ -578,7 +644,8 @@ public:
ORDER BY seq_idx DESC
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions")));
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions")
));
}();

PreparedStatement selectNFTTxForward = [this]() {
@@ -591,7 +658,37 @@ public:
ORDER BY seq_idx ASC
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions")));
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions")
));
}();

PreparedStatement selectNFTIDsByIssuer = [this]() {
return handle_.get().prepare(fmt::format(
R"(
SELECT token_id
FROM {}
WHERE issuer = ?
AND (taxon, token_id) > ?
ORDER BY taxon ASC, token_id ASC
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")
));
}();

PreparedStatement selectNFTIDsByIssuerTaxon = [this]() {
return handle_.get().prepare(fmt::format(
R"(
SELECT token_id
FROM {}
WHERE issuer = ?
AND taxon = ?
AND token_id > ?
ORDER BY taxon ASC, token_id ASC
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")
));
}();

PreparedStatement selectLedgerByHash = [this]() {
@@ -602,7 +699,8 @@ public:
WHERE hash = ?
LIMIT 1
)",
qualifiedTableName(settingsProvider_.get(), "ledger_hashes")));
qualifiedTableName(settingsProvider_.get(), "ledger_hashes")
));
}();

PreparedStatement selectLedgerBySeq = [this]() {
@@ -612,7 +710,8 @@ public:
FROM {}
WHERE sequence = ?
)",
qualifiedTableName(settingsProvider_.get(), "ledgers")));
qualifiedTableName(settingsProvider_.get(), "ledgers")
));
}();

PreparedStatement selectLatestLedger = [this]() {
@@ -622,7 +721,8 @@ public:
FROM {}
WHERE is_latest = true
)",
qualifiedTableName(settingsProvider_.get(), "ledger_range")));
qualifiedTableName(settingsProvider_.get(), "ledger_range")
));
}();

PreparedStatement selectLedgerRange = [this]() {
@@ -631,7 +731,8 @@ public:
SELECT sequence
FROM {}
)",
qualifiedTableName(settingsProvider_.get(), "ledger_range")));
qualifiedTableName(settingsProvider_.get(), "ledger_range")
));
}();
};

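The new `selectNFTBulk` / `selectNFTURIBulk` statements fetch many tokens in one round trip: `token_id IN ?` takes a whole list, and `PER PARTITION LIMIT 1` keeps only the newest row (highest sequence) per token. A usage sketch, assuming an executor like the one used elsewhere in this diff and that `Statement::bind` accepts a vector for the IN parameter:

    std::vector<ripple::uint256> const ids = {/* token IDs to look up */};
    auto const res = executor.read(yield, schema->selectNFTBulk, ids, ledgerSequence);
    // each returned row is the latest (token_id, sequence, owner, is_burned) at or below ledgerSequence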
@@ -20,6 +20,7 @@
#include <data/cassandra/SettingsProvider.h>
#include <data/cassandra/impl/Cluster.h>
#include <data/cassandra/impl/Statement.h>
#include <util/Constants.h>
#include <util/config/Config.h>

#include <boost/json.hpp>
@@ -34,12 +35,11 @@ namespace detail {
inline Settings::ContactPoints
tag_invoke(boost::json::value_to_tag<Settings::ContactPoints>, boost::json::value const& value)
{
if (not value.is_object())
throw std::runtime_error(
"Feed entire Cassandra section to parse "
"Settings::ContactPoints instead");
if (not value.is_object()) {
throw std::runtime_error("Feed entire Cassandra section to parse Settings::ContactPoints instead");
}

util::Config obj{value};
util::Config const obj{value};
Settings::ContactPoints out;

out.contactPoints = obj.valueOrThrow<std::string>("contact_points", "`contact_points` must be a string");
@@ -76,18 +76,15 @@ SettingsProvider::getSettings() const
std::optional<std::string>
SettingsProvider::parseOptionalCertificate() const
{
if (auto const certPath = config_.maybeValue<std::string>("certfile"); certPath)
{
if (auto const certPath = config_.maybeValue<std::string>("certfile"); certPath) {
auto const path = std::filesystem::path(*certPath);
std::ifstream fileStream(path.string(), std::ios::in);
if (!fileStream)
{
if (!fileStream) {
throw std::system_error(errno, std::generic_category(), "Opening certificate " + path.string());
}

std::string contents(std::istreambuf_iterator<char>{fileStream}, std::istreambuf_iterator<char>{});
if (fileStream.bad())
{
if (fileStream.bad()) {
throw std::system_error(errno, std::generic_category(), "Reading certificate " + path.string());
}

@@ -101,12 +98,9 @@ Settings
SettingsProvider::parseSettings() const
{
auto settings = Settings::defaultSettings();
if (auto const bundle = config_.maybeValue<Settings::SecureConnectionBundle>("secure_connect_bundle"); bundle)
{
if (auto const bundle = config_.maybeValue<Settings::SecureConnectionBundle>("secure_connect_bundle"); bundle) {
settings.connectionInfo = *bundle;
}
else
{
} else {
settings.connectionInfo =
config_.valueOrThrow<Settings::ContactPoints>("Missing contact_points in Cassandra config");
}
@@ -116,30 +110,18 @@ SettingsProvider::parseSettings() const
config_.valueOr<uint32_t>("max_write_requests_outstanding", settings.maxWriteRequestsOutstanding);
settings.maxReadRequestsOutstanding =
config_.valueOr<uint32_t>("max_read_requests_outstanding", settings.maxReadRequestsOutstanding);
settings.maxConnectionsPerHost =
config_.valueOr<uint32_t>("max_connections_per_host", settings.maxConnectionsPerHost);
settings.coreConnectionsPerHost =
config_.valueOr<uint32_t>("core_connections_per_host", settings.coreConnectionsPerHost);
settings.maxConcurrentRequestsThreshold = config_.valueOr<uint32_t>(
"max_concurrent_requests_threshold",
(settings.maxReadRequestsOutstanding + settings.maxWriteRequestsOutstanding) / settings.coreConnectionsPerHost);

settings.queueSizeIO = config_.maybeValue<uint32_t>("queue_size_io");
settings.queueSizeEvent = config_.maybeValue<uint32_t>("queue_size_event");
settings.writeBytesHighWatermark = config_.maybeValue<uint32_t>("write_bytes_high_water_mark");
settings.writeBytesLowWatermark = config_.maybeValue<uint32_t>("write_bytes_low_water_mark");
settings.pendingRequestsHighWatermark = config_.maybeValue<uint32_t>("pending_requests_high_water_mark");
settings.pendingRequestsLowWatermark = config_.maybeValue<uint32_t>("pending_requests_low_water_mark");
settings.maxRequestsPerFlush = config_.maybeValue<uint32_t>("max_requests_per_flush");
settings.maxConcurrentCreation = config_.maybeValue<uint32_t>("max_concurrent_creation");

auto const connectTimeoutSecond = config_.maybeValue<uint32_t>("connect_timeout");
if (connectTimeoutSecond)
settings.connectionTimeout = std::chrono::milliseconds{*connectTimeoutSecond * 1000};
settings.connectionTimeout = std::chrono::milliseconds{*connectTimeoutSecond * util::MILLISECONDS_PER_SECOND};

auto const requestTimeoutSecond = config_.maybeValue<uint32_t>("request_timeout");
if (requestTimeoutSecond)
settings.requestTimeout = std::chrono::milliseconds{*requestTimeoutSecond * 1000};
settings.requestTimeout = std::chrono::milliseconds{*requestTimeoutSecond * util::MILLISECONDS_PER_SECOND};

settings.certificate = parseOptionalCertificate();
settings.username = config_.maybeValue<std::string>("username");
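`connect_timeout` and `request_timeout` are read as whole seconds and scaled to milliseconds via `util::MILLISECONDS_PER_SECOND` instead of a bare 1000. A sketch of the relevant slice of a Cassandra config section, with keys taken from the parsing code above and made-up values:

    auto const cfg = boost::json::parse(R"({
        "contact_points": "127.0.0.1",
        "max_write_requests_outstanding": 10000,
        "max_read_requests_outstanding": 100000,
        "connect_timeout": 2,
        "request_timeout": 10
    })");
    // with connect_timeout = 2, parseSettings() yields a 2000ms connection timeout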
@@ -30,8 +30,7 @@ namespace data::cassandra {
/**
* @brief Provides settings for @ref BasicCassandraBackend.
*/
class SettingsProvider
{
class SettingsProvider {
util::Config config_;

std::string keyspace_;

@@ -52,8 +52,7 @@ using Batch = detail::Batch;
* because clio uses bigint (int64) everywhere except for when one needs
* to specify LIMIT, which needs an int32 :-/
*/
struct Limit
{
struct Limit {
int32_t limit;
};

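`Limit` exists so a caller can state explicitly that a bound value is the int32 LIMIT parameter rather than one of the usual int64 columns. Roughly, with a hypothetical bind call against one of the prepared statements above:

    auto stmt = schema->selectAccountTx.bind(account, cursorTuple, Limit{100});  // LIMIT must be int32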
@@ -48,16 +48,17 @@ template <
typename StatementType,
typename HandleType = Handle,
SomeRetryPolicy RetryPolicyType = ExponentialBackoffRetryPolicy>
class AsyncExecutor : public std::enable_shared_from_this<AsyncExecutor<StatementType, HandleType, RetryPolicyType>>
{
class AsyncExecutor : public std::enable_shared_from_this<AsyncExecutor<StatementType, HandleType, RetryPolicyType>> {
using FutureWithCallbackType = typename HandleType::FutureWithCallbackType;
using CallbackType = std::function<void(typename HandleType::ResultOrErrorType)>;
using RetryCallbackType = std::function<void()>;

util::Logger log_{"Backend"};

StatementType data_;
RetryPolicyType retryPolicy_;
CallbackType onComplete_;
RetryCallbackType onRetry_;

// does not exist during initial construction, hence optional
std::optional<FutureWithCallbackType> future_;
@@ -68,24 +69,37 @@ public:
* @brief Create a new instance of the AsyncExecutor and execute it.
*/
static void
run(boost::asio::io_context& ioc, HandleType const& handle, StatementType&& data, CallbackType&& onComplete)
run(boost::asio::io_context& ioc,
HandleType const& handle,
StatementType&& data,
CallbackType&& onComplete,
RetryCallbackType&& onRetry)
{
// this is a helper that allows us to use std::make_shared below
struct EnableMakeShared : public AsyncExecutor<StatementType, HandleType, RetryPolicyType>
{
EnableMakeShared(boost::asio::io_context& ioc, StatementType&& data, CallbackType&& onComplete)
: AsyncExecutor(ioc, std::move(data), std::move(onComplete))
struct EnableMakeShared : public AsyncExecutor<StatementType, HandleType, RetryPolicyType> {
EnableMakeShared(
boost::asio::io_context& ioc,
StatementType&& data,
CallbackType&& onComplete,
RetryCallbackType&& onRetry
)
: AsyncExecutor(ioc, std::move(data), std::move(onComplete), std::move(onRetry))
{
}
};

auto ptr = std::make_shared<EnableMakeShared>(ioc, std::move(data), std::move(onComplete));
auto ptr = std::make_shared<EnableMakeShared>(ioc, std::move(data), std::move(onComplete), std::move(onRetry));
ptr->execute(handle);
}

private:
AsyncExecutor(boost::asio::io_context& ioc, StatementType&& data, CallbackType&& onComplete)
: data_{std::move(data)}, retryPolicy_{ioc}, onComplete_{std::move(onComplete)}
AsyncExecutor(
boost::asio::io_context& ioc,
StatementType&& data,
CallbackType&& onComplete,
RetryCallbackType&& onRetry
)
: data_{std::move(data)}, retryPolicy_{ioc}, onComplete_{std::move(onComplete)}, onRetry_{std::move(onRetry)}
{
}

@@ -96,22 +110,21 @@ private:

// lifetime is extended by capturing self ptr
auto handler = [this, &handle, self](auto&& res) mutable {
if (res)
{
onComplete_(std::move(res));
}
else
{
if (retryPolicy_.shouldRetry(res.error()))
if (res) {
onComplete_(std::forward<decltype(res)>(res));
} else {
if (retryPolicy_.shouldRetry(res.error())) {
onRetry_();
retryPolicy_.retry([self, &handle]() { self->execute(handle); });
else
onComplete_(std::move(res)); // report error
} else {
onComplete_(std::forward<decltype(res)>(res)); // report error
}
}

self = nullptr; // explicitly decrement refcount
};

std::scoped_lock lck{mtx_};
std::scoped_lock const lck{mtx_};
future_.emplace(handle.asyncExecute(data_, std::move(handler)));
}
};

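`run` now takes a second callback, `onRetry`, which fires before each retry attempt (for example to keep a count of retried writes), while `onComplete` still receives the final result or the final, non-retriable error. A usage sketch with the new signature:

    AsyncExecutor<Statement>::run(
        ioc,
        handle,
        std::move(statement),
        [](auto result) { /* consume the ResultOrError: success or the final error */ },
        []() { /* called each time the retry policy schedules another attempt */ }
    );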
@@ -26,8 +26,8 @@
#include <vector>

namespace {
static constexpr auto batchDeleter = [](CassBatch* ptr) { cass_batch_free(ptr); };
};
constexpr auto batchDeleter = [](CassBatch* ptr) { cass_batch_free(ptr); };
} // namespace

namespace data::cassandra::detail {

@@ -37,16 +37,16 @@ Batch::Batch(std::vector<Statement> const& statements)
{
cass_batch_set_is_idempotent(*this, cass_true);

for (auto const& statement : statements)
for (auto const& statement : statements) {
if (auto const res = add(statement); not res)
throw std::runtime_error("Failed to add statement to batch: " + res.error());
}
}

MaybeError
Batch::add(Statement const& statement)
{
if (auto const rc = cass_batch_add_statement(*this, statement); rc != CASS_OK)
{
if (auto const rc = cass_batch_add_statement(*this, statement); rc != CASS_OK) {
return Error{CassandraError{cass_error_desc(rc), rc}};
}
return {};

@@ -26,8 +26,7 @@

namespace data::cassandra::detail {

struct Batch : public ManagedObject<CassBatch>
{
struct Batch : public ManagedObject<CassBatch> {
Batch(std::vector<Statement> const& statements);

MaybeError
@@ -28,11 +28,10 @@
#include <vector>

namespace {
static constexpr auto clusterDeleter = [](CassCluster* ptr) { cass_cluster_free(ptr); };
constexpr auto clusterDeleter = [](CassCluster* ptr) { cass_cluster_free(ptr); };

template <class... Ts>
struct overloadSet : Ts...
{
struct overloadSet : Ts... {
using Ts::operator()...;
};

@@ -48,113 +47,39 @@ Cluster::Cluster(Settings const& settings) : ManagedObject{cass_cluster_new(), c
using std::to_string;

cass_cluster_set_token_aware_routing(*this, cass_true);
if (auto const rc = cass_cluster_set_protocol_version(*this, CASS_PROTOCOL_VERSION_V4); rc != CASS_OK)
{
throw std::runtime_error(
fmt::format("Error setting cassandra protocol version to v4: {}", cass_error_desc(rc)));
if (auto const rc = cass_cluster_set_protocol_version(*this, CASS_PROTOCOL_VERSION_V4); rc != CASS_OK) {
throw std::runtime_error(fmt::format("Error setting cassandra protocol version to v4: {}", cass_error_desc(rc))
);
}

if (auto const rc = cass_cluster_set_num_threads_io(*this, settings.threads); rc != CASS_OK)
{
if (auto const rc = cass_cluster_set_num_threads_io(*this, settings.threads); rc != CASS_OK) {
throw std::runtime_error(
fmt::format("Error setting cassandra io threads to {}: {}", settings.threads, cass_error_desc(rc)));
fmt::format("Error setting cassandra io threads to {}: {}", settings.threads, cass_error_desc(rc))
);
}

cass_log_set_level(settings.enableLog ? CASS_LOG_TRACE : CASS_LOG_DISABLED);
cass_cluster_set_connect_timeout(*this, settings.connectionTimeout.count());
cass_cluster_set_request_timeout(*this, settings.requestTimeout.count());

if (auto const rc =
cass_cluster_set_max_concurrent_requests_threshold(*this, settings.maxConcurrentRequestsThreshold);
rc != CASS_OK)
{
throw std::runtime_error(
fmt::format("Could not set max concurrent requests per host threshold: {}", cass_error_desc(rc)));
}

if (auto const rc = cass_cluster_set_max_connections_per_host(*this, settings.maxConnectionsPerHost); rc != CASS_OK)
{
throw std::runtime_error(fmt::format("Could not set max connections per host: {}", cass_error_desc(rc)));
}

if (auto const rc = cass_cluster_set_core_connections_per_host(*this, settings.coreConnectionsPerHost);
rc != CASS_OK)
{
rc != CASS_OK) {
throw std::runtime_error(fmt::format("Could not set core connections per host: {}", cass_error_desc(rc)));
}

auto const queueSize =
settings.queueSizeIO.value_or(settings.maxWriteRequestsOutstanding + settings.maxReadRequestsOutstanding);
if (auto const rc = cass_cluster_set_queue_size_io(*this, queueSize); rc != CASS_OK)
{
if (auto const rc = cass_cluster_set_queue_size_io(*this, queueSize); rc != CASS_OK) {
throw std::runtime_error(fmt::format("Could not set queue size for IO per host: {}", cass_error_desc(rc)));
}

auto apply = []<typename ValueType, typename Fn>(
std::optional<ValueType> const& maybeValue, Fn&& fn) requires std::is_object_v<Fn>
{
if (maybeValue)
std::invoke(fn, maybeValue.value());
};

apply(settings.queueSizeEvent, [this](auto value) {
if (auto const rc = cass_cluster_set_queue_size_event(*this, value); rc != CASS_OK)
throw std::runtime_error(
fmt::format("Could not set queue size for events per host: {}", cass_error_desc(rc)));
});

apply(settings.writeBytesHighWatermark, [this](auto value) {
if (auto const rc = cass_cluster_set_write_bytes_high_water_mark(*this, value); rc != CASS_OK)
throw std::runtime_error(fmt::format("Could not set write bytes high water_mark: {}", cass_error_desc(rc)));
});

apply(settings.writeBytesLowWatermark, [this](auto value) {
if (auto const rc = cass_cluster_set_write_bytes_low_water_mark(*this, value); rc != CASS_OK)
throw std::runtime_error(fmt::format("Could not set write bytes low water mark: {}", cass_error_desc(rc)));
});

apply(settings.pendingRequestsHighWatermark, [this](auto value) {
if (auto const rc = cass_cluster_set_pending_requests_high_water_mark(*this, value); rc != CASS_OK)
throw std::runtime_error(
fmt::format("Could not set pending requests high water mark: {}", cass_error_desc(rc)));
});

apply(settings.pendingRequestsLowWatermark, [this](auto value) {
if (auto const rc = cass_cluster_set_pending_requests_low_water_mark(*this, value); rc != CASS_OK)
throw std::runtime_error(
fmt::format("Could not set pending requests low water mark: {}", cass_error_desc(rc)));
});

apply(settings.maxRequestsPerFlush, [this](auto value) {
if (auto const rc = cass_cluster_set_max_requests_per_flush(*this, value); rc != CASS_OK)
throw std::runtime_error(fmt::format("Could not set max requests per flush: {}", cass_error_desc(rc)));
});

apply(settings.maxConcurrentCreation, [this](auto value) {
|
||||
if (auto const rc = cass_cluster_set_max_concurrent_creation(*this, value); rc != CASS_OK)
|
||||
throw std::runtime_error(fmt::format("Could not set max concurrent creation: {}", cass_error_desc(rc)));
|
||||
});
|
||||
|
||||
setupConnection(settings);
|
||||
setupCertificate(settings);
|
||||
setupCredentials(settings);
|
||||
|
||||
auto valueOrDefault = []<typename T>(std::optional<T> const& maybeValue) -> std::string {
|
||||
return maybeValue ? to_string(*maybeValue) : "default";
|
||||
};
|
||||
|
||||
LOG(log_.info()) << "Threads: " << settings.threads;
|
||||
LOG(log_.info()) << "Max concurrent requests per host: " << settings.maxConcurrentRequestsThreshold;
|
||||
LOG(log_.info()) << "Max connections per host: " << settings.maxConnectionsPerHost;
|
||||
LOG(log_.info()) << "Core connections per host: " << settings.coreConnectionsPerHost;
|
||||
LOG(log_.info()) << "IO queue size: " << queueSize;
|
||||
LOG(log_.info()) << "Event queue size: " << valueOrDefault(settings.queueSizeEvent);
|
||||
LOG(log_.info()) << "Write bytes high watermark: " << valueOrDefault(settings.writeBytesHighWatermark);
|
||||
LOG(log_.info()) << "Write bytes low watermark: " << valueOrDefault(settings.writeBytesLowWatermark);
|
||||
LOG(log_.info()) << "Pending requests high watermark: " << valueOrDefault(settings.pendingRequestsHighWatermark);
|
||||
LOG(log_.info()) << "Pending requests low watermark: " << valueOrDefault(settings.pendingRequestsLowWatermark);
|
||||
LOG(log_.info()) << "Max requests per flush: " << valueOrDefault(settings.maxRequestsPerFlush);
|
||||
LOG(log_.info()) << "Max concurrent creation: " << valueOrDefault(settings.maxConcurrentCreation);
|
||||
}
|
||||
|
||||
void
|
||||
@@ -164,7 +89,8 @@ Cluster::setupConnection(Settings const& settings)
|
||||
overloadSet{
|
||||
[this](Settings::ContactPoints const& points) { setupContactPoints(points); },
|
||||
[this](Settings::SecureConnectionBundle const& bundle) { setupSecureBundle(bundle); }},
|
||||
settings.connectionInfo);
|
||||
settings.connectionInfo
|
||||
);
|
||||
}
|
||||
|
||||
void
|
||||
@@ -172,9 +98,11 @@ Cluster::setupContactPoints(Settings::ContactPoints const& points)
|
||||
{
|
||||
using std::to_string;
|
||||
auto throwErrorIfNeeded = [](CassError rc, std::string const& label, std::string const& value) {
|
||||
if (rc != CASS_OK)
|
||||
if (rc != CASS_OK) {
|
||||
throw std::runtime_error(
|
||||
fmt::format("Cassandra: Error setting {} [{}]: {}", label, value, cass_error_desc(rc)));
|
||||
fmt::format("Cassandra: Error setting {} [{}]: {}", label, value, cass_error_desc(rc))
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
{
|
||||
@@ -183,8 +111,7 @@ Cluster::setupContactPoints(Settings::ContactPoints const& points)
|
||||
throwErrorIfNeeded(rc, "contact_points", points.contactPoints);
|
||||
}
|
||||
|
||||
if (points.port)
|
||||
{
|
||||
if (points.port) {
|
||||
auto const rc = cass_cluster_set_port(*this, points.port.value());
|
||||
throwErrorIfNeeded(rc, "port", to_string(points.port.value()));
|
||||
}
|
||||
@@ -194,8 +121,7 @@ void
|
||||
Cluster::setupSecureBundle(Settings::SecureConnectionBundle const& bundle)
|
||||
{
|
||||
LOG(log_.debug()) << "Attempt connection using secure bundle";
|
||||
if (auto const rc = cass_cluster_set_cloud_secure_connection_bundle(*this, bundle.bundle.data()); rc != CASS_OK)
|
||||
{
|
||||
if (auto const rc = cass_cluster_set_cloud_secure_connection_bundle(*this, bundle.bundle.data()); rc != CASS_OK) {
|
||||
throw std::runtime_error("Failed to connect using secure connection bundle " + bundle.bundle);
|
||||
}
|
||||
}
|
||||
@@ -207,7 +133,7 @@ Cluster::setupCertificate(Settings const& settings)
|
||||
return;
|
||||
|
||||
LOG(log_.debug()) << "Configure SSL context";
|
||||
SslContext context = SslContext(*settings.certificate);
|
||||
SslContext const context = SslContext(*settings.certificate);
|
||||
cass_cluster_set_ssl(*this, context);
|
||||
}
|
||||
|
||||
|
||||
@@ -38,22 +38,22 @@ namespace data::cassandra::detail {
|
||||
/**
|
||||
* @brief Bundles all cassandra settings in one place.
|
||||
*/
|
||||
struct Settings
|
||||
{
|
||||
struct Settings {
|
||||
static constexpr std::size_t DEFAULT_CONNECTION_TIMEOUT = 10000;
|
||||
static constexpr uint32_t DEFAULT_MAX_WRITE_REQUESTS_OUTSTANDING = 10'000;
|
||||
static constexpr uint32_t DEFAULT_MAX_READ_REQUESTS_OUTSTANDING = 100'000;
|
||||
/**
|
||||
* @brief Represents the configuration of contact points for cassandra.
|
||||
*/
|
||||
struct ContactPoints
|
||||
{
|
||||
struct ContactPoints {
|
||||
std::string contactPoints = "127.0.0.1"; // defaults to localhost
|
||||
std::optional<uint16_t> port;
|
||||
std::optional<uint16_t> port = {};
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Represents the configuration of a secure connection bundle.
|
||||
*/
|
||||
struct SecureConnectionBundle
|
||||
{
|
||||
struct SecureConnectionBundle {
|
||||
std::string bundle; // no meaningful default
|
||||
};
|
||||
|
||||
@@ -61,7 +61,7 @@ struct Settings
|
||||
bool enableLog = false;
|
||||
|
||||
/** @brief Connect timeout specified in milliseconds */
|
||||
std::chrono::milliseconds connectionTimeout = std::chrono::milliseconds{10000};
|
||||
std::chrono::milliseconds connectionTimeout = std::chrono::milliseconds{DEFAULT_CONNECTION_TIMEOUT};
|
||||
|
||||
/** @brief Request timeout specified in milliseconds */
|
||||
std::chrono::milliseconds requestTimeout = std::chrono::milliseconds{0}; // no timeout at all
|
||||
@@ -73,53 +73,25 @@ struct Settings
|
||||
uint32_t threads = std::thread::hardware_concurrency();
|
||||
|
||||
/** @brief The maximum number of outstanding write requests at any given moment */
|
||||
uint32_t maxWriteRequestsOutstanding = 10'000;
|
||||
uint32_t maxWriteRequestsOutstanding = DEFAULT_MAX_WRITE_REQUESTS_OUTSTANDING;
|
||||
|
||||
/** @brief The maximum number of outstanding read requests at any given moment */
|
||||
uint32_t maxReadRequestsOutstanding = 100'000;
|
||||
|
||||
/** @brief The maximum number of connections per host */
|
||||
uint32_t maxConnectionsPerHost = 2u;
|
||||
uint32_t maxReadRequestsOutstanding = DEFAULT_MAX_READ_REQUESTS_OUTSTANDING;
|
||||
|
||||
/** @brief The number of connection per host to always have active */
|
||||
uint32_t coreConnectionsPerHost = 2u;
|
||||
|
||||
/** @brief The maximum concurrent requests per connection; new connections will be created when reached */
|
||||
uint32_t maxConcurrentRequestsThreshold =
|
||||
(maxWriteRequestsOutstanding + maxReadRequestsOutstanding) / coreConnectionsPerHost;
|
||||
|
||||
/** @brief Size of the event queue */
|
||||
std::optional<uint32_t> queueSizeEvent;
|
||||
uint32_t coreConnectionsPerHost = 1u;
|
||||
|
||||
/** @brief Size of the IO queue */
|
||||
std::optional<uint32_t> queueSizeIO;
|
||||
|
||||
/** @brief High watermark for bytes written */
|
||||
std::optional<uint32_t> writeBytesHighWatermark;
|
||||
|
||||
/** @brief Low watermark for bytes written */
|
||||
std::optional<uint32_t> writeBytesLowWatermark;
|
||||
|
||||
/** @brief High watermark for pending requests */
|
||||
std::optional<uint32_t> pendingRequestsHighWatermark;
|
||||
|
||||
/** @brief Low watermark for pending requests */
|
||||
std::optional<uint32_t> pendingRequestsLowWatermark;
|
||||
|
||||
/** @brief Maximum number of requests per flush */
|
||||
std::optional<uint32_t> maxRequestsPerFlush;
|
||||
|
||||
/** @brief Maximum number of connections that will be created concurrently */
|
||||
std::optional<uint32_t> maxConcurrentCreation;
|
||||
std::optional<uint32_t> queueSizeIO{};
|
||||
|
||||
/** @brief SSL certificate */
|
||||
std::optional<std::string> certificate; // ssl context
|
||||
std::optional<std::string> certificate{}; // ssl context
|
||||
|
||||
/** @brief Username/login */
|
||||
std::optional<std::string> username;
|
||||
std::optional<std::string> username{};
|
||||
|
||||
/** @brief Password to match the `username` */
|
||||
std::optional<std::string> password;
|
||||
std::optional<std::string> password{};
|
||||
|
||||
/**
|
||||
* @brief Creates a new Settings object as a copy of the current one with overridden contact points.
|
||||
@@ -142,8 +114,7 @@ struct Settings
|
||||
}
|
||||
};
|
||||
|
||||
class Cluster : public ManagedObject<CassCluster>
|
||||
{
|
||||
class Cluster : public ManagedObject<CassCluster> {
|
||||
util::Logger log_{"Backend"};
|
||||
|
||||
public:
|
||||
|
||||
87
src/data/cassandra/impl/Collection.h
Normal file
87
src/data/cassandra/impl/Collection.h
Normal file
@@ -0,0 +1,87 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2023, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <data/cassandra/impl/ManagedObject.h>
|
||||
|
||||
#include <ripple/basics/base_uint.h>
|
||||
#include <cassandra.h>
|
||||
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
|
||||
namespace data::cassandra::detail {
|
||||
|
||||
class Collection : public ManagedObject<CassCollection> {
|
||||
static constexpr auto deleter = [](CassCollection* ptr) { cass_collection_free(ptr); };
|
||||
|
||||
static void
|
||||
throwErrorIfNeeded(CassError const rc, std::string_view const label)
|
||||
{
|
||||
if (rc == CASS_OK)
|
||||
return;
|
||||
auto const tag = '[' + std::string{label} + ']';
|
||||
throw std::logic_error(tag + ": " + cass_error_desc(rc));
|
||||
}
|
||||
|
||||
public:
|
||||
/* implicit */ Collection(CassCollection* ptr);
|
||||
|
||||
template <typename Type>
|
||||
explicit Collection(std::vector<Type> const& value)
|
||||
: ManagedObject{cass_collection_new(CASS_COLLECTION_TYPE_LIST, value.size()), deleter}
|
||||
{
|
||||
bind(value);
|
||||
}
|
||||
|
||||
template <typename Type>
|
||||
void
|
||||
bind(std::vector<Type> const& values) const
|
||||
{
|
||||
for (auto const& value : values)
|
||||
append(value);
|
||||
}
|
||||
|
||||
void
|
||||
append(bool const value) const
|
||||
{
|
||||
auto const rc = cass_collection_append_bool(*this, value ? cass_true : cass_false);
|
||||
throwErrorIfNeeded(rc, "Bind bool");
|
||||
}
|
||||
|
||||
void
|
||||
append(int64_t const value) const
|
||||
{
|
||||
auto const rc = cass_collection_append_int64(*this, value);
|
||||
throwErrorIfNeeded(rc, "Bind int64");
|
||||
}
|
||||
|
||||
void
|
||||
append(ripple::uint256 const& value) const
|
||||
{
|
||||
auto const rc = cass_collection_append_bytes(
|
||||
*this,
|
||||
static_cast<cass_byte_t const*>(static_cast<unsigned char const*>(value.data())),
|
||||
ripple::uint256::size()
|
||||
);
|
||||
throwErrorIfNeeded(rc, "Bind ripple::uint256");
|
||||
}
|
||||
};
|
||||
} // namespace data::cassandra::detail
|
||||
@@ -19,6 +19,8 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <data/BackendCounters.h>
|
||||
#include <data/BackendInterface.h>
|
||||
#include <data/cassandra/Handle.h>
|
||||
#include <data/cassandra/Types.h>
|
||||
#include <data/cassandra/impl/AsyncExecutor.h>
|
||||
@@ -46,9 +48,8 @@ namespace data::cassandra::detail {
|
||||
* Note: A lot of the code that uses yield is repeated below.
|
||||
* This is ok for now because we are hopefully going to be getting rid of it entirely later on.
|
||||
*/
|
||||
template <typename HandleType = Handle>
|
||||
class DefaultExecutionStrategy
|
||||
{
|
||||
template <typename HandleType = Handle, SomeBackendCounters BackendCountersType = BackendCounters>
|
||||
class DefaultExecutionStrategy {
|
||||
util::Logger log_{"Backend"};
|
||||
|
||||
std::uint32_t maxWriteRequestsOutstanding_;
|
||||
@@ -69,6 +70,8 @@ class DefaultExecutionStrategy
|
||||
std::reference_wrapper<HandleType const> handle_;
|
||||
std::thread thread_;
|
||||
|
||||
typename BackendCountersType::PtrType counters_;
|
||||
|
||||
public:
|
||||
using ResultOrErrorType = typename HandleType::ResultOrErrorType;
|
||||
using StatementType = typename HandleType::StatementType;
|
||||
@@ -82,12 +85,17 @@ public:
|
||||
* @param settings The settings to use
|
||||
* @param handle A handle to the cassandra database
|
||||
*/
|
||||
DefaultExecutionStrategy(Settings const& settings, HandleType const& handle)
|
||||
DefaultExecutionStrategy(
|
||||
Settings const& settings,
|
||||
HandleType const& handle,
|
||||
typename BackendCountersType::PtrType counters = BackendCountersType::make()
|
||||
)
|
||||
: maxWriteRequestsOutstanding_{settings.maxWriteRequestsOutstanding}
|
||||
, maxReadRequestsOutstanding_{settings.maxReadRequestsOutstanding}
|
||||
, work_{ioc_}
|
||||
, handle_{std::cref(handle)}
|
||||
, thread_{[this]() { ioc_.run(); }}
|
||||
, counters_{std::move(counters)}
|
||||
{
|
||||
LOG(log_.info()) << "Max write requests outstanding is " << maxWriteRequestsOutstanding_
|
||||
<< "; Max read requests outstanding is " << maxReadRequestsOutstanding_;
|
||||
@@ -118,7 +126,10 @@ public:
|
||||
bool
|
||||
isTooBusy() const
|
||||
{
|
||||
return numReadRequestsOutstanding_ >= maxReadRequestsOutstanding_;
|
||||
bool const result = numReadRequestsOutstanding_ >= maxReadRequestsOutstanding_;
|
||||
if (result)
|
||||
counters_->registerTooBusy();
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -129,17 +140,16 @@ public:
|
||||
ResultOrErrorType
|
||||
writeSync(StatementType const& statement)
|
||||
{
|
||||
while (true)
|
||||
{
|
||||
if (auto res = handle_.get().execute(statement); res)
|
||||
{
|
||||
counters_->registerWriteSync();
|
||||
while (true) {
|
||||
auto res = handle_.get().execute(statement);
|
||||
if (res) {
|
||||
return res;
|
||||
}
|
||||
else
|
||||
{
|
||||
LOG(log_.warn()) << "Cassandra sync write error, retrying: " << res.error();
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(5));
|
||||
}
|
||||
|
||||
counters_->registerWriteSyncRetry();
|
||||
LOG(log_.warn()) << "Cassandra sync write error, retrying: " << res.error();
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(5));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -171,9 +181,19 @@ public:
|
||||
auto statement = preparedStatement.bind(std::forward<Args>(args)...);
|
||||
incrementOutstandingRequestCount();
|
||||
|
||||
counters_->registerWriteStarted();
|
||||
// Note: lifetime is controlled by std::shared_from_this internally
|
||||
AsyncExecutor<std::decay_t<decltype(statement)>, HandleType>::run(
|
||||
ioc_, handle_, std::move(statement), [this](auto const&) { decrementOutstandingRequestCount(); });
|
||||
ioc_,
|
||||
handle_,
|
||||
std::move(statement),
|
||||
[this](auto const&) {
|
||||
decrementOutstandingRequestCount();
|
||||
|
||||
counters_->registerWriteFinished();
|
||||
},
|
||||
[this]() { counters_->registerWriteRetry(); }
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -192,9 +212,18 @@ public:
|
||||
|
||||
incrementOutstandingRequestCount();
|
||||
|
||||
counters_->registerWriteStarted();
|
||||
// Note: lifetime is controlled by std::shared_from_this internally
|
||||
AsyncExecutor<std::decay_t<decltype(statements)>, HandleType>::run(
|
||||
ioc_, handle_, std::move(statements), [this](auto const&) { decrementOutstandingRequestCount(); });
|
||||
ioc_,
|
||||
handle_,
|
||||
std::move(statements),
|
||||
[this](auto const&) {
|
||||
decrementOutstandingRequestCount();
|
||||
counters_->registerWriteFinished();
|
||||
},
|
||||
[this]() { counters_->registerWriteRetry(); }
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -230,41 +259,41 @@ public:
|
||||
{
|
||||
auto const numStatements = statements.size();
|
||||
std::optional<FutureWithCallbackType> future;
|
||||
counters_->registerReadStarted(numStatements);
|
||||
|
||||
// todo: perhaps use policy instead
|
||||
while (true)
|
||||
{
|
||||
while (true) {
|
||||
numReadRequestsOutstanding_ += numStatements;
|
||||
// TODO: see if we can avoid using shared_ptr for self here
|
||||
|
||||
auto init = [this, &statements, &future]<typename Self>(Self& self) {
|
||||
future.emplace(handle_.get().asyncExecute(
|
||||
statements, [sself = std::make_shared<Self>(std::move(self))](auto&& res) mutable {
|
||||
// Note: explicit work below needed on linux/gcc11
|
||||
auto executor = boost::asio::get_associated_executor(*sself);
|
||||
boost::asio::post(
|
||||
executor,
|
||||
[sself = std::move(sself),
|
||||
res = std::move(res),
|
||||
_ = boost::asio::make_work_guard(executor)]() mutable {
|
||||
sself->complete(std::move(res));
|
||||
sself.reset();
|
||||
});
|
||||
}));
|
||||
auto sself = std::make_shared<Self>(std::move(self));
|
||||
|
||||
future.emplace(handle_.get().asyncExecute(statements, [sself](auto&& res) mutable {
|
||||
boost::asio::post(
|
||||
boost::asio::get_associated_executor(*sself),
|
||||
[sself, res = std::forward<decltype(res)>(res)]() mutable { sself->complete(std::move(res)); }
|
||||
);
|
||||
}));
|
||||
};
|
||||
|
||||
auto res = boost::asio::async_compose<CompletionTokenType, void(ResultOrErrorType)>(
|
||||
init, token, boost::asio::get_associated_executor(token));
|
||||
init, token, boost::asio::get_associated_executor(token)
|
||||
);
|
||||
numReadRequestsOutstanding_ -= numStatements;
|
||||
|
||||
if (res)
|
||||
{
|
||||
if (res) {
|
||||
counters_->registerReadFinished(numStatements);
|
||||
return res;
|
||||
}
|
||||
else
|
||||
{
|
||||
LOG(log_.error()) << "Failed batch read in coroutine: " << res.error();
|
||||
|
||||
LOG(log_.error()) << "Failed batch read in coroutine: " << res.error();
|
||||
try {
|
||||
throwErrorIfNeeded(res.error());
|
||||
} catch (...) {
|
||||
counters_->registerReadError(numStatements);
|
||||
throw;
|
||||
}
|
||||
counters_->registerReadRetry(numStatements);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -282,38 +311,40 @@ public:
|
||||
read(CompletionTokenType token, StatementType const& statement)
|
||||
{
|
||||
std::optional<FutureWithCallbackType> future;
|
||||
counters_->registerReadStarted();
|
||||
|
||||
// todo: perhaps use policy instead
|
||||
while (true)
|
||||
{
|
||||
while (true) {
|
||||
++numReadRequestsOutstanding_;
|
||||
// TODO: see if we can avoid using shared_ptr for self here
|
||||
auto init = [this, &statement, &future]<typename Self>(Self& self) {
|
||||
future.emplace(handle_.get().asyncExecute(
|
||||
statement, [sself = std::make_shared<Self>(std::move(self))](auto&&) mutable {
|
||||
// Note: explicit work below needed on linux/gcc11
|
||||
auto executor = boost::asio::get_associated_executor(*sself);
|
||||
boost::asio::post(
|
||||
executor, [sself = std::move(sself), _ = boost::asio::make_work_guard(executor)]() mutable {
|
||||
sself->complete();
|
||||
sself.reset();
|
||||
});
|
||||
}));
|
||||
auto sself = std::make_shared<Self>(std::move(self));
|
||||
|
||||
future.emplace(handle_.get().asyncExecute(statement, [sself](auto&& res) mutable {
|
||||
boost::asio::post(
|
||||
boost::asio::get_associated_executor(*sself),
|
||||
[sself, res = std::forward<decltype(res)>(res)]() mutable { sself->complete(std::move(res)); }
|
||||
);
|
||||
}));
|
||||
};
|
||||
|
||||
boost::asio::async_compose<CompletionTokenType, void()>(
|
||||
init, token, boost::asio::get_associated_executor(token));
|
||||
auto res = boost::asio::async_compose<CompletionTokenType, void(ResultOrErrorType)>(
|
||||
init, token, boost::asio::get_associated_executor(token)
|
||||
);
|
||||
--numReadRequestsOutstanding_;
|
||||
|
||||
if (auto res = future->get(); res)
|
||||
{
|
||||
if (res) {
|
||||
counters_->registerReadFinished();
|
||||
return res;
|
||||
}
|
||||
else
|
||||
{
|
||||
LOG(log_.error()) << "Failed read in coroutine: " << res.error();
|
||||
|
||||
LOG(log_.error()) << "Failed read in coroutine: " << res.error();
|
||||
try {
|
||||
throwErrorIfNeeded(res.error());
|
||||
} catch (...) {
|
||||
counters_->registerReadError();
|
||||
throw;
|
||||
}
|
||||
counters_->registerReadRetry();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -331,29 +362,25 @@ public:
|
||||
std::vector<ResultType>
|
||||
readEach(CompletionTokenType token, std::vector<StatementType> const& statements)
|
||||
{
|
||||
std::atomic_bool hadError = false;
|
||||
std::atomic_uint64_t errorsCount = 0u;
|
||||
std::atomic_int numOutstanding = statements.size();
|
||||
numReadRequestsOutstanding_ += statements.size();
|
||||
|
||||
auto futures = std::vector<FutureWithCallbackType>{};
|
||||
futures.reserve(numOutstanding);
|
||||
counters_->registerReadStarted(statements.size());
|
||||
|
||||
auto init = [this, &statements, &futures, &hadError, &numOutstanding]<typename Self>(Self& self) {
|
||||
auto sself = std::make_shared<Self>(std::move(self)); // TODO: see if we can avoid this
|
||||
auto executionHandler = [&hadError, &numOutstanding, sself = std::move(sself)](auto const& res) mutable {
|
||||
auto init = [this, &statements, &futures, &errorsCount, &numOutstanding]<typename Self>(Self& self) {
|
||||
auto sself = std::make_shared<Self>(std::move(self));
|
||||
auto executionHandler = [&errorsCount, &numOutstanding, sself](auto const& res) mutable {
|
||||
if (not res)
|
||||
hadError = true;
|
||||
++errorsCount;
|
||||
|
||||
// when all async operations complete unblock the result
|
||||
if (--numOutstanding == 0)
|
||||
{
|
||||
// Note: explicit work below needed on linux/gcc11
|
||||
auto executor = boost::asio::get_associated_executor(*sself);
|
||||
boost::asio::post(
|
||||
executor, [sself = std::move(sself), _ = boost::asio::make_work_guard(executor)]() mutable {
|
||||
sself->complete();
|
||||
sself.reset();
|
||||
});
|
||||
if (--numOutstanding == 0) {
|
||||
boost::asio::post(boost::asio::get_associated_executor(*sself), [sself]() mutable {
|
||||
sself->complete();
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
@@ -363,15 +390,22 @@ public:
|
||||
std::back_inserter(futures),
|
||||
[this, &executionHandler](auto const& statement) {
|
||||
return handle_.get().asyncExecute(statement, executionHandler);
|
||||
});
|
||||
}
|
||||
);
|
||||
};
|
||||
|
||||
boost::asio::async_compose<CompletionTokenType, void()>(
|
||||
init, token, boost::asio::get_associated_executor(token));
|
||||
init, token, boost::asio::get_associated_executor(token)
|
||||
);
|
||||
numReadRequestsOutstanding_ -= statements.size();
|
||||
|
||||
if (hadError)
|
||||
if (errorsCount > 0) {
|
||||
assert(errorsCount <= statements.size());
|
||||
counters_->registerReadError(errorsCount);
|
||||
counters_->registerReadFinished(statements.size() - errorsCount);
|
||||
throw DatabaseTimeout{};
|
||||
}
|
||||
counters_->registerReadFinished(statements.size());
|
||||
|
||||
std::vector<ResultType> results;
|
||||
results.reserve(futures.size());
|
||||
@@ -385,21 +419,30 @@ public:
|
||||
auto entry = future.get();
|
||||
auto&& res = entry.value();
|
||||
return std::move(res);
|
||||
});
|
||||
}
|
||||
);
|
||||
|
||||
assert(futures.size() == statements.size());
|
||||
assert(results.size() == statements.size());
|
||||
return results;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Get statistics about the backend.
|
||||
*/
|
||||
boost::json::object
|
||||
stats() const
|
||||
{
|
||||
return counters_->report();
|
||||
}
|
||||
|
||||
private:
|
||||
void
|
||||
incrementOutstandingRequestCount()
|
||||
{
|
||||
{
|
||||
std::unique_lock<std::mutex> lck(throttleMutex_);
|
||||
if (!canAddWriteRequest())
|
||||
{
|
||||
if (!canAddWriteRequest()) {
|
||||
LOG(log_.trace()) << "Max outstanding requests reached. "
|
||||
<< "Waiting for other requests to finish";
|
||||
throttleCv_.wait(lck, [this]() { return canAddWriteRequest(); });
|
||||
@@ -412,23 +455,21 @@ private:
|
||||
decrementOutstandingRequestCount()
|
||||
{
|
||||
// sanity check
|
||||
if (numWriteRequestsOutstanding_ == 0)
|
||||
{
|
||||
if (numWriteRequestsOutstanding_ == 0) {
|
||||
assert(false);
|
||||
throw std::runtime_error("decrementing num outstanding below 0");
|
||||
}
|
||||
size_t cur = (--numWriteRequestsOutstanding_);
|
||||
size_t const cur = (--numWriteRequestsOutstanding_);
|
||||
{
|
||||
// mutex lock required to prevent race condition around spurious
|
||||
// wakeup
|
||||
std::lock_guard lck(throttleMutex_);
|
||||
std::lock_guard const lck(throttleMutex_);
|
||||
throttleCv_.notify_one();
|
||||
}
|
||||
if (cur == 0)
|
||||
{
|
||||
if (cur == 0) {
|
||||
// mutex lock required to prevent race condition around spurious
|
||||
// wakeup
|
||||
std::lock_guard lck(syncMutex_);
|
||||
std::lock_guard const lck(syncMutex_);
|
||||
syncCv_.notify_one();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -25,7 +25,7 @@
|
||||
#include <vector>
|
||||
|
||||
namespace {
|
||||
static constexpr auto futureDeleter = [](CassFuture* ptr) { cass_future_free(ptr); };
|
||||
constexpr auto futureDeleter = [](CassFuture* ptr) { cass_future_free(ptr); };
|
||||
} // namespace
|
||||
|
||||
namespace data::cassandra::detail {
|
||||
@@ -37,11 +37,10 @@ namespace data::cassandra::detail {
|
||||
MaybeError
|
||||
Future::await() const
|
||||
{
|
||||
if (auto const rc = cass_future_error_code(*this); rc)
|
||||
{
|
||||
if (auto const rc = cass_future_error_code(*this); rc) {
|
||||
auto errMsg = [this](std::string const& label) {
|
||||
char const* message;
|
||||
std::size_t len;
|
||||
char const* message = nullptr;
|
||||
std::size_t len = 0;
|
||||
cass_future_error_message(*this, &message, &len);
|
||||
return label + ": " + std::string{message, len};
|
||||
}(cass_error_desc(rc));
|
||||
@@ -53,40 +52,37 @@ Future::await() const
|
||||
ResultOrError
|
||||
Future::get() const
|
||||
{
|
||||
if (auto const rc = cass_future_error_code(*this); rc)
|
||||
{
|
||||
if (auto const rc = cass_future_error_code(*this); rc) {
|
||||
auto const errMsg = [this](std::string const& label) {
|
||||
char const* message;
|
||||
std::size_t len;
|
||||
char const* message = nullptr;
|
||||
std::size_t len = 0;
|
||||
cass_future_error_message(*this, &message, &len);
|
||||
return label + ": " + std::string{message, len};
|
||||
}("future::get()");
|
||||
return Error{CassandraError{errMsg, rc}};
|
||||
}
|
||||
else
|
||||
{
|
||||
return Result{cass_future_get_result(*this)};
|
||||
}
|
||||
|
||||
return Result{cass_future_get_result(*this)};
|
||||
}
|
||||
|
||||
void
|
||||
invokeHelper(CassFuture* ptr, void* cbPtr)
|
||||
{
|
||||
// Note: can't use Future{ptr}.get() because double free will occur :/
|
||||
// Note2: we are moving/copying it locally as a workaround for an issue we are seeing from asio recently.
|
||||
// stackoverflow.com/questions/77004137/boost-asio-async-compose-gets-stuck-under-load
|
||||
auto* cb = static_cast<FutureWithCallback::FnType*>(cbPtr);
|
||||
if (auto const rc = cass_future_error_code(ptr); rc)
|
||||
{
|
||||
auto local = std::make_unique<FutureWithCallback::FnType>(std::move(*cb));
|
||||
if (auto const rc = cass_future_error_code(ptr); rc) {
|
||||
auto const errMsg = [&ptr](std::string const& label) {
|
||||
char const* message;
|
||||
std::size_t len;
|
||||
char const* message = nullptr;
|
||||
std::size_t len = 0;
|
||||
cass_future_error_message(ptr, &message, &len);
|
||||
return label + ": " + std::string{message, len};
|
||||
}("invokeHelper");
|
||||
(*cb)(Error{CassandraError{errMsg, rc}});
|
||||
}
|
||||
else
|
||||
{
|
||||
(*cb)(Result{cass_future_get_result(ptr)});
|
||||
(*local)(Error{CassandraError{errMsg, rc}});
|
||||
} else {
|
||||
(*local)(Result{cass_future_get_result(ptr)});
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -26,8 +26,7 @@
|
||||
|
||||
namespace data::cassandra::detail {
|
||||
|
||||
struct Future : public ManagedObject<CassFuture>
|
||||
{
|
||||
struct Future : public ManagedObject<CassFuture> {
|
||||
/* implicit */ Future(CassFuture* ptr);
|
||||
|
||||
MaybeError
|
||||
@@ -38,10 +37,9 @@ struct Future : public ManagedObject<CassFuture>
|
||||
};
|
||||
|
||||
void
|
||||
invokeHelper(CassFuture* ptr, void* self);
|
||||
invokeHelper(CassFuture* ptr, void* cbPtr);
|
||||
|
||||
class FutureWithCallback : public Future
|
||||
{
|
||||
class FutureWithCallback : public Future {
|
||||
public:
|
||||
using FnType = std::function<void(ResultOrError)>;
|
||||
using FnPtrType = std::unique_ptr<FnType>;
|
||||
|
||||
@@ -24,8 +24,7 @@
|
||||
namespace data::cassandra::detail {
|
||||
|
||||
template <typename Managed>
|
||||
class ManagedObject
|
||||
{
|
||||
class ManagedObject {
|
||||
protected:
|
||||
std::unique_ptr<Managed, void (*)(Managed*)> ptr_;
|
||||
|
||||
@@ -36,9 +35,8 @@ public:
|
||||
if (rawPtr == nullptr)
|
||||
throw std::runtime_error("Could not create DB object - got nullptr");
|
||||
}
|
||||
ManagedObject(ManagedObject&&) = default;
|
||||
|
||||
operator Managed* const() const
|
||||
operator Managed*() const
|
||||
{
|
||||
return ptr_.get();
|
||||
}
|
||||
|
||||
@@ -20,8 +20,8 @@
|
||||
#include <data/cassandra/impl/Result.h>
|
||||
|
||||
namespace {
|
||||
static constexpr auto resultDeleter = [](CassResult const* ptr) { cass_result_free(ptr); };
|
||||
static constexpr auto resultIteratorDeleter = [](CassIterator* ptr) { cass_iterator_free(ptr); };
|
||||
constexpr auto resultDeleter = [](CassResult const* ptr) { cass_result_free(ptr); };
|
||||
constexpr auto resultIteratorDeleter = [](CassIterator* ptr) { cass_iterator_free(ptr); };
|
||||
} // namespace
|
||||
|
||||
namespace data::cassandra::detail {
|
||||
@@ -43,7 +43,7 @@ Result::hasRows() const
|
||||
}
|
||||
|
||||
/* implicit */ ResultIterator::ResultIterator(CassIterator* ptr)
|
||||
: ManagedObject{ptr, resultIteratorDeleter}, hasMore_{cass_iterator_next(ptr)}
|
||||
: ManagedObject{ptr, resultIteratorDeleter}, hasMore_{cass_iterator_next(ptr) != 0u}
|
||||
{
|
||||
}
|
||||
|
||||
@@ -56,7 +56,7 @@ ResultIterator::fromResult(Result const& result)
|
||||
[[maybe_unused]] bool
|
||||
ResultIterator::moveForward()
|
||||
{
|
||||
hasMore_ = cass_iterator_next(*this);
|
||||
hasMore_ = (cass_iterator_next(*this) != 0u);
|
||||
return hasMore_;
|
||||
}
|
||||
|
||||
|
||||
@@ -44,8 +44,7 @@ extractColumn(CassRow const* row, std::size_t idx)
|
||||
Type output;
|
||||
|
||||
auto throwErrorIfNeeded = [](CassError rc, std::string_view label) {
|
||||
if (rc != CASS_OK)
|
||||
{
|
||||
if (rc != CASS_OK) {
|
||||
auto const tag = '[' + std::string{label} + ']';
|
||||
throw std::logic_error(tag + ": " + cass_error_desc(rc));
|
||||
}
|
||||
@@ -55,60 +54,46 @@ extractColumn(CassRow const* row, std::size_t idx)
|
||||
using UintTupleType = std::tuple<uint32_t, uint32_t>;
|
||||
using UCharVectorType = std::vector<unsigned char>;
|
||||
|
||||
if constexpr (std::is_same_v<DecayedType, ripple::uint256>)
|
||||
{
|
||||
cass_byte_t const* buf;
|
||||
std::size_t bufSize;
|
||||
if constexpr (std::is_same_v<DecayedType, ripple::uint256>) {
|
||||
cass_byte_t const* buf = nullptr;
|
||||
std::size_t bufSize = 0;
|
||||
auto const rc = cass_value_get_bytes(cass_row_get_column(row, idx), &buf, &bufSize);
|
||||
throwErrorIfNeeded(rc, "Extract ripple::uint256");
|
||||
output = ripple::uint256::fromVoid(buf);
|
||||
}
|
||||
else if constexpr (std::is_same_v<DecayedType, ripple::AccountID>)
|
||||
{
|
||||
cass_byte_t const* buf;
|
||||
std::size_t bufSize;
|
||||
} else if constexpr (std::is_same_v<DecayedType, ripple::AccountID>) {
|
||||
cass_byte_t const* buf = nullptr;
|
||||
std::size_t bufSize = 0;
|
||||
auto const rc = cass_value_get_bytes(cass_row_get_column(row, idx), &buf, &bufSize);
|
||||
throwErrorIfNeeded(rc, "Extract ripple::AccountID");
|
||||
output = ripple::AccountID::fromVoid(buf);
|
||||
}
|
||||
else if constexpr (std::is_same_v<DecayedType, UCharVectorType>)
|
||||
{
|
||||
cass_byte_t const* buf;
|
||||
std::size_t bufSize;
|
||||
} else if constexpr (std::is_same_v<DecayedType, UCharVectorType>) {
|
||||
cass_byte_t const* buf = nullptr;
|
||||
std::size_t bufSize = 0;
|
||||
auto const rc = cass_value_get_bytes(cass_row_get_column(row, idx), &buf, &bufSize);
|
||||
throwErrorIfNeeded(rc, "Extract vector<unsigned char>");
|
||||
output = UCharVectorType{buf, buf + bufSize};
|
||||
}
|
||||
else if constexpr (std::is_same_v<DecayedType, UintTupleType>)
|
||||
{
|
||||
} else if constexpr (std::is_same_v<DecayedType, UintTupleType>) {
|
||||
auto const* tuple = cass_row_get_column(row, idx);
|
||||
output = TupleIterator::fromTuple(tuple).extract<uint32_t, uint32_t>();
|
||||
}
|
||||
else if constexpr (std::is_convertible_v<DecayedType, std::string>)
|
||||
{
|
||||
char const* value;
|
||||
std::size_t len;
|
||||
} else if constexpr (std::is_convertible_v<DecayedType, std::string>) {
|
||||
char const* value = nullptr;
|
||||
std::size_t len = 0;
|
||||
auto const rc = cass_value_get_string(cass_row_get_column(row, idx), &value, &len);
|
||||
throwErrorIfNeeded(rc, "Extract string");
|
||||
output = std::string{value, len};
|
||||
}
|
||||
else if constexpr (std::is_same_v<DecayedType, bool>)
|
||||
{
|
||||
cass_bool_t flag;
|
||||
} else if constexpr (std::is_same_v<DecayedType, bool>) {
|
||||
cass_bool_t flag = cass_bool_t::cass_false;
|
||||
auto const rc = cass_value_get_bool(cass_row_get_column(row, idx), &flag);
|
||||
throwErrorIfNeeded(rc, "Extract bool");
|
||||
output = flag ? true : false;
|
||||
output = flag != cass_bool_t::cass_false;
|
||||
}
|
||||
// clio only uses bigint (int64_t) so we convert any incoming type
|
||||
else if constexpr (std::is_convertible_v<DecayedType, int64_t>)
|
||||
{
|
||||
int64_t out;
|
||||
else if constexpr (std::is_convertible_v<DecayedType, int64_t>) {
|
||||
int64_t out = 0;
|
||||
auto const rc = cass_value_get_int64(cass_row_get_column(row, idx), &out);
|
||||
throwErrorIfNeeded(rc, "Extract int64");
|
||||
output = static_cast<DecayedType>(out);
|
||||
}
|
||||
else
|
||||
{
|
||||
} else {
|
||||
// type not supported for extraction
|
||||
static_assert(unsupported_v<DecayedType>);
|
||||
}
|
||||
@@ -116,8 +101,7 @@ extractColumn(CassRow const* row, std::size_t idx)
|
||||
return output;
|
||||
}
|
||||
|
||||
struct Result : public ManagedObject<CassResult const>
|
||||
{
|
||||
struct Result : public ManagedObject<CassResult const> {
|
||||
/* implicit */ Result(CassResult const* ptr);
|
||||
|
||||
[[nodiscard]] std::size_t
|
||||
@@ -128,7 +112,8 @@ struct Result : public ManagedObject<CassResult const>
|
||||
|
||||
template <typename... RowTypes>
|
||||
std::optional<std::tuple<RowTypes...>>
|
||||
get() const requires(std::tuple_size<std::tuple<RowTypes...>>{} > 1)
|
||||
get() const
|
||||
requires(std::tuple_size<std::tuple<RowTypes...>>{} > 1)
|
||||
{
|
||||
// row managed internally by cassandra driver, hence no ManagedObject.
|
||||
auto const* row = cass_result_first_row(*this);
|
||||
@@ -153,8 +138,7 @@ struct Result : public ManagedObject<CassResult const>
|
||||
}
|
||||
};
|
||||
|
||||
class ResultIterator : public ManagedObject<CassIterator>
|
||||
{
|
||||
class ResultIterator : public ManagedObject<CassIterator> {
|
||||
bool hasMore_ = false;
|
||||
|
||||
public:
|
||||
@@ -185,17 +169,13 @@ public:
|
||||
};
|
||||
|
||||
template <typename... Types>
|
||||
class ResultExtractor
|
||||
{
|
||||
class ResultExtractor {
|
||||
std::reference_wrapper<Result const> ref_;
|
||||
|
||||
public:
|
||||
struct Sentinel
|
||||
{
|
||||
};
|
||||
struct Sentinel {};
|
||||
|
||||
struct Iterator
|
||||
{
|
||||
struct Iterator {
|
||||
using iterator_category = std::input_iterator_tag;
|
||||
using difference_type = std::size_t; // rows count
|
||||
using value_type = std::tuple<Types...>;
|
||||
|
||||
@@ -35,8 +35,7 @@ namespace data::cassandra::detail {
|
||||
/**
|
||||
* @brief A retry policy that employs exponential backoff
|
||||
*/
|
||||
class ExponentialBackoffRetryPolicy
|
||||
{
|
||||
class ExponentialBackoffRetryPolicy {
|
||||
util::Logger log_{"Backend"};
|
||||
|
||||
boost::asio::steady_timer timer_;
|
||||
@@ -75,7 +74,7 @@ public:
|
||||
retry(Fn&& fn)
|
||||
{
|
||||
timer_.expires_after(calculateDelay(attempt_++));
|
||||
timer_.async_wait([fn = std::forward<Fn>(fn)]([[maybe_unused]] const auto& err) {
|
||||
timer_.async_wait([fn = std::forward<Fn>(fn)]([[maybe_unused]] auto const& err) {
|
||||
// todo: deal with cancellation (thru err)
|
||||
fn();
|
||||
});
|
||||
@@ -84,7 +83,7 @@ public:
|
||||
/**
|
||||
* @brief Calculates the wait time before attempting another retry
|
||||
*/
|
||||
std::chrono::milliseconds
|
||||
static std::chrono::milliseconds
|
||||
calculateDelay(uint32_t attempt)
|
||||
{
|
||||
return std::chrono::milliseconds{lround(std::pow(2, std::min(10u, attempt)))};
|
||||
|
||||
@@ -25,8 +25,7 @@
|
||||
|
||||
namespace data::cassandra::detail {
|
||||
|
||||
class Session : public ManagedObject<CassSession>
|
||||
{
|
||||
class Session : public ManagedObject<CassSession> {
|
||||
static constexpr auto deleter = [](CassSession* ptr) { cass_session_free(ptr); };
|
||||
|
||||
public:
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
#include <data/cassandra/impl/SslContext.h>
|
||||
|
||||
namespace {
|
||||
static constexpr auto contextDeleter = [](CassSsl* ptr) { cass_ssl_free(ptr); };
|
||||
constexpr auto contextDeleter = [](CassSsl* ptr) { cass_ssl_free(ptr); };
|
||||
} // namespace
|
||||
|
||||
namespace data::cassandra::detail {
|
||||
@@ -28,8 +28,7 @@ namespace data::cassandra::detail {
|
||||
SslContext::SslContext(std::string const& certificate) : ManagedObject{cass_ssl_new(), contextDeleter}
|
||||
{
|
||||
cass_ssl_set_verify_flags(*this, CASS_SSL_VERIFY_NONE);
|
||||
if (auto const rc = cass_ssl_add_trusted_cert(*this, certificate.c_str()); rc != CASS_OK)
|
||||
{
|
||||
if (auto const rc = cass_ssl_add_trusted_cert(*this, certificate.c_str()); rc != CASS_OK) {
|
||||
throw std::runtime_error(std::string{"Error setting Cassandra SSL Context: "} + cass_error_desc(rc));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -27,8 +27,7 @@
|
||||
|
||||
namespace data::cassandra::detail {
|
||||
|
||||
struct SslContext : public ManagedObject<CassSsl>
|
||||
{
|
||||
struct SslContext : public ManagedObject<CassSsl> {
|
||||
explicit SslContext(std::string const& certificate);
|
||||
};
|
||||
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
#pragma once
|
||||
|
||||
#include <data/cassandra/Types.h>
|
||||
#include <data/cassandra/impl/Collection.h>
|
||||
#include <data/cassandra/impl/ManagedObject.h>
|
||||
#include <data/cassandra/impl/Tuple.h>
|
||||
#include <util/Expected.h>
|
||||
@@ -35,8 +36,7 @@
|
||||
|
||||
namespace data::cassandra::detail {
|
||||
|
||||
class Statement : public ManagedObject<CassStatement>
|
||||
{
|
||||
class Statement : public ManagedObject<CassStatement> {
|
||||
static constexpr auto deleter = [](CassStatement* ptr) { cass_statement_free(ptr); };
|
||||
|
||||
template <typename>
|
||||
@@ -64,8 +64,6 @@ public:
|
||||
cass_statement_set_is_idempotent(*this, cass_true);
|
||||
}
|
||||
|
||||
Statement(Statement&&) = default;
|
||||
|
||||
/**
|
||||
* @brief Binds the given arguments to the statement.
|
||||
*
|
||||
@@ -75,7 +73,7 @@ public:
|
||||
void
|
||||
bind(Args&&... args) const
|
||||
{
|
||||
std::size_t idx = 0;
|
||||
std::size_t idx = 0; // NOLINT(misc-const-correctness)
|
||||
(this->bindAt<Args>(idx++, std::forward<Args>(args)), ...);
|
||||
}
|
||||
|
||||
@@ -102,51 +100,40 @@ public:
|
||||
using DecayedType = std::decay_t<Type>;
|
||||
using UCharVectorType = std::vector<unsigned char>;
|
||||
using UintTupleType = std::tuple<uint32_t, uint32_t>;
|
||||
using UintByteTupleType = std::tuple<uint32_t, ripple::uint256>;
|
||||
using ByteVectorType = std::vector<ripple::uint256>;
|
||||
|
||||
if constexpr (std::is_same_v<DecayedType, ripple::uint256>)
|
||||
{
|
||||
if constexpr (std::is_same_v<DecayedType, ripple::uint256>) {
|
||||
auto const rc = bindBytes(value.data(), value.size());
|
||||
throwErrorIfNeeded(rc, "Bind ripple::uint256");
|
||||
}
|
||||
else if constexpr (std::is_same_v<DecayedType, ripple::AccountID>)
|
||||
{
|
||||
} else if constexpr (std::is_same_v<DecayedType, ripple::AccountID>) {
|
||||
auto const rc = bindBytes(value.data(), value.size());
|
||||
throwErrorIfNeeded(rc, "Bind ripple::AccountID");
|
||||
}
|
||||
else if constexpr (std::is_same_v<DecayedType, UCharVectorType>)
|
||||
{
|
||||
} else if constexpr (std::is_same_v<DecayedType, UCharVectorType>) {
|
||||
auto const rc = bindBytes(value.data(), value.size());
|
||||
throwErrorIfNeeded(rc, "Bind vector<unsigned char>");
|
||||
}
|
||||
else if constexpr (std::is_convertible_v<DecayedType, std::string>)
|
||||
{
|
||||
} else if constexpr (std::is_convertible_v<DecayedType, std::string>) {
|
||||
// reinterpret_cast is needed here :'(
|
||||
auto const rc = bindBytes(reinterpret_cast<unsigned char const*>(value.data()), value.size());
|
||||
throwErrorIfNeeded(rc, "Bind string (as bytes)");
|
||||
}
|
||||
else if constexpr (std::is_same_v<DecayedType, UintTupleType>)
|
||||
{
|
||||
auto const rc = cass_statement_bind_tuple(*this, idx, Tuple{std::move(value)});
|
||||
throwErrorIfNeeded(rc, "Bind tuple<uint32, uint32>");
|
||||
}
|
||||
else if constexpr (std::is_same_v<DecayedType, bool>)
|
||||
{
|
||||
} else if constexpr (std::is_same_v<DecayedType, UintTupleType> || std::is_same_v<DecayedType, UintByteTupleType>) {
|
||||
auto const rc = cass_statement_bind_tuple(*this, idx, Tuple{std::forward<Type>(value)});
|
||||
throwErrorIfNeeded(rc, "Bind tuple<uint32, uint32> or <uint32_t, ripple::uint256>");
|
||||
} else if constexpr (std::is_same_v<DecayedType, ByteVectorType>) {
|
||||
auto const rc = cass_statement_bind_collection(*this, idx, Collection{std::forward<Type>(value)});
|
||||
throwErrorIfNeeded(rc, "Bind collection");
|
||||
} else if constexpr (std::is_same_v<DecayedType, bool>) {
|
||||
auto const rc = cass_statement_bind_bool(*this, idx, value ? cass_true : cass_false);
|
||||
throwErrorIfNeeded(rc, "Bind bool");
|
||||
}
|
||||
else if constexpr (std::is_same_v<DecayedType, Limit>)
|
||||
{
|
||||
} else if constexpr (std::is_same_v<DecayedType, Limit>) {
|
||||
auto const rc = cass_statement_bind_int32(*this, idx, value.limit);
|
||||
throwErrorIfNeeded(rc, "Bind limit (int32)");
|
||||
}
|
||||
// clio only uses bigint (int64_t) so we convert any incoming type
|
||||
else if constexpr (std::is_convertible_v<DecayedType, int64_t>)
|
||||
{
|
||||
else if constexpr (std::is_convertible_v<DecayedType, int64_t>) {
|
||||
auto const rc = cass_statement_bind_int64(*this, idx, value);
|
||||
throwErrorIfNeeded(rc, "Bind int64");
|
||||
}
|
||||
else
|
||||
{
|
||||
} else {
|
||||
// type not supported for binding
|
||||
static_assert(unsupported_v<DecayedType>);
|
||||
}
|
||||
@@ -158,8 +145,7 @@ public:
|
||||
*
|
||||
* This is used to produce Statement objects that can be executed.
|
||||
*/
|
||||
class PreparedStatement : public ManagedObject<CassPrepared const>
|
||||
{
|
||||
class PreparedStatement : public ManagedObject<CassPrepared const> {
|
||||
static constexpr auto deleter = [](CassPrepared const* ptr) { cass_prepared_free(ptr); };
|
||||
|
||||
public:
|
||||
|
||||
@@ -20,8 +20,8 @@
|
||||
#include <data/cassandra/impl/Tuple.h>
|
||||
|
||||
namespace {
|
||||
static constexpr auto tupleDeleter = [](CassTuple* ptr) { cass_tuple_free(ptr); };
|
||||
static constexpr auto tupleIteratorDeleter = [](CassIterator* ptr) { cass_iterator_free(ptr); };
|
||||
constexpr auto tupleDeleter = [](CassTuple* ptr) { cass_tuple_free(ptr); };
|
||||
constexpr auto tupleIteratorDeleter = [](CassIterator* ptr) { cass_iterator_free(ptr); };
|
||||
} // namespace
|
||||
|
||||
namespace data::cassandra::detail {
|
||||
|
||||
@@ -21,6 +21,7 @@
|
||||
|
||||
#include <data/cassandra/impl/ManagedObject.h>
|
||||
|
||||
#include <ripple/basics/base_uint.h>
|
||||
#include <cassandra.h>
|
||||
|
||||
#include <functional>
|
||||
@@ -30,8 +31,7 @@
|
||||
|
||||
namespace data::cassandra::detail {
|
||||
|
||||
class Tuple : public ManagedObject<CassTuple>
|
||||
{
|
||||
class Tuple : public ManagedObject<CassTuple> {
|
||||
static constexpr auto deleter = [](CassTuple* ptr) { cass_tuple_free(ptr); };
|
||||
|
||||
template <typename>
|
||||
@@ -61,8 +61,7 @@ public:
|
||||
{
|
||||
using std::to_string;
|
||||
auto throwErrorIfNeeded = [idx](CassError rc, std::string_view label) {
|
||||
if (rc != CASS_OK)
|
||||
{
|
||||
if (rc != CASS_OK) {
|
||||
auto const tag = '[' + std::string{label} + ']';
|
||||
throw std::logic_error(tag + " at idx " + to_string(idx) + ": " + cass_error_desc(rc));
|
||||
}
|
||||
@@ -70,27 +69,30 @@ public:
|
||||
|
||||
using DecayedType = std::decay_t<Type>;
|
||||
|
||||
if constexpr (std::is_same_v<DecayedType, bool>)
|
||||
{
|
||||
if constexpr (std::is_same_v<DecayedType, bool>) {
|
||||
auto const rc = cass_tuple_set_bool(*this, idx, value ? cass_true : cass_false);
|
||||
throwErrorIfNeeded(rc, "Bind bool");
|
||||
}
|
||||
// clio only uses bigint (int64_t) so we convert any incoming type
|
||||
else if constexpr (std::is_convertible_v<DecayedType, int64_t>)
|
||||
{
|
||||
else if constexpr (std::is_convertible_v<DecayedType, int64_t>) {
|
||||
auto const rc = cass_tuple_set_int64(*this, idx, value);
|
||||
throwErrorIfNeeded(rc, "Bind int64");
|
||||
}
|
||||
else
|
||||
{
|
||||
} else if constexpr (std::is_same_v<DecayedType, ripple::uint256>) {
|
||||
auto const rc = cass_tuple_set_bytes(
|
||||
*this,
|
||||
idx,
|
||||
static_cast<cass_byte_t const*>(static_cast<unsigned char const*>(value.data())),
|
||||
value.size()
|
||||
);
|
||||
throwErrorIfNeeded(rc, "Bind ripple::uint256");
|
||||
} else {
|
||||
// type not supported for binding
|
||||
static_assert(unsupported_v<DecayedType>);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
class TupleIterator : public ManagedObject<CassIterator>
|
||||
{
|
||||
class TupleIterator : public ManagedObject<CassIterator> {
|
||||
template <typename>
|
||||
static constexpr bool unsupported_v = false;
|
||||
|
||||
@@ -119,8 +121,7 @@ private:
|
||||
throw std::logic_error("Could not extract next value from tuple iterator");
|
||||
|
||||
auto throwErrorIfNeeded = [](CassError rc, std::string_view label) {
|
||||
if (rc != CASS_OK)
|
||||
{
|
||||
if (rc != CASS_OK) {
|
||||
auto const tag = '[' + std::string{label} + ']';
|
||||
throw std::logic_error(tag + ": " + cass_error_desc(rc));
|
||||
}
|
||||
@@ -129,15 +130,12 @@ private:
|
||||
using DecayedType = std::decay_t<Type>;
|
||||
|
||||
// clio only uses bigint (int64_t) so we convert any incoming type
|
||||
if constexpr (std::is_convertible_v<DecayedType, int64_t>)
|
||||
{
|
||||
int64_t out;
|
||||
if constexpr (std::is_convertible_v<DecayedType, int64_t>) {
|
||||
int64_t out = 0;
|
||||
auto const rc = cass_value_get_int64(cass_iterator_get_value(*this), &out);
|
||||
throwErrorIfNeeded(rc, "Extract int64 from tuple");
|
||||
output = static_cast<DecayedType>(out);
|
||||
}
|
||||
else
|
||||
{
|
||||
} else {
|
||||
// type not supported for extraction
|
||||
static_assert(unsupported_v<DecayedType>);
|
||||
}
|
||||
|
||||
@@ -36,8 +36,7 @@ namespace etl {
|
||||
* Any later calls to methods of this datastructure will not wait. Once the datastructure is stopped, the datastructure
|
||||
* remains stopped for the rest of its lifetime.
|
||||
*/
|
||||
class NetworkValidatedLedgers
|
||||
{
|
||||
class NetworkValidatedLedgers {
|
||||
// max sequence validated by network
|
||||
std::optional<uint32_t> max_;
|
||||
|
||||
@@ -62,7 +61,7 @@ public:
|
||||
void
|
||||
push(uint32_t idx)
|
||||
{
|
||||
std::lock_guard lck(m_);
|
||||
std::lock_guard const lck(m_);
|
||||
if (!max_ || idx > *max_)
|
||||
max_ = idx;
|
||||
cv_.notify_all();
|
||||
@@ -95,10 +94,11 @@ public:
|
||||
{
|
||||
std::unique_lock lck(m_);
|
||||
auto pred = [sequence, this]() -> bool { return (max_ && sequence <= *max_); };
|
||||
if (maxWaitMs)
|
||||
if (maxWaitMs) {
|
||||
cv_.wait_for(lck, std::chrono::milliseconds(*maxWaitMs));
|
||||
else
|
||||
} else {
|
||||
cv_.wait(lck, pred);
|
||||
}
|
||||
return pred();
|
||||
}
|
||||
};
|
||||
@@ -111,8 +111,7 @@ public:
|
||||
* added or removed from the queue. These waits are blocking calls.
|
||||
*/
|
||||
template <class T>
|
||||
class ThreadSafeQueue
|
||||
{
|
||||
class ThreadSafeQueue {
|
||||
std::queue<T> queue_;
|
||||
|
||||
mutable std::mutex m_;
|
||||
@@ -190,7 +189,7 @@ public:
|
||||
std::optional<T>
|
||||
tryPop()
|
||||
{
|
||||
std::scoped_lock lck(m_);
|
||||
std::scoped_lock const lck(m_);
|
||||
if (queue_.empty())
|
||||
return {};
|
||||
|
||||
@@ -212,13 +211,12 @@ getMarkers(size_t numMarkers)
|
||||
{
|
||||
assert(numMarkers <= 256);
|
||||
|
||||
unsigned char incr = 256 / numMarkers;
|
||||
unsigned char const incr = 256 / numMarkers;
|
||||
|
||||
std::vector<ripple::uint256> markers;
|
||||
markers.reserve(numMarkers);
|
||||
ripple::uint256 base{0};
|
||||
for (size_t i = 0; i < numMarkers; ++i)
|
||||
{
|
||||
for (size_t i = 0; i < numMarkers; ++i) {
|
||||
markers.push_back(base);
|
||||
base.data()[0] += incr;
|
||||
}
|
||||
|
||||
@@ -18,9 +18,12 @@
|
||||
//==============================================================================
|
||||
|
||||
#include <etl/ETLService.h>
|
||||
#include <util/Constants.h>
|
||||
|
||||
#include <ripple/protocol/LedgerHeader.h>
|
||||
|
||||
#include <utility>
|
||||
|
||||
namespace etl {
|
||||
// Database must be populated when this starts
|
||||
std::optional<uint32_t>
|
||||
@@ -33,8 +36,7 @@ ETLService::runETLPipeline(uint32_t startSequence, uint32_t numExtractors)
|
||||
state_.isWriting = true;
|
||||
|
||||
auto rng = backend_->hardFetchLedgerRangeNoThrow();
|
||||
if (!rng || rng->maxSequence < startSequence - 1)
|
||||
{
|
||||
if (!rng || rng->maxSequence < startSequence - 1) {
|
||||
assert(false);
|
||||
throw std::runtime_error("runETLPipeline: parent ledger is null");
|
||||
}
|
||||
@@ -43,11 +45,14 @@ ETLService::runETLPipeline(uint32_t startSequence, uint32_t numExtractors)
|
||||
auto extractors = std::vector<std::unique_ptr<ExtractorType>>{};
|
||||
auto pipe = DataPipeType{numExtractors, startSequence};
|
||||
|
||||
for (auto i = 0u; i < numExtractors; ++i)
|
||||
for (auto i = 0u; i < numExtractors; ++i) {
|
||||
extractors.push_back(std::make_unique<ExtractorType>(
|
||||
pipe, networkValidatedLedgers_, ledgerFetcher_, startSequence + i, finishSequence_, state_));
|
||||
pipe, networkValidatedLedgers_, ledgerFetcher_, startSequence + i, finishSequence_, state_
|
||||
));
|
||||
}
|
||||
|
||||
auto transformer = TransformerType{pipe, backend_, ledgerLoader_, ledgerPublisher_, startSequence, state_};
|
||||
auto transformer =
|
||||
TransformerType{pipe, backend_, ledgerLoader_, ledgerPublisher_, amendmentBlockHandler_, startSequence, state_};
|
||||
transformer.waitTillFinished(); // suspend current thread until exit condition is met
|
||||
pipe.cleanup(); // TODO: this should probably happen automatically using destructor
|
||||
|
||||
@@ -57,8 +62,9 @@ ETLService::runETLPipeline(uint32_t startSequence, uint32_t numExtractors)
|
||||
|
||||
auto const end = std::chrono::system_clock::now();
|
||||
auto const lastPublishedSeq = ledgerPublisher_.getLastPublishedSequence();
|
||||
static constexpr auto NANOSECONDS_PER_SECOND = 1'000'000'000.0;
|
||||
LOG(log_.debug()) << "Extracted and wrote " << lastPublishedSeq.value_or(startSequence) - startSequence << " in "
|
||||
<< ((end - begin).count()) / 1000000000.0;
|
||||
<< ((end - begin).count()) / NANOSECONDS_PER_SECOND;
|
||||
|
||||
state_.isWriting = false;

@@ -78,58 +84,40 @@ void
ETLService::monitor()
{
auto rng = backend_->hardFetchLedgerRangeNoThrow();
if (!rng)
{
if (!rng) {
LOG(log_.info()) << "Database is empty. Will download a ledger from the network.";
std::optional<ripple::LedgerHeader> ledger;

try
{
if (startSequence_)
{
try {
if (startSequence_) {
LOG(log_.info()) << "ledger sequence specified in config. "
<< "Will begin ETL process starting with ledger " << *startSequence_;
ledger = ledgerLoader_.loadInitialLedger(*startSequence_);
}
else
{
} else {
LOG(log_.info()) << "Waiting for next ledger to be validated by network...";
std::optional<uint32_t> mostRecentValidated = networkValidatedLedgers_->getMostRecent();

if (mostRecentValidated)
{
if (mostRecentValidated) {
LOG(log_.info()) << "Ledger " << *mostRecentValidated << " has been validated. Downloading...";
ledger = ledgerLoader_.loadInitialLedger(*mostRecentValidated);
}
else
{
LOG(log_.info()) << "The wait for the next validated ledger has been aborted. Exiting monitor loop";
} else {
LOG(log_.info()) << "The wait for the next validated ledger has been aborted. "
"Exiting monitor loop";
return;
}
}
}
catch (std::runtime_error const& e)
{
setAmendmentBlocked();

log_.fatal()
<< "Failed to load initial ledger, Exiting monitor loop: " << e.what()
<< " Possible cause: The ETL node is not compatible with the version of the rippled lib Clio is using.";
return;
} catch (std::runtime_error const& e) {
LOG(log_.fatal()) << "Failed to load initial ledger: " << e.what();
return amendmentBlockHandler_.onAmendmentBlock();
}

if (ledger)
{
if (ledger) {
rng = backend_->hardFetchLedgerRangeNoThrow();
}
else
{
} else {
LOG(log_.error()) << "Failed to load initial ledger. Exiting monitor loop";
return;
}
}
else
{
} else {
if (startSequence_)
LOG(log_.warn()) << "start sequence specified but db is already populated";

@@ -143,45 +131,45 @@ ETLService::monitor()
LOG(log_.debug()) << "Database is populated. "
<< "Starting monitor loop. sequence = " << nextSequence;

while (true)
{
if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); rng && rng->maxSequence >= nextSequence)
{
ledgerPublisher_.publish(nextSequence, {});
while (not isStopping()) {
nextSequence = publishNextSequence(nextSequence);
}
}

uint32_t
ETLService::publishNextSequence(uint32_t nextSequence)
{
if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); rng && rng->maxSequence >= nextSequence) {
ledgerPublisher_.publish(nextSequence, {});
++nextSequence;
} else if (networkValidatedLedgers_->waitUntilValidatedByNetwork(nextSequence, util::MILLISECONDS_PER_SECOND)) {
LOG(log_.info()) << "Ledger with sequence = " << nextSequence << " has been validated by the network. "
<< "Attempting to find in database and publish";

// Attempt to take over responsibility of ETL writer after 10 failed
// attempts to publish the ledger. publishLedger() fails if the
// ledger that has been validated by the network is not found in the
// database after the specified number of attempts. publishLedger()
// waits one second between each attempt to read the ledger from the
// database
constexpr size_t timeoutSeconds = 10;
bool const success = ledgerPublisher_.publish(nextSequence, timeoutSeconds);

if (!success) {
LOG(log_.warn()) << "Failed to publish ledger with sequence = " << nextSequence << " . Beginning ETL";

// returns the most recent sequence published, or an empty optional if no sequence was published
std::optional<uint32_t> lastPublished = runETLPipeline(nextSequence, extractorThreads_);
LOG(log_.info()) << "Aborting ETL. Falling back to publishing";

// if no ledger was published, don't increment nextSequence
if (lastPublished)
nextSequence = *lastPublished + 1;
} else {
++nextSequence;
}
else if (networkValidatedLedgers_->waitUntilValidatedByNetwork(nextSequence, 1000))
{
LOG(log_.info()) << "Ledger with sequence = " << nextSequence << " has been validated by the network. "
<< "Attempting to find in database and publish";

// Attempt to take over responsibility of ETL writer after 10 failed
// attempts to publish the ledger. publishLedger() fails if the
// ledger that has been validated by the network is not found in the
// database after the specified number of attempts. publishLedger()
// waits one second between each attempt to read the ledger from the
// database
constexpr size_t timeoutSeconds = 10;
bool success = ledgerPublisher_.publish(nextSequence, timeoutSeconds);

if (!success)
{
LOG(log_.warn()) << "Failed to publish ledger with sequence = " << nextSequence << " . Beginning ETL";

// returns the most recent sequence published, or an empty optional if no sequence was published
std::optional<uint32_t> lastPublished = runETLPipeline(nextSequence, extractorThreads_);
LOG(log_.info()) << "Aborting ETL. Falling back to publishing";

// if no ledger was published, don't increment nextSequence
if (lastPublished)
nextSequence = *lastPublished + 1;
}
else
{
++nextSequence;
}
}
}
return nextSequence;
}
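
Condensed, the function above implements a publish-or-take-over state machine. A sketch of that control flow with hypothetical stand-in helpers (dbHasLedger, networkValidated, publishWithRetries, runEtlFrom are not clio APIs):

#include <cstdint>
#include <optional>

// Hypothetical helpers standing in for the members used above.
bool dbHasLedger(uint32_t);
bool networkValidated(uint32_t seq, uint32_t timeoutMs);
bool publishWithRetries(uint32_t seq, uint32_t attempts);
std::optional<uint32_t> runEtlFrom(uint32_t);

uint32_t
publishNextSequenceSketch(uint32_t seq)
{
    if (dbHasLedger(seq))              // some writer already stored it: just publish
        return seq + 1;
    if (!networkValidated(seq, 1000))  // nothing validated within a second
        return seq;                    // caller loops and tries again

    if (publishWithRetries(seq, 10))   // another process wrote it in time
        return seq + 1;

    // No writer produced the ledger: take over ETL, then fall back to publishing.
    if (auto const lastPublished = runEtlFrom(seq))
        return *lastPublished + 1;
    return seq;                        // pipeline published nothing; retry same seq
}
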

void
@@ -189,36 +177,37 @@ ETLService::monitorReadOnly()
{
LOG(log_.debug()) << "Starting reporting in strict read only mode";

auto rng = backend_->hardFetchLedgerRangeNoThrow();
uint32_t latestSequence;
auto const latestSequenceOpt = [this]() -> std::optional<uint32_t> {
auto rng = backend_->hardFetchLedgerRangeNoThrow();

if (!rng)
{
if (auto net = networkValidatedLedgers_->getMostRecent())
latestSequence = *net;
else
return;
}
else
{
latestSequence = rng->maxSequence;
if (!rng) {
if (auto net = networkValidatedLedgers_->getMostRecent()) {
return *net;
}
return std::nullopt;
}

return rng->maxSequence;
}();

if (!latestSequenceOpt.has_value()) {
return;
}

uint32_t latestSequence = *latestSequenceOpt;

cacheLoader_.load(latestSequence);
latestSequence++;

while (true)
{
if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); rng && rng->maxSequence >= latestSequence)
{
while (not isStopping()) {
if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); rng && rng->maxSequence >= latestSequence) {
ledgerPublisher_.publish(latestSequence, {});
latestSequence = latestSequence + 1;
}
else
{
// if we can't, wait until it's validated by the network, or 1 second passes, whichever occurs first.
// Even if we don't hear from rippled, if ledgers are being written to the db, we publish them.
networkValidatedLedgers_->waitUntilValidatedByNetwork(latestSequence, 1000);
} else {
// if we can't, wait until it's validated by the network, or 1 second passes, whichever occurs
// first. Even if we don't hear from rippled, if ledgers are being written to the db, we publish
// them.
networkValidatedLedgers_->waitUntilValidatedByNetwork(latestSequence, util::MILLISECONDS_PER_SECOND);
}
}
}
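
The rewrite above replaces a mutable, uninitialized latestSequence with an immediately-invoked lambda returning std::optional. A minimal standalone example of that pattern:

#include <optional>

std::optional<int> fromDb() { return std::nullopt; }  // hypothetical lookups
std::optional<int> fromNetwork() { return 42; }

void example()
{
    auto const latest = []() -> std::optional<int> {
        if (auto const db = fromDb())
            return *db;
        if (auto const net = fromNetwork())
            return *net;
        return std::nullopt;
    }();  // trailing (): the lambda runs immediately, so 'latest' can be const

    if (!latest.has_value())
        return;  // mirrors the early return in monitorReadOnly() above
    // ... use *latest ...
}
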
@@ -238,10 +227,11 @@ ETLService::doWork()
worker_ = std::thread([this]() {
beast::setCurrentThreadName("ETLService worker");

if (state_.isReadOnly)
if (state_.isReadOnly) {
monitorReadOnly();
else
} else {
monitor();
}
});
}

@@ -251,14 +241,16 @@ ETLService::ETLService(
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManagerType> subscriptions,
std::shared_ptr<LoadBalancerType> balancer,
std::shared_ptr<NetworkValidatedLedgersType> ledgers)
std::shared_ptr<NetworkValidatedLedgersType> ledgers
)
: backend_(backend)
, loadBalancer_(balancer)
, networkValidatedLedgers_(ledgers)
, networkValidatedLedgers_(std::move(ledgers))
, cacheLoader_(config, ioc, backend, backend->cache())
, ledgerFetcher_(backend, balancer)
, ledgerLoader_(backend, balancer, ledgerFetcher_, state_)
, ledgerPublisher_(ioc, backend, subscriptions, state_)
, ledgerPublisher_(ioc, backend, backend->cache(), subscriptions, state_)
, amendmentBlockHandler_(ioc, state_)
{
startSequence_ = config.maybeValue<uint32_t>("start_sequence");
finishSequence_ = config.maybeValue<uint32_t>("finish_sequence");
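
startSequence_/finishSequence_ above come from clio's Config wrapper. A hedged sketch of the access pattern (constructing util::Config from parsed JSON as shown here is an assumption; maybeValue and valueOr are the calls visible in this diff):

// Sketch only; the util::Config construction below is assumed, not verified.
util::Config const config{boost::json::parse(R"({"start_sequence": 32570})")};

auto const start = config.maybeValue<uint32_t>("start_sequence");   // optional, engaged
auto const finish = config.maybeValue<uint32_t>("finish_sequence"); // std::nullopt here
auto const allowNoEtl = config.valueOr("allow_no_etl", false);      // default when absent
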

@@ -24,6 +24,7 @@
#include <etl/LoadBalancer.h>
#include <etl/Source.h>
#include <etl/SystemState.h>
#include <etl/impl/AmendmentBlock.h>
#include <etl/impl/CacheLoader.h>
#include <etl/impl/ExtractionDataPipe.h>
#include <etl/impl/Extractor.h>
@@ -35,6 +36,7 @@
#include <util/log/Logger.h>

#include <ripple/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>
#include <boost/asio/steady_timer.hpp>
#include <grpcpp/grpcpp.h>

#include <memory>
@@ -44,7 +46,7 @@ struct NFTTransactionsData;
struct NFTsData;
namespace feed {
class SubscriptionManager;
}
} // namespace feed

/**
* @brief This namespace contains everything to do with the ETL and ETL sources.
@@ -64,19 +66,21 @@ namespace etl {
* the others will fall back to monitoring/publishing. In this sense, this class dynamically transitions from monitoring
* to writing and from writing to monitoring, based on the activity of other processes running on different machines.
*/
class ETLService
{
class ETLService {
// TODO: make these template parameters in ETLService
using SubscriptionManagerType = feed::SubscriptionManager;
using LoadBalancerType = LoadBalancer;
using NetworkValidatedLedgersType = NetworkValidatedLedgers;
using DataPipeType = etl::detail::ExtractionDataPipe<org::xrpl::rpc::v1::GetLedgerResponse>;
using CacheLoaderType = etl::detail::CacheLoader<data::LedgerCache>;
using CacheType = data::LedgerCache;
using CacheLoaderType = etl::detail::CacheLoader<CacheType>;
using LedgerFetcherType = etl::detail::LedgerFetcher<LoadBalancerType>;
using ExtractorType = etl::detail::Extractor<DataPipeType, NetworkValidatedLedgersType, LedgerFetcherType>;
using LedgerLoaderType = etl::detail::LedgerLoader<LoadBalancerType, LedgerFetcherType>;
using LedgerPublisherType = etl::detail::LedgerPublisher<SubscriptionManagerType>;
using TransformerType = etl::detail::Transformer<DataPipeType, LedgerLoaderType, LedgerPublisherType>;
using LedgerPublisherType = etl::detail::LedgerPublisher<SubscriptionManagerType, CacheType>;
using AmendmentBlockHandlerType = etl::detail::AmendmentBlockHandler<>;
using TransformerType =
etl::detail::Transformer<DataPipeType, LedgerLoaderType, LedgerPublisherType, AmendmentBlockHandlerType>;

util::Logger log_{"ETL"};

@@ -91,6 +95,7 @@ class ETLService
LedgerFetcherType ledgerFetcher_;
LedgerLoaderType ledgerLoader_;
LedgerPublisherType ledgerPublisher_;
AmendmentBlockHandlerType amendmentBlockHandler_;

SystemState state_;

@@ -116,7 +121,8 @@ public:
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManagerType> subscriptions,
std::shared_ptr<LoadBalancerType> balancer,
std::shared_ptr<NetworkValidatedLedgersType> ledgers);
std::shared_ptr<NetworkValidatedLedgersType> ledgers
);

/**
* @brief A factory function to spawn new ETLService instances.
@@ -137,7 +143,8 @@ public:
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManagerType> subscriptions,
std::shared_ptr<LoadBalancerType> balancer,
std::shared_ptr<NetworkValidatedLedgersType> ledgers)
std::shared_ptr<NetworkValidatedLedgersType> ledgers
)
{
auto etl = std::make_shared<ETLService>(config, ioc, backend, subscriptions, balancer, ledgers);
etl->run();
@@ -199,6 +206,16 @@ public:
return result;
}

/**
* @brief Get the etl nodes' state
* @return the etl nodes' state, nullopt if etl nodes are not connected
*/
std::optional<etl::ETLState>
getETLState() const noexcept
{
return loadBalancer_->getETLState();
}

private:
/**
* @brief Run the ETL pipeline.
@@ -225,6 +242,15 @@ private:
void
monitor();

/**
* @brief Monitor the network for newly validated ledgers and publish them to the ledgers stream
*
* @param nextSequence the ledger sequence to publish
* @return the next ledger sequence to publish
*/
uint32_t
publishNextSequence(uint32_t nextSequence);

/**
* @brief Monitor the database for newly written ledgers.
*
@@ -238,7 +264,7 @@ private:
* @return true if stopping; false otherwise
*/
bool
isStopping()
isStopping() const
{
return state_.isStopping;
}
@@ -251,7 +277,7 @@ private:
* @return the number of markers
*/
std::uint32_t
getNumMarkers()
getNumMarkers() const
{
return numMarkers_;
}
@@ -267,14 +293,5 @@ private:
*/
void
doWork();

/**
* @brief Sets amendment blocked flag.
*/
void
setAmendmentBlocked()
{
state_.isAmendmentBlocked = true;
}
};
} // namespace etl

42
src/etl/ETLState.cpp
Normal file
@@ -0,0 +1,42 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <etl/ETLState.h>
#include <rpc/JS.h>

namespace etl {

ETLState
tag_invoke(boost::json::value_to_tag<ETLState>, boost::json::value const& jv)
{
ETLState state;
auto const& jsonObject = jv.as_object();

if (!jsonObject.contains(JS(error))) {
if (jsonObject.contains(JS(result)) && jsonObject.at(JS(result)).as_object().contains(JS(info))) {
auto const rippledInfo = jsonObject.at(JS(result)).as_object().at(JS(info)).as_object();
if (rippledInfo.contains(JS(network_id)))
state.networkID.emplace(boost::json::value_to<int64_t>(rippledInfo.at(JS(network_id))));
}
}

return state;
}

} // namespace etl
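
The tag_invoke overload above is found by boost::json through argument-dependent lookup, so callers never name it directly. A small usage sketch (the JSON shape mirrors rippled's server_info reply):

#include <boost/json.hpp>
// #include <etl/ETLState.h>  // brings in etl::ETLState and the tag_invoke above

void example()
{
    auto const jv = boost::json::parse(R"({"result": {"info": {"network_id": 1}}})");

    // value_to dispatches to etl::tag_invoke(value_to_tag<ETLState>{}, jv) via ADL.
    auto const state = boost::json::value_to<etl::ETLState>(jv);
    // state.networkID is now engaged and holds 1
}
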
60
src/etl/ETLState.h
Normal file
@@ -0,0 +1,60 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <data/BackendInterface.h>

#include <boost/json.hpp>

#include <cstdint>
#include <optional>

namespace etl {

/**
* @brief This class is responsible for fetching and storing the state of the ETL information, such as the network id
*/
struct ETLState {
std::optional<uint32_t> networkID;

/**
* @brief Fetch the ETL state from the rippled server
* @param source The source to fetch the state from
* @return The ETL state, nullopt if source not available
*/
template <typename Forward>
static std::optional<ETLState>
fetchETLStateFromSource(Forward const& source) noexcept
{
auto const serverInfoRippled = data::synchronous([&source](auto yield) {
return source.forwardToRippled({{"command", "server_info"}}, std::nullopt, yield);
});

if (serverInfoRippled)
return boost::json::value_to<ETLState>(boost::json::value(*serverInfoRippled));

return std::nullopt;
}
};

ETLState
tag_invoke(boost::json::value_to_tag<ETLState>, boost::json::value const& jv);

} // namespace etl
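
fetchETLStateFromSource is duck-typed: any Forward argument only needs to expose the forwardToRippled member shown in this diff. An illustrative fake satisfying that contract (not clio code):

// Illustrative only: a stand-in source for tests or experiments.
struct FakeSource {
    std::optional<boost::json::object>
    forwardToRippled(
        boost::json::object const& request,
        std::optional<std::string> const& clientIp,
        boost::asio::yield_context yield
    ) const
    {
        // A real source would proxy {"command": "server_info"} to rippled here.
        return boost::json::object{{"result", boost::json::object{}}};
    }
};
// auto const state = etl::ETLState::fetchETLStateFromSource(FakeSource{});
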
@@ -24,6 +24,7 @@
#include <etl/Source.h>
#include <rpc/RPCHelpers.h>
#include <util/Profiler.h>
#include <util/Random.h>
#include <util/log/Logger.h>

#include <ripple/beast/net/IPEndpoint.h>
@@ -47,7 +48,8 @@ LoadBalancer::make_Source(
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<feed::SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers,
LoadBalancer& balancer)
LoadBalancer& balancer
)
{
auto src = std::make_unique<ProbingSource>(config, ioc, backend, subscriptions, validatedLedgers, balancer);
src->run();
@@ -61,7 +63,8 @@ LoadBalancer::make_LoadBalancer(
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<feed::SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers)
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers
)
{
return std::make_shared<LoadBalancer>(config, ioc, backend, subscriptions, validatedLedgers);
}
@@ -71,20 +74,54 @@ LoadBalancer::LoadBalancer(
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<feed::SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers)
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers
)
{
if (auto value = config.maybeValue<uint32_t>("num_markers"); value)
downloadRanges_ = std::clamp(*value, 1u, 256u);
else if (backend->fetchLedgerRange())
static constexpr std::uint32_t MAX_DOWNLOAD = 256;
if (auto value = config.maybeValue<uint32_t>("num_markers"); value) {
downloadRanges_ = std::clamp(*value, 1u, MAX_DOWNLOAD);
} else if (backend->fetchLedgerRange()) {
downloadRanges_ = 4;
}

for (auto const& entry : config.array("etl_sources"))
{
auto const allowNoEtl = config.valueOr("allow_no_etl", false);

auto const checkOnETLFailure = [this, allowNoEtl](std::string const& log) {
LOG(log_.error()) << log;

if (!allowNoEtl) {
LOG(log_.error()) << "Set allow_no_etl as true in config to allow clio run without valid ETL sources.";
throw std::logic_error("ETL configuration error.");
}
};

for (auto const& entry : config.array("etl_sources")) {
std::unique_ptr<Source> source = make_Source(entry, ioc, backend, subscriptions, validatedLedgers, *this);

// checking etl node validity
auto const stateOpt = ETLState::fetchETLStateFromSource(*source);

if (!stateOpt) {
checkOnETLFailure(fmt::format(
"Failed to fetch ETL state from source = {} Please check the configuration and network",
source->toString()
));
} else if (etlState_ && etlState_->networkID && stateOpt->networkID && etlState_->networkID != stateOpt->networkID) {
checkOnETLFailure(fmt::format(
"ETL sources must be on the same network. Source network id = {} does not match others network id = {}",
*(stateOpt->networkID),
*(etlState_->networkID)
));
} else {
etlState_ = stateOpt;
}

sources_.push_back(std::move(source));
LOG(log_.info()) << "Added etl source - " << sources_.back()->toString();
}

if (sources_.empty())
checkOnETLFailure("No ETL sources configured. Please check the configuration");
}
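
The constructor above enforces one invariant across sources: two sources that both report a network_id must agree; a missing id on either side is not treated as a conflict. Restated as a small predicate (illustrative, not clio code):

#include <cstdint>
#include <optional>

// IDs conflict only when both are known and differ; this mirrors the check above.
bool
networksConflict(std::optional<uint32_t> const& a, std::optional<uint32_t> const& b)
{
    return a.has_value() && b.has_value() && *a != *b;
}
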

LoadBalancer::~LoadBalancer()
@@ -100,15 +137,17 @@ LoadBalancer::loadInitialLedger(uint32_t sequence, bool cacheOnly)
[this, &response, &sequence, cacheOnly](auto& source) {
auto [data, res] = source->loadInitialLedger(sequence, downloadRanges_, cacheOnly);

if (!res)
if (!res) {
LOG(log_.error()) << "Failed to download initial ledger."
<< " Sequence = " << sequence << " source = " << source->toString();
else
} else {
response = std::move(data);
}

return res;
},
sequence);
sequence
);
return {std::move(response), success};
}

@@ -116,43 +155,43 @@ LoadBalancer::OptionalGetLedgerResponseType
LoadBalancer::fetchLedger(uint32_t ledgerSequence, bool getObjects, bool getObjectNeighbors)
{
GetLedgerResponseType response;
bool success = execute(
bool const success = execute(
[&response, ledgerSequence, getObjects, getObjectNeighbors, log = log_](auto& source) {
auto [status, data] = source->fetchLedger(ledgerSequence, getObjects, getObjectNeighbors);
response = std::move(data);
if (status.ok() && response.validated())
{
if (status.ok() && response.validated()) {
LOG(log.info()) << "Successfully fetched ledger = " << ledgerSequence
<< " from source = " << source->toString();
return true;
}
else
{
LOG(log.warn()) << "Could not fetch ledger " << ledgerSequence << ", Reply: " << response.DebugString()
<< ", error_code: " << status.error_code() << ", error_msg: " << status.error_message()
<< ", source = " << source->toString();
return false;
}

LOG(log.warn()) << "Could not fetch ledger " << ledgerSequence << ", Reply: " << response.DebugString()
<< ", error_code: " << status.error_code() << ", error_msg: " << status.error_message()
<< ", source = " << source->toString();
return false;
},
ledgerSequence);
if (success)
ledgerSequence
);
if (success) {
return response;
else
return {};
}
return {};
}

std::optional<boost::json::object>
LoadBalancer::forwardToRippled(
boost::json::object const& request,
std::string const& clientIp,
boost::asio::yield_context yield) const
std::optional<std::string> const& clientIp,
boost::asio::yield_context yield
) const
{
srand((unsigned)time(0));
auto sourceIdx = rand() % sources_.size();
std::size_t sourceIdx = 0;
if (!sources_.empty())
sourceIdx = util::Random::uniform(0ul, sources_.size() - 1);

auto numAttempts = 0u;

while (numAttempts < sources_.size())
{
while (numAttempts < sources_.size()) {
if (auto res = sources_[sourceIdx]->forwardToRippled(request, clientIp, yield))
return res;

@@ -166,8 +205,7 @@ LoadBalancer::forwardToRippled(
bool
LoadBalancer::shouldPropagateTxnStream(Source* in) const
{
for (auto& src : sources_)
{
for (auto& src : sources_) {
assert(src);

// We pick the first Source encountered that is connected
@@ -193,12 +231,13 @@ template <class Func>
bool
LoadBalancer::execute(Func f, uint32_t ledgerSequence)
{
srand((unsigned)time(0));
auto sourceIdx = rand() % sources_.size();
std::size_t sourceIdx = 0;
if (!sources_.empty())
sourceIdx = util::Random::uniform(0ul, sources_.size() - 1);

auto numAttempts = 0;

while (true)
{
while (true) {
auto& source = sources_[sourceIdx];

LOG(log_.debug()) << "Attempting to execute func. ledger sequence = " << ledgerSequence
@@ -207,30 +246,23 @@ LoadBalancer::execute(Func f, uint32_t ledgerSequence)
/* Sometimes rippled has the ledger but doesn't actually know it. However,
this does NOT happen in the normal case and is safe to remove.
The || true is only needed when loading full history standalone */
if (source->hasLedger(ledgerSequence))
{
bool res = f(source);
if (res)
{
if (source->hasLedger(ledgerSequence)) {
bool const res = f(source);
if (res) {
LOG(log_.debug()) << "Successfully executed func at source = " << source->toString()
<< " - ledger sequence = " << ledgerSequence;
break;
}
else
{
LOG(log_.warn()) << "Failed to execute func at source = " << source->toString()
<< " - ledger sequence = " << ledgerSequence;
}
}
else
{

LOG(log_.warn()) << "Failed to execute func at source = " << source->toString()
<< " - ledger sequence = " << ledgerSequence;
} else {
LOG(log_.warn()) << "Ledger not present at source = " << source->toString()
<< " - ledger sequence = " << ledgerSequence;
}
sourceIdx = (sourceIdx + 1) % sources_.size();
numAttempts++;
if (numAttempts % sources_.size() == 0)
{
if (numAttempts % sources_.size() == 0) {
LOG(log_.info()) << "Ledger sequence " << ledgerSequence
<< " is not yet available from any configured sources. "
<< "Sleeping and trying again";
@@ -239,4 +271,15 @@ LoadBalancer::execute(Func f, uint32_t ledgerSequence)
}
return true;
}
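
util::Random::uniform replaces the removed srand/rand pattern, which reseeded global state on every call. A standalone sketch of the same selection using <random> (this is not util::Random's actual implementation):

#include <cstddef>
#include <random>

// Pick a starting source index uniformly without global srand()/rand() state.
// Precondition: numSources > 0, mirroring the !sources_.empty() guard above.
std::size_t
pickSourceIndex(std::size_t numSources)
{
    static std::mt19937 gen{std::random_device{}()};  // seeded once, not per call
    std::uniform_int_distribution<std::size_t> dist(0, numSources - 1);  // inclusive
    return dist(gen);
}
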

std::optional<ETLState>
LoadBalancer::getETLState() noexcept
{
if (!etlState_) {
// retry ETLState fetch
etlState_ = ETLState::fetchETLStateFromSource(*this);
}
return etlState_;
}

} // namespace etl

@@ -21,6 +21,7 @@

#include <data/BackendInterface.h>
#include <etl/ETLHelpers.h>
#include <etl/ETLState.h>
#include <feed/SubscriptionManager.h>
#include <util/config/Config.h>
#include <util/log/Logger.h>
@@ -47,17 +48,20 @@ namespace etl {
* which ledgers have been validated by the network, and the range of ledgers each etl source has). This class also
* allows requests for ledger data to be load balanced across all possible ETL sources.
*/
class LoadBalancer
{
class LoadBalancer {
public:
using RawLedgerObjectType = org::xrpl::rpc::v1::RawLedgerObject;
using GetLedgerResponseType = org::xrpl::rpc::v1::GetLedgerResponse;
using OptionalGetLedgerResponseType = std::optional<GetLedgerResponseType>;

private:
static constexpr std::uint32_t DEFAULT_DOWNLOAD_RANGES = 16;

util::Logger log_{"ETL"};
std::vector<std::unique_ptr<Source>> sources_;
std::uint32_t downloadRanges_ = 16; /*< The number of markers to use when downloading initial ledger */
std::optional<ETLState> etlState_;
std::uint32_t downloadRanges_ =
DEFAULT_DOWNLOAD_RANGES; /*< The number of markers to use when downloading initial ledger */

public:
/**
@@ -74,7 +78,8 @@ public:
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<feed::SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers);
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers
);

/**
* @brief A factory function for the load balancer.
@@ -91,7 +96,8 @@ public:
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<feed::SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers);
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers
);

/**
* @brief A factory function for the ETL source.
@@ -110,7 +116,8 @@ public:
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<feed::SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers,
LoadBalancer& balancer);
LoadBalancer& balancer
);

~LoadBalancer();

@@ -161,13 +168,23 @@ public:
* @brief Forward a JSON RPC request to a randomly selected rippled node.
*
* @param request JSON-RPC request to forward
* @param clientIp The IP address of the peer
* @param clientIp The IP address of the peer, if known
* @param yield The coroutine context
* @return Response received from rippled node as JSON object on success; nullopt on failure
*/
std::optional<boost::json::object>
forwardToRippled(boost::json::object const& request, std::string const& clientIp, boost::asio::yield_context yield)
const;
forwardToRippled(
boost::json::object const& request,
std::optional<std::string> const& clientIp,
boost::asio::yield_context yield
) const;

/**
* @brief Return state of ETL nodes.
* @return ETL state, nullopt if etl nodes not available
*/
std::optional<ETLState>
getETLState() noexcept;

private:
/**

@@ -45,27 +45,26 @@ getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
// that were changed.
std::optional<ripple::AccountID> owner;

for (ripple::STObject const& node : txMeta.getNodes())
{
for (ripple::STObject const& node : txMeta.getNodes()) {
if (node.getFieldU16(ripple::sfLedgerEntryType) != ripple::ltNFTOKEN_PAGE)
continue;

if (!owner)
owner = ripple::AccountID::fromVoid(node.getFieldH256(ripple::sfLedgerIndex).data());

if (node.getFName() == ripple::sfCreatedNode)
{
if (node.getFName() == ripple::sfCreatedNode) {
ripple::STArray const& toAddNFTs =
node.peekAtField(ripple::sfNewFields).downcast<ripple::STObject>().getFieldArray(ripple::sfNFTokens);
std::transform(
toAddNFTs.begin(), toAddNFTs.end(), std::back_inserter(finalIDs), [](ripple::STObject const& nft) {
return nft.getFieldH256(ripple::sfNFTokenID);
});
toAddNFTs.begin(),
toAddNFTs.end(),
std::back_inserter(finalIDs),
[](ripple::STObject const& nft) { return nft.getFieldH256(ripple::sfNFTokenID); }
);
}
// Else it's modified, as there should never be a deleted NFToken page
// as a result of a mint.
else
{
else {
// When a mint results in splitting an existing page,
// it results in a created page and a modified node. Sometimes,
// the created node needs to be linked to a third page, resulting
@@ -82,9 +81,11 @@ getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)

ripple::STArray const& toAddNFTs = previousFields.getFieldArray(ripple::sfNFTokens);
std::transform(
toAddNFTs.begin(), toAddNFTs.end(), std::back_inserter(prevIDs), [](ripple::STObject const& nft) {
return nft.getFieldH256(ripple::sfNFTokenID);
});
toAddNFTs.begin(),
toAddNFTs.end(),
std::back_inserter(prevIDs),
[](ripple::STObject const& nft) { return nft.getFieldH256(ripple::sfNFTokenID); }
);

ripple::STArray const& toAddFinalNFTs =
node.peekAtField(ripple::sfFinalFields).downcast<ripple::STObject>().getFieldArray(ripple::sfNFTokens);
@@ -92,7 +93,8 @@ getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
toAddFinalNFTs.begin(),
toAddFinalNFTs.end(),
std::back_inserter(finalIDs),
[](ripple::STObject const& nft) { return nft.getFieldH256(ripple::sfNFTokenID); });
[](ripple::STObject const& nft) { return nft.getFieldH256(ripple::sfNFTokenID); }
);
}
}

@@ -105,9 +107,10 @@ getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)

// There should always be a difference so the returned finalIDs
// iterator should never be end(). But better safe than sorry.
if (finalIDs.size() != prevIDs.size() + 1 || diff.first == finalIDs.end() || !owner)
throw std::runtime_error(
fmt::format(" - unexpected NFTokenMint data in tx {}", strHex(sttx.getTransactionID())));
if (finalIDs.size() != prevIDs.size() + 1 || diff.first == finalIDs.end() || !owner) {
throw std::runtime_error(fmt::format(" - unexpected NFTokenMint data in tx {}", strHex(sttx.getTransactionID()))
);
}

return {
{NFTTransactionsData(*diff.first, txMeta, sttx.getTransactionID())},
@@ -123,8 +126,7 @@ getNFTokenBurnData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
// Determine who owned the token when it was burned by finding an
// NFTokenPage that was deleted or modified that contains this
// tokenID.
for (ripple::STObject const& node : txMeta.getNodes())
{
for (ripple::STObject const& node : txMeta.getNodes()) {
if (node.getFieldU16(ripple::sfLedgerEntryType) != ripple::ltNFTOKEN_PAGE ||
node.getFName() == ripple::sfCreatedNode)
continue;
@@ -139,16 +141,15 @@ getNFTokenBurnData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
// need to look in the FinalFields.
std::optional<ripple::STArray> prevNFTs;

if (node.isFieldPresent(ripple::sfPreviousFields))
{
if (node.isFieldPresent(ripple::sfPreviousFields)) {
ripple::STObject const& previousFields =
node.peekAtField(ripple::sfPreviousFields).downcast<ripple::STObject>();
if (previousFields.isFieldPresent(ripple::sfNFTokens))
prevNFTs = previousFields.getFieldArray(ripple::sfNFTokens);
}
else if (!prevNFTs && node.getFName() == ripple::sfDeletedNode)
} else if (!prevNFTs && node.getFName() == ripple::sfDeletedNode) {
prevNFTs =
node.peekAtField(ripple::sfFinalFields).downcast<ripple::STObject>().getFieldArray(ripple::sfNFTokens);
}

if (!prevNFTs)
continue;
@@ -157,14 +158,14 @@ getNFTokenBurnData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
std::find_if(prevNFTs->begin(), prevNFTs->end(), [&tokenID](ripple::STObject const& candidate) {
return candidate.getFieldH256(ripple::sfNFTokenID) == tokenID;
});
if (nft != prevNFTs->end())
if (nft != prevNFTs->end()) {
return std::make_pair(
txs,
NFTsData(
tokenID,
ripple::AccountID::fromVoid(node.getFieldH256(ripple::sfLedgerIndex).data()),
txMeta,
true));
tokenID, ripple::AccountID::fromVoid(node.getFieldH256(ripple::sfLedgerIndex).data()), txMeta, true
)
);
}
}

std::stringstream msg;
@@ -178,14 +179,12 @@ getNFTokenAcceptOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx
// If we have the buy offer from this tx, we can determine the owner
// more easily by just looking at the owner of the accepted NFTokenOffer
// object.
if (sttx.isFieldPresent(ripple::sfNFTokenBuyOffer))
{
if (sttx.isFieldPresent(ripple::sfNFTokenBuyOffer)) {
auto const affectedBuyOffer =
std::find_if(txMeta.getNodes().begin(), txMeta.getNodes().end(), [&sttx](ripple::STObject const& node) {
return node.getFieldH256(ripple::sfLedgerIndex) == sttx.getFieldH256(ripple::sfNFTokenBuyOffer);
});
if (affectedBuyOffer == txMeta.getNodes().end())
{
if (affectedBuyOffer == txMeta.getNodes().end()) {
std::stringstream msg;
msg << " - unexpected NFTokenAcceptOffer data in tx " << sttx.getTransactionID();
throw std::runtime_error(msg.str());
@@ -207,8 +206,7 @@ getNFTokenAcceptOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx
std::find_if(txMeta.getNodes().begin(), txMeta.getNodes().end(), [&sttx](ripple::STObject const& node) {
return node.getFieldH256(ripple::sfLedgerIndex) == sttx.getFieldH256(ripple::sfNFTokenSellOffer);
});
if (affectedSellOffer == txMeta.getNodes().end())
{
if (affectedSellOffer == txMeta.getNodes().end()) {
std::stringstream msg;
msg << " - unexpected NFTokenAcceptOffer data in tx " << sttx.getTransactionID();
throw std::runtime_error(msg.str());
@@ -222,8 +220,7 @@ getNFTokenAcceptOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx
.downcast<ripple::STObject>()
.getAccountID(ripple::sfOwner);

for (ripple::STObject const& node : txMeta.getNodes())
{
for (ripple::STObject const& node : txMeta.getNodes()) {
if (node.getFieldU16(ripple::sfLedgerEntryType) != ripple::ltNFTOKEN_PAGE ||
node.getFName() == ripple::sfDeletedNode)
continue;
@@ -234,10 +231,11 @@ getNFTokenAcceptOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx
continue;

ripple::STArray const& nfts = [&node] {
if (node.getFName() == ripple::sfCreatedNode)
if (node.getFName() == ripple::sfCreatedNode) {
return node.peekAtField(ripple::sfNewFields)
.downcast<ripple::STObject>()
.getFieldArray(ripple::sfNFTokens);
}
return node.peekAtField(ripple::sfFinalFields)
.downcast<ripple::STObject>()
.getFieldArray(ripple::sfNFTokens);
@@ -246,10 +244,11 @@ getNFTokenAcceptOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx
auto const nft = std::find_if(nfts.begin(), nfts.end(), [&tokenID](ripple::STObject const& candidate) {
return candidate.getFieldH256(ripple::sfNFTokenID) == tokenID;
});
if (nft != nfts.end())
if (nft != nfts.end()) {
return {
{NFTTransactionsData(tokenID, txMeta, sttx.getTransactionID())},
NFTsData(tokenID, nodeOwner, txMeta, false)};
}
}

std::stringstream msg;
@@ -265,8 +264,7 @@ std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
getNFTokenCancelOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
{
std::vector<NFTTransactionsData> txs;
for (ripple::STObject const& node : txMeta.getNodes())
{
for (ripple::STObject const& node : txMeta.getNodes()) {
if (node.getFieldU16(ripple::sfLedgerEntryType) != ripple::ltNFTOKEN_OFFER)
continue;

@@ -302,8 +300,7 @@ getNFTDataFromTx(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
if (txMeta.getResultTER() != ripple::tesSUCCESS)
return {{}, {}};

switch (sttx.getTxnType())
{
switch (sttx.getTxnType()) {
case ripple::TxType::ttNFTOKEN_MINT:
return getNFTokenMintData(txMeta, sttx);


@@ -46,6 +46,6 @@ getNFTDataFromTx(ripple::TxMeta const& txMeta, ripple::STTx const& sttx);
* @return The NFT data as a vector
*/
std::vector<NFTsData>
getNFTDataFromObj(std::uint32_t const seq, std::string const& key, std::string const& blob);
getNFTDataFromObj(std::uint32_t seq, std::string const& key, std::string const& blob);

} // namespace etl
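
The mint path above depends on finalIDs holding exactly one more ID than prevIDs, with diff.first marking the divergence. A standalone sketch of locating that single new element in two sorted sequences (the real diff computation sits outside this hunk, so this is illustrative only):

#include <algorithm>
#include <cassert>
#include <vector>

// With sorted ID lists differing by one insertion, std::mismatch finds the
// first divergence; the iterator into the longer list points at the new ID.
int
findMintedId(std::vector<int> const& prevIDs, std::vector<int> const& finalIDs)
{
    assert(finalIDs.size() == prevIDs.size() + 1);
    auto const diff = std::mismatch(prevIDs.begin(), prevIDs.end(), finalIDs.begin());
    return *diff.second;
}
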
@@ -28,7 +28,8 @@ ProbingSource::ProbingSource(
std::shared_ptr<feed::SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> nwvl,
LoadBalancer& balancer,
boost::asio::ssl::context sslCtx)
boost::asio::ssl::context sslCtx
)
: sslCtx_{std::move(sslCtx)}
, sslSrc_{make_shared<
SslSource>(config, ioc, std::ref(sslCtx_), backend, subscriptions, nwvl, balancer, make_SSLHooks())}
@@ -74,8 +75,7 @@ ProbingSource::hasLedger(uint32_t sequence) const
boost::json::object
ProbingSource::toJson() const
{
if (!currentSrc_)
{
if (!currentSrc_) {
boost::json::object sourcesJson = {
{"ws", plainSrc_->toJson()},
{"wss", sslSrc_->toJson()},
@@ -123,19 +123,26 @@ ProbingSource::fetchLedger(uint32_t sequence, bool getObjects, bool getObjectNei
std::optional<boost::json::object>
ProbingSource::forwardToRippled(
boost::json::object const& request,
std::string const& clientIp,
boost::asio::yield_context yield) const
std::optional<std::string> const& clientIp,
boost::asio::yield_context yield
) const
{
if (!currentSrc_)
return {};
if (!currentSrc_) // Source may connect to rippled before the connection built to check the validity
{
if (auto res = plainSrc_->forwardToRippled(request, clientIp, yield))
return res;

return sslSrc_->forwardToRippled(request, clientIp, yield);
}
return currentSrc_->forwardToRippled(request, clientIp, yield);
}

std::optional<boost::json::object>
ProbingSource::requestFromRippled(
boost::json::object const& request,
std::string const& clientIp,
boost::asio::yield_context yield) const
std::optional<std::string> const& clientIp,
boost::asio::yield_context yield
) const
{
if (!currentSrc_)
return {};
@@ -147,12 +154,11 @@ ProbingSource::make_SSLHooks() noexcept
{
return {// onConnected
[this](auto ec) {
std::lock_guard lck(mtx_);
std::lock_guard const lck(mtx_);
if (currentSrc_)
return SourceHooks::Action::STOP;

if (!ec)
{
if (!ec) {
plainSrc_->pause();
currentSrc_ = sslSrc_;
LOG(log_.info()) << "Selected WSS as the main source: " << currentSrc_->toString();
@@ -160,10 +166,9 @@ ProbingSource::make_SSLHooks() noexcept
return SourceHooks::Action::PROCEED;
},
// onDisconnected
[this](auto ec) {
std::lock_guard lck(mtx_);
if (currentSrc_)
{
[this](auto /* ec */) {
std::lock_guard const lck(mtx_);
if (currentSrc_) {
currentSrc_ = nullptr;
plainSrc_->resume();
}
@@ -176,12 +181,11 @@ ProbingSource::make_PlainHooks() noexcept
{
return {// onConnected
[this](auto ec) {
std::lock_guard lck(mtx_);
std::lock_guard const lck(mtx_);
if (currentSrc_)
return SourceHooks::Action::STOP;

if (!ec)
{
if (!ec) {
sslSrc_->pause();
currentSrc_ = plainSrc_;
LOG(log_.info()) << "Selected Plain WS as the main source: " << currentSrc_->toString();
@@ -189,10 +193,9 @@ ProbingSource::make_PlainHooks() noexcept
return SourceHooks::Action::PROCEED;
},
// onDisconnected
[this](auto ec) {
std::lock_guard lck(mtx_);
if (currentSrc_)
{
[this](auto /* ec */) {
std::lock_guard const lck(mtx_);
if (currentSrc_) {
currentSrc_ = nullptr;
sslSrc_->resume();
}

@@ -39,8 +39,7 @@ namespace etl {
* First to connect pauses the other and the probing is considered done at this point.
* If however the connected source loses connection the probing is kickstarted again.
*/
class ProbingSource : public Source
{
class ProbingSource : public Source {
public:
// TODO: inject when unit tests will be written for ProbingSource
using GetLedgerResponseType = org::xrpl::rpc::v1::GetLedgerResponse;
@@ -73,9 +72,10 @@ public:
std::shared_ptr<feed::SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> nwvl,
LoadBalancer& balancer,
boost::asio::ssl::context sslCtx = boost::asio::ssl::context{boost::asio::ssl::context::tlsv12});
boost::asio::ssl::context sslCtx = boost::asio::ssl::context{boost::asio::ssl::context::tlsv12}
);

~ProbingSource() = default;
~ProbingSource() override = default;

void
run() override;
@@ -105,8 +105,11 @@ public:
fetchLedger(uint32_t sequence, bool getObjects = true, bool getObjectNeighbors = false) override;

std::optional<boost::json::object>
forwardToRippled(boost::json::object const& request, std::string const& clientIp, boost::asio::yield_context yield)
const override;
forwardToRippled(
boost::json::object const& request,
std::optional<std::string> const& clientIp,
boost::asio::yield_context yield
) const override;

boost::uuids::uuid
token() const override;
@@ -115,8 +118,9 @@ private:
std::optional<boost::json::object>
requestFromRippled(
boost::json::object const& request,
std::string const& clientIp,
boost::asio::yield_context yield) const override;
std::optional<std::string> const& clientIp,
boost::asio::yield_context yield
) const override;

SourceHooks
make_SSLHooks() noexcept;
@@ -124,4 +128,4 @@ private:
SourceHooks
make_PlainHooks() noexcept;
};
} // namespace etl
} // namespace etl
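
The class comment above describes the probing handshake: both transports race to connect, the winner pauses the loser, and a disconnect restarts the race. The essence of the hook wiring from this diff, condensed (illustrative; self/other stand for the plain/ssl source pair):

// Illustrative condensation of make_SSLHooks()/make_PlainHooks() above.
auto const onConnected = [&](boost::beast::error_code ec) {
    std::lock_guard const lck(mtx_);
    if (currentSrc_)
        return SourceHooks::Action::STOP;  // the other transport already won
    if (!ec) {
        other->pause();                    // stop probing on the losing transport
        currentSrc_ = self;
    }
    return SourceHooks::Action::PROCEED;
};
auto const onDisconnected = [&](boost::beast::error_code /* ec */) {
    std::lock_guard const lck(mtx_);
    if (currentSrc_) {                     // the winner dropped: restart the race
        currentSrc_ = nullptr;
        other->resume();
    }
    return SourceHooks::Action::PROCEED;
};
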

@@ -50,27 +50,22 @@ PlainSource::close(bool startAgain)
if (closing_)
return;

if (derived().ws().is_open())
{
if (derived().ws().is_open()) {
// onStop() also calls close(). If the async_close is called twice,
// an assertion fails. Using closing_ makes sure async_close is only
// called once
closing_ = true;
derived().ws().async_close(boost::beast::websocket::close_code::normal, [this, startAgain](auto ec) {
if (ec)
{
if (ec) {
LOG(log_.error()) << "async_close: error code = " << ec << " - " << toString();
}
closing_ = false;
if (startAgain)
{
if (startAgain) {
ws_ = std::make_unique<StreamType>(strand_);
run();
}
});
}
else if (startAgain)
{
} else if (startAgain) {
ws_ = std::make_unique<StreamType>(strand_);
run();
}
@@ -85,26 +80,21 @@ SslSource::close(bool startAgain)
if (closing_)
return;

if (derived().ws().is_open())
{
// onStop() also calls close(). If the async_close is called twice, an assertion fails. Using closing_ makes
// sure async_close is only called once
if (derived().ws().is_open()) {
// onStop() also calls close(). If the async_close is called twice, an assertion fails. Using closing_
// makes sure async_close is only called once
closing_ = true;
derived().ws().async_close(boost::beast::websocket::close_code::normal, [this, startAgain](auto ec) {
if (ec)
{
if (ec) {
LOG(log_.error()) << "async_close: error code = " << ec << " - " << toString();
}
closing_ = false;
if (startAgain)
{
if (startAgain) {
ws_ = std::make_unique<StreamType>(strand_, *sslCtx_);
run();
}
});
}
else if (startAgain)
{
} else if (startAgain) {
ws_ = std::make_unique<StreamType>(strand_, *sslCtx_);
run();
}
@@ -114,15 +104,13 @@ SslSource::close(bool startAgain)
void
PlainSource::onConnect(
boost::beast::error_code ec,
boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint)
boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint
)
{
if (ec)
{
if (ec) {
// start over
reconnect(ec);
}
else
{
} else {
connected_ = true;
numFailures_ = 0;

@@ -134,7 +122,8 @@ PlainSource::onConnect(
boost::beast::websocket::stream_base::decorator([](boost::beast::websocket::request_type& req) {
req.set(boost::beast::http::field::user_agent, "clio-client");
req.set("X-User", "clio-client");
}));
})
);

// Update the host_ string. This will provide the value of the
// Host HTTP header during the WebSocket handshake.
@@ -147,13 +136,10 @@ PlainSource::onConnect(
void
SslSource::onConnect(boost::beast::error_code ec, boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint)
{
if (ec)
{
if (ec) {
// start over
reconnect(ec);
}
else
{
} else {
connected_ = true;
numFailures_ = 0;

@@ -165,28 +151,28 @@ SslSource::onConnect(boost::beast::error_code ec, boost::asio::ip::tcp::resolver
boost::beast::websocket::stream_base::decorator([](boost::beast::websocket::request_type& req) {
req.set(boost::beast::http::field::user_agent, "clio-client");
req.set("X-User", "clio-client");
}));
})
);

// Update the host_ string. This will provide the value of the
// Host HTTP header during the WebSocket handshake.
// See https://tools.ietf.org/html/rfc7230#section-5.4
auto host = ip_ + ':' + std::to_string(endpoint.port());
ws().next_layer().async_handshake(
boost::asio::ssl::stream_base::client, [this, endpoint](auto ec) { onSslHandshake(ec, endpoint); });
ws().next_layer().async_handshake(boost::asio::ssl::stream_base::client, [this, endpoint](auto ec) {
onSslHandshake(ec, endpoint);
});
}
}

void
SslSource::onSslHandshake(
boost::beast::error_code ec,
boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint)
boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint
)
{
if (ec)
{
if (ec) {
reconnect(ec);
}
else
{
} else {
auto host = ip_ + ':' + std::to_string(endpoint.port());
ws().async_handshake(host, "/", [this](auto ec) { onHandshake(ec); });
}

322
src/etl/Source.h
@@ -38,11 +38,11 @@
#include <boost/uuid/uuid.hpp>
#include <boost/uuid/uuid_generators.hpp>
#include <grpcpp/grpcpp.h>
#include <utility>

class ProbingSource;
namespace feed {
class SubscriptionManager;
}
} // namespace feed

// TODO: we use Source so that we can store a vector of Sources
// but we also use CRTP for implementation of the common logic - this is a bit strange because CRTP as used here is
@@ -52,14 +52,15 @@ class SubscriptionManager;

namespace etl {

class ProbingSource;

/**
* @brief Base class for all ETL sources.
*
* Note: Since sources below are implemented via CRTP, it sort of makes no sense to have a virtual base class.
* We should consider using a vector of ProbingSources instead of vector of unique ptrs to this virtual base.
*/
class Source
{
class Source {
public:
/** @return true if source is connected; false otherwise */
virtual bool
@@ -123,13 +124,16 @@ public:
* @brief Forward a request to rippled.
*
* @param request The request to forward
* @param clientIp IP of the client forwarding this request
* @param clientIp IP of the client forwarding this request if known
* @param yield The coroutine context
* @return Response wrapped in an optional on success; nullopt otherwise
*/
virtual std::optional<boost::json::object>
forwardToRippled(boost::json::object const& request, std::string const& clientIp, boost::asio::yield_context yield)
const = 0;
forwardToRippled(
boost::json::object const& request,
std::optional<std::string> const& clientIp,
|
||||
boost::asio::yield_context yield
|
||||
) const = 0;
|
||||
|
||||
/**
|
||||
* @return A token that uniquely identifies this source instance.
|
||||
@@ -161,15 +165,15 @@ private:
|
||||
virtual std::optional<boost::json::object>
|
||||
requestFromRippled(
|
||||
boost::json::object const& request,
|
||||
std::string const& clientIp,
|
||||
boost::asio::yield_context yield) const = 0;
|
||||
std::optional<std::string> const& clientIp,
|
||||
boost::asio::yield_context yield
|
||||
) const = 0;
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Hooks for source events such as connects and disconnects.
|
||||
*/
|
||||
struct SourceHooks
|
||||
{
|
||||
struct SourceHooks {
|
||||
enum class Action { STOP, PROCEED };
|
||||
|
||||
std::function<Action(boost::beast::error_code)> onConnected;
|
||||
@@ -182,8 +186,7 @@ struct SourceHooks
 * @tparam Derived The derived class for CRTP
 */
template <class Derived>
class SourceImpl : public Source
{
class SourceImpl : public Source {
    std::string wsPort_;
    std::string grpcPort_;

@@ -206,7 +209,7 @@ class SourceImpl : public Source
    LoadBalancer& balancer_;

    etl::detail::ForwardCache forwardCache_;
    boost::uuids::uuid uuid_;
    boost::uuids::uuid uuid_{};

protected:
    std::string ip_;

@@ -244,16 +247,17 @@ public:
        std::shared_ptr<feed::SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> validatedLedgers,
        LoadBalancer& balancer,
        SourceHooks hooks)
        : networkValidatedLedgers_(validatedLedgers)
        , backend_(backend)
        , subscriptions_(subscriptions)
        SourceHooks hooks
    )
        : networkValidatedLedgers_(std::move(validatedLedgers))
        , backend_(std::move(backend))
        , subscriptions_(std::move(subscriptions))
        , balancer_(balancer)
        , forwardCache_(config, ioc, *this)
        , strand_(boost::asio::make_strand(ioc))
        , timer_(strand_)
        , resolver_(strand_)
        , hooks_(hooks)
        , hooks_(std::move(hooks))
    {
        static boost::uuids::random_generator uuidGenerator;
        uuid_ = uuidGenerator();

@@ -261,28 +265,25 @@ public:
        ip_ = config.valueOr<std::string>("ip", {});
        wsPort_ = config.valueOr<std::string>("ws_port", {});

        if (auto value = config.maybeValue<std::string>("grpc_port"); value)
        {
        if (auto value = config.maybeValue<std::string>("grpc_port"); value) {
            grpcPort_ = *value;
            try
            {
                boost::asio::ip::tcp::endpoint endpoint{boost::asio::ip::make_address(ip_), std::stoi(grpcPort_)};
            try {
                boost::asio::ip::tcp::endpoint const endpoint{boost::asio::ip::make_address(ip_), std::stoi(grpcPort_)};
                std::stringstream ss;
                ss << endpoint;
                grpc::ChannelArguments chArgs;
                chArgs.SetMaxReceiveMessageSize(-1);
                stub_ = org::xrpl::rpc::v1::XRPLedgerAPIService::NewStub(
                    grpc::CreateCustomChannel(ss.str(), grpc::InsecureChannelCredentials(), chArgs));
                    grpc::CreateCustomChannel(ss.str(), grpc::InsecureChannelCredentials(), chArgs)
                );
                LOG(log_.debug()) << "Made stub for remote = " << toString();
            }
            catch (std::exception const& e)
            {
            } catch (std::exception const& e) {
                LOG(log_.debug()) << "Exception while creating stub = " << e.what() << " . Remote = " << toString();
            }
        }
    }

    ~SourceImpl()
    ~SourceImpl() override
    {
        derived().close(false);
    }
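The stub construction above lifts gRPC's default 4 MB receive cap, which initial ledger downloads would otherwise exceed. The same pattern in a standalone form (the host:port string is a placeholder):

#include <grpcpp/grpcpp.h>
#include <memory>
#include <string>

// Builds a channel with an unlimited receive message size, as SourceImpl does.
std::shared_ptr<grpc::Channel> makeEtlChannel(std::string const& hostPort)
{
    grpc::ChannelArguments args;
    args.SetMaxReceiveMessageSize(-1);  // -1 means "no limit" in gRPC
    return grpc::CreateCustomChannel(hostPort, grpc::InsecureChannelCredentials(), args);
}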
@@ -302,29 +303,23 @@ public:
    std::optional<boost::json::object>
    requestFromRippled(
        boost::json::object const& request,
        std::string const& clientIp,
        boost::asio::yield_context yield) const override
        std::optional<std::string> const& clientIp,
        boost::asio::yield_context yield
    ) const override
    {
        LOG(log_.trace()) << "Attempting to forward request to tx. "
                          << "request = " << boost::json::serialize(request);
        LOG(log_.trace()) << "Attempting to forward request to tx. Request = " << boost::json::serialize(request);

        boost::json::object response;
        if (!isConnected())
        {
            LOG(log_.error()) << "Attempted to proxy but failed to connect to tx";
            return {};
        }

        namespace beast = boost::beast;
        namespace http = beast::http;
        namespace http = boost::beast::http;
        namespace websocket = beast::websocket;
        namespace net = boost::asio;
        using tcp = boost::asio::ip::tcp;

        try
        {
        try {
            auto executor = boost::asio::get_associated_executor(yield);
            boost::beast::error_code ec;
            beast::error_code ec;
            tcp::resolver resolver{executor};

            auto ws = std::make_unique<websocket::stream<beast::tcp_stream>>(executor);

@@ -338,12 +333,15 @@ public:
            if (ec)
                return {};

            // Set a decorator to change the User-Agent of the handshake and to tell rippled to charge the client IP for
            // RPC resources. See "secure_gateway" in
            // if the client IP is known, change the User-Agent of the handshake and tell rippled to charge the client
            // IP for RPC resources. See "secure_gateway" in
            // https://github.com/ripple/rippled/blob/develop/cfg/rippled-example.cfg
            // TODO: user-agent can be clio-[version]
            ws->set_option(websocket::stream_base::decorator([&clientIp](websocket::request_type& req) {
                req.set(http::field::user_agent, std::string(BOOST_BEAST_VERSION_STRING) + " websocket-client-coro");
                req.set(http::field::forwarded, "for=" + clientIp);
                if (clientIp)
                    req.set(http::field::forwarded, "for=" + *clientIp);
            }));

            ws->async_handshake(ip_, "/", yield[ec]);

@@ -363,8 +361,7 @@ public:
            auto end = begin + buffer.data().size();
            auto parsed = boost::json::parse(std::string(begin, end));

            if (!parsed.is_object())
            {
            if (!parsed.is_object()) {
                LOG(log_.error()) << "Error parsing response: " << std::string{begin, end};
                return {};
            }

@@ -373,9 +370,7 @@ public:
            response["forwarded"] = true;

            return response;
        }
        catch (std::exception const& e)
        {
        } catch (std::exception const& e) {
            LOG(log_.error()) << "Encountered exception : " << e.what();
            return {};
        }
@@ -384,15 +379,12 @@ public:
    bool
    hasLedger(uint32_t sequence) const override
    {
        std::lock_guard lck(mtx_);
        for (auto& pair : validatedLedgers_)
        {
            if (sequence >= pair.first && sequence <= pair.second)
            {
        std::lock_guard const lck(mtx_);
        for (auto& pair : validatedLedgers_) {
            if (sequence >= pair.first && sequence <= pair.second) {
                return true;
            }
            else if (sequence < pair.first)
            {
            if (sequence < pair.first) {
                // validatedLedgers_ is a sorted list of disjoint ranges
                // if the sequence comes before this range, the sequence will
                // come before all subsequent ranges
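hasLedger exploits the fact that validatedLedgers_ is sorted and disjoint to bail out early. The same lookup in a self-contained form:

#include <cstdint>
#include <utility>
#include <vector>

// Returns true if `seq` falls inside any of the sorted, disjoint [first, second] ranges.
bool hasSequence(std::vector<std::pair<uint32_t, uint32_t>> const& ranges, uint32_t seq)
{
    for (auto const& [low, high] : ranges) {
        if (seq >= low && seq <= high)
            return true;
        if (seq < low)  // sorted + disjoint: no later range can contain seq
            return false;
    }
    return false;
}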
@@ -420,13 +412,12 @@ public:
        request.set_get_object_neighbors(getObjectNeighbors);
        request.set_user("ETL");

        grpc::Status status = stub_->GetLedger(&context, request, &response);
        grpc::Status const status = stub_->GetLedger(&context, request, &response);

        if (status.ok() && !response.is_unlimited())
        {
            log_.warn()
                << "is_unlimited is false. Make sure secure_gateway is set correctly on the ETL source. source = "
                << toString() << "; status = " << status.error_message();
        if (status.ok() && !response.is_unlimited()) {
            log_.warn(
            ) << "is_unlimited is false. Make sure secure_gateway is set correctly on the ETL source. source = "
              << toString() << "; status = " << status.error_message();
        }

        return {status, std::move(response)};
@@ -451,10 +442,12 @@ public:
        res["grpc_port"] = grpcPort_;

        auto last = getLastMsgTime();
        if (last.time_since_epoch().count() != 0)
        if (last.time_since_epoch().count() != 0) {
            res["last_msg_age_seconds"] = std::to_string(
                std::chrono::duration_cast<std::chrono::seconds>(std::chrono::system_clock::now() - getLastMsgTime())
                    .count());
                    .count()
            );
        }

        return res;
    }
@@ -466,13 +459,12 @@ public:
            return {{}, false};

        grpc::CompletionQueue cq;
        void* tag;
        void* tag = nullptr;
        bool ok = false;
        std::vector<etl::detail::AsyncCallData> calls;
        auto markers = getMarkers(numMarkers);

        for (size_t i = 0; i < markers.size(); ++i)
        {
        for (size_t i = 0; i < markers.size(); ++i) {
            std::optional<ripple::uint256> nextMarker;

            if (i + 1 < markers.size())

@@ -488,45 +480,39 @@ public:

        size_t numFinished = 0;
        bool abort = false;
        size_t incr = 500000;
        size_t const incr = 500000;
        size_t progress = incr;
        std::vector<std::string> edgeKeys;

        while (numFinished < calls.size() && cq.Next(&tag, &ok))
        {
        while (numFinished < calls.size() && cq.Next(&tag, &ok)) {
            assert(tag);
            auto ptr = static_cast<etl::detail::AsyncCallData*>(tag);

            if (!ok)
            {
            if (!ok) {
                LOG(log_.error()) << "loadInitialLedger - ok is false";
                return {{}, false};  // handle cancelled
            }
            else
            {
                LOG(log_.trace()) << "Marker prefix = " << ptr->getMarkerPrefix();

                auto result = ptr->process(stub_, cq, *backend_, abort, cacheOnly);
                if (result != etl::detail::AsyncCallData::CallStatus::MORE)
                {
                    ++numFinished;
                    LOG(log_.debug()) << "Finished a marker. "
                                      << "Current number of finished = " << numFinished;
            LOG(log_.trace()) << "Marker prefix = " << ptr->getMarkerPrefix();

                    std::string lastKey = ptr->getLastKey();
            auto result = ptr->process(stub_, cq, *backend_, abort, cacheOnly);
            if (result != etl::detail::AsyncCallData::CallStatus::MORE) {
                ++numFinished;
                LOG(log_.debug()) << "Finished a marker. "
                                  << "Current number of finished = " << numFinished;

                    if (lastKey.size())
                        edgeKeys.push_back(ptr->getLastKey());
                }
                std::string const lastKey = ptr->getLastKey();

                if (result == etl::detail::AsyncCallData::CallStatus::ERRORED)
                    abort = true;
                if (!lastKey.empty())
                    edgeKeys.push_back(ptr->getLastKey());
            }

                if (backend_->cache().size() > progress)
                {
                    LOG(log_.info()) << "Downloaded " << backend_->cache().size() << " records from rippled";
                    progress += incr;
                }
            if (result == etl::detail::AsyncCallData::CallStatus::ERRORED)
                abort = true;

            if (backend_->cache().size() > progress) {
                LOG(log_.info()) << "Downloaded " << backend_->cache().size() << " records from rippled";
                progress += incr;
            }
        }
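The download loop above is a classic gRPC completion-queue drain. A condensed sketch of just that pattern; unlike clio's version, this one treats every event as a finished call rather than re-arming calls that report MORE:

#include <grpcpp/grpcpp.h>
#include <cassert>
#include <cstddef>

// Blocks until `expected` async operations have completed on the queue, or a
// failure/cancellation is observed. Tags are opaque per-call markers.
inline bool drainCompletions(grpc::CompletionQueue& cq, std::size_t expected)
{
    std::size_t finished = 0;
    void* tag = nullptr;
    bool ok = false;
    while (finished < expected && cq.Next(&tag, &ok)) {
        assert(tag);
        if (!ok)
            return false;  // cancelled or shutting down
        ++finished;
    }
    return true;
}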
@@ -535,11 +521,13 @@ public:
    }

    std::optional<boost::json::object>
    forwardToRippled(boost::json::object const& request, std::string const& clientIp, boost::asio::yield_context yield)
        const override
    forwardToRippled(
        boost::json::object const& request,
        std::optional<std::string> const& clientIp,
        boost::asio::yield_context yield
    ) const override
    {
        if (auto resp = forwardCache_.get(request); resp)
        {
        if (auto resp = forwardCache_.get(request); resp) {
            LOG(log_.debug()) << "request hit forwardCache";
            return resp;
        }
@@ -570,14 +558,13 @@ public:
    void
    onResolve(boost::beast::error_code ec, boost::asio::ip::tcp::resolver::results_type results)
    {
        if (ec)
        {
        if (ec) {
            // try again
            reconnect(ec);
        }
        else
        {
            boost::beast::get_lowest_layer(derived().ws()).expires_after(std::chrono::seconds(30));
        } else {
            static constexpr std::size_t LOWEST_LAYER_TIMEOUT_SECONDS = 30;
            boost::beast::get_lowest_layer(derived().ws())
                .expires_after(std::chrono::seconds(LOWEST_LAYER_TIMEOUT_SECONDS));
            boost::beast::get_lowest_layer(derived().ws()).async_connect(results, [this](auto ec, auto ep) {
                derived().onConnect(ec, ep);
            });
@@ -595,14 +582,11 @@ public:
        if (auto action = hooks_.onConnected(ec); action == SourceHooks::Action::STOP)
            return;

        if (ec)
        {
        if (ec) {
            // start over
            reconnect(ec);
        }
        else
        {
            boost::json::object jv{
        } else {
            boost::json::object const jv{
                {"command", "subscribe"},
                {"streams", {"ledger", "manifests", "validations", "transactions_proposed"}},
            };

@@ -612,10 +596,11 @@ public:
            derived().ws().set_option(
                boost::beast::websocket::stream_base::decorator([](boost::beast::websocket::request_type& req) {
                    req.set(
                        boost::beast::http::field::user_agent,
                        std::string(BOOST_BEAST_VERSION_STRING) + " clio-client");
                        boost::beast::http::field::user_agent, std::string(BOOST_BEAST_VERSION_STRING) + " clio-client"
                    );
                    req.set("X-User", "coro-client");
                }));
                })
            );

            // Send subscription message
            derived().ws().async_write(boost::asio::buffer(s), [this](auto ec, size_t size) { onWrite(ec, size); });

@@ -631,10 +616,11 @@ public:
    void
    onWrite(boost::beast::error_code ec, [[maybe_unused]] size_t size)
    {
        if (ec)
        if (ec) {
            reconnect(ec);
        else
        } else {
            derived().ws().async_read(readBuffer_, [this](auto ec, size_t size) { onRead(ec, size); });
        }
    }

    /**
@@ -646,12 +632,9 @@ public:
    void
    onRead(boost::beast::error_code ec, size_t size)
    {
        if (ec)
        {
        if (ec) {
            reconnect(ec);
        }
        else
        {
        } else {
            handleMessage(size);
            derived().ws().async_read(readBuffer_, [this](auto ec, size_t size) { onRead(ec, size); });
        }
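The subscription payload sent from onConnected above, built and serialized in isolation:

#include <boost/json.hpp>
#include <iostream>

int main()
{
    // The same streams the source subscribes to after every (re)connect.
    boost::json::object const jv{
        {"command", "subscribe"},
        {"streams", {"ledger", "manifests", "validations", "transactions_proposed"}},
    };
    std::cout << boost::json::serialize(jv) << "\n";
}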
@@ -668,8 +651,7 @@ public:
    {
        setLastMsgTime();

        try
        {
        try {
            auto const msg = boost::beast::buffers_to_string(readBuffer_.data());
            readBuffer_.consume(size);

@@ -677,65 +659,48 @@ public:
            auto const response = raw.as_object();
            uint32_t ledgerIndex = 0;

            if (response.contains("result"))
            {
            if (response.contains("result")) {
                auto const& result = response.at("result").as_object();
                if (result.contains("ledger_index"))
                    ledgerIndex = result.at("ledger_index").as_int64();

                if (result.contains("validated_ledgers"))
                {
                if (result.contains("validated_ledgers")) {
                    auto const& validatedLedgers = result.at("validated_ledgers").as_string();
                    setValidatedRange({validatedLedgers.data(), validatedLedgers.size()});
                }

                LOG(log_.info()) << "Received a message on ledger "
                                 << " subscription stream. Message : " << response << " - " << toString();
            }
            else if (response.contains("type") && response.at("type") == "ledgerClosed")
            {
            } else if (response.contains("type") && response.at("type") == "ledgerClosed") {
                LOG(log_.info()) << "Received a message on ledger "
                                 << " subscription stream. Message : " << response << " - " << toString();
                if (response.contains("ledger_index"))
                {
                if (response.contains("ledger_index")) {
                    ledgerIndex = response.at("ledger_index").as_int64();
                }
                if (response.contains("validated_ledgers"))
                {
                if (response.contains("validated_ledgers")) {
                    auto const& validatedLedgers = response.at("validated_ledgers").as_string();
                    setValidatedRange({validatedLedgers.data(), validatedLedgers.size()});
                }
            }
            else
            {
                if (balancer_.shouldPropagateTxnStream(this))
                {
                    if (response.contains("transaction"))
                    {
            } else {
                if (balancer_.shouldPropagateTxnStream(this)) {
                    if (response.contains("transaction")) {
                        forwardCache_.freshen();
                        subscriptions_->forwardProposedTransaction(response);
                    }
                    else if (response.contains("type") && response.at("type") == "validationReceived")
                    {
                    } else if (response.contains("type") && response.at("type") == "validationReceived") {
                        subscriptions_->forwardValidation(response);
                    }
                    else if (response.contains("type") && response.at("type") == "manifestReceived")
                    {
                    } else if (response.contains("type") && response.at("type") == "manifestReceived") {
                        subscriptions_->forwardManifest(response);
                    }
                }
            }

            if (ledgerIndex != 0)
            {
            if (ledgerIndex != 0) {
                LOG(log_.trace()) << "Pushing ledger sequence = " << ledgerIndex << " - " << toString();
                networkValidatedLedgers_->push(ledgerIndex);
            }

            return true;
        }
        catch (std::exception const& e)
        {
        } catch (std::exception const& e) {
            LOG(log_.error()) << "Exception in handleMessage : " << e.what();
            return false;
        }
@@ -757,6 +722,7 @@ protected:
    void
    reconnect(boost::beast::error_code ec)
    {
        static constexpr std::size_t BUFFER_SIZE = 128;
        if (paused_)
            return;

@@ -770,34 +736,30 @@ protected:
        // when the timer is cancelled. connection_refused will occur repeatedly
        std::string err = ec.message();
        // if we cannot connect to the transaction processing process
        if (ec.category() == boost::asio::error::get_ssl_category())
        {
        if (ec.category() == boost::asio::error::get_ssl_category()) {
            err = std::string(" (") + boost::lexical_cast<std::string>(ERR_GET_LIB(ec.value())) + "," +
                boost::lexical_cast<std::string>(ERR_GET_REASON(ec.value())) + ") ";

            // ERR_PACK /* crypto/err/err.h */
            char buf[128];
            char buf[BUFFER_SIZE];
            ::ERR_error_string_n(ec.value(), buf, sizeof(buf));
            err += buf;

            LOG(log_.error()) << err;
        }

        if (ec != boost::asio::error::operation_aborted && ec != boost::asio::error::connection_refused)
        {
        if (ec != boost::asio::error::operation_aborted && ec != boost::asio::error::connection_refused) {
            LOG(log_.error()) << "error code = " << ec << " - " << toString();
        }
        else
        {
        } else {
            LOG(log_.warn()) << "error code = " << ec << " - " << toString();
        }

        // exponentially increasing timeouts, with a max of 30 seconds
        size_t waitTime = std::min(pow(2, numFailures_), 30.0);
        size_t const waitTime = std::min(pow(2, numFailures_), 30.0);
        numFailures_++;
        timer_.expires_after(boost::asio::chrono::seconds(waitTime));
        timer_.async_wait([this](auto ec) {
            bool startAgain = (ec != boost::asio::error::operation_aborted);
            bool const startAgain = (ec != boost::asio::error::operation_aborted);
            derived().close(startAgain);
        });
    }
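The backoff formula used in reconnect(), reduced to a standalone helper:

#include <algorithm>
#include <cmath>
#include <cstddef>

// Exponentially growing retry delay, capped at 30 seconds.
// backoffSeconds(0) == 1, (1) == 2, (2) == 4, ... and 30 from the 5th failure on.
std::size_t backoffSeconds(std::size_t numFailures)
{
    return static_cast<std::size_t>(std::min(std::pow(2.0, numFailures), 30.0));
}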
@@ -806,14 +768,14 @@ private:
    void
    setLastMsgTime()
    {
        std::lock_guard lck(lastMsgTimeMtx_);
        std::lock_guard const lck(lastMsgTimeMtx_);
        lastMsgTime_ = std::chrono::system_clock::now();
    }

    std::chrono::system_clock::time_point
    getLastMsgTime() const
    {
        std::lock_guard lck(lastMsgTimeMtx_);
        std::lock_guard const lck(lastMsgTimeMtx_);
        return lastMsgTime_;
    }
@@ -823,29 +785,25 @@ private:
        std::vector<std::pair<uint32_t, uint32_t>> pairs;
        std::vector<std::string> ranges;
        boost::split(ranges, range, boost::is_any_of(","));
        for (auto& pair : ranges)
        {
        for (auto& pair : ranges) {
            std::vector<std::string> minAndMax;

            boost::split(minAndMax, pair, boost::is_any_of("-"));

            if (minAndMax.size() == 1)
            {
                uint32_t sequence = std::stoll(minAndMax[0]);
                pairs.push_back(std::make_pair(sequence, sequence));
            }
            else
            {
            if (minAndMax.size() == 1) {
                uint32_t const sequence = std::stoll(minAndMax[0]);
                pairs.emplace_back(sequence, sequence);
            } else {
                assert(minAndMax.size() == 2);
                uint32_t min = std::stoll(minAndMax[0]);
                uint32_t max = std::stoll(minAndMax[1]);
                pairs.push_back(std::make_pair(min, max));
                uint32_t const min = std::stoll(minAndMax[0]);
                uint32_t const max = std::stoll(minAndMax[1]);
                pairs.emplace_back(min, max);
            }
        }
        std::sort(pairs.begin(), pairs.end(), [](auto left, auto right) { return left.first < right.first; });

        // we only hold the lock here, to avoid blocking while string processing
        std::lock_guard lck(mtx_);
        std::lock_guard const lck(mtx_);
        validatedLedgers_ = std::move(pairs);
        validatedLedgersRaw_ = range;
    }
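setValidatedRange parses rippled's validated_ledgers string, e.g. "100-200,250,300-310". A self-contained sketch of the same parser:

#include <boost/algorithm/string.hpp>
#include <algorithm>
#include <cstdint>
#include <string>
#include <utility>
#include <vector>

std::vector<std::pair<uint32_t, uint32_t>> parseRanges(std::string const& input)
{
    std::vector<std::pair<uint32_t, uint32_t>> pairs;
    std::vector<std::string> ranges;
    boost::split(ranges, input, boost::is_any_of(","));

    for (auto const& token : ranges) {
        std::vector<std::string> minAndMax;
        boost::split(minAndMax, token, boost::is_any_of("-"));
        if (minAndMax.size() == 1) {
            auto const seq = static_cast<uint32_t>(std::stoll(minAndMax[0]));
            pairs.emplace_back(seq, seq);  // single ledger, e.g. "250"
        } else {
            pairs.emplace_back(std::stoll(minAndMax[0]), std::stoll(minAndMax[1]));
        }
    }
    std::sort(pairs.begin(), pairs.end(), [](auto l, auto r) { return l.first < r.first; });
    return pairs;
}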
@@ -853,7 +811,7 @@ private:
    std::string
    getValidatedRange() const
    {
        std::lock_guard lck(mtx_);
        std::lock_guard const lck(mtx_);
        return validatedLedgersRaw_;
    }
};

@@ -861,8 +819,7 @@ private:
/**
 * @brief Implementation of a source that uses a regular, non-secure websocket connection.
 */
class PlainSource : public SourceImpl<PlainSource>
{
class PlainSource : public SourceImpl<PlainSource> {
    using StreamType = boost::beast::websocket::stream<boost::beast::tcp_stream>;
    std::unique_ptr<StreamType> ws_;

@@ -885,7 +842,8 @@ public:
        std::shared_ptr<feed::SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> validatedLedgers,
        LoadBalancer& balancer,
        SourceHooks hooks)
        SourceHooks hooks
    )
        : SourceImpl(config, ioc, backend, subscriptions, validatedLedgers, balancer, std::move(hooks))
        , ws_(std::make_unique<StreamType>(strand_))
    {

@@ -919,8 +877,7 @@ public:
/**
 * @brief Implementation of a source that uses a secure websocket connection.
 */
class SslSource : public SourceImpl<SslSource>
{
class SslSource : public SourceImpl<SslSource> {
    using StreamType = boost::beast::websocket::stream<boost::beast::ssl_stream<boost::beast::tcp_stream>>;
    std::optional<std::reference_wrapper<boost::asio::ssl::context>> sslCtx_;
    std::unique_ptr<StreamType> ws_;

@@ -946,7 +903,8 @@ public:
        std::shared_ptr<feed::SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> validatedLedgers,
        LoadBalancer& balancer,
        SourceHooks hooks)
        SourceHooks hooks
    )
        : SourceImpl(config, ioc, backend, subscriptions, validatedLedgers, balancer, std::move(hooks))
        , sslCtx_(sslCtx)
        , ws_(std::make_unique<StreamType>(strand_, *sslCtx_))
@@ -26,8 +26,7 @@ namespace etl {
/**
 * @brief Represents the state of the ETL subsystem.
 */
struct SystemState
{
struct SystemState {
    /**
     * @brief Whether the process is in strict read-only mode.
     *

@@ -36,10 +35,18 @@ struct SystemState
     */
    bool isReadOnly = false;

    std::atomic_bool isWriting = false;          /**< @brief Whether the process is writing to the database. */
    std::atomic_bool isStopping = false;         /**< @brief Whether the software is stopping. */
    std::atomic_bool writeConflict = false;      /**< @brief Whether a write conflict was detected. */
    std::atomic_bool isAmendmentBlocked = false; /**< @brief Whether we detected an amendment block. */
    std::atomic_bool isWriting = false;     /**< @brief Whether the process is writing to the database. */
    std::atomic_bool isStopping = false;    /**< @brief Whether the software is stopping. */
    std::atomic_bool writeConflict = false; /**< @brief Whether a write conflict was detected. */

    /**
     * @brief Whether clio detected an amendment block.
     *
     * Being amendment blocked means that Clio was compiled with libxrpl that does not yet support some field that
     * arrived from rippled and therefore can't extract the ledger diff. When this happens, Clio can't proceed with ETL
     * and should log this error and only handle RPC requests.
     */
    std::atomic_bool isAmendmentBlocked = false;
};

} // namespace etl
94 src/etl/impl/AmendmentBlock.h Normal file
@@ -0,0 +1,94 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <etl/SystemState.h>
#include <util/log/Logger.h>

#include <boost/asio/io_context.hpp>
#include <boost/asio/steady_timer.hpp>

#include <chrono>
#include <functional>

namespace etl::detail {

struct AmendmentBlockAction {
    void
    operator()()
    {
        static util::Logger const log{"ETL"};
        LOG(log.fatal()) << "Can't process new ledgers: The current ETL source is not compatible with the version of "
                         << "the libxrpl Clio is currently using. Please upgrade Clio to a newer version.";
    }
};

template <typename ActionCallableType = AmendmentBlockAction>
class AmendmentBlockHandler {
    std::reference_wrapper<boost::asio::io_context> ctx_;
    std::reference_wrapper<SystemState> state_;
    boost::asio::steady_timer timer_;
    std::chrono::milliseconds interval_;

    ActionCallableType action_;

public:
    template <typename DurationType = std::chrono::seconds>
    AmendmentBlockHandler(
        boost::asio::io_context& ioc,
        SystemState& state,
        DurationType interval = DurationType{1},
        ActionCallableType&& action = ActionCallableType()
    )
        : ctx_{std::ref(ioc)}
        , state_{std::ref(state)}
        , timer_{ioc}
        , interval_{std::chrono::duration_cast<std::chrono::milliseconds>(interval)}
        , action_{std::move(action)}
    {
    }

    ~AmendmentBlockHandler()
    {
        boost::asio::post(ctx_.get(), [this]() { timer_.cancel(); });
    }

    void
    onAmendmentBlock()
    {
        state_.get().isAmendmentBlocked = true;
        startReportingTimer();
    }

private:
    void
    startReportingTimer()
    {
        action_();

        timer_.expires_after(interval_);
        timer_.async_wait([this](auto ec) {
            if (!ec)
                boost::asio::post(ctx_.get(), [this] { startReportingTimer(); });
        });
    }
};

} // namespace etl::detail
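A usage sketch for the new handler; in clio the io_context would be the application's main context rather than a local one:

#include <boost/asio/io_context.hpp>
#include <chrono>
#include <etl/SystemState.h>
#include <etl/impl/AmendmentBlock.h>

int main()
{
    boost::asio::io_context ioc;
    etl::SystemState state;

    // Repeats AmendmentBlockAction (a fatal log line) every second until destroyed.
    etl::detail::AmendmentBlockHandler<> handler{ioc, state};
    handler.onAmendmentBlock();  // sets state.isAmendmentBlocked and starts the timer

    ioc.run_for(std::chrono::seconds(3));  // sketch: run briefly, then exit
}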
@@ -19,6 +19,7 @@

#pragma once

#include <data/BackendInterface.h>
#include <etl/NFTHelpers.h>
#include <util/log/Logger.h>

@@ -27,8 +28,7 @@

namespace etl::detail {

class AsyncCallData
{
class AsyncCallData {
    util::Logger log_{"ETL"};

    std::unique_ptr<org::xrpl::rpc::v1::GetLedgerDataResponse> cur_;

@@ -46,16 +46,15 @@ public:
    AsyncCallData(uint32_t seq, ripple::uint256 const& marker, std::optional<ripple::uint256> const& nextMarker)
    {
        request_.mutable_ledger()->set_sequence(seq);
        if (marker.isNonZero())
        {
            request_.set_marker(marker.data(), marker.size());
        if (marker.isNonZero()) {
            request_.set_marker(marker.data(), ripple::uint256::size());
        }
        request_.set_user("ETL");
        nextPrefix_ = 0x00;
        if (nextMarker)
            nextPrefix_ = nextMarker->data()[0];

        unsigned char prefix = marker.data()[0];
        unsigned char const prefix = marker.data()[0];

        LOG(log_.debug()) << "Setting up AsyncCallData. marker = " << ripple::strHex(marker)
                          << " . prefix = " << ripple::strHex(std::string(1, prefix))

@@ -76,25 +75,23 @@ public:
        grpc::CompletionQueue& cq,
        BackendInterface& backend,
        bool abort,
        bool cacheOnly = false)
        bool cacheOnly = false
    )
    {
        LOG(log_.trace()) << "Processing response. "
                          << "Marker prefix = " << getMarkerPrefix();
        if (abort)
        {
        if (abort) {
            LOG(log_.error()) << "AsyncCallData aborted";
            return CallStatus::ERRORED;
        }
        if (!status_.ok())
        {
        if (!status_.ok()) {
            LOG(log_.error()) << "AsyncCallData status_ not ok: "
                              << " code = " << status_.error_code() << " message = " << status_.error_message();
            return CallStatus::ERRORED;
        }
        if (!next_->is_unlimited())
        {
            LOG(log_.warn()) << "AsyncCallData is_unlimited is false. Make sure "
                                "secure_gateway is set correctly at the ETL source";
        if (!next_->is_unlimited()) {
            LOG(log_.warn()) << "AsyncCallData is_unlimited is false. "
                             << "Make sure secure_gateway is set correctly at the ETL source";
        }

        std::swap(cur_, next_);

@@ -102,18 +99,17 @@ public:
        bool more = true;

        // if no marker returned, we are done
        if (cur_->marker().size() == 0)
        if (cur_->marker().empty())
            more = false;

        // if returned marker is greater than our end, we are done
        unsigned char prefix = cur_->marker()[0];
        unsigned char const prefix = cur_->marker()[0];
        if (nextPrefix_ != 0x00 && prefix >= nextPrefix_)
            more = false;

        // if we are not done, make the next async call
        if (more)
        {
            request_.set_marker(std::move(cur_->marker()));
        if (more) {
            request_.set_marker(cur_->marker());
            call(stub, cq);
        }

@@ -123,25 +119,23 @@ public:
        std::vector<data::LedgerObject> cacheUpdates;
        cacheUpdates.reserve(numObjects);

        for (int i = 0; i < numObjects; ++i)
        {
        for (int i = 0; i < numObjects; ++i) {
            auto& obj = *(cur_->mutable_ledger_objects()->mutable_objects(i));
            if (!more && nextPrefix_ != 0x00)
            {
                if (((unsigned char)obj.key()[0]) >= nextPrefix_)
            if (!more && nextPrefix_ != 0x00) {
                if (static_cast<unsigned char>(obj.key()[0]) >= nextPrefix_)
                    continue;
            }
            cacheUpdates.push_back(
                {*ripple::uint256::fromVoidChecked(obj.key()),
                 {obj.mutable_data()->begin(), obj.mutable_data()->end()}});
            if (!cacheOnly)
            {
                if (lastKey_.size())
                {*ripple::uint256::fromVoidChecked(obj.key()), {obj.mutable_data()->begin(), obj.mutable_data()->end()}}
            );
            if (!cacheOnly) {
                if (!lastKey_.empty())
                    backend.writeSuccessor(std::move(lastKey_), request_.ledger().sequence(), std::string{obj.key()});
                lastKey_ = obj.key();
                backend.writeNFTs(getNFTDataFromObj(request_.ledger().sequence(), obj.key(), obj.data()));
                backend.writeLedgerObject(
                    std::move(*obj.mutable_key()), request_.ledger().sequence(), std::move(*obj.mutable_data()));
                    std::move(*obj.mutable_key()), request_.ledger().sequence(), std::move(*obj.mutable_data())
                );
            }
        }
        backend.cache().update(cacheUpdates, request_.ledger().sequence(), cacheOnly);

@@ -156,7 +150,8 @@ public:
        context_ = std::make_unique<grpc::ClientContext>();

        std::unique_ptr<grpc::ClientAsyncResponseReader<org::xrpl::rpc::v1::GetLedgerDataResponse>> rpc(
            stub->PrepareAsyncGetLedgerData(context_.get(), request_, &cq));
            stub->PrepareAsyncGetLedgerData(context_.get(), request_, &cq)
        );

        rpc->StartCall();

@@ -166,10 +161,10 @@ public:
    std::string
    getMarkerPrefix()
    {
        if (next_->marker().size() == 0)
        if (next_->marker().empty()) {
            return "";
        else
            return ripple::strHex(std::string{next_->marker().data()[0]});
        }
        return ripple::strHex(std::string{next_->marker().data()[0]});
    }

    std::string
@@ -40,8 +40,11 @@ namespace etl::detail {
 * @brief Cache loading interface
 */
template <typename CacheType>
class CacheLoader
{
class CacheLoader {
    static constexpr size_t DEFAULT_NUM_CACHE_DIFFS = 32;
    static constexpr size_t DEFAULT_NUM_CACHE_MARKERS = 48;
    static constexpr size_t DEFAULT_CACHE_PAGE_FETCH_SIZE = 512;

    enum class LoadStyle { ASYNC, SYNC, NOT_AT_ALL };

    util::Logger log_{"ETL"};

@@ -52,22 +55,22 @@ class CacheLoader
    LoadStyle cacheLoadStyle_ = LoadStyle::ASYNC;

    // number of diffs to use to generate cursors to traverse the ledger in parallel during initial cache download
    size_t numCacheDiffs_ = 32;
    size_t numCacheDiffs_ = DEFAULT_NUM_CACHE_DIFFS;

    // number of markers to use at one time to traverse the ledger in parallel during initial cache download
    size_t numCacheMarkers_ = 48;
    size_t numCacheMarkers_ = DEFAULT_NUM_CACHE_MARKERS;

    // number of ledger objects to fetch concurrently per marker during cache download
    size_t cachePageFetchSize_ = 512;
    size_t cachePageFetchSize_ = DEFAULT_CACHE_PAGE_FETCH_SIZE;

    struct ClioPeer
    {
    struct ClioPeer {
        std::string ip;
        int port;
        int port{};
    };

    std::vector<ClioPeer> clioPeers_;

    std::thread thread_;
    std::atomic_bool stopping_ = false;

public:
@@ -75,14 +78,13 @@ public:
        util::Config const& config,
        boost::asio::io_context& ioc,
        std::shared_ptr<BackendInterface> const& backend,
        CacheType& ledgerCache)
        CacheType& ledgerCache
    )
        : ioContext_{std::ref(ioc)}, backend_{backend}, cache_{ledgerCache}
    {
        if (config.contains("cache"))
        {
        if (config.contains("cache")) {
            auto const cache = config.section("cache");
            if (auto entry = cache.maybeValue<std::string>("load"); entry)
            {
            if (auto entry = cache.maybeValue<std::string>("load"); entry) {
                if (boost::iequals(*entry, "sync"))
                    cacheLoadStyle_ = LoadStyle::SYNC;
                if (boost::iequals(*entry, "async"))

@@ -95,10 +97,8 @@ public:
            numCacheMarkers_ = cache.valueOr<size_t>("num_markers", numCacheMarkers_);
            cachePageFetchSize_ = cache.valueOr<size_t>("page_fetch_size", cachePageFetchSize_);

            if (auto peers = cache.maybeArray("peers"); peers)
            {
                for (auto const& peer : *peers)
                {
            if (auto peers = cache.maybeArray("peers"); peers) {
                for (auto const& peer : *peers) {
                    auto ip = peer.value<std::string>("ip");
                    auto port = peer.value<uint32_t>("port");

@@ -106,7 +106,7 @@ public:
                    clioPeers_.push_back({ip, port});
                }

                unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
                unsigned const seed = std::chrono::system_clock::now().time_since_epoch().count();
                std::shuffle(std::begin(clioPeers_), std::end(clioPeers_), std::default_random_engine(seed));
            }
        }
@@ -115,6 +115,8 @@ public:
    ~CacheLoader()
    {
        stop();
        if (thread_.joinable())
            thread_.join();
    }

    /**

@@ -126,24 +128,20 @@ public:
    void
    load(uint32_t seq)
    {
        if (cacheLoadStyle_ == LoadStyle::NOT_AT_ALL)
        {
        if (cacheLoadStyle_ == LoadStyle::NOT_AT_ALL) {
            cache_.get().setDisabled();
            LOG(log_.warn()) << "Cache is disabled. Not loading";
            return;
        }

        if (cache_.get().isFull())
        {
        if (cache_.get().isFull()) {
            assert(false);
            return;
        }

        if (clioPeers_.size() > 0)
        {
        if (!clioPeers_.empty()) {
            boost::asio::spawn(ioContext_.get(), [this, seq](boost::asio::yield_context yield) {
                for (auto const& peer : clioPeers_)
                {
                for (auto const& peer : clioPeers_) {
                    // returns true on success
                    if (loadCacheFromClioPeer(seq, peer.ip, std::to_string(peer.port), yield))
                        return;

@@ -154,16 +152,14 @@ public:
            });
            return;
        }
        else
        {
            loadCacheFromDb(seq);
        }

        loadCacheFromDb(seq);

        // If loading synchronously, poll cache until full
        while (cacheLoadStyle_ == LoadStyle::SYNC && not cache_.get().isFull())
        {
        static constexpr size_t SLEEP_TIME_SECONDS = 10;
        while (cacheLoadStyle_ == LoadStyle::SYNC && not cache_.get().isFull()) {
            LOG(log_.debug()) << "Cache not full. Cache size = " << cache_.get().size() << ". Sleeping ...";
            std::this_thread::sleep_for(std::chrono::seconds(10));
            std::this_thread::sleep_for(std::chrono::seconds(SLEEP_TIME_SECONDS));
            if (cache_.get().isFull())
                LOG(log_.info()) << "Cache is full. Cache size = " << cache_.get().size();
        }
@@ -181,17 +177,16 @@ private:
        uint32_t ledgerIndex,
        std::string const& ip,
        std::string const& port,
        boost::asio::yield_context yield)
        boost::asio::yield_context yield
    )
    {
        LOG(log_.info()) << "Loading cache from peer. ip = " << ip << " . port = " << port;
        namespace beast = boost::beast;          // from <boost/beast.hpp>
        namespace http = beast::http;            // from <boost/beast/http.hpp>
        namespace websocket = beast::websocket;  // from
        namespace net = boost::asio;             // from
        using tcp = boost::asio::ip::tcp;        // from
        try
        {
            boost::beast::error_code ec;
        try {
            beast::error_code ec;
            // These objects perform our I/O
            tcp::resolver resolver{ioContext_.get()};

@@ -218,13 +213,14 @@ private:
            std::optional<boost::json::value> marker;

            LOG(log_.trace()) << "Sending request";
            static constexpr int LIMIT = 2048;
            auto getRequest = [&](auto marker) {
                boost::json::object request = {
                    {"command", "ledger_data"},
                    {"ledger_index", ledgerIndex},
                    {"binary", true},
                    {"out_of_order", true},
                    {"limit", 2048}};
                    {"limit", LIMIT}};

                if (marker)
                    request["marker"] = *marker;

@@ -233,20 +229,17 @@ private:

            bool started = false;
            size_t numAttempts = 0;
            do
            {
            do {
                // Send the message
                ws->async_write(net::buffer(boost::json::serialize(getRequest(marker))), yield[ec]);
                if (ec)
                {
                if (ec) {
                    LOG(log_.error()) << "error writing = " << ec.message();
                    return false;
                }

                beast::flat_buffer buffer;
                ws->async_read(buffer, yield[ec]);
                if (ec)
                {
                if (ec) {
                    LOG(log_.error()) << "error reading = " << ec.message();
                    return false;
                }

@@ -254,26 +247,22 @@ private:
                auto raw = beast::buffers_to_string(buffer.data());
                auto parsed = boost::json::parse(raw);

                if (!parsed.is_object())
                {
                if (!parsed.is_object()) {
                    LOG(log_.error()) << "Error parsing response: " << raw;
                    return false;
                }
                LOG(log_.trace()) << "Successfully parsed response " << parsed;

                if (auto const& response = parsed.as_object(); response.contains("error"))
                {
                if (auto const& response = parsed.as_object(); response.contains("error")) {
                    LOG(log_.error()) << "Response contains error: " << response;
                    auto const& err = response.at("error");
                    if (err.is_string() && err.as_string() == "lgrNotFound")
                    {
                    if (err.is_string() && err.as_string() == "lgrNotFound") {
                        static constexpr size_t MAX_ATTEMPTS = 5;
                        ++numAttempts;
                        if (numAttempts >= 5)
                        {
                            LOG(log_.error()) << " ledger not found at peer after 5 attempts. "
                                                 "peer = "
                                              << ip << " ledger = " << ledgerIndex
                                              << ". Check your config and the health of the peer";
                        if (numAttempts >= MAX_ATTEMPTS) {
                            LOG(log_.error())
                                << " ledger not found at peer after 5 attempts. peer = " << ip
                                << " ledger = " << ledgerIndex << ". Check your config and the health of the peer";
                            return false;
                        }
                        LOG(log_.warn()) << "Ledger not found. ledger = " << ledgerIndex

@@ -286,28 +275,26 @@ private:
                started = true;
                auto const& response = parsed.as_object()["result"].as_object();

                if (!response.contains("cache_full") || !response.at("cache_full").as_bool())
                {
                if (!response.contains("cache_full") || !response.at("cache_full").as_bool()) {
                    LOG(log_.error()) << "cache not full for clio node. ip = " << ip;
                    return false;
                }
                if (response.contains("marker"))
                if (response.contains("marker")) {
                    marker = response.at("marker");
                else
                } else {
                    marker = {};
                }

                auto const& state = response.at("state").as_array();

                std::vector<data::LedgerObject> objects;
                objects.reserve(state.size());
                for (auto const& ledgerObject : state)
                {
                for (auto const& ledgerObject : state) {
                    auto const& obj = ledgerObject.as_object();

                    data::LedgerObject stateObject = {};

                    if (!stateObject.key.parseHex(obj.at("index").as_string().c_str()))
                    {
                    if (!stateObject.key.parseHex(obj.at("index").as_string().c_str())) {
                        LOG(log_.error()) << "failed to parse object id";
                        return false;
                    }

@@ -324,9 +311,7 @@ private:

            cache_.get().setFull();
            return true;
        }
        catch (std::exception const& e)
        {
        } catch (std::exception const& e) {
            LOG(log_.error()) << "Encountered exception : " << e.what() << " - ip = " << ip;
            return false;
        }
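The do/while loop above implements marker-based paging over ledger_data. The protocol reduced to its essentials, with the websocket round trip elided behind a hypothetical fetchPage callable:

#include <boost/json.hpp>
#include <cstdint>
#include <functional>
#include <optional>

// Hypothetical round trip: sends one ledger_data request, returns the parsed result object.
using FetchPageFn = std::function<boost::json::object(boost::json::object const&)>;

void pageThroughLedger(uint32_t ledgerIndex, FetchPageFn fetchPage)
{
    std::optional<boost::json::value> marker;
    do {
        boost::json::object request = {
            {"command", "ledger_data"}, {"ledger_index", ledgerIndex}, {"binary", true}, {"limit", 2048}};
        if (marker)
            request["marker"] = *marker;  // resume where the previous page stopped

        auto const result = fetchPage(request);
        // ... consume result["state"] here ...
        marker = result.contains("marker") ? std::optional{result.at("marker")} : std::nullopt;
    } while (marker);  // the final page carries no "marker"
}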
@@ -340,8 +325,7 @@ private:

        auto append = [](auto&& a, auto&& b) { a.insert(std::end(a), std::begin(b), std::end(b)); };

        for (size_t i = 0; i < numCacheDiffs_; ++i)
        {
        for (size_t i = 0; i < numCacheDiffs_; ++i) {
            append(diff, data::synchronousAndRetryOnTimeout([&](auto yield) {
                return backend_->fetchLedgerDiff(seq - i, yield);
            }));

@@ -353,27 +337,28 @@ private:

        diff.erase(std::unique(diff.begin(), diff.end(), [](auto a, auto b) { return a.key == b.key; }), diff.end());

        cursors.push_back({});
        for (auto const& obj : diff)
            if (obj.blob.size())
                cursors.push_back({obj.key});
        cursors.push_back({});
        cursors.emplace_back();
        for (auto const& obj : diff) {
            if (!obj.blob.empty())
                cursors.emplace_back(obj.key);
        }
        cursors.emplace_back();

        std::stringstream cursorStr;
        for (auto const& c : cursors)
        for (auto const& c : cursors) {
            if (c)
                cursorStr << ripple::strHex(*c) << ", ";
        }

        LOG(log_.info()) << "Loading cache. num cursors = " << cursors.size() - 1;
        LOG(log_.trace()) << "cursors = " << cursorStr.str();

        boost::asio::post(ioContext_.get(), [this, seq, cursors = std::move(cursors)]() {
        thread_ = std::thread{[this, seq, cursors = std::move(cursors)]() {
            auto startTime = std::chrono::system_clock::now();
            auto markers = std::make_shared<std::atomic_int>(0);
            auto numRemaining = std::make_shared<std::atomic_int>(cursors.size() - 1);

            for (size_t i = 0; i < cursors.size() - 1; ++i)
            {
            for (size_t i = 0; i < cursors.size() - 1; ++i) {
                auto const start = cursors.at(i);
                auto const end = cursors.at(i + 1);

@@ -388,8 +373,7 @@ private:
                        cursor.has_value() ? ripple::strHex(cursor.value()) : ripple::strHex(data::firstKey);
                    LOG(log_.debug()) << "Starting a cursor: " << cursorStr << " markers = " << *markers;

                    while (not stopping_)
                    {
                    while (not stopping_) {
                        auto res = data::retryOnTimeout([this, seq, &cursor, yield]() {
                            return backend_->fetchLedgerPage(cursor, seq, cachePageFetchSize_, false, yield);
                        });

@@ -409,23 +393,21 @@ private:
                    --(*markers);
                    markers->notify_one();

                    if (--(*numRemaining) == 0)
                    {
                    if (--(*numRemaining) == 0) {
                        auto endTime = std::chrono::system_clock::now();
                        auto duration = std::chrono::duration_cast<std::chrono::seconds>(endTime - startTime);

                        LOG(log_.info()) << "Finished loading cache. cache size = " << cache_.get().size()
                                         << ". Took " << duration.count() << " seconds";
                        cache_.get().setFull();
                    }
                    else
                    {
                    } else {
                        LOG(log_.info()) << "Finished a cursor. num remaining = " << *numRemaining
                                         << " start = " << cursorStr << " markers = " << *markers;
                    }
                });
            }
                );
            }
        });
        }};
    }
};

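The cursor vector built above is deliberately bracketed by two empty optionals, so each adjacent pair of entries carves out one slice of the key space for a worker. A sketch of walking those pairs, with keys simplified to ints (in clio they are ripple::uint256 values):

#include <cstddef>
#include <optional>
#include <vector>

void walkCursorPairs(std::vector<std::optional<int>> const& cursors)
{
    // cursors = {nullopt, k1, k2, ..., nullopt}; pair i covers (cursors[i], cursors[i+1]].
    for (std::size_t i = 0; i + 1 < cursors.size(); ++i) {
        auto const start = cursors[i];    // nullopt means "beginning of key space"
        auto const end = cursors[i + 1];  // nullopt means "end of key space"
        // ... fetch pages from start until end on a worker coroutine ...
        (void)start;
        (void)end;
    }
}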
@@ -31,8 +31,7 @@ namespace etl::detail {
 * @brief A collection of thread safe async queues used by Extractor and Transformer to communicate
 */
template <typename RawDataType>
class ExtractionDataPipe
{
class ExtractionDataPipe {
public:
    using DataType = std::optional<RawDataType>;
    using QueueType = ThreadSafeQueue<DataType>;  // TODO: probably should use boost::lockfree::queue instead?
@@ -28,6 +28,7 @@
#include <chrono>
#include <mutex>
#include <thread>
#include <utility>

namespace etl::detail {

@@ -35,8 +36,7 @@ namespace etl::detail {
 * @brief Extractor thread that fetches gRPC data and enqueues it on the DataPipeType
 */
template <typename DataPipeType, typename NetworkValidatedLedgersType, typename LedgerFetcherType>
class Extractor
{
class Extractor {
    util::Logger log_{"ETL"};

    std::reference_wrapper<DataPipeType> pipe_;

@@ -55,9 +55,10 @@ public:
        LedgerFetcherType& ledgerFetcher,
        uint32_t startSequence,
        std::optional<uint32_t> finishSequence,
        SystemState const& state)
        SystemState const& state
    )
        : pipe_(std::ref(pipe))
        , networkValidatedLedgers_{networkValidatedLedgers}
        , networkValidatedLedgers_{std::move(networkValidatedLedgers)}
        , ledgerFetcher_{std::ref(ledgerFetcher)}
        , startSequence_{startSequence}
        , finishSequence_{finishSequence}

@@ -88,16 +89,17 @@ private:
        double totalTime = 0.0;
        auto currentSequence = startSequence_;

        while (!shouldFinish(currentSequence) && networkValidatedLedgers_->waitUntilValidatedByNetwork(currentSequence))
        {
            auto [fetchResponse, time] = ::util::timed<std::chrono::duration<double>>(
                [this, currentSequence]() { return ledgerFetcher_.get().fetchDataAndDiff(currentSequence); });
        while (!shouldFinish(currentSequence) && networkValidatedLedgers_->waitUntilValidatedByNetwork(currentSequence)
        ) {
            auto [fetchResponse, time] = ::util::timed<std::chrono::duration<double>>([this, currentSequence]() {
                return ledgerFetcher_.get().fetchDataAndDiff(currentSequence);
            });
            totalTime += time;

            // if the fetch is unsuccessful, stop. fetchLedger only returns false if the server is shutting down, or if
            // the ledger was found in the database (which means another process already wrote the ledger that this
            // process was trying to extract; this is a form of a write conflict).
            // Otherwise, fetchDataAndDiff will keep trying to fetch the specified ledger until successful.
            // if the fetch is unsuccessful, stop. fetchLedger only returns false if the server is shutting down, or
            // if the ledger was found in the database (which means another process already wrote the ledger that
            // this process was trying to extract; this is a form of a write conflict). Otherwise, fetchDataAndDiff
            // will keep trying to fetch the specified ledger until successful.
            if (!fetchResponse)
                break;

@@ -33,28 +33,29 @@ ForwardCache::freshen()

    auto numOutstanding = std::make_shared<std::atomic_uint>(latestForwarded_.size());

    for (auto const& cacheEntry : latestForwarded_)
    {
    for (auto const& cacheEntry : latestForwarded_) {
        boost::asio::spawn(
            strand_, [this, numOutstanding, command = cacheEntry.first](boost::asio::yield_context yield) {
                boost::json::object request = {{"command", command}};
                auto resp = source_.requestFromRippled(request, {}, yield);
            strand_,
            [this, numOutstanding, command = cacheEntry.first](boost::asio::yield_context yield) {
                boost::json::object const request = {{"command", command}};
                auto resp = source_.requestFromRippled(request, std::nullopt, yield);

                if (!resp || resp->contains("error"))
                    resp = {};

                {
                    std::scoped_lock lk(mtx_);
                    std::scoped_lock const lk(mtx_);
                    latestForwarded_[command] = resp;
                }
            });
    }
            }
        );
    }
}

void
ForwardCache::clear()
{
    std::scoped_lock lk(mtx_);
    std::scoped_lock const lk(mtx_);
    for (auto& cacheEntry : latestForwarded_)
        latestForwarded_[cacheEntry.first] = {};
}

@@ -63,17 +64,18 @@ std::optional<boost::json::object>
ForwardCache::get(boost::json::object const& request) const
{
    std::optional<std::string> command = {};
    if (request.contains("command") && !request.contains("method") && request.at("command").is_string())
    if (request.contains("command") && !request.contains("method") && request.at("command").is_string()) {
        command = request.at("command").as_string().c_str();
    else if (request.contains("method") && !request.contains("command") && request.at("method").is_string())
    } else if (request.contains("method") && !request.contains("command") && request.at("method").is_string()) {
        command = request.at("method").as_string().c_str();
    }

    if (!command)
        return {};
    if (rpc::specifiesCurrentOrClosedLedger(request))
        return {};

    std::shared_lock lk(mtx_);
    std::shared_lock const lk(mtx_);
    if (!latestForwarded_.contains(*command))
        return {};
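ForwardCache pairs std::scoped_lock for writers with std::shared_lock for readers on the same mutex, so lookups never serialize against each other. A minimal standalone version of that pattern:

#include <mutex>
#include <optional>
#include <shared_mutex>
#include <string>
#include <unordered_map>

class ResponseCache {
    mutable std::shared_mutex mtx_;
    std::unordered_map<std::string, std::optional<std::string>> latest_;

public:
    void put(std::string const& key, std::string value)
    {
        std::scoped_lock const lk(mtx_);  // exclusive: blocks readers and writers
        latest_[key] = std::move(value);
    }

    std::optional<std::string> get(std::string const& key) const
    {
        std::shared_lock const lk(mtx_);  // shared: many readers in parallel
        if (auto it = latest_.find(key); it != latest_.end())
            return it->second;
        return std::nullopt;
    }
};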
@@ -31,16 +31,18 @@
#include <mutex>
#include <unordered_map>

namespace etl {
class Source;
} // namespace etl

namespace etl::detail {

/**
 * @brief Cache for rippled responses
 */
class ForwardCache
{
class ForwardCache {
    using ResponseType = std::optional<boost::json::object>;
    static constexpr std::uint32_t DEFAULT_DURATION = 10;

    util::Logger log_{"ETL"};

@@ -48,7 +50,7 @@ class ForwardCache
    std::unordered_map<std::string, ResponseType> latestForwarded_;
    boost::asio::strand<boost::asio::io_context::executor_type> strand_;
    etl::Source const& source_;
    std::uint32_t duration_ = 10;
    std::uint32_t duration_ = DEFAULT_DURATION;

    void
    clear();

@@ -57,15 +59,13 @@ public:
    ForwardCache(util::Config const& config, boost::asio::io_context& ioc, Source const& source)
        : strand_(boost::asio::make_strand(ioc)), source_(source)
    {
        if (config.contains("cache"))
        {
        if (config.contains("cache")) {
            auto commands = config.arrayOrThrow("cache", "Source cache must be array");

            if (config.contains("cache_duration"))
                duration_ = config.valueOrThrow<uint32_t>("cache_duration", "Source cache_duration must be a number");

            for (auto const& command : commands)
            {
            for (auto const& command : commands) {
                auto key = command.valueOrThrow<std::string>("Source forward command must be array of strings");
                latestForwarded_[key] = {};
            }

@@ -76,7 +76,7 @@ public:
    freshen();

    std::optional<boost::json::object>
    get(boost::json::object const& command) const;
    get(boost::json::object const& request) const;
};

} // namespace etl::detail
@@ -27,6 +27,7 @@
#include <grpcpp/grpcpp.h>

#include <optional>
#include <utility>

namespace etl::detail {

@@ -34,8 +35,7 @@ namespace etl::detail {
 * @brief GRPC Ledger data fetcher
 */
template <typename LoadBalancerType>
class LedgerFetcher
{
class LedgerFetcher {
public:
    using OptionalGetLedgerResponseType = typename LoadBalancerType::OptionalGetLedgerResponseType;

@@ -50,7 +50,7 @@ public:
     * @brief Create an instance of the fetcher
     */
    LedgerFetcher(std::shared_ptr<BackendInterface> backend, std::shared_ptr<LoadBalancerType> balancer)
        : backend_(backend), loadBalancer_(balancer)
        : backend_(std::move(backend)), loadBalancer_(std::move(balancer))
    {
    }

@@ -90,7 +90,8 @@ public:
        LOG(log_.debug()) << "Attempting to fetch ledger with sequence = " << sequence;

        auto response = loadBalancer_->fetchLedger(
            sequence, true, !backend_->cache().isFull() || backend_->cache().latestLedgerSequence() >= sequence);
            sequence, true, !backend_->cache().isFull() || backend_->cache().latestLedgerSequence() >= sequence
        );
        if (response)
            LOG(log_.trace()) << "GetLedger reply = " << response->DebugString();
@@ -30,12 +30,12 @@
#include <ripple/beast/core/CurrentThreadName.h>

#include <memory>
#include <utility>

/**
 * @brief Account transactions, NFT transactions and NFT data bundled together.
 */
struct FormattedTransactionsData
{
struct FormattedTransactionsData {
    std::vector<AccountTransactionsData> accountTxData;
    std::vector<NFTTransactionsData> nfTokenTxData;
    std::vector<NFTsData> nfTokensData;

@@ -47,8 +47,7 @@ namespace etl::detail {
 * @brief Loads ledger data into the DB
 */
template <typename LoadBalancerType, typename LedgerFetcherType>
class LedgerLoader
{
class LedgerLoader {
public:
    using GetLedgerResponseType = typename LoadBalancerType::GetLedgerResponseType;
    using OptionalGetLedgerResponseType = typename LoadBalancerType::OptionalGetLedgerResponseType;

@@ -70,8 +69,12 @@ public:
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<LoadBalancerType> balancer,
        LedgerFetcherType& fetcher,
        SystemState const& state)
        : backend_{backend}, loadBalancer_{balancer}, fetcher_{std::ref(fetcher)}, state_{std::cref(state)}
        SystemState const& state
    )
        : backend_{std::move(backend)}
        , loadBalancer_{std::move(balancer)}
        , fetcher_{std::ref(fetcher)}
        , state_{std::cref(state)}
    {
    }
@@ -91,12 +94,11 @@ public:
    {
        FormattedTransactionsData result;

        for (auto& txn : *(data.mutable_transactions_list()->mutable_transactions()))
        {
        for (auto& txn : *(data.mutable_transactions_list()->mutable_transactions())) {
            std::string* raw = txn.mutable_transaction_blob();

            ripple::SerialIter it{raw->data(), raw->size()};
            ripple::STTx sttx{it};
            ripple::STTx const sttx{it};

            LOG(log_.trace()) << "Inserting transaction = " << sttx.getTransactionID();

@@ -107,15 +109,16 @@ public:
            if (maybeNFT)
                result.nfTokensData.push_back(*maybeNFT);

            auto journal = ripple::debugLog();
            result.accountTxData.emplace_back(txMeta, sttx.getTransactionID(), journal);
            std::string keyStr{(const char*)sttx.getTransactionID().data(), 32};
            result.accountTxData.emplace_back(txMeta, sttx.getTransactionID());
            static constexpr std::size_t KEY_SIZE = 32;
            std::string keyStr{reinterpret_cast<char const*>(sttx.getTransactionID().data()), KEY_SIZE};
            backend_->writeTransaction(
                std::move(keyStr),
                ledger.seq,
                ledger.closeTime.time_since_epoch().count(),
                std::move(*raw),
                std::move(*txn.mutable_metadata_blob()));
                std::move(*txn.mutable_metadata_blob())
            );
        }

        // Remove all but the last NFTsData for each id. unique removes all but the first of a group, so we want to

@@ -126,9 +129,10 @@ public:

        // Now we can unique the NFTs by tokenID.
        auto last = std::unique(
            result.nfTokensData.begin(), result.nfTokensData.end(), [](NFTsData const& a, NFTsData const& b) {
                return a.tokenID == b.tokenID;
            });
            result.nfTokensData.begin(),
            result.nfTokensData.end(),
            [](NFTsData const& a, NFTsData const& b) { return a.tokenID == b.tokenID; }
        );
        result.nfTokensData.erase(last, result.nfTokensData.end());

        return result;
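The erase/unique idiom above keeps exactly one record per token ID. A standalone demonstration on plain pairs:

#include <algorithm>
#include <string>
#include <utility>
#include <vector>

int main()
{
    // (tokenID, payload) records; assume they are already ordered so the entry
    // to keep comes first within each tokenID group, as the comment in the diff requires.
    std::vector<std::pair<int, std::string>> nfts = {{1, "keep"}, {1, "drop"}, {2, "keep"}};

    auto last = std::unique(
        nfts.begin(), nfts.end(), [](auto const& a, auto const& b) { return a.first == b.first; });
    nfts.erase(last, nfts.end());  // nfts == {{1, "keep"}, {2, "keep"}}
}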
@@ -147,8 +151,7 @@ public:
    {
        // check that database is actually empty
        auto rng = backend_->hardFetchLedgerRangeNoThrow();
        if (rng)
        {
        if (rng) {
            LOG(log_.fatal()) << "Database is not empty";
            assert(false);
            return {};

@@ -182,15 +185,13 @@ public:
        // into the queue
        auto [edgeKeys, success] = loadBalancer_->loadInitialLedger(sequence);

        if (success)
        {
        if (success) {
            size_t numWrites = 0;
            backend_->cache().setFull();

            auto seconds =
                ::util::timed<std::chrono::seconds>([this, edgeKeys = &edgeKeys, sequence, &numWrites]() {
                    for (auto& key : *edgeKeys)
                    {
                    for (auto& key : *edgeKeys) {
                        LOG(log_.debug()) << "Writing edge key = " << ripple::strHex(key);
                        auto succ =
                            backend_->cache().getSuccessor(*ripple::uint256::fromVoidChecked(key), sequence);

@@ -199,35 +200,33 @@ public:
                    }

                    ripple::uint256 prev = data::firstKey;
                    while (auto cur = backend_->cache().getSuccessor(prev, sequence))
                    {
                    while (auto cur = backend_->cache().getSuccessor(prev, sequence)) {
                        assert(cur);
                        if (prev == data::firstKey)
                            backend_->writeSuccessor(uint256ToString(prev), sequence, uint256ToString(cur->key));

                        if (isBookDir(cur->key, cur->blob))
                        {
                        if (isBookDir(cur->key, cur->blob)) {
                            auto base = getBookBase(cur->key);
                            // make sure the base is not an actual object
                            if (!backend_->cache().get(cur->key, sequence))
                            {
                            if (!backend_->cache().get(cur->key, sequence)) {
                                auto succ = backend_->cache().getSuccessor(base, sequence);
                                assert(succ);
                                if (succ->key == cur->key)
                                {
                                if (succ->key == cur->key) {
                                    LOG(log_.debug()) << "Writing book successor = " << ripple::strHex(base)
                                                      << " - " << ripple::strHex(cur->key);

                                    backend_->writeSuccessor(
                                        uint256ToString(base), sequence, uint256ToString(cur->key));
                                        uint256ToString(base), sequence, uint256ToString(cur->key)
                                    );
                                }
                            }

                            ++numWrites;
                        }

                        prev = std::move(cur->key);
                        if (numWrites % 100000 == 0 && numWrites != 0)
                        prev = cur->key;
                        static constexpr std::size_t LOG_INTERVAL = 100000;
                        if (numWrites % LOG_INTERVAL == 0 && numWrites != 0)
                            LOG(log_.info()) << "Wrote " << numWrites << " book successors";
                    }

@@ -241,8 +240,7 @@ public:

        LOG(log_.debug()) << "Loaded initial ledger";

        if (not state_.get().isStopping)
        {
        if (not state_.get().isStopping) {
            backend_->writeAccountTransactions(std::move(insertTxResult.accountTxData));
            backend_->writeNFTs(std::move(insertTxResult.nfTokensData));
            backend_->writeNFTTransactions(std::move(insertTxResult.nfTokenTxData));
@@ -21,13 +21,14 @@

#include <data/BackendInterface.h>
#include <etl/SystemState.h>
#include <feed/SubscriptionManager.h>
#include <util/LedgerUtils.h>
#include <util/Profiler.h>
#include <util/log/Logger.h>

#include <ripple/protocol/LedgerHeader.h>

#include <chrono>
#include <utility>

namespace etl::detail {

@@ -42,14 +43,14 @@ namespace etl::detail {
* includes reading all of the transactions from the database) is done from the application wide asio io_service, and a
* strand is used to ensure ledgers are published in order.
*/
template <typename SubscriptionManagerType>
class LedgerPublisher
{
template <typename SubscriptionManagerType, typename CacheType>
class LedgerPublisher {
util::Logger log_{"ETL"};

boost::asio::strand<boost::asio::io_context::executor_type> publishStrand_;

std::shared_ptr<BackendInterface> backend_;
std::reference_wrapper<CacheType> cache_;
std::shared_ptr<SubscriptionManagerType> subscriptions_;
std::reference_wrapper<SystemState const> state_; // shared state for ETL

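The class comment above notes that publishing runs through an asio strand so ledgers go out in order. A minimal standalone sketch of that pattern, assuming only a plain io_context and none of the surrounding Clio machinery:

#include <boost/asio.hpp>
#include <iostream>

int
main()
{
    boost::asio::io_context ioc;
    auto strand = boost::asio::make_strand(ioc);

    // Even with many threads running ioc, handlers posted to the same strand
    // never run concurrently and execute in the order they were posted.
    for (int seq = 1; seq <= 3; ++seq) {
        boost::asio::post(strand, [seq]() { std::cout << "publishing ledger " << seq << '\n'; });
    }

    ioc.run();
}
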
@@ -69,11 +70,14 @@ public:
LedgerPublisher(
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<feed::SubscriptionManager> subscriptions,
SystemState const& state)
CacheType& cache,
std::shared_ptr<SubscriptionManagerType> subscriptions,
SystemState const& state
)
: publishStrand_{boost::asio::make_strand(ioc)}
, backend_{backend}
, subscriptions_{subscriptions}
, backend_{std::move(backend)}
, cache_{cache}
, subscriptions_{std::move(subscriptions)}
, state_{std::cref(state)}
{
}
@@ -91,42 +95,36 @@ public:
{
LOG(log_.info()) << "Attempting to publish ledger = " << ledgerSequence;
size_t numAttempts = 0;
while (not state_.get().isStopping)
{
while (not state_.get().isStopping) {
auto range = backend_->hardFetchLedgerRangeNoThrow();

if (!range || range->maxSequence < ledgerSequence)
{
LOG(log_.debug()) << "Trying to publish. Could not find "
"ledger with sequence = "
<< ledgerSequence;
if (!range || range->maxSequence < ledgerSequence) {
++numAttempts;
LOG(log_.debug()) << "Trying to publish. Could not find ledger with sequence = " << ledgerSequence;

// We try maxAttempts times to publish the ledger, waiting one second in between each attempt.
if (maxAttempts && numAttempts >= maxAttempts)
{
if (maxAttempts && numAttempts >= maxAttempts) {
LOG(log_.debug()) << "Failed to publish ledger after " << numAttempts << " attempts.";
return false;
}
std::this_thread::sleep_for(std::chrono::seconds(1));
++numAttempts;
continue;
}
else
{
auto lgr = data::synchronousAndRetryOnTimeout(
[&](auto yield) { return backend_->fetchLedgerBySequence(ledgerSequence, yield); });

assert(lgr);
publish(*lgr);
auto lgr = data::synchronousAndRetryOnTimeout([&](auto yield) {
return backend_->fetchLedgerBySequence(ledgerSequence, yield);
});

return true;
}
assert(lgr);
publish(*lgr);

return true;
}
return false;
}

/**
* @brief Publish the passed in ledger
* @brief Publish the passed ledger asynchronously.
*
* All ledgers are published through publishStrand_ which ensures that all publishes are performed in a serial fashion.
*
@@ -138,39 +136,52 @@ public:
boost::asio::post(publishStrand_, [this, lgrInfo = lgrInfo]() {
LOG(log_.info()) << "Publishing ledger " << std::to_string(lgrInfo.seq);

if (!state_.get().isWriting)
{
if (!state_.get().isWriting) {
LOG(log_.info()) << "Updating cache";

std::vector<data::LedgerObject> diff = data::synchronousAndRetryOnTimeout(
[&](auto yield) { return backend_->fetchLedgerDiff(lgrInfo.seq, yield); });
std::vector<data::LedgerObject> const diff = data::synchronousAndRetryOnTimeout([&](auto yield) {
return backend_->fetchLedgerDiff(lgrInfo.seq, yield);
});

backend_->cache().update(diff, lgrInfo.seq); // todo: inject cache to update, don't use backend cache
cache_.get().update(diff, lgrInfo.seq);
backend_->updateRange(lgrInfo.seq);
}

setLastClose(lgrInfo.closeTime);
auto age = lastCloseAgeSeconds();

// if the ledger closed over 10 minutes ago, assume we are still catching up and don't publish
// if the ledger closed over MAX_LEDGER_AGE_SECONDS ago, assume we are still catching up and don't publish
// TODO: this probably should be a strategy
if (age < 600)
{
std::optional<ripple::Fees> fees = data::synchronousAndRetryOnTimeout(
[&](auto yield) { return backend_->fetchFees(lgrInfo.seq, yield); });

std::vector<data::TransactionAndMetadata> transactions = data::synchronousAndRetryOnTimeout(
[&](auto yield) { return backend_->fetchAllTransactionsInLedger(lgrInfo.seq, yield); });

auto ledgerRange = backend_->fetchLedgerRange();
assert(ledgerRange);
static constexpr std::uint32_t MAX_LEDGER_AGE_SECONDS = 600;
if (age < MAX_LEDGER_AGE_SECONDS) {
std::optional<ripple::Fees> fees = data::synchronousAndRetryOnTimeout([&](auto yield) {
return backend_->fetchFees(lgrInfo.seq, yield);
});
assert(fees);

std::string range =
std::vector<data::TransactionAndMetadata> transactions =
data::synchronousAndRetryOnTimeout([&](auto yield) {
return backend_->fetchAllTransactionsInLedger(lgrInfo.seq, yield);
});

auto const ledgerRange = backend_->fetchLedgerRange();
assert(ledgerRange);

std::string const range =
std::to_string(ledgerRange->minSequence) + "-" + std::to_string(ledgerRange->maxSequence);

subscriptions_->pubLedger(lgrInfo, *fees, range, transactions.size());

// order with transaction index
std::sort(transactions.begin(), transactions.end(), [](auto const& t1, auto const& t2) {
ripple::SerialIter iter1{t1.metadata.data(), t1.metadata.size()};
ripple::STObject const object1(iter1, ripple::sfMetadata);
ripple::SerialIter iter2{t2.metadata.data(), t2.metadata.size()};
ripple::STObject const object2(iter2, ripple::sfMetadata);
return object1.getFieldU32(ripple::sfTransactionIndex) <
object2.getFieldU32(ripple::sfTransactionIndex);
});

for (auto& txAndMeta : transactions)
subscriptions_->pubTransaction(txAndMeta, lgrInfo);

@@ -178,8 +189,7 @@ public:

setLastPublishTime();
LOG(log_.info()) << "Published ledger " << std::to_string(lgrInfo.seq);
}
else
} else
LOG(log_.info()) << "Skipping publishing ledger " << std::to_string(lgrInfo.seq);
});

@@ -203,7 +213,7 @@ public:
std::chrono::time_point<std::chrono::system_clock>
getLastPublish() const
{
std::shared_lock lck(publishTimeMtx_);
std::shared_lock const lck(publishTimeMtx_);
return lastPublish_;
}

@@ -213,7 +223,7 @@ public:
std::uint32_t
lastCloseAgeSeconds() const
{
std::shared_lock lck(closeTimeMtx_);
std::shared_lock const lck(closeTimeMtx_);
auto now = std::chrono::duration_cast<std::chrono::seconds>(std::chrono::system_clock::now().time_since_epoch())
.count();
auto closeTime = lastCloseTime_.time_since_epoch().count();
@@ -222,10 +232,14 @@ public:
return now - (rippleEpochStart + closeTime);
}

/**
* @brief Get the sequence of the last scheduled ledger to publish. Be aware that the ledger may not have been
* published to the network
*/
std::optional<uint32_t>
getLastPublishedSequence() const
{
std::scoped_lock lck(lastPublishedSeqMtx_);
std::scoped_lock const lck(lastPublishedSeqMtx_);
return lastPublishedSequence_;
}

@@ -233,21 +247,21 @@ private:
void
setLastClose(std::chrono::time_point<ripple::NetClock> lastCloseTime)
{
std::scoped_lock lck(closeTimeMtx_);
std::scoped_lock const lck(closeTimeMtx_);
lastCloseTime_ = lastCloseTime;
}

void
setLastPublishTime()
{
std::scoped_lock lck(publishTimeMtx_);
std::scoped_lock const lck(publishTimeMtx_);
lastPublish_ = std::chrono::system_clock::now();
}

void
setLastPublishedSequence(std::optional<uint32_t> lastPublishedSequence)
{
std::scoped_lock lck(lastPublishedSeqMtx_);
std::scoped_lock const lck(lastPublishedSeqMtx_);
lastPublishedSequence_ = lastPublishedSequence;
}
};

@@ -21,6 +21,7 @@

#include <data/BackendInterface.h>
#include <etl/SystemState.h>
#include <etl/impl/AmendmentBlock.h>
#include <etl/impl/LedgerLoader.h>
#include <util/LedgerUtils.h>
#include <util/Profiler.h>
@@ -33,6 +34,7 @@
#include <chrono>
#include <memory>
#include <thread>
#include <utility>

namespace etl::detail {

@@ -47,9 +49,12 @@ namespace etl::detail {
/**
* @brief Transformer thread that prepares new ledger out of raw data from GRPC.
*/
template <typename DataPipeType, typename LedgerLoaderType, typename LedgerPublisherType>
class Transformer
{
template <
typename DataPipeType,
typename LedgerLoaderType,
typename LedgerPublisherType,
typename AmendmentBlockHandlerType>
class Transformer {
using GetLedgerResponseType = typename LedgerLoaderType::GetLedgerResponseType;
using RawLedgerObjectType = typename LedgerLoaderType::RawLedgerObjectType;

@@ -59,6 +64,8 @@ class Transformer
std::shared_ptr<BackendInterface> backend_;
std::reference_wrapper<LedgerLoaderType> loader_;
std::reference_wrapper<LedgerPublisherType> publisher_;
std::reference_wrapper<AmendmentBlockHandlerType> amendmentBlockHandler_;

uint32_t startSequence_;
std::reference_wrapper<SystemState> state_; // shared state for ETL

@@ -76,12 +83,15 @@ public:
std::shared_ptr<BackendInterface> backend,
LedgerLoaderType& loader,
LedgerPublisherType& publisher,
AmendmentBlockHandlerType& amendmentBlockHandler,
uint32_t startSequence,
SystemState& state)
: pipe_(std::ref(pipe))
, backend_{backend}
, loader_(std::ref(loader))
, publisher_(std::ref(publisher))
SystemState& state
)
: pipe_{std::ref(pipe)}
, backend_{std::move(backend)}
, loader_{std::ref(loader)}
, publisher_{std::ref(publisher)}
, amendmentBlockHandler_{std::ref(amendmentBlockHandler)}
, startSequence_{startSequence}
, state_{std::ref(state)}
{
@@ -114,8 +124,7 @@ private:
beast::setCurrentThreadName("ETLService transform");
uint32_t currentSequence = startSequence_;

while (not hasWriteConflict())
{
while (not hasWriteConflict()) {
auto fetchResponse = pipe_.get().popNext(currentSequence);
++currentSequence;

@@ -130,8 +139,7 @@ private:
auto const start = std::chrono::system_clock::now();
auto [lgrInfo, success] = buildNextLedger(*fetchResponse);

if (success)
{
if (success) {
auto const numTxns = fetchResponse->transactions_list().transactions_size();
auto const numObjects = fetchResponse->ledger_objects().objects_size();
auto const end = std::chrono::system_clock::now();
@@ -145,9 +153,7 @@ private:

// success is false if the ledger was already written
publisher_.get().publish(lgrInfo);
}
else
{
} else {
LOG(log_.error()) << "Error writing ledger. " << util::toString(lgrInfo);
}

@@ -174,22 +180,17 @@ private:

writeSuccessors(lgrInfo, rawData);
std::optional<FormattedTransactionsData> insertTxResultOp;
try
{
try {
updateCache(lgrInfo, rawData);

LOG(log_.debug()) << "Inserted/modified/deleted all objects. Number of objects = "
<< rawData.ledger_objects().objects_size();

insertTxResultOp.emplace(loader_.get().insertTransactions(lgrInfo, rawData));
}
catch (std::runtime_error const& e)
{
setAmendmentBlocked();
} catch (std::runtime_error const& e) {
LOG(log_.fatal()) << "Failed to build next ledger: " << e.what();

log_.fatal()
<< "Failed to build next ledger: " << e.what()
<< " Possible cause: The ETL node is not compatible with the version of the rippled lib Clio is using.";
amendmentBlockHandler_.get().onAmendmentBlock();
return {ripple::LedgerHeader{}, false};
}

@@ -225,47 +226,41 @@ private:
std::set<ripple::uint256> bookSuccessorsToCalculate;
std::set<ripple::uint256> modified;

for (auto& obj : *(rawData.mutable_ledger_objects()->mutable_objects()))
{
for (auto& obj : *(rawData.mutable_ledger_objects()->mutable_objects())) {
auto key = ripple::uint256::fromVoidChecked(obj.key());
assert(key);

cacheUpdates.push_back({*key, {obj.mutable_data()->begin(), obj.mutable_data()->end()}});
LOG(log_.debug()) << "key = " << ripple::strHex(*key) << " - mod type = " << obj.mod_type();

if (obj.mod_type() != RawLedgerObjectType::MODIFIED && !rawData.object_neighbors_included())
{
if (obj.mod_type() != RawLedgerObjectType::MODIFIED && !rawData.object_neighbors_included()) {
LOG(log_.debug()) << "object neighbors not included. using cache";

if (!backend_->cache().isFull() || backend_->cache().latestLedgerSequence() != lgrInfo.seq - 1)
throw std::runtime_error("Cache is not full, but object neighbors were not included");
throw std::logic_error("Cache is not full, but object neighbors were not included");

auto const blob = obj.mutable_data();
auto checkBookBase = false;
auto const isDeleted = (blob->size() == 0);

if (isDeleted)
{
if (isDeleted) {
auto const old = backend_->cache().get(*key, lgrInfo.seq - 1);
assert(old);
checkBookBase = isBookDir(*key, *old);
}
else
{
} else {
checkBookBase = isBookDir(*key, *blob);
}

if (checkBookBase)
{
if (checkBookBase) {
LOG(log_.debug()) << "Is book dir. Key = " << ripple::strHex(*key);

auto const bookBase = getBookBase(*key);
auto const oldFirstDir = backend_->cache().getSuccessor(bookBase, lgrInfo.seq - 1);
assert(oldFirstDir);

// We deleted the first directory, or we added a directory prior to the old first directory
if ((isDeleted && key == oldFirstDir->key) || (!isDeleted && key < oldFirstDir->key))
{
// We deleted the first directory, or we added a directory prior to the old first
// directory
if ((isDeleted && key == oldFirstDir->key) || (!isDeleted && key < oldFirstDir->key)) {
LOG(log_.debug())
<< "Need to recalculate book base successor. base = " << ripple::strHex(bookBase)
<< " - key = " << ripple::strHex(*key) << " - isDeleted = " << isDeleted
@@ -284,15 +279,13 @@ private:
backend_->cache().update(cacheUpdates, lgrInfo.seq);

// rippled didn't send successor information, so use our cache
if (!rawData.object_neighbors_included())
{
if (!rawData.object_neighbors_included()) {
LOG(log_.debug()) << "object neighbors not included. using cache";
if (!backend_->cache().isFull() || backend_->cache().latestLedgerSequence() != lgrInfo.seq)
throw std::runtime_error("Cache is not full, but object neighbors were not included");
throw std::logic_error("Cache is not full, but object neighbors were not included");

for (auto const& obj : cacheUpdates)
{
if (modified.count(obj.key))
for (auto const& obj : cacheUpdates) {
if (modified.contains(obj.key))
continue;

auto lb = backend_->cache().getPredecessor(obj.key, lgrInfo.seq);
@@ -303,15 +296,12 @@ private:
if (!ub)
ub = {data::lastKey, {}};

if (obj.blob.size() == 0)
{
if (obj.blob.empty()) {
LOG(log_.debug()) << "writing successor for deleted object " << ripple::strHex(obj.key) << " - "
<< ripple::strHex(lb->key) << " - " << ripple::strHex(ub->key);

backend_->writeSuccessor(uint256ToString(lb->key), lgrInfo.seq, uint256ToString(ub->key));
}
else
{
} else {
backend_->writeSuccessor(uint256ToString(lb->key), lgrInfo.seq, uint256ToString(obj.key));
backend_->writeSuccessor(uint256ToString(obj.key), lgrInfo.seq, uint256ToString(ub->key));

@@ -320,18 +310,14 @@ private:
}
}

for (auto const& base : bookSuccessorsToCalculate)
{
for (auto const& base : bookSuccessorsToCalculate) {
auto succ = backend_->cache().getSuccessor(base, lgrInfo.seq);
if (succ)
{
if (succ) {
backend_->writeSuccessor(uint256ToString(base), lgrInfo.seq, uint256ToString(succ->key));

LOG(log_.debug()) << "Updating book successor " << ripple::strHex(base) << " - "
<< ripple::strHex(succ->key);
}
else
{
} else {
backend_->writeSuccessor(uint256ToString(base), lgrInfo.seq, uint256ToString(data::lastKey));

LOG(log_.debug()) << "Updating book successor " << ripple::strHex(base) << " - "
@@ -351,12 +337,10 @@ private:
writeSuccessors(ripple::LedgerHeader const& lgrInfo, GetLedgerResponseType& rawData)
{
// Write successor info, if included from rippled
if (rawData.object_neighbors_included())
{
if (rawData.object_neighbors_included()) {
LOG(log_.debug()) << "object neighbors included";

for (auto& obj : *(rawData.mutable_book_successors()))
{
for (auto& obj : *(rawData.mutable_book_successors())) {
auto firstBook = std::move(*obj.mutable_first_book());
if (!firstBook.size())
firstBook = uint256ToString(data::lastKey);
@@ -366,34 +350,28 @@ private:
backend_->writeSuccessor(std::move(*obj.mutable_book_base()), lgrInfo.seq, std::move(firstBook));
}

for (auto& obj : *(rawData.mutable_ledger_objects()->mutable_objects()))
{
if (obj.mod_type() != RawLedgerObjectType::MODIFIED)
{
for (auto& obj : *(rawData.mutable_ledger_objects()->mutable_objects())) {
if (obj.mod_type() != RawLedgerObjectType::MODIFIED) {
std::string* predPtr = obj.mutable_predecessor();
if (!predPtr->size())
if (predPtr->empty())
*predPtr = uint256ToString(data::firstKey);
std::string* succPtr = obj.mutable_successor();
if (!succPtr->size())
if (succPtr->empty())
*succPtr = uint256ToString(data::lastKey);

if (obj.mod_type() == RawLedgerObjectType::DELETED)
{
if (obj.mod_type() == RawLedgerObjectType::DELETED) {
LOG(log_.debug()) << "Modifying successors for deleted object " << ripple::strHex(obj.key())
<< " - " << ripple::strHex(*predPtr) << " - " << ripple::strHex(*succPtr);

backend_->writeSuccessor(std::move(*predPtr), lgrInfo.seq, std::move(*succPtr));
}
else
{
} else {
LOG(log_.debug()) << "adding successor for new object " << ripple::strHex(obj.key()) << " - "
<< ripple::strHex(*predPtr) << " - " << ripple::strHex(*succPtr);

backend_->writeSuccessor(std::move(*predPtr), lgrInfo.seq, std::string{obj.key()});
backend_->writeSuccessor(std::string{obj.key()}, lgrInfo.seq, std::move(*succPtr));
}
}
else
} else
LOG(log_.debug()) << "object modified " << ripple::strHex(obj.key());
}
}
@@ -423,19 +401,6 @@ private:
{
state_.get().writeConflict = conflict;
}

/**
* @brief Sets the amendment blocked flag.
*
* Being amendment blocked means that Clio was compiled with libxrpl that does not yet support some field that
* arrived from rippled and therefore can't extract the ledger diff. When this happens, Clio can't proceed with ETL
* and should log this error and only handle RPC requests.
*/
void
setAmendmentBlocked()
{
state_.get().isAmendmentBlocked = true;
}
};

} // namespace etl::detail

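A tiny illustrative sketch of the flag handshake the setAmendmentBlocked() comment above describes; SystemStateSketch is a made-up stand-in for the real SystemState, not code from this diff:

#include <atomic>

struct SystemStateSketch {
    std::atomic_bool isAmendmentBlocked = false;
};

inline bool
shouldContinueEtl(SystemStateSketch const& state)
{
    // Once blocked, the writer stops extracting/transforming; RPC serving continues.
    return !state.isAmendmentBlocked;
}
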
@@ -35,6 +35,12 @@ Subscription::unsubscribe(SessionPtrType const& session)
boost::asio::post(strand_, [this, session]() { removeSession(session, subscribers_, subCount_); });
}

bool
Subscription::hasSession(SessionPtrType const& session)
{
return subscribers_.contains(session);
}

void
Subscription::publish(std::shared_ptr<std::string> const& message)
{
@@ -46,7 +52,8 @@ getLedgerPubMessage(
ripple::LedgerHeader const& lgrInfo,
ripple::Fees const& fees,
std::string const& ledgerRange,
std::uint32_t txnCount)
std::uint32_t txnCount
)
{
boost::json::object pubMsg;

@@ -78,7 +85,7 @@ SubscriptionManager::subLedger(boost::asio::yield_context yield, SessionPtrType
fees = backend_->fetchFees(lgrInfo->seq, yield);
assert(fees);

std::string range = std::to_string(ledgerRange->minSequence) + "-" + std::to_string(ledgerRange->maxSequence);
std::string const range = std::to_string(ledgerRange->minSequence) + "-" + std::to_string(ledgerRange->maxSequence);

auto pubMsg = getLedgerPubMessage(*lgrInfo, *fees, range, 0);
pubMsg.erase("txn_count");
@@ -121,8 +128,9 @@ SubscriptionManager::unsubAccount(ripple::AccountID const& account, SessionPtrTy
void
SubscriptionManager::subBook(ripple::Book const& book, SessionPtrType session)
{
subscribeHelper(
session, book, bookSubscribers_, [this, book](SessionPtrType session) { unsubBook(book, session); });
subscribeHelper(session, book, bookSubscribers_, [this, book](SessionPtrType session) {
unsubBook(book, session);
});
}

void
@@ -148,10 +156,12 @@ SubscriptionManager::pubLedger(
ripple::LedgerHeader const& lgrInfo,
ripple::Fees const& fees,
std::string const& ledgerRange,
std::uint32_t txnCount)
std::uint32_t txnCount
)
{
auto message = std::make_shared<std::string>(
boost::json::serialize(getLedgerPubMessage(lgrInfo, fees, ledgerRange, txnCount)));
auto message =
std::make_shared<std::string>(boost::json::serialize(getLedgerPubMessage(lgrInfo, fees, ledgerRange, txnCount))
);

ledgerSubscribers_.publish(message);
}
@@ -164,6 +174,8 @@ SubscriptionManager::pubTransaction(data::TransactionAndMetadata const& blobs, r
pubObj["transaction"] = rpc::toJson(*tx);
pubObj["meta"] = rpc::toJson(*meta);
rpc::insertDeliveredAmount(pubObj["meta"].as_object(), tx, meta, blobs.date);
// hardcode api_version to 1 for now, until https://github.com/XRPLF/clio/issues/978 fixed
rpc::insertDeliverMaxAlias(pubObj["transaction"].as_object(), 1);
pubObj["type"] = "transaction";
pubObj["validated"] = true;
pubObj["status"] = "closed";
@@ -178,12 +190,10 @@ SubscriptionManager::pubTransaction(data::TransactionAndMetadata const& blobs, r
ripple::transResultInfo(meta->getResultTER(), token, human);
pubObj["engine_result"] = token;
pubObj["engine_result_message"] = human;
if (tx->getTxnType() == ripple::ttOFFER_CREATE)
{
if (tx->getTxnType() == ripple::ttOFFER_CREATE) {
auto account = tx->getAccountID(ripple::sfAccount);
auto amount = tx->getFieldAmount(ripple::sfTakerGets);
if (account != amount.issue().account)
{
if (account != amount.issue().account) {
ripple::STAmount ownerFunds;
auto fetchFundsSynchronous = [&]() {
data::synchronous([&](boost::asio::yield_context yield) {
@@ -207,33 +217,30 @@ SubscriptionManager::pubTransaction(data::TransactionAndMetadata const& blobs, r

std::unordered_set<ripple::Book> alreadySent;

for (auto const& node : meta->getNodes())
{
if (node.getFieldU16(ripple::sfLedgerEntryType) == ripple::ltOFFER)
{
for (auto const& node : meta->getNodes()) {
if (node.getFieldU16(ripple::sfLedgerEntryType) == ripple::ltOFFER) {
ripple::SField const* field = nullptr;

// We need a field that contains the TakerGets and TakerPays
// parameters.
if (node.getFName() == ripple::sfModifiedNode)
if (node.getFName() == ripple::sfModifiedNode) {
field = &ripple::sfPreviousFields;
else if (node.getFName() == ripple::sfCreatedNode)
} else if (node.getFName() == ripple::sfCreatedNode) {
field = &ripple::sfNewFields;
else if (node.getFName() == ripple::sfDeletedNode)
} else if (node.getFName() == ripple::sfDeletedNode) {
field = &ripple::sfFinalFields;
}

if (field)
{
auto data = dynamic_cast<const ripple::STObject*>(node.peekAtPField(*field));
if (field != nullptr) {
auto data = dynamic_cast<ripple::STObject const*>(node.peekAtPField(*field));

if (data && data->isFieldPresent(ripple::sfTakerPays) && data->isFieldPresent(ripple::sfTakerGets))
{
if ((data != nullptr) && data->isFieldPresent(ripple::sfTakerPays) &&
data->isFieldPresent(ripple::sfTakerGets)) {
// determine the OrderBook
ripple::Book book{
ripple::Book const book{
data->getFieldAmount(ripple::sfTakerGets).issue(),
data->getFieldAmount(ripple::sfTakerPays).issue()};
if (alreadySent.find(book) == alreadySent.end())
{
if (alreadySent.find(book) == alreadySent.end()) {
bookSubscribers_.publish(pubMsg, book);
alreadySent.insert(book);
}
@@ -246,7 +253,8 @@ SubscriptionManager::pubTransaction(data::TransactionAndMetadata const& blobs, r
void
SubscriptionManager::pubBookChanges(
ripple::LedgerHeader const& lgrInfo,
std::vector<data::TransactionAndMetadata> const& transactions)
std::vector<data::TransactionAndMetadata> const& transactions
)
{
auto const json = rpc::computeBookChanges(lgrInfo, transactions);
auto const bookChangesMsg = std::make_shared<std::string>(boost::json::serialize(json));
@@ -321,8 +329,9 @@ SubscriptionManager::unsubProposedAccount(ripple::AccountID const& account, Sess
void
SubscriptionManager::subProposedTransactions(SessionPtrType session)
{
subscribeHelper(
session, txProposedSubscribers_, [this](SessionPtrType session) { unsubProposedTransactions(session); });
subscribeHelper(session, txProposedSubscribers_, [this](SessionPtrType session) {
unsubProposedTransactions(session);
});
}

void
@@ -334,8 +343,10 @@ SubscriptionManager::unsubProposedTransactions(SessionPtrType session)
void
SubscriptionManager::subscribeHelper(SessionPtrType const& session, Subscription& subs, CleanupFunction&& func)
{
if (subs.hasSession(session))
return;
subs.subscribe(session);
std::scoped_lock lk(cleanupMtx_);
std::scoped_lock const lk(cleanupMtx_);
cleanupFuncs_[session].push_back(std::move(func));
}

@@ -345,22 +356,24 @@ SubscriptionManager::subscribeHelper(
SessionPtrType const& session,
Key const& k,
SubscriptionMap<Key>& subs,
CleanupFunction&& func)
CleanupFunction&& func
)
{
if (subs.hasSession(session, k))
return;
subs.subscribe(session, k);
std::scoped_lock lk(cleanupMtx_);
std::scoped_lock const lk(cleanupMtx_);
cleanupFuncs_[session].push_back(std::move(func));
}

void
SubscriptionManager::cleanup(SessionPtrType session)
{
std::scoped_lock lk(cleanupMtx_);
std::scoped_lock const lk(cleanupMtx_);
if (!cleanupFuncs_.contains(session))
return;

for (auto f : cleanupFuncs_[session])
{
for (auto const& f : cleanupFuncs_[session]) {
f(session);
}

@@ -22,6 +22,7 @@
#include <data/BackendInterface.h>
#include <util/config/Config.h>
#include <util/log/Logger.h>
#include <util/prometheus/Prometheus.h>
#include <web/interface/ConnectionBase.h>

#include <ripple/protocol/LedgerHeader.h>
@@ -44,18 +45,14 @@ using SessionPtrType = std::shared_ptr<web::ConnectionBase>;
*/
template <class T>
inline void
sendToSubscribers(std::shared_ptr<std::string> const& message, T& subscribers, std::atomic_uint64_t& counter)
sendToSubscribers(std::shared_ptr<std::string> const& message, T& subscribers, util::prometheus::GaugeInt& counter)
{
for (auto it = subscribers.begin(); it != subscribers.end();)
{
for (auto it = subscribers.begin(); it != subscribers.end();) {
auto& session = *it;
if (session->dead())
{
if (session->dead()) {
it = subscribers.erase(it);
--counter;
}
else
{
} else {
session->send(message);
++it;
}
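
sendToSubscribers() above uses the canonical erase-or-advance loop for pruning a container while iterating it: erase() returns the next valid iterator, so each pass either erases or increments, never both. A standalone sketch with illustrative types:

#include <iostream>
#include <unordered_set>

int
main()
{
    std::unordered_set<int> sessions{1, 2, 3, 4};

    for (auto it = sessions.begin(); it != sessions.end();) {
        if (*it % 2 == 0) {
            it = sessions.erase(it);  // "dead" session: erase and take the returned successor
        } else {
            std::cout << "send to " << *it << '\n';  // live session: keep it and advance
            ++it;
        }
    }
}
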
@@ -71,10 +68,9 @@ sendToSubscribers(std::shared_ptr<std::string> const& message, T& subscribers, s
*/
template <class T>
inline void
addSession(SessionPtrType session, T& subscribers, std::atomic_uint64_t& counter)
addSession(SessionPtrType session, T& subscribers, util::prometheus::GaugeInt& counter)
{
if (!subscribers.contains(session))
{
if (!subscribers.contains(session)) {
subscribers.insert(session);
++counter;
}
@@ -89,10 +85,9 @@ addSession(SessionPtrType session, T& subscribers, std::atomic_uint64_t& counter
*/
template <class T>
inline void
removeSession(SessionPtrType session, T& subscribers, std::atomic_uint64_t& counter)
removeSession(SessionPtrType session, T& subscribers, util::prometheus::GaugeInt& counter)
{
if (subscribers.contains(session))
{
if (subscribers.contains(session)) {
subscribers.erase(session);
--counter;
}
@@ -101,11 +96,10 @@ removeSession(SessionPtrType session, T& subscribers, std::atomic_uint64_t& coun
/**
* @brief Represents a subscription stream.
*/
class Subscription
{
class Subscription {
boost::asio::strand<boost::asio::io_context::executor_type> strand_;
std::unordered_set<SessionPtrType> subscribers_ = {};
std::atomic_uint64_t subCount_ = 0;
util::prometheus::GaugeInt& subCount_;

public:
Subscription() = delete;
@@ -117,7 +111,13 @@ public:
*
* @param ioc The io_context to run on
*/
explicit Subscription(boost::asio::io_context& ioc) : strand_(boost::asio::make_strand(ioc))
explicit Subscription(boost::asio::io_context& ioc, std::string const& name)
: strand_(boost::asio::make_strand(ioc))
, subCount_(PrometheusService::gaugeInt(
"subscriptions_current_number",
util::prometheus::Labels({util::prometheus::Label{"stream", name}}),
fmt::format("Current subscribers number on the {} stream", name)
))
{
}

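The new constructor above registers its gauge with the Prometheus registry and keeps only a reference. A hedged usage sketch reusing just the calls visible in this diff; the "example" stream name and the plain-string description are assumptions:

#include <util/prometheus/Prometheus.h>

inline void
gaugeUsageSketch()
{
    // The registry owns the gauge; callers hold a reference to it.
    util::prometheus::GaugeInt& subCount = PrometheusService::gaugeInt(
        "subscriptions_current_number",
        util::prometheus::Labels({util::prometheus::Label{"stream", "example"}}),
        "Current subscribers number on the example stream"
    );

    ++subCount;                             // a session subscribed
    --subCount;                             // a session went away
    auto const current = subCount.value();  // what count() reports
    (void)current;
}
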
@@ -139,6 +139,15 @@ public:
void
unsubscribe(SessionPtrType const& session);

/**
* @brief Check if a session is in the subscribers list.
*
* @param session The session to check
* @return true if the session is in the subscribers list; false otherwise
*/
bool
hasSession(SessionPtrType const& session);

/**
* @brief Sends the given message to all subscribers.
*
@@ -153,7 +162,7 @@ public:
std::uint64_t
count() const
{
return subCount_.load();
return subCount_.value();
}

/**
@@ -170,13 +179,12 @@ public:
* @brief Represents a collection of subscriptions where each stream is mapped to a key.
*/
template <class Key>
class SubscriptionMap
{
class SubscriptionMap {
using SubscribersType = std::set<SessionPtrType>;

boost::asio::strand<boost::asio::io_context::executor_type> strand_;
std::unordered_map<Key, SubscribersType> subscribers_ = {};
std::atomic_uint64_t subCount_ = 0;
util::prometheus::GaugeInt& subCount_;

public:
SubscriptionMap() = delete;
@@ -188,7 +196,13 @@ public:
*
* @param ioc The io_context to run on
*/
explicit SubscriptionMap(boost::asio::io_context& ioc) : strand_(boost::asio::make_strand(ioc))
explicit SubscriptionMap(boost::asio::io_context& ioc, std::string const& name)
: strand_(boost::asio::make_strand(ioc))
, subCount_(PrometheusService::gaugeInt(
"subscriptions_current_number",
util::prometheus::Labels({util::prometheus::Label{"collection", name}}),
fmt::format("Current subscribers number on the {} collection", name)
))
{
}

@@ -225,13 +239,28 @@ public:
--subCount_;
subscribers_[key].erase(session);

if (subscribers_[key].size() == 0)
{
if (subscribers_[key].size() == 0) {
subscribers_.erase(key);
}
});
}

/**
* @brief Check if a session is in the subscribers list.
*
* @param session The session to check
* @param key The key for the subscription to check
* @return true if the session is in the subscribers list; false otherwise
*/
bool
hasSession(SessionPtrType const& session, Key const& key)
{
if (!subscribers_.contains(key))
return false;

return subscribers_[key].contains(session);
}

/**
* @brief Sends the given message to all subscribers.
*
@@ -255,15 +284,14 @@ public:
std::uint64_t
count() const
{
return subCount_.load();
return subCount_.value();
}
};

/**
* @brief Manages subscriptions.
*/
class SubscriptionManager
{
class SubscriptionManager {
util::Logger log_{"Subscriptions"};

std::vector<std::thread> workers_;
@@ -304,15 +332,15 @@ public:
* @param backend The backend to use
*/
SubscriptionManager(std::uint64_t numThreads, std::shared_ptr<data::BackendInterface const> const& backend)
: ledgerSubscribers_(ioc_)
, txSubscribers_(ioc_)
, txProposedSubscribers_(ioc_)
, manifestSubscribers_(ioc_)
, validationsSubscribers_(ioc_)
, bookChangesSubscribers_(ioc_)
, accountSubscribers_(ioc_)
, accountProposedSubscribers_(ioc_)
, bookSubscribers_(ioc_)
: ledgerSubscribers_(ioc_, "ledger")
, txSubscribers_(ioc_, "tx")
, txProposedSubscribers_(ioc_, "tx_proposed")
, manifestSubscribers_(ioc_, "manifest")
, validationsSubscribers_(ioc_, "validations")
, bookChangesSubscribers_(ioc_, "book_changes")
, accountSubscribers_(ioc_, "account")
, accountProposedSubscribers_(ioc_, "account_proposed")
, bookSubscribers_(ioc_, "book")
, backend_(backend)
{
work_.emplace(ioc_);
@@ -360,7 +388,8 @@ public:
ripple::LedgerHeader const& lgrInfo,
ripple::Fees const& fees,
std::string const& ledgerRange,
std::uint32_t txnCount);
std::uint32_t txnCount
);

/**
* @brief Publish to the book changes stream.

@@ -18,12 +18,6 @@
//==============================================================================

#include <grpc/impl/codegen/port_platform.h>
#ifdef GRPC_TSAN_ENABLED
#undef GRPC_TSAN_ENABLED
#endif
#ifdef GRPC_ASAN_ENABLED
#undef GRPC_ASAN_ENABLED
#endif

#include <data/BackendFactory.h>
#include <etl/ETLService.h>
@@ -31,6 +25,7 @@
#include <rpc/RPCEngine.h>
#include <rpc/common/impl/HandlerProvider.h>
#include <util/config/Config.h>
#include <util/prometheus/Prometheus.h>
#include <web/RPCServerHandler.h>
#include <web/Server.h>

@@ -79,14 +74,12 @@ parseCli(int argc, char* argv[])
po::store(po::command_line_parser(argc, argv).options(description).positional(positional).run(), parsed);
po::notify(parsed);

if (parsed.count("version"))
{
if (parsed.count("version") != 0u) {
std::cout << Build::getClioFullVersionString() << '\n';
std::exit(EXIT_SUCCESS);
}

if (parsed.count("help"))
{
if (parsed.count("help") != 0u) {
std::cout << "Clio server " << Build::getClioFullVersionString() << "\n\n" << description;
std::exit(EXIT_SUCCESS);
}
@@ -109,7 +102,7 @@ parseCerts(Config const& config)
auto certFilename = config.value<std::string>("ssl_cert_file");
auto keyFilename = config.value<std::string>("ssl_key_file");

std::ifstream readCert(certFilename, std::ios::in | std::ios::binary);
std::ifstream const readCert(certFilename, std::ios::in | std::ios::binary);
if (!readCert)
return {};

@@ -153,12 +146,10 @@ start(io_context& ioc, std::uint32_t numThreads)

int
main(int argc, char* argv[])
try
{
try {
auto const configPath = parseCli(argc, argv);
auto const config = ConfigReader::open(configPath);
if (!config)
{
if (!config) {
std::cerr << "Couldnt parse config '" << configPath << "'." << std::endl;
return EXIT_FAILURE;
}
@@ -166,9 +157,10 @@ try
LogService::init(config);
LOG(LogService::info()) << "Clio version: " << Build::getClioFullVersionString();

PrometheusService::init(config);

auto const threads = config.valueOr("io_threads", 2);
if (threads <= 0)
{
if (threads <= 0) {
LOG(LogService::fatal()) << "io_threads is less than 1";
return EXIT_FAILURE;
}
@@ -204,13 +196,16 @@ try
auto workQueue = rpc::WorkQueue::make_WorkQueue(config);
auto counters = rpc::Counters::make_Counters(workQueue);
auto const handlerProvider = std::make_shared<rpc::detail::ProductionHandlerProvider const>(
config, backend, subscriptions, balancer, etl, counters);
config, backend, subscriptions, balancer, etl, counters
);
auto const rpcEngine = rpc::RPCEngine::make_RPCEngine(
config, backend, subscriptions, balancer, etl, dosGuard, workQueue, counters, handlerProvider);
backend, subscriptions, balancer, dosGuard, workQueue, counters, handlerProvider
);

// Init the web server
auto handler = std::make_shared<web::RPCServerHandler<rpc::RPCEngine, etl::ETLService>>(
config, backend, rpcEngine, etl, subscriptions);
config, backend, rpcEngine, etl, subscriptions
);
auto ctx = parseCerts(config);
auto const ctxRef = ctx ? std::optional<std::reference_wrapper<ssl::context>>{ctx.value()} : std::nullopt;
auto const httpServer = web::make_HttpServer(config, ioc, ctxRef, dosGuard, handler);
@@ -221,8 +216,6 @@ try
start(ioc, threads);

return EXIT_SUCCESS;
}
catch (std::exception const& e)
{
} catch (std::exception const& e) {
LOG(LogService::fatal()) << "Exit on exception: " << e.what();
}

@@ -30,13 +30,12 @@ namespace rpc {
/**
* @brief Represents a list of amendments in the XRPL.
*/
struct Amendments
{
struct Amendments {
/**
* @param name The name of the amendment
* @return The corresponding amendment Id
*/
static ripple::uint256 const
static ripple::uint256
GetAmendmentId(std::string_view const name)
{
return ripple::sha512Half(ripple::Slice(name.data(), name.size()));

@@ -29,8 +29,7 @@ namespace rpc {
/**
* @brief Represents an entry in the book_changes' changes array.
*/
struct BookChange
{
struct BookChange {
ripple::STAmount sideAVolume;
ripple::STAmount sideBVolume;
ripple::STAmount highRate;
@@ -42,8 +41,7 @@ struct BookChange
/**
* @brief Encapsulates the book_changes computations and transformations.
*/
class BookChanges final
{
class BookChanges final {
public:
BookChanges() = delete; // only accessed via static handle function

@@ -60,8 +58,7 @@ public:
}

private:
class HandlerImpl final
{
class HandlerImpl final {
std::map<std::string, BookChange> tally_ = {};
std::optional<uint32_t> offerCancel_ = {};

@@ -78,7 +75,8 @@ private:
std::make_move_iterator(std::begin(tally_)),
std::make_move_iterator(std::end(tally_)),
std::back_inserter(changes),
[](auto obj) { return obj.second; });
[](auto obj) { return obj.second; }
);
return changes;
}

@@ -148,8 +146,7 @@ private:
second = -second;

auto const key = noswap ? (g + '|' + p) : (p + '|' + g);
if (tally_.contains(key))
{
if (tally_.contains(key)) {
auto& entry = tally_.at(key);

entry.sideAVolume += first;
@@ -162,9 +159,7 @@ private:
entry.lowRate = rate;

entry.closeRate = rate;
}
else
{
} else {
tally_[key] = {
.sideAVolume = first,
.sideBVolume = second,
@@ -188,17 +183,17 @@ private:
handleAffectedNode(node);
}

std::optional<uint32_t>
shouldCancelOffer(std::shared_ptr<ripple::STTx const> const& tx) const
static std::optional<uint32_t>
shouldCancelOffer(std::shared_ptr<ripple::STTx const> const& tx)
{
switch (tx->getFieldU16(ripple::sfTransactionType))
{
switch (tx->getFieldU16(ripple::sfTransactionType)) {
// in future if any other ways emerge to cancel an offer
// this switch makes them easy to add
case ripple::ttOFFER_CANCEL:
case ripple::ttOFFER_CREATE:
if (tx->isFieldPresent(ripple::sfOfferSequence))
return tx->getFieldU32(ripple::sfOfferSequence);
[[fallthrough]];
default:
return std::nullopt;
}
@@ -241,7 +236,7 @@ tag_invoke(boost::json::value_from_tag, boost::json::value& jv, BookChange const
* @param lgrInfo The ledger header
* @param transactions The vector of transactions with their metadata
*/
[[nodiscard]] boost::json::object const
[[nodiscard]] boost::json::object
computeBookChanges(ripple::LedgerHeader const& lgrInfo, std::vector<data::TransactionAndMetadata> const& transactions);

} // namespace rpc

@@ -23,78 +23,161 @@

namespace rpc {

using util::prometheus::Label;
using util::prometheus::Labels;

Counters::MethodInfo::MethodInfo(std::string const& method)
: started(PrometheusService::counterInt(
"rpc_method_total_number",
Labels{{{"status", "started"}, {"method", method}}},
fmt::format("Total number of started calls to the method {}", method)
))
, finished(PrometheusService::counterInt(
"rpc_method_total_number",
Labels{{{"status", "finished"}, {"method", method}}},
fmt::format("Total number of finished calls to the method {}", method)
))
, failed(PrometheusService::counterInt(
"rpc_method_total_number",
Labels{{{"status", "failed"}, {"method", method}}},
fmt::format("Total number of failed calls to the method {}", method)
))
, errored(PrometheusService::counterInt(
"rpc_method_total_number",
Labels{{{"status", "errored"}, {"method", method}}},
fmt::format("Total number of errored calls to the method {}", method)
))
, forwarded(PrometheusService::counterInt(
"rpc_method_total_number",
Labels{{{"status", "forwarded"}, {"method", method}}},
fmt::format("Total number of forwarded calls to the method {}", method)
))
, failedForward(PrometheusService::counterInt(
"rpc_method_total_number",
Labels{{{"status", "failed_forward"}, {"method", method}}},
fmt::format("Total number of failed forwarded calls to the method {}", method)
))
, duration(PrometheusService::counterInt(
"rpc_method_duration_us",
Labels({util::prometheus::Label{"method", method}}),
fmt::format("Total duration of calls to the method {}", method)
))
{
}

Counters::MethodInfo&
Counters::getMethodInfo(std::string const& method)
{
auto it = methodInfo_.find(method);
if (it == methodInfo_.end()) {
it = methodInfo_.emplace(method, MethodInfo(method)).first;
}
return it->second;
}

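getMethodInfo() above lazily registers the per-method counter bundle on first use and returns the cached entry thereafter. A generic sketch of that pattern with illustrative names, not the real Counters internals:

#include <string>
#include <unordered_map>

struct Metrics {
    explicit Metrics(std::string const& /*method*/) {}  // registers the counters on construction
};

class Registry {
    std::unordered_map<std::string, Metrics> byMethod_;

public:
    Metrics&
    get(std::string const& method)
    {
        // First call for a method constructs (and registers) its metrics;
        // later calls return the same cached entry.
        auto it = byMethod_.find(method);
        if (it == byMethod_.end())
            it = byMethod_.emplace(method, Metrics(method)).first;
        return it->second;
    }
};
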
Counters::Counters(WorkQueue const& wq)
: tooBusyCounter_(PrometheusService::counterInt(
"rpc_error_total_number",
Labels({Label{"error_type", "too_busy"}}),
"Total number of too busy errors"
))
, notReadyCounter_(PrometheusService::counterInt(
"rpc_error_total_number",
Labels({Label{"error_type", "not_ready"}}),
"Total number of not ready replies"
))
, badSyntaxCounter_(PrometheusService::counterInt(
"rpc_error_total_number",
Labels({Label{"error_type", "bad_syntax"}}),
"Total number of bad syntax replies"
))
, unknownCommandCounter_(PrometheusService::counterInt(
"rpc_error_total_number",
Labels({Label{"error_type", "unknown_command"}}),
"Total number of unknown command replies"
))
, internalErrorCounter_(PrometheusService::counterInt(
"rpc_error_total_number",
Labels({Label{"error_type", "internal_error"}}),
"Total number of internal errors"
))
, workQueue_(std::cref(wq))
, startupTime_{std::chrono::system_clock::now()}
{
}

void
Counters::rpcFailed(std::string const& method)
{
std::scoped_lock lk(mutex_);
MethodInfo& counters = methodInfo_[method];
++counters.started;
++counters.failed;
std::scoped_lock const lk(mutex_);
MethodInfo const& counters = getMethodInfo(method);
++counters.started.get();
++counters.failed.get();
}

void
Counters::rpcErrored(std::string const& method)
{
std::scoped_lock lk(mutex_);
MethodInfo& counters = methodInfo_[method];
++counters.started;
++counters.errored;
std::scoped_lock const lk(mutex_);
MethodInfo const& counters = getMethodInfo(method);
++counters.started.get();
++counters.errored.get();
}

void
Counters::rpcComplete(std::string const& method, std::chrono::microseconds const& rpcDuration)
{
std::scoped_lock lk(mutex_);
MethodInfo& counters = methodInfo_[method];
++counters.started;
++counters.finished;
counters.duration += rpcDuration.count();
std::scoped_lock const lk(mutex_);
MethodInfo const& counters = getMethodInfo(method);
++counters.started.get();
++counters.finished.get();
counters.duration.get() += rpcDuration.count();
}

void
Counters::rpcForwarded(std::string const& method)
{
std::scoped_lock lk(mutex_);
MethodInfo& counters = methodInfo_[method];
++counters.forwarded;
std::scoped_lock const lk(mutex_);
MethodInfo const& counters = getMethodInfo(method);
++counters.forwarded.get();
}

void
Counters::rpcFailedToForward(std::string const& method)
{
std::scoped_lock lk(mutex_);
MethodInfo& counters = methodInfo_[method];
++counters.failedForward;
std::scoped_lock const lk(mutex_);
MethodInfo const& counters = getMethodInfo(method);
++counters.failedForward.get();
}

void
Counters::onTooBusy()
{
++tooBusyCounter_;
++tooBusyCounter_.get();
}

void
Counters::onNotReady()
{
++notReadyCounter_;
++notReadyCounter_.get();
}

void
Counters::onBadSyntax()
{
++badSyntaxCounter_;
++badSyntaxCounter_.get();
}

void
Counters::onUnknownCommand()
{
++unknownCommandCounter_;
++unknownCommandCounter_.get();
}

void
Counters::onInternalError()
{
++internalErrorCounter_;
++internalErrorCounter_.get();
}

std::chrono::seconds
@@ -106,31 +189,30 @@ Counters::uptime() const
boost::json::object
Counters::report() const
{
std::scoped_lock lk(mutex_);
std::scoped_lock const lk(mutex_);
auto obj = boost::json::object{};

obj[JS(rpc)] = boost::json::object{};
auto& rpc = obj[JS(rpc)].as_object();

for (auto const& [method, info] : methodInfo_)
{
for (auto const& [method, info] : methodInfo_) {
auto counters = boost::json::object{};
counters[JS(started)] = std::to_string(info.started);
counters[JS(finished)] = std::to_string(info.finished);
counters[JS(errored)] = std::to_string(info.errored);
counters[JS(failed)] = std::to_string(info.failed);
counters["forwarded"] = std::to_string(info.forwarded);
counters["failed_forward"] = std::to_string(info.failedForward);
counters[JS(duration_us)] = std::to_string(info.duration);
counters[JS(started)] = std::to_string(info.started.get().value());
counters[JS(finished)] = std::to_string(info.finished.get().value());
counters[JS(errored)] = std::to_string(info.errored.get().value());
counters[JS(failed)] = std::to_string(info.failed.get().value());
counters["forwarded"] = std::to_string(info.forwarded.get().value());
counters["failed_forward"] = std::to_string(info.failedForward.get().value());
counters[JS(duration_us)] = std::to_string(info.duration.get().value());

rpc[method] = std::move(counters);
}

obj["too_busy_errors"] = std::to_string(tooBusyCounter_);
obj["not_ready_errors"] = std::to_string(notReadyCounter_);
obj["bad_syntax_errors"] = std::to_string(badSyntaxCounter_);
obj["unknown_command_errors"] = std::to_string(unknownCommandCounter_);
obj["internal_errors"] = std::to_string(internalErrorCounter_);
obj["too_busy_errors"] = std::to_string(tooBusyCounter_.get().value());
obj["not_ready_errors"] = std::to_string(notReadyCounter_.get().value());
obj["bad_syntax_errors"] = std::to_string(badSyntaxCounter_.get().value());
obj["unknown_command_errors"] = std::to_string(unknownCommandCounter_.get().value());
obj["internal_errors"] = std::to_string(internalErrorCounter_.get().value());

obj["work_queue"] = workQueue_.get().report();

@@ -20,6 +20,7 @@
|
||||
#pragma once
|
||||
|
||||
#include <rpc/WorkQueue.h>
|
||||
#include <util/prometheus/Prometheus.h>
|
||||
|
||||
#include <boost/json.hpp>
|
||||
|
||||
@@ -33,33 +34,37 @@ namespace rpc {
/**
 * @brief Holds information about successful, failed, forwarded, etc. RPC handler calls.
 */
class Counters
{
class Counters {
    using CounterType = std::reference_wrapper<util::prometheus::CounterInt>;

    /**
     * @brief All counters the system keeps track of for each RPC method.
     */
    struct MethodInfo
    {
        std::uint64_t started = 0u;
        std::uint64_t finished = 0u;
        std::uint64_t failed = 0u;
        std::uint64_t errored = 0u;
        std::uint64_t forwarded = 0u;
        std::uint64_t failedForward = 0u;
        std::uint64_t duration = 0u;
    struct MethodInfo {
        MethodInfo(std::string const& method);

        CounterType started;
        CounterType finished;
        CounterType failed;
        CounterType errored;
        CounterType forwarded;
        CounterType failedForward;
        CounterType duration;
    };

    MethodInfo&
    getMethodInfo(std::string const& method);

    mutable std::mutex mutex_;
    std::unordered_map<std::string, MethodInfo> methodInfo_;

    // counters that don't carry RPC method information
    std::atomic_uint64_t tooBusyCounter_;
    std::atomic_uint64_t notReadyCounter_;
    std::atomic_uint64_t badSyntaxCounter_;
    std::atomic_uint64_t unknownCommandCounter_;
    std::atomic_uint64_t internalErrorCounter_;
    CounterType tooBusyCounter_;
    CounterType notReadyCounter_;
    CounterType badSyntaxCounter_;
    CounterType unknownCommandCounter_;
    CounterType internalErrorCounter_;

    std::reference_wrapper<const WorkQueue> workQueue_;
    std::reference_wrapper<WorkQueue const> workQueue_;
    std::chrono::time_point<std::chrono::system_clock> startupTime_;

public:
@@ -68,7 +73,7 @@ public:
     *
     * @param wq The work queue to operate on
     */
    Counters(WorkQueue const& wq) : workQueue_(std::cref(wq)), startupTime_{std::chrono::system_clock::now()} {};
    Counters(WorkQueue const& wq);

    /**
     * @brief A factory function that creates a new counters instance.

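`std::reference_wrapper` has no default constructor, so every `CounterType` member must be bound when the object is built; that is why `MethodInfo` gains a constructor taking the method name and the previously inline `Counters` constructor moves out of line. A sketch of the shape this takes, where the registry lookup and label names are assumptions rather than Clio's actual API:

    #include <functional>
    #include <map>
    #include <string>

    struct CounterInt {
        long value = 0;
    };

    // Hypothetical registry: returns a long-lived counter for a (name, method)
    // label pair, creating it on first use.
    CounterInt&
    counterFor(std::string const& name, std::string const& method)
    {
        static std::map<std::string, CounterInt> registry;
        return registry[name + "{method=\"" + method + "\"}"];
    }

    // Reference members force an initializing constructor, as in the diff.
    struct MethodInfo {
        explicit MethodInfo(std::string const& method)
            : started(counterFor("rpc_started", method))
            , finished(counterFor("rpc_finished", method))
        {
        }

        std::reference_wrapper<CounterInt> started;
        std::reference_wrapper<CounterInt> finished;
    };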
@@ -26,8 +26,7 @@ using namespace std;

namespace {
template <class... Ts>
struct overloadSet : Ts...
{
struct overloadSet : Ts... {
    using Ts::operator()...;
};

@@ -44,9 +43,8 @@ getWarningInfo(WarningCode code)
    constexpr static WarningInfo infos[]{
        {warnUNKNOWN, "Unknown warning"},
        {warnRPC_CLIO,
         "This is a clio server. clio only serves validated data. If you "
         "want to talk to rippled, include 'ledger_index':'current' in your "
         "request"},
         "This is a clio server. clio only serves validated data. If you want to talk to rippled, include "
         "'ledger_index':'current' in your request"},
        {warnRPC_OUTDATED, "This server may be out of date"},
        {warnRPC_RATE_LIMIT, "You are about to be rate limited"},
    };
@@ -143,11 +141,13 @@ makeError(Status const& status)
            return makeError(err, wrapOptional(status.error), wrapOptional(status.message));
        },
    },
    status.code);
    status.code
);

    if (status.extraInfo)
    if (status.extraInfo) {
        for (auto& [key, value] : status.extraInfo.value())
            res[key] = value;
    }

    return res;
}

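The `overloadSet` defined earlier in this file is the standard C++17 idiom for visiting a variant with a set of lambdas, which is how `makeError(Status const&)` dispatches on `CombinedError` above. A self-contained sketch of the idiom (example types and messages are illustrative only):

    #include <iostream>
    #include <variant>

    template <class... Ts>
    struct overloadSet : Ts... {
        using Ts::operator()...;
    };
    // Deduction guide; needed in C++17, implicit for aggregates in C++20.
    template <class... Ts>
    overloadSet(Ts...) -> overloadSet<Ts...>;

    int main()
    {
        std::variant<int, char const*> v = "oops";

        // Each lambda handles one alternative of the variant.
        std::visit(
            overloadSet{
                [](int code) { std::cout << "numeric error " << code << '\n'; },
                [](char const* msg) { std::cout << "text error " << msg << '\n'; },
            },
            v
        );
    }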
@@ -27,6 +27,7 @@
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <variant>

namespace rpc {
@@ -51,8 +52,7 @@ enum class ClioError {
};

/** @brief Holds info about a particular @ref ClioError. */
struct ClioErrorInfo
{
struct ClioErrorInfo {
    ClioError const code;
    std::string_view const error;
    std::string_view const message;
@@ -70,11 +70,10 @@ using RippledError = ripple::error_code_i;
using CombinedError = std::variant<RippledError, ClioError>;

/** @brief A status returned from any RPC handler. */
struct Status
{
struct Status {
    CombinedError code = RippledError::rpcSUCCESS;
    std::string error = "";
    std::string message = "";
    std::string error;
    std::string message;
    std::optional<boost::json::object> extraInfo;

    Status() = default;
@@ -83,15 +82,16 @@ struct Status

    // HACK. Some rippled handlers explicitly specify errors.
    // This means that we have to be able to duplicate this functionality.
    explicit Status(std::string const& message) : code(ripple::rpcUNKNOWN), message(message)
    explicit Status(std::string message) : code(ripple::rpcUNKNOWN), message(std::move(message))
    {
    }

    Status(CombinedError code, std::string message) : code(code), message(message)
    Status(CombinedError code, std::string message) : code(code), message(std::move(message))
    {
    }

    Status(CombinedError code, std::string error, std::string message) : code(code), error(error), message(message)
    Status(CombinedError code, std::string error, std::string message)
        : code(code), error(std::move(error)), message(std::move(message))
    {
    }

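The constructor changes above switch from binding `std::string const&` and copying to taking `std::string` by value and moving it into the member: callers passing a temporary now pay one move instead of one copy, while callers passing an lvalue pay the same single copy as before. A minimal illustration of the difference, with hypothetical names:

    #include <string>
    #include <utility>

    struct StatusLike {
        // By-value parameter + std::move into the member, as in the diff.
        explicit StatusLike(std::string message) : message_(std::move(message))
        {
        }

        std::string message_;
    };

    int main()
    {
        std::string stored = "kept by the caller";

        StatusLike a{stored};                   // lvalue: one copy, then a move
        StatusLike b{std::string("temporary")}; // rvalue: moves, no copy
        return (a.message_.size() + b.message_.size()) > 0 ? 0 : 1;
    }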
@@ -139,8 +139,7 @@ struct Status
enum WarningCode { warnUNKNOWN = -1, warnRPC_CLIO = 2001, warnRPC_OUTDATED = 2002, warnRPC_RATE_LIMIT = 2003 };

/** @brief Holds information about a clio warning. */
struct WarningInfo
{
struct WarningInfo {
    constexpr WarningInfo() = default;
    constexpr WarningInfo(WarningCode code, char const* message) : code(code), message(message)
    {
@@ -151,16 +150,15 @@ struct WarningInfo
};

/** @brief Invalid parameters error. */
class InvalidParamsError : public std::exception
{
class InvalidParamsError : public std::exception {
    std::string msg;

public:
    explicit InvalidParamsError(std::string const& msg) : msg(msg)
    explicit InvalidParamsError(std::string msg) : msg(std::move(msg))
    {
    }

    const char*
    char const*
    what() const throw() override
    {
        return msg.c_str();
@@ -168,16 +166,15 @@ public:
};

/** @brief Account not found error. */
class AccountNotFoundError : public std::exception
{
class AccountNotFoundError : public std::exception {
    std::string account;

public:
    explicit AccountNotFoundError(std::string const& acct) : account(acct)
    explicit AccountNotFoundError(std::string acct) : account(std::move(acct))
    {
    }

    const char*
    char const*
    what() const throw() override
    {
        return account.c_str();
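The `const char*` to `char const*` rewrites here match the `QualifierAlignment: Right` rule added to `.clang-format`: the qualifier sits to the right of what it modifies, so declarations read consistently right-to-left. For instance:

    int main()
    {
        char const* s = "s points at const chars";  // same type as const char*
        char* const p = nullptr;                    // p itself is const
        char const* const q = s;                    // const pointer to const chars
        return (p == nullptr && q == s) ? 0 : 1;
    }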
@@ -233,7 +230,8 @@ boost::json::object
makeError(
    RippledError err,
    std::optional<std::string_view> customError = std::nullopt,
    std::optional<std::string_view> customMessage = std::nullopt);
    std::optional<std::string_view> customMessage = std::nullopt
);

/**
 * @brief Generate JSON from a @ref rpc::ClioError.
@@ -245,6 +243,7 @@ boost::json::object
makeError(
    ClioError err,
    std::optional<std::string_view> customError = std::nullopt,
    std::optional<std::string_view> customMessage = std::nullopt);
    std::optional<std::string_view> customMessage = std::nullopt
);

} // namespace rpc

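With both trailing parameters defaulting to `std::nullopt`, the `makeError` overloads above let a handler override only the pieces it needs. A self-contained sketch of the same parameter shape (the `report` function is a hypothetical stub, not Clio's `makeError`), also showing the new trailing-`)` style from `AlignAfterOpenBracket: BlockIndent`:

    #include <iostream>
    #include <optional>
    #include <string_view>

    // Stub with the same default-argument shape as the makeError overloads.
    void
    report(
        int err,
        std::optional<std::string_view> customError = std::nullopt,
        std::optional<std::string_view> customMessage = std::nullopt
    )
    {
        std::cout << err << ' ' << customError.value_or("default-error") << ' '
                  << customMessage.value_or("default-message") << '\n';
    }

    int main()
    {
        report(1);                     // both defaults
        report(2, "badInput");         // override just the error string
        report(3, std::nullopt, "hi"); // override just the message
    }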
Some files were not shown because too many files have changed in this diff.