Mirror of https://github.com/XRPLF/clio.git, synced 2025-11-20 11:45:53 +00:00

Compare commits (128 commits)
SHA1: 1bacad9e49, ca16858878, feae85782c, b016c1d7ba, 0597a9d685, 05bea6a971, fa660ef400, 25d9e3cc36, 58f13e1660, a16b680a7a, 320ebaa5d2, 058df4d12a, 5145d07693, 5e9e5f6f65, 1ce7bcbc28, 243858df12, b363cc93af, 200d97f0de, 1ec5d3e5a3, e062121917, 1aab2b94b1, 5de87b9ef8, 398db13f4d, 5e8ffb66b4, 939740494b, ff3d2b5600, 7080b4d549, 8d783ecd6a, 5e6682ddc7, fca29694a0, a541e6d00e, 9bd38dd290, f683b25f76, 91ad1ffc3b, 64b4a908da, ac752c656e, 4fe868aaeb, 59eb40a1f2, 0b5f667e4a, fa42c5c900, 0818b6ce5b, e2cc56d25a, caaa01bf0f, 4b53bef1f5, 69f5025a29, d1c41a8bb7, 207ba51461, ebe7688ccb, 6d9f8a7ead, 6ca777ea96, 963685dd31, e36545058d, 44527140f0, 0eaaa1fb31, 1846f629a5, 83af5af3c6, 418a0ddbf2, 6cfbfda014, 91648f98ad, 71e1637c5f, 59cd2ce5aa, d783edd57a, 1ce8a58167, 92e5c4792b, d7f36733bc, 435d56e7c5, bf3b24867c, ec70127050, 547cb340bd, c20b14494a, 696b1a585c, 23442ff1a7, db4046e02a, fc1b5ae4da, 5411fd7497, f6488f7024, e3ada6c5da, d61d702ccd, 4d42cb3cdb, 111b55b397, c90bc15959, 1804e3e9c0, 24f69acd9e, 98d0a963dc, 665890d410, 545886561f, 68eec01dbc, 02621fe02e, 6ad72446d1, 1d0a43669b, 71aabc8c29, 6b98579bfb, 375ac2ffa6, c6ca650767, 2336148d0d, 12178abf4d, b8705ae086, b83d7478ef, 4fd6d51d21, d195bdb66d, 50dbb51627, 2f369e175c, 47e03a7da3, d7b84a2e7a, e79425bc21, 7710468f37, 210d7fdbc8, ba8e7188ca, 271323b0f4, 7b306f3ba0, 73805d44ad, f19772907d, 616f0176c9, 9f4f5d319e, dcbc4577c2, f4d8e18bf7, b3e001ebfb, 524821c0b0, a292a607c2, 81894c0a90, 0a7def18cd, 1e969ba13b, ef62718a27, aadd9e50f0, d9e89746a4, 557ea5d7f6, 4cc3b3ec0f, a960471ef4
@@ -1,7 +1,7 @@
---
Language: Cpp
AccessModifierOffset: -4
AlignAfterOpenBracket: AlwaysBreak
AlignAfterOpenBracket: BlockIndent
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
AlignEscapedNewlinesLeft: true
@@ -18,20 +18,8 @@ AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: true
BinPackArguments: false
BinPackParameters: false
BraceWrapping:
AfterClass: true
AfterControlStatement: true
AfterEnum: false
AfterFunction: true
AfterNamespace: false
AfterObjCDeclaration: true
AfterStruct: true
AfterUnion: true
BeforeCatch: true
BeforeElse: true
IndentBraces: false
BreakBeforeBinaryOperators: false
BreakBeforeBraces: Custom
BreakBeforeBraces: WebKit
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: true
ColumnLimit: 120
@@ -43,6 +31,7 @@ Cpp11BracedListStyle: true
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
FixNamespaceComments: true
ForEachMacros: [ Q_FOREACH, BOOST_FOREACH ]
IncludeCategories:
- Regex: '^<(BeastConfig)'
@@ -58,6 +47,8 @@ IndentCaseLabels: true
IndentFunctionDeclarationAfterType: false
IndentWidth: 4
IndentWrappedFunctionNames: false
IndentRequiresClause: true
RequiresClausePosition: OwnLine
KeepEmptyLinesAtTheStartOfBlocks: false
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
@@ -70,6 +61,7 @@ PenaltyBreakString: 1000
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 200
PointerAlignment: Left
QualifierAlignment: Right
ReflowComments: true
SortIncludes: true
SpaceAfterCStyleCast: false
.clang-tidy (new file, 118 lines)
@@ -0,0 +1,118 @@
---
Checks: '-*,
  bugprone-argument-comment,
  bugprone-assert-side-effect,
  bugprone-bad-signal-to-kill-thread,
  bugprone-bool-pointer-implicit-conversion,
  bugprone-copy-constructor-init,
  bugprone-dangling-handle,
  bugprone-dynamic-static-initializers,
  bugprone-fold-init-type,
  bugprone-forward-declaration-namespace,
  bugprone-inaccurate-erase,
  bugprone-incorrect-roundings,
  bugprone-infinite-loop,
  bugprone-integer-division,
  bugprone-lambda-function-name,
  bugprone-macro-parentheses,
  bugprone-macro-repeated-side-effects,
  bugprone-misplaced-operator-in-strlen-in-alloc,
  bugprone-misplaced-pointer-arithmetic-in-alloc,
  bugprone-misplaced-widening-cast,
  bugprone-move-forwarding-reference,
  bugprone-multiple-statement-macro,
  bugprone-no-escape,
  bugprone-parent-virtual-call,
  bugprone-posix-return,
  bugprone-redundant-branch-condition,
  bugprone-shared-ptr-array-mismatch,
  bugprone-signal-handler,
  bugprone-signed-char-misuse,
  bugprone-sizeof-container,
  bugprone-sizeof-expression,
  bugprone-spuriously-wake-up-functions,
  bugprone-standalone-empty,
  bugprone-string-constructor,
  bugprone-string-integer-assignment,
  bugprone-string-literal-with-embedded-nul,
  bugprone-stringview-nullptr,
  bugprone-suspicious-enum-usage,
  bugprone-suspicious-include,
  bugprone-suspicious-memory-comparison,
  bugprone-suspicious-memset-usage,
  bugprone-suspicious-missing-comma,
  bugprone-suspicious-realloc-usage,
  bugprone-suspicious-semicolon,
  bugprone-suspicious-string-compare,
  bugprone-swapped-arguments,
  bugprone-terminating-continue,
  bugprone-throw-keyword-missing,
  bugprone-too-small-loop-variable,
  bugprone-undefined-memory-manipulation,
  bugprone-undelegated-constructor,
  bugprone-unhandled-exception-at-new,
  bugprone-unhandled-self-assignment,
  bugprone-unused-raii,
  bugprone-unused-return-value,
  bugprone-use-after-move,
  bugprone-virtual-near-miss,
  cppcoreguidelines-init-variables,
  cppcoreguidelines-prefer-member-initializer,
  cppcoreguidelines-pro-type-member-init,
  cppcoreguidelines-pro-type-static-cast-downcast,
  cppcoreguidelines-virtual-class-destructor,
  llvm-namespace-comment,
  misc-const-correctness,
  misc-definitions-in-headers,
  misc-misplaced-const,
  misc-redundant-expression,
  misc-static-assert,
  misc-throw-by-value-catch-by-reference,
  misc-unused-alias-decls,
  misc-unused-using-decls,
  modernize-concat-nested-namespaces,
  modernize-deprecated-headers,
  modernize-make-shared,
  modernize-make-unique,
  modernize-pass-by-value,
  modernize-use-emplace,
  modernize-use-equals-default,
  modernize-use-equals-delete,
  modernize-use-override,
  modernize-use-using,
  performance-faster-string-find,
  performance-for-range-copy,
  performance-implicit-conversion-in-loop,
  performance-inefficient-vector-operation,
  performance-move-const-arg,
  performance-move-constructor-init,
  performance-no-automatic-move,
  performance-trivially-destructible,
  readability-avoid-const-params-in-decls,
  readability-braces-around-statements,
  readability-const-return-type,
  readability-container-contains,
  readability-container-size-empty,
  readability-convert-member-functions-to-static,
  readability-duplicate-include,
  readability-else-after-return,
  readability-implicit-bool-conversion,
  readability-inconsistent-declaration-parameter-name,
  readability-make-member-function-const,
  readability-misleading-indentation,
  readability-non-const-parameter,
  readability-redundant-declaration,
  readability-redundant-member-init,
  readability-redundant-string-init,
  readability-simplify-boolean-expr,
  readability-static-accessed-through-instance,
  readability-static-definition-in-anonymous-namespace,
  readability-suspicious-call-argument
'

CheckOptions:
  readability-braces-around-statements.ShortStatementLines: 2

HeaderFilterRegex: '^.*/(src|unitests)/.*\.(h|hpp)$'
WarningsAsErrors: '*'
@@ -4,7 +4,22 @@ exec 1>&2

# paths to check and re-format
sources="src unittests"
formatter="clang-format-11 -i"
formatter="clang-format -i"
version=$($formatter --version | grep -o '[0-9\.]*')

if [[ "16.0.0" > "$version" ]]; then
cat <<EOF

ERROR
-----------------------------------------------------------------------------
A minimum of version 16 of `clang-format` is required.
Your version is $version.
Please fix paths and run again.
-----------------------------------------------------------------------------

EOF
exit 2
fi

first=$(git diff $sources)
find $sources -type f \( -name '*.cpp' -o -name '*.h' -o -name '*.ipp' \) -print0 | xargs -0 $formatter
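Note: inside `[[ ]]`, bash's `>` compares strings lexicographically, so a hypothetical reported version of 9.0.0 would sort above "16.0.0" and slip past this gate. A version-aware comparison avoids that; a minimal sketch, assuming GNU coreutils `sort -V` is available:

required="16.0.0"
version=$(clang-format --version | grep -o '[0-9.]*' | head -n1)
# if the smallest of {required, actual} is not the required version, then actual < required
if [ "$(printf '%s\n%s\n' "$required" "$version" | sort -V | head -n1)" != "$required" ]; then
    echo "A minimum of clang-format $required is required, found $version" >&2
    exit 2
fi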
.github/actions/build_clio/action.yml (vendored, new file, 37 lines)
@@ -0,0 +1,37 @@
name: Build clio
description: Build clio in build directory
inputs:
  conan_profile:
    description: Conan profile name
    required: true
    default: default
  conan_cache_hit:
    description: Whether conan cache has been downloaded
    required: true
runs:
  using: composite
  steps:
    - name: Get number of threads on mac
      id: mac_threads
      if: ${{ runner.os == 'macOS' }}
      shell: bash
      run: echo "num=$(($(sysctl -n hw.logicalcpu) - 2))" >> $GITHUB_OUTPUT

    - name: Get number of threads on Linux
      id: linux_threads
      if: ${{ runner.os == 'Linux' }}
      shell: bash
      run: echo "num=$(($(nproc) - 2))" >> $GITHUB_OUTPUT

    - name: Build Clio
      shell: bash
      env:
        BUILD_OPTION: "${{ inputs.conan_cache_hit == 'true' && 'missing' || '' }}"
        LINT: "${{ runner.os == 'Linux' && 'True' || 'False' }}"
      run: |
        mkdir -p build
        cd build
        threads_num=${{ steps.mac_threads.outputs.num || steps.linux_threads.outputs.num }}
        conan install .. -of . -b $BUILD_OPTION -s build_type=Release -o clio:tests=True -o clio:lint=$LINT --profile ${{ inputs.conan_profile }}
        cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release .. -G Ninja
        cmake --build . --parallel $threads_num
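The same sequence can be replayed outside CI; a minimal sketch of a local Release build with tests enabled, assuming conan 1.x, CMake and Ninja are installed and a conan profile named default exists:

mkdir -p build && cd build
# resolve dependencies through conan, building anything missing from source
conan install .. -of . -b missing -s build_type=Release -o clio:tests=True --profile default
# configure against the generated toolchain file and build with Ninja
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release .. -G Ninja
cmake --build . --parallel "$(nproc)"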
.github/actions/clang_format/action.yml (vendored, new file, 27 lines)
@@ -0,0 +1,27 @@
name: Check format
description: Check format using clang-format-16
runs:
  using: composite
  steps:
    - name: Add llvm repo
      run: |
        echo 'deb http://apt.llvm.org/focal/ llvm-toolchain-focal-16 main' | sudo tee -a /etc/apt/sources.list
        wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
      shell: bash

    - name: Install packages
      run: |
        sudo apt update -qq
        sudo apt install -y jq clang-format-16
      shell: bash

    - name: Run formatter
      run: |
        find src unittests -type f \( -name '*.cpp' -o -name '*.h' -o -name '*.ipp' \) -print0 | xargs -0 clang-format-16 -i
      shell: bash

    - name: Check for differences
      id: assert
      shell: bash
      run: |
        git diff --color --exit-code | tee "clang-format.patch"
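The formatter step can be run locally before pushing; a sketch, assuming clang-format-16 is on PATH:

# re-format sources in place, then fail if anything changed
find src unittests -type f \( -name '*.cpp' -o -name '*.h' -o -name '*.ipp' \) -print0 | xargs -0 clang-format-16 -i
git diff --exit-code   # a non-zero exit status means files needed re-formatting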
.github/actions/git_common_ancestor/action.yml (vendored, new file, 14 lines)
@@ -0,0 +1,14 @@
name: Git common ancestor
description: Find the closest common commit
outputs:
  commit:
    description: Hash of commit
    value: ${{ steps.find_common_ancestor.outputs.commit }}
runs:
  using: composite
  steps:
    - name: Find common git ancestor
      id: find_common_ancestor
      shell: bash
      run: |
        echo "commit=$(git merge-base --fork-point origin/develop)" >> $GITHUB_OUTPUT
.github/actions/lint/action.yml (vendored, deleted, 13 lines)
@@ -1,13 +0,0 @@
runs:
  using: composite
  steps:
    # Github's ubuntu-20.04 image already has clang-format-11 installed
    - run: |
        find src unittests -type f \( -name '*.cpp' -o -name '*.h' -o -name '*.ipp' \) -print0 | xargs -0 clang-format-11 -i
      shell: bash

    - name: Check for differences
      id: assert
      shell: bash
      run: |
        git diff --color --exit-code | tee "clang-format.patch"
.github/actions/restore_cache/action.yml (vendored, new file, 50 lines)
@@ -0,0 +1,50 @@
name: Restore cache
description: Find and restores conan and ccache cache
inputs:
  conan_dir:
    description: Path to .conan directory
    required: true
  ccache_dir:
    description: Path to .ccache directory
    required: true
outputs:
  conan_hash:
    description: Hash to use as a part of conan cache key
    value: ${{ steps.conan_hash.outputs.hash }}
  conan_cache_hit:
    description: True if conan cache has been downloaded
    value: ${{ steps.conan_cache.outputs.cache-hit }}
  ccache_cache_hit:
    description: True if ccache cache has been downloaded
    value: ${{ steps.ccache_cache.outputs.cache-hit }}
runs:
  using: composite
  steps:
    - name: Find common commit
      id: git_common_ancestor
      uses: ./.github/actions/git_common_ancestor

    - name: Calculate conan hash
      id: conan_hash
      shell: bash
      run: |
        conan info . -j info.json -o clio:tests=True
        packages_info=$(cat info.json | jq '.[] | "\(.display_name): \(.id)"' | grep -v 'clio')
        echo "$packages_info"
        hash=$(echo "$packages_info" | shasum -a 256 | cut -d ' ' -f 1)
        rm info.json
        echo "hash=$hash" >> $GITHUB_OUTPUT

    - name: Restore conan cache
      uses: actions/cache/restore@v3
      id: conan_cache
      with:
        path: ${{ inputs.conan_dir }}/data
        key: clio-conan_data-${{ runner.os }}-develop-${{ steps.conan_hash.outputs.hash }}

    - name: Restore ccache cache
      uses: actions/cache/restore@v3
      id: ccache_cache
      with:
        path: ${{ inputs.ccache_dir }}
        key: clio-ccache-${{ runner.os }}-develop-${{ steps.git_common_ancestor.outputs.commit }}
.github/actions/save_cache/action.yml (vendored, new file, 46 lines)
@@ -0,0 +1,46 @@
name: Save cache
description: Save conan and ccache cache for develop branch
inputs:
  conan_dir:
    description: Path to .conan directory
    required: true
  conan_hash:
    description: Hash to use as a part of conan cache key
    required: true
  conan_cache_hit:
    description: Whether conan cache has been downloaded
    required: true
  ccache_dir:
    description: Path to .ccache directory
    required: true
  ccache_cache_hit:
    description: Whether conan cache has been downloaded
    required: true
runs:
  using: composite
  steps:
    - name: Find common commit
      id: git_common_ancestor
      uses: ./.github/actions/git_common_ancestor

    - name: Cleanup conan directory from extra data
      if: ${{ inputs.conan_cache_hit != 'true' }}
      shell: bash
      run: |
        conan remove "*" -s -b -f

    - name: Save conan cache
      if: ${{ inputs.conan_cache_hit != 'true' }}
      uses: actions/cache/save@v3
      with:
        path: ${{ inputs.conan_dir }}/data
        key: clio-conan_data-${{ runner.os }}-develop-${{ inputs.conan_hash }}

    - name: Save ccache cache
      if: ${{ inputs.ccache_cache_hit != 'true' }}
      uses: actions/cache/save@v3
      with:
        path: ${{ inputs.ccache_dir }}
        key: clio-ccache-${{ runner.os }}-develop-${{ steps.git_common_ancestor.outputs.commit }}
.github/actions/setup_conan/action.yml (vendored, new file, 55 lines)
@@ -0,0 +1,55 @@
name: Setup conan
description: Setup conan profile and artifactory
outputs:
  conan_profile:
    description: Created conan profile name
    value: ${{ steps.conan_export_output.outputs.conan_profile }}
runs:
  using: composite
  steps:
    - name: On mac
      if: ${{ runner.os == 'macOS' }}
      shell: bash
      env:
        CONAN_PROFILE: clio_clang_14
      id: conan_setup_mac
      run: |
        echo "Creating $CONAN_PROFILE conan profile";
        clang_path="$(brew --prefix llvm@14)/bin/clang"
        clang_cxx_path="$(brew --prefix llvm@14)/bin/clang++"
        conan profile new $CONAN_PROFILE --detect --force
        conan profile update settings.compiler=clang $CONAN_PROFILE
        conan profile update settings.compiler.version=14 $CONAN_PROFILE
        conan profile update settings.compiler.cppstd=20 $CONAN_PROFILE
        conan profile update "conf.tools.build:compiler_executables={\"c\": \"$clang_path\", \"cpp\": \"$clang_cxx_path\"}" $CONAN_PROFILE
        conan profile update env.CC="$clang_path" $CONAN_PROFILE
        conan profile update env.CXX="$clang_cxx_path" $CONAN_PROFILE
        echo "created_conan_profile=$CONAN_PROFILE" >> $GITHUB_OUTPUT

    - name: On linux
      if: ${{ runner.os == 'Linux' }}
      shell: bash
      id: conan_setup_linux
      run: |
        conan profile new default --detect
        conan profile update settings.compiler.cppstd=20 default
        conan profile update settings.compiler.libcxx=libstdc++11 default
        echo "created_conan_profile=default" >> $GITHUB_OUTPUT

    - name: Export output variable
      shell: bash
      id: conan_export_output
      run: |
        echo "conan_profile=${{ steps.conan_setup_mac.outputs.created_conan_profile || steps.conan_setup_linux.outputs.created_conan_profile }}" >> $GITHUB_OUTPUT

    - name: Add conan-non-prod artifactory
      shell: bash
      run: |
        if [[ -z $(conan remote list | grep conan-non-prod) ]]; then
          echo "Adding conan-non-prod"
          conan remote add --insert 0 conan-non-prod http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
        else
          echo "Conan-non-prod is available"
        fi
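The Linux branch of this action can be mirrored on a developer machine to get an equivalent environment; a sketch using the same conan 1.x commands shown above:

# create and tune the default profile, then register the artifactory remote
conan profile new default --detect
conan profile update settings.compiler.cppstd=20 default
conan profile update settings.compiler.libcxx=libstdc++11 default
conan remote add --insert 0 conan-non-prod http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod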
.github/workflows/build.yml (vendored, 295 lines changed)
@@ -1,9 +1,9 @@
name: Build Clio
on:
push:
branches: [master, release/*, develop, develop-next]
branches: [master, release/*, develop]
pull_request:
branches: [master, release/*, develop, develop-next]
branches: [master, release/*, develop]
workflow_dispatch:

jobs:
@@ -13,200 +13,147 @@ jobs:
steps:
- uses: actions/checkout@v3
- name: Run clang-format
uses: ./.github/actions/lint
uses: ./.github/actions/clang_format

build_clio:
name: Build Clio
runs-on: [self-hosted, heavy]
build_mac:
name: Build macOS
needs: lint
strategy:
fail-fast: false
matrix:
type:
- suffix: deb
image: rippleci/clio-dpkg-builder:2022-09-17
script: dpkg
- suffix: rpm
image: rippleci/clio-rpm-builder:2022-09-17
script: rpm

container:
image: ${{ matrix.type.image }}
runs-on: [self-hosted, macOS]
env:
CCACHE_DIR: ${{ github.workspace }}/.ccache
CONAN_USER_HOME: ${{ github.workspace }}
steps:
- uses: actions/checkout@v3
with:
path: clio
fetch-depth: 0

- name: Clone Clio packaging repo
- name: Install packages
run: |
brew install llvm@14 pkg-config ninja bison cmake ccache jq

- name: Setup conan
uses: ./.github/actions/setup_conan
id: conan

- name: Restore cache
uses: ./.github/actions/restore_cache
id: restore_cache
with:
conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
ccache_dir: ${{ env.CCACHE_DIR }}

- name: Build Clio
uses: ./.github/actions/build_clio
with:
conan_profile: ${{ steps.conan.outputs.conan_profile }}
conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}

- name: Strip tests
run: strip build/clio_tests

- name: Upload clio_tests
uses: actions/upload-artifact@v3
with:
name: clio_tests_mac
path: build/clio_tests

- name: Save cache
uses: ./.github/actions/save_cache
with:
conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
conan_hash: ${{ steps.restore_cache.outputs.conan_hash }}
conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
ccache_dir: ${{ env.CCACHE_DIR }}
ccache_cache_hit: ${{ steps.restore_cache.outputs.ccache_cache_hit }}

build_linux:
name: Build linux
needs: lint
runs-on: [self-hosted, Linux]
container:
image: conanio/gcc11:1.61.0
options: --user root
env:
CCACHE_DIR: /root/.ccache
CONAN_USER_HOME: /root/
steps:
- name: Get Clio
uses: actions/checkout@v3
with:
path: clio-packages
repository: XRPLF/clio-packages
ref: main
fetch-depth: 0

- name: Build
shell: bash
- name: Add llvm repo
run: |
export CLIO_ROOT=$(realpath clio)
if [ ${{ matrix.type.suffix }} == "rpm" ]; then
source /opt/rh/devtoolset-11/enable
fi
cmake -S clio-packages -B clio-packages/build -DCLIO_ROOT=$CLIO_ROOT
cmake --build clio-packages/build --parallel $(nproc)
cp ./clio-packages/build/clio-prefix/src/clio-build/clio_tests .
mv ./clio-packages/build/*.${{ matrix.type.suffix }} .
- name: Artifact packages
echo 'deb http://apt.llvm.org/focal/ llvm-toolchain-focal-16 main' >> /etc/apt/sources.list
wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add -

- name: Install packages
run: |
apt update -qq
apt install -y jq clang-tidy-16

- name: Install ccache
run: |
wget https://github.com/ccache/ccache/releases/download/v4.8.3/ccache-4.8.3-linux-x86_64.tar.xz
tar xf ./ccache-4.8.3-linux-x86_64.tar.xz
mv ./ccache-4.8.3-linux-x86_64/ccache /usr/bin/ccache

- name: Fix git permissions
run: git config --global --add safe.directory $PWD

- name: Setup conan
uses: ./.github/actions/setup_conan

- name: Restore cache
uses: ./.github/actions/restore_cache
id: restore_cache
with:
conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
ccache_dir: ${{ env.CCACHE_DIR }}

- name: Build Clio
uses: ./.github/actions/build_clio
with:
conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}

- name: Strip tests
run: strip build/clio_tests

- name: Upload clio_tests
uses: actions/upload-artifact@v3
with:
name: clio_${{ matrix.type.suffix }}_packages
path: ${{ github.workspace }}/*.${{ matrix.type.suffix }}
name: clio_tests_linux
path: build/clio_tests

- name: Artifact clio_tests
uses: actions/upload-artifact@v3
- name: Save cache
uses: ./.github/actions/save_cache
with:
name: clio_tests-${{ matrix.type.suffix }}
path: ${{ github.workspace }}/clio_tests
conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
conan_hash: ${{ steps.restore_cache.outputs.conan_hash }}
conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
ccache_dir: ${{ env.CCACHE_DIR }}
ccache_cache_hit: ${{ steps.restore_cache.outputs.ccache_cache_hit }}

build_dev:
name: Build on Mac/Clang14 and run tests
needs: lint
continue-on-error: false
test_mac:
needs: build_mac
runs-on: [self-hosted, macOS]

steps:
- uses: actions/checkout@v3
- uses: actions/download-artifact@v3
with:
path: clio

- name: Check Boost cache
id: boost
uses: actions/cache@v3
with:
path: boost_1_77_0
key: ${{ runner.os }}-boost

- name: Build Boost
if: ${{ steps.boost.outputs.cache-hit != 'true' }}
name: clio_tests_mac
- name: Run clio_tests
run: |
rm -rf boost_1_77_0.tar.gz boost_1_77_0 # cleanup if needed first
curl -s -fOJL "https://boostorg.jfrog.io/artifactory/main/release/1.77.0/source/boost_1_77_0.tar.gz"
tar zxf boost_1_77_0.tar.gz
cd boost_1_77_0
./bootstrap.sh
./b2 define=BOOST_ASIO_HAS_STD_INVOKE_RESULT cxxflags="-std=c++20"

- name: Install dependencies
run: |
brew install llvm@14 pkg-config protobuf openssl ninja cassandra-cpp-driver bison cmake

- name: Setup environment for llvm-14
run: |
export PATH="/usr/local/opt/llvm@14/bin:$PATH"
export LDFLAGS="-L/usr/local/opt/llvm@14/lib -L/usr/local/opt/llvm@14/lib/c++ -Wl,-rpath,/usr/local/opt/llvm@14/lib/c++"
export CPPFLAGS="-I/usr/local/opt/llvm@14/include"

- name: Build clio
run: |
export BOOST_ROOT=$(pwd)/boost_1_77_0
cd clio
cmake -B build -DCMAKE_C_COMPILER='/usr/local/opt/llvm@14/bin/clang' -DCMAKE_CXX_COMPILER='/usr/local/opt/llvm@14/bin/clang++'
if ! cmake --build build -j; then
echo '# 🔥🔥 MacOS AppleClang build failed!💥' >> $GITHUB_STEP_SUMMARY
exit 1
fi
- name: Run Test
run: |
cd clio/build
chmod +x ./clio_tests
./clio_tests --gtest_filter="-BackendCassandraBaseTest*:BackendCassandraTest*:BackendCassandraFactoryTestWithDB*"

test_clio:
name: Test Clio
runs-on: [self-hosted, Linux]
needs: build_clio
strategy:
fail-fast: false
matrix:
suffix: [rpm, deb]
test_linux:
needs: build_linux
runs-on: [self-hosted, x-heavy]
steps:
- uses: actions/checkout@v3

- name: Get clio_tests artifact
uses: actions/download-artifact@v3
- uses: actions/download-artifact@v3
with:
name: clio_tests-${{ matrix.suffix }}

- name: Run tests
timeout-minutes: 10
uses: ./.github/actions/test

code_coverage:
name: Build on Linux and code coverage
needs: lint
continue-on-error: false
runs-on: ubuntu-22.04

steps:
- uses: actions/checkout@v3
with:
path: clio

- name: Check Boost cache
id: boost
uses: actions/cache@v3
with:
path: boost
key: ${{ runner.os }}-boost

- name: Build boost
if: steps.boost.outputs.cache-hit != 'true'
name: clio_tests_linux
- name: Run clio_tests
run: |
curl -s -OJL "https://boostorg.jfrog.io/artifactory/main/release/1.77.0/source/boost_1_77_0.tar.gz"
tar zxf boost_1_77_0.tar.gz
mv boost_1_77_0 boost
cd boost
./bootstrap.sh
./b2
- name: install deps
run: |
sudo apt-get -y install git pkg-config protobuf-compiler libprotobuf-dev libssl-dev wget build-essential doxygen bison flex autoconf clang-format gcovr
- name: Build clio
run: |
export BOOST_ROOT=$(pwd)/boost
cd clio
cmake -B build -DCODE_COVERAGE=on -DTEST_PARAMETER='--gtest_filter="-BackendCassandraBaseTest*:BackendCassandraTest*:BackendCassandraFactoryTestWithDB*"'
if ! cmake --build build -j$(nproc); then
echo '# 🔥Ubuntu build🔥 failed!💥' >> $GITHUB_STEP_SUMMARY
exit 1
fi
cd build
make clio_tests-ccov
- name: Code Coverage Summary Report
uses: irongut/CodeCoverageSummary@v1.2.0
with:
filename: clio/build/clio_tests-gcc-cov/out.xml
badge: true
output: both
format: markdown

- name: Save PR number and ccov report
run: |
mkdir -p ./UnitTestCoverage
echo ${{ github.event.number }} > ./UnitTestCoverage/NR
cp clio/build/clio_tests-gcc-cov/report.html ./UnitTestCoverage/report.html
cp code-coverage-results.md ./UnitTestCoverage/out.md
cat code-coverage-results.md > $GITHUB_STEP_SUMMARY
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
with:
files: clio/build/clio_tests-gcc-cov/out.xml

- uses: actions/upload-artifact@v3
with:
name: UnitTestCoverage
path: UnitTestCoverage/

- uses: actions/upload-artifact@v3
with:
name: code_coverage_report
path: clio/build/clio_tests-gcc-cov/out.xml
chmod +x ./clio_tests
./clio_tests --gtest_filter="-BackendCassandraBaseTest*:BackendCassandraTest*:BackendCassandraFactoryTestWithDB*"
.gitignore (vendored, 5 lines changed)
@@ -1,6 +1,9 @@
*clio*.log
build*/
/build*/
.build
.cache
.vscode
.python-version
CMakeUserPresets.json
config.json
src/main/impl/Build.cpp
CMake/Ccache.cmake (new file, 5 lines)
@@ -0,0 +1,5 @@
find_program (CCACHE_PATH "ccache")
if (CCACHE_PATH)
  set (CMAKE_CXX_COMPILER_LAUNCHER "${CCACHE_PATH}")
  message (STATUS "Using ccache: ${CCACHE_PATH}")
endif ()
CMake/CheckCompiler.cmake (new file, 42 lines)
@@ -0,0 +1,42 @@
if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
  if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 14)
    message (FATAL_ERROR "Clang 14+ required for building clio")
  endif ()
  set (is_clang TRUE)
elseif (CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")
  if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 14)
    message (FATAL_ERROR "AppleClang 14+ required for building clio")
  endif ()
  set (is_appleclang TRUE)
elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
  if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 11)
    message (FATAL_ERROR "GCC 11+ required for building clio")
  endif ()
  set (is_gcc TRUE)
else ()
  message (FATAL_ERROR "Supported compilers: AppleClang 14+, Clang 14+, GCC 11+")
endif ()

if (san)
  string (TOLOWER ${san} san)
  set (SAN_FLAG "-fsanitize=${san}")
  set (SAN_LIB "")
  if (is_gcc)
    if (san STREQUAL "address")
      set (SAN_LIB "asan")
    elseif (san STREQUAL "thread")
      set (SAN_LIB "tsan")
    elseif (san STREQUAL "memory")
      set (SAN_LIB "msan")
    elseif (san STREQUAL "undefined")
      set (SAN_LIB "ubsan")
    endif ()
  endif ()
  set (_saved_CRL ${CMAKE_REQUIRED_LIBRARIES})
  set (CMAKE_REQUIRED_LIBRARIES "${SAN_FLAG};${SAN_LIB}")
  CHECK_CXX_COMPILER_FLAG (${SAN_FLAG} COMPILER_SUPPORTS_SAN)
  set (CMAKE_REQUIRED_LIBRARIES ${_saved_CRL})
  if (NOT COMPILER_SUPPORTS_SAN)
    message (FATAL_ERROR "${san} sanitizer does not seem to be supported by your compiler")
  endif ()
endif ()
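The san cache variable is consumed at configure time, so a sanitized build would be requested roughly as follows (a sketch; this excerpt only shows the flag check, not where SAN_FLAG is ultimately applied):

# request an address-sanitized build; "thread", "memory" and "undefined" are the other accepted values
cmake -B build -Dsan=address
cmake --build build --parallel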
CMake/ClangTidy.cmake (new file, 31 lines)
@@ -0,0 +1,31 @@
if (lint)

  # Find clang-tidy binary
  if (DEFINED ENV{CLIO_CLANG_TIDY_BIN})
    set (_CLANG_TIDY_BIN $ENV{CLIO_CLANG_TIDY_BIN})
    if ((NOT EXISTS ${_CLANG_TIDY_BIN}) OR IS_DIRECTORY ${_CLANG_TIDY_BIN})
      message (FATAL_ERROR "$ENV{CLIO_CLANG_TIDY_BIN} no such file. Check CLIO_CLANG_TIDY_BIN env variable")
    endif ()
    message (STATUS "Using clang-tidy from CLIO_CLANG_TIDY_BIN")
  else ()
    find_program (_CLANG_TIDY_BIN NAMES "clang-tidy-16" "clang-tidy" REQUIRED)
  endif ()

  if (NOT _CLANG_TIDY_BIN)
    message (FATAL_ERROR
      "clang-tidy binary not found. Please set the CLIO_CLANG_TIDY_BIN environment variable or install clang-tidy.")
  endif ()

  # Support for https://github.com/matus-chochlik/ctcache
  find_program (CLANG_TIDY_CACHE_PATH NAMES "clang-tidy-cache")
  if (CLANG_TIDY_CACHE_PATH)
    set (_CLANG_TIDY_CMD
      "${CLANG_TIDY_CACHE_PATH};${_CLANG_TIDY_BIN}"
      CACHE STRING "A combined command to run clang-tidy with caching wrapper")
  else ()
    set (_CLANG_TIDY_CMD "${_CLANG_TIDY_BIN}")
  endif ()

  set (CMAKE_CXX_CLANG_TIDY "${_CLANG_TIDY_CMD};--quiet")
  message (STATUS "Using clang-tidy: ${CMAKE_CXX_CLANG_TIDY}")
endif ()
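With this module, a lint build would be configured roughly like this (a sketch; CLIO_CLANG_TIDY_BIN is optional and only needed to point at a non-default binary):

# use a specific clang-tidy binary and enable the lint option from CMakeLists.txt
CLIO_CLANG_TIDY_BIN=/usr/bin/clang-tidy-16 cmake -B build -Dlint=TRUE
cmake --build build --parallel   # clang-tidy now runs alongside compilation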
@@ -2,32 +2,38 @@
write version to source
#]===================================================================]

find_package(Git REQUIRED)
find_package (Git REQUIRED)

set(GIT_COMMAND rev-parse --short HEAD)
execute_process(COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} OUTPUT_VARIABLE REV OUTPUT_STRIP_TRAILING_WHITESPACE)
set (GIT_COMMAND rev-parse --short HEAD)
execute_process (COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND}
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
OUTPUT_VARIABLE REV OUTPUT_STRIP_TRAILING_WHITESPACE)

set(GIT_COMMAND branch --show-current)
execute_process(COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} OUTPUT_VARIABLE BRANCH OUTPUT_STRIP_TRAILING_WHITESPACE)
set (GIT_COMMAND branch --show-current)
execute_process (COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND}
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
OUTPUT_VARIABLE BRANCH OUTPUT_STRIP_TRAILING_WHITESPACE)

if(BRANCH STREQUAL "")
set(BRANCH "dev")
endif()
if (BRANCH STREQUAL "")
set (BRANCH "dev")
endif ()

if(NOT (BRANCH MATCHES master OR BRANCH MATCHES release/*)) # for develop and any other branch name YYYYMMDDHMS-<branch>-<git-ref>
execute_process(COMMAND date +%Y%m%d%H%M%S OUTPUT_VARIABLE DATE OUTPUT_STRIP_TRAILING_WHITESPACE)
set(VERSION "${DATE}-${BRANCH}-${REV}")
else()
set(GIT_COMMAND describe --tags)
execute_process(COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} OUTPUT_VARIABLE TAG_VERSION OUTPUT_STRIP_TRAILING_WHITESPACE)
set(VERSION "${TAG_VERSION}-${REV}")
endif()
if (NOT (BRANCH MATCHES master OR BRANCH MATCHES release/*)) # for develop and any other branch name YYYYMMDDHMS-<branch>-<git-rev>
execute_process (COMMAND date +%Y%m%d%H%M%S OUTPUT_VARIABLE DATE OUTPUT_STRIP_TRAILING_WHITESPACE)
set (VERSION "${DATE}-${BRANCH}-${REV}")
else ()
set (GIT_COMMAND describe --tags)
execute_process (COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND}
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
OUTPUT_VARIABLE TAG_VERSION OUTPUT_STRIP_TRAILING_WHITESPACE)
set (VERSION "${TAG_VERSION}-${REV}")
endif ()

if(CMAKE_BUILD_TYPE MATCHES Debug)
set(VERSION "${VERSION}+DEBUG")
endif()
if (CMAKE_BUILD_TYPE MATCHES Debug)
set (VERSION "${VERSION}+DEBUG")
endif ()

message(STATUS "Build version: ${VERSION}")
set(clio_version "${VERSION}")
message (STATUS "Build version: ${VERSION}")
set (clio_version "${VERSION}")

configure_file(CMake/Build.cpp.in ${CMAKE_SOURCE_DIR}/src/main/impl/Build.cpp)
configure_file (CMake/Build.cpp.in ${CMAKE_SOURCE_DIR}/src/main/impl/Build.cpp)
@@ -1,42 +1,45 @@
# call add_converage(module_name) to add coverage targets for the given module
function(add_converage module)
if("${CMAKE_C_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang"
# call add_coverage(module_name) to add coverage targets for the given module
function (add_coverage module)
if ("${CMAKE_C_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang"
OR "${CMAKE_CXX_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang")
message("[Coverage] Building with llvm Code Coverage Tools")
message ("[Coverage] Building with llvm Code Coverage Tools")
# Using llvm gcov ; llvm install by xcode
set(LLVM_COV_PATH /Library/Developer/CommandLineTools/usr/bin)
if(NOT EXISTS ${LLVM_COV_PATH}/llvm-cov)
message(FATAL_ERROR "llvm-cov not found! Aborting.")
endif()
set (LLVM_COV_PATH /Library/Developer/CommandLineTools/usr/bin)
if (NOT EXISTS ${LLVM_COV_PATH}/llvm-cov)
message (FATAL_ERROR "llvm-cov not found! Aborting.")
endif ()

# set Flags
target_compile_options(${module} PRIVATE -fprofile-instr-generate
-fcoverage-mapping)
target_link_options(${module} PUBLIC -fprofile-instr-generate
target_compile_options (${module} PRIVATE
-fprofile-instr-generate
-fcoverage-mapping)

target_compile_options(clio PRIVATE -fprofile-instr-generate
target_link_options (${module} PUBLIC
-fprofile-instr-generate
-fcoverage-mapping)
target_link_options(clio PUBLIC -fprofile-instr-generate

target_compile_options (clio PRIVATE
-fprofile-instr-generate
-fcoverage-mapping)

target_link_options (clio PUBLIC
-fprofile-instr-generate
-fcoverage-mapping)

# llvm-cov
add_custom_target(
${module}-ccov-preprocessing
add_custom_target (${module}-ccov-preprocessing
COMMAND LLVM_PROFILE_FILE=${module}.profraw $<TARGET_FILE:${module}>
COMMAND ${LLVM_COV_PATH}/llvm-profdata merge -sparse ${module}.profraw -o
${module}.profdata
DEPENDS ${module})

add_custom_target(
${module}-ccov-show
add_custom_target (${module}-ccov-show
COMMAND ${LLVM_COV_PATH}/llvm-cov show $<TARGET_FILE:${module}>
-instr-profile=${module}.profdata -show-line-counts-or-regions
DEPENDS ${module}-ccov-preprocessing)

# add summary for CI parse
add_custom_target(
${module}-ccov-report
add_custom_target (${module}-ccov-report
COMMAND
${LLVM_COV_PATH}/llvm-cov report $<TARGET_FILE:${module}>
-instr-profile=${module}.profdata
@@ -45,8 +48,7 @@ function(add_converage module)
DEPENDS ${module}-ccov-preprocessing)

# exclude libs and unittests self
add_custom_target(
${module}-ccov
add_custom_target (${module}-ccov
COMMAND
${LLVM_COV_PATH}/llvm-cov show $<TARGET_FILE:${module}>
-instr-profile=${module}.profdata -show-line-counts-or-regions
@@ -54,38 +56,36 @@ function(add_converage module)
-ignore-filename-regex=".*_makefiles|.*unittests|.*_deps" > /dev/null 2>&1
DEPENDS ${module}-ccov-preprocessing)

add_custom_command(
add_custom_command (
TARGET ${module}-ccov
POST_BUILD
COMMENT
"Open ${module}-llvm-cov/index.html in your browser to view the coverage report."
)
elseif("${CMAKE_C_COMPILER_ID}" MATCHES "GNU" OR "${CMAKE_CXX_COMPILER_ID}"
MATCHES "GNU")
message("[Coverage] Building with Gcc Code Coverage Tools")
elseif ("${CMAKE_C_COMPILER_ID}" MATCHES "GNU" OR "${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU")
message ("[Coverage] Building with Gcc Code Coverage Tools")

find_program(GCOV_PATH gcov)
if(NOT GCOV_PATH)
message(FATAL_ERROR "gcov not found! Aborting...")
endif() # NOT GCOV_PATH
find_program(GCOVR_PATH gcovr)
if(NOT GCOVR_PATH)
message(FATAL_ERROR "gcovr not found! Aborting...")
endif() # NOT GCOVR_PATH
find_program (GCOV_PATH gcov)
if (NOT GCOV_PATH)
message (FATAL_ERROR "gcov not found! Aborting...")
endif () # NOT GCOV_PATH
find_program (GCOVR_PATH gcovr)
if (NOT GCOVR_PATH)
message (FATAL_ERROR "gcovr not found! Aborting...")
endif () # NOT GCOVR_PATH

set(COV_OUTPUT_PATH ${module}-gcc-cov)
target_compile_options(${module} PRIVATE -fprofile-arcs -ftest-coverage
set (COV_OUTPUT_PATH ${module}-gcc-cov)
target_compile_options (${module} PRIVATE -fprofile-arcs -ftest-coverage
-fPIC)
target_link_libraries(${module} PRIVATE gcov)
target_link_libraries (${module} PRIVATE gcov)

target_compile_options(clio PRIVATE -fprofile-arcs -ftest-coverage
target_compile_options (clio PRIVATE -fprofile-arcs -ftest-coverage
-fPIC)
target_link_libraries(clio PRIVATE gcov)
target_link_libraries (clio PRIVATE gcov)
# this target is used for CI as well generate the summary out.xml will send
# to github action to generate markdown, we can paste it to comments or
# readme
add_custom_target(
${module}-ccov
add_custom_target (${module}-ccov
COMMAND ${module} ${TEST_PARAMETER}
COMMAND rm -rf ${COV_OUTPUT_PATH}
COMMAND mkdir ${COV_OUTPUT_PATH}
@@ -102,8 +102,7 @@ function(add_converage module)
COMMENT "Running gcovr to produce Cobertura code coverage report.")

# generate the detail report
add_custom_target(
${module}-ccov-report
add_custom_target (${module}-ccov-report
COMMAND ${module} ${TEST_PARAMETER}
COMMAND rm -rf ${COV_OUTPUT_PATH}
COMMAND mkdir ${COV_OUTPUT_PATH}
@@ -114,13 +113,13 @@ function(add_converage module)
--exclude='${PROJECT_BINARY_DIR}/'
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
COMMENT "Running gcovr to produce Cobertura code coverage report.")
add_custom_command(
add_custom_command (
TARGET ${module}-ccov-report
POST_BUILD
COMMENT
"Open ${COV_OUTPUT_PATH}/index.html in your browser to view the coverage report."
)
else()
message(FATAL_ERROR "Complier not support yet")
endif()
endfunction()
else ()
message (FATAL_ERROR "Complier not support yet")
endif ()
endfunction ()
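These coverage targets are what the code_coverage workflow job above drives; a sketch of local use with gcc and gcovr, mirroring that job:

cmake -B build -DCODE_COVERAGE=on
cmake --build build --parallel "$(nproc)"
# runs the tests and writes the Cobertura XML under clio_tests-gcc-cov/
cmake --build build --target clio_tests-ccov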
CMake/Docs.cmake (new file, 11 lines)
@@ -0,0 +1,11 @@
find_package (Doxygen REQUIRED)

set (DOXYGEN_IN ${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile)
set (DOXYGEN_OUT ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile)

configure_file (${DOXYGEN_IN} ${DOXYGEN_OUT} @ONLY)
add_custom_target (docs
  COMMAND ${DOXYGEN_EXECUTABLE} ${DOXYGEN_OUT}
  WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
  COMMENT "Generating API documentation with Doxygen"
  VERBATIM)
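Generating the documentation is then a matter of enabling the docs option and building the custom target; a sketch, assuming Doxygen is installed and this module is included when the option is set:

cmake -B build -Ddocs=TRUE
cmake --build build --target docs   # output lands in the build directory per the Doxyfile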
CMake/Settings.cmake (new file, 45 lines)
@@ -0,0 +1,45 @@
set(COMPILER_FLAGS
  -Wall
  -Wcast-align
  -Wdouble-promotion
  -Wextra
  -Werror
  -Wformat=2
  -Wimplicit-fallthrough
  -Wmisleading-indentation
  -Wno-narrowing
  -Wno-deprecated-declarations
  -Wno-dangling-else
  -Wno-unused-but-set-variable
  -Wnon-virtual-dtor
  -Wnull-dereference
  -Wold-style-cast
  -pedantic
  -Wpedantic
  -Wunused
)

if (is_gcc AND NOT lint)
  list(APPEND COMPILER_FLAGS
    -Wduplicated-branches
    -Wduplicated-cond
    -Wlogical-op
    -Wuseless-cast
  )
endif ()

if (is_clang)
  list(APPEND COMPILER_FLAGS
    -Wshadow # gcc is to aggressive with shadowing https://gcc.gnu.org/bugzilla/show_bug.cgi?id=78147
  )
endif ()

if (is_appleclang)
  list(APPEND COMPILER_FLAGS
    -Wreorder-init-list
  )
endif ()

# See https://github.com/cpp-best-practices/cppbestpractices/blob/master/02-Use_the_Tools_Available.md#gcc--clang for the flags description

target_compile_options (clio PUBLIC ${COMPILER_FLAGS})
CMake/SourceLocation.cmake (new file, 11 lines)
@@ -0,0 +1,11 @@
include (CheckIncludeFileCXX)

check_include_file_cxx ("source_location" SOURCE_LOCATION_AVAILABLE)
if (SOURCE_LOCATION_AVAILABLE)
  target_compile_definitions (clio PUBLIC "HAS_SOURCE_LOCATION")
endif ()

check_include_file_cxx ("experimental/source_location" EXPERIMENTAL_SOURCE_LOCATION_AVAILABLE)
if (EXPERIMENTAL_SOURCE_LOCATION_AVAILABLE)
  target_compile_definitions (clio PUBLIC "HAS_EXPERIMENTAL_SOURCE_LOCATION")
endif ()
@@ -1,6 +1,11 @@
set(Boost_USE_STATIC_LIBS ON)
set(Boost_USE_STATIC_RUNTIME ON)
set (Boost_USE_STATIC_LIBS ON)
set (Boost_USE_STATIC_RUNTIME ON)

find_package(Boost 1.75 COMPONENTS filesystem log_setup log thread system REQUIRED)

target_link_libraries(clio PUBLIC ${Boost_LIBRARIES})
find_package (Boost 1.82 REQUIRED
COMPONENTS
program_options
coroutine
system
log
log_setup
)
CMake/deps/OpenSSL.cmake (new file, 5 lines)
@@ -0,0 +1,5 @@
find_package (OpenSSL 1.1.1 REQUIRED)

set_target_properties (OpenSSL::SSL PROPERTIES
  INTERFACE_COMPILE_DEFINITIONS OPENSSL_NO_SSL2
)
@@ -1,24 +0,0 @@
From 5cd9d09d960fa489a0c4379880cd7615b1c16e55 Mon Sep 17 00:00:00 2001
From: CJ Cobb <ccobb@ripple.com>
Date: Wed, 10 Aug 2022 12:30:01 -0400
Subject: [PATCH] Remove bitset operator !=

---
src/ripple/protocol/Feature.h | 1 -
1 file changed, 1 deletion(-)

diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h
index b3ecb099b..6424be411 100644
--- a/src/ripple/protocol/Feature.h
+++ b/src/ripple/protocol/Feature.h
@@ -126,7 +126,6 @@ class FeatureBitset : private std::bitset<detail::numFeatures>
public:
using base::bitset;
using base::operator==;
- using base::operator!=;

using base::all;
using base::any;
--
2.32.0
@@ -1,11 +0,0 @@
include(CheckIncludeFileCXX)

check_include_file_cxx("source_location" SOURCE_LOCATION_AVAILABLE)
if(SOURCE_LOCATION_AVAILABLE)
target_compile_definitions(clio PUBLIC "HAS_SOURCE_LOCATION")
endif()

check_include_file_cxx("experimental/source_location" EXPERIMENTAL_SOURCE_LOCATION_AVAILABLE)
if(EXPERIMENTAL_SOURCE_LOCATION_AVAILABLE)
target_compile_definitions(clio PUBLIC "HAS_EXPERIMENTAL_SOURCE_LOCATION")
endif()
CMake/deps/Threads.cmake (new file, 2 lines)
@@ -0,0 +1,2 @@
set (THREADS_PREFER_PTHREAD_FLAG ON)
find_package (Threads)
@@ -1,153 +1 @@
find_package(ZLIB REQUIRED)

find_library(cassandra NAMES cassandra)
if(NOT cassandra)
message("System installed Cassandra cpp driver not found. Will build")
find_library(zlib NAMES zlib1g-dev zlib-devel zlib z)
if(NOT zlib)
message("zlib not found. will build")
add_library(zlib STATIC IMPORTED GLOBAL)
ExternalProject_Add(zlib_src
PREFIX ${nih_cache_path}
GIT_REPOSITORY https://github.com/madler/zlib.git
GIT_TAG v1.2.12
INSTALL_COMMAND ""
BUILD_BYPRODUCTS <BINARY_DIR>/${CMAKE_STATIC_LIBRARY_PREFIX}z.a
)
ExternalProject_Get_Property (zlib_src SOURCE_DIR)
ExternalProject_Get_Property (zlib_src BINARY_DIR)
set (zlib_src_SOURCE_DIR "${SOURCE_DIR}")
file (MAKE_DIRECTORY ${zlib_src_SOURCE_DIR}/include)
set_target_properties (zlib PROPERTIES
IMPORTED_LOCATION
${BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}z.a
INTERFACE_INCLUDE_DIRECTORIES
${SOURCE_DIR}/include)
add_dependencies(zlib zlib_src)
file(TO_CMAKE_PATH "${zlib_src_SOURCE_DIR}" zlib_src_SOURCE_DIR)
endif()
find_library(krb5 NAMES krb5-dev libkrb5-dev)
if(NOT krb5)
message("krb5 not found. will build")
add_library(krb5 STATIC IMPORTED GLOBAL)
ExternalProject_Add(krb5_src
PREFIX ${nih_cache_path}
GIT_REPOSITORY https://github.com/krb5/krb5.git
GIT_TAG krb5-1.20
UPDATE_COMMAND ""
CONFIGURE_COMMAND autoreconf src && CFLAGS=-fcommon ./src/configure --enable-static --disable-shared
BUILD_IN_SOURCE 1
BUILD_COMMAND make
INSTALL_COMMAND ""
BUILD_BYPRODUCTS <SOURCE_DIR>/lib/${CMAKE_STATIC_LIBRARY_PREFIX}krb5.a
)
message(${ep_lib_prefix}/krb5.a)
message(${CMAKE_STATIC_LIBRARY_PREFIX}krb5.a)
ExternalProject_Get_Property (krb5_src SOURCE_DIR)
ExternalProject_Get_Property (krb5_src BINARY_DIR)
set (krb5_src_SOURCE_DIR "${SOURCE_DIR}")
file (MAKE_DIRECTORY ${krb5_src_SOURCE_DIR}/include)
set_target_properties (krb5 PROPERTIES
IMPORTED_LOCATION
${SOURCE_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}krb5.a
INTERFACE_INCLUDE_DIRECTORIES
${SOURCE_DIR}/include)
add_dependencies(krb5 krb5_src)
file(TO_CMAKE_PATH "${krb5_src_SOURCE_DIR}" krb5_src_SOURCE_DIR)
endif()


find_library(libuv1 NAMES uv1 libuv1 liubuv1-dev libuv1:amd64)


if(NOT libuv1)
message("libuv1 not found, will build")
add_library(libuv1 STATIC IMPORTED GLOBAL)
ExternalProject_Add(libuv_src
PREFIX ${nih_cache_path}
GIT_REPOSITORY https://github.com/libuv/libuv.git
GIT_TAG v1.44.1
INSTALL_COMMAND ""
BUILD_BYPRODUCTS <BINARY_DIR>/${CMAKE_STATIC_LIBRARY_PREFIX}uv_a.a
)

ExternalProject_Get_Property (libuv_src SOURCE_DIR)
ExternalProject_Get_Property (libuv_src BINARY_DIR)
set (libuv_src_SOURCE_DIR "${SOURCE_DIR}")
file (MAKE_DIRECTORY ${libuv_src_SOURCE_DIR}/include)

set_target_properties (libuv1 PROPERTIES
IMPORTED_LOCATION
${BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}uv_a.a
INTERFACE_INCLUDE_DIRECTORIES
${SOURCE_DIR}/include)
add_dependencies(libuv1 libuv_src)

file(TO_CMAKE_PATH "${libuv_src_SOURCE_DIR}" libuv_src_SOURCE_DIR)
endif()
add_library (cassandra STATIC IMPORTED GLOBAL)
ExternalProject_Add(cassandra_src
PREFIX ${nih_cache_path}
GIT_REPOSITORY https://github.com/datastax/cpp-driver.git
GIT_TAG 2.16.2
CMAKE_ARGS
-DLIBUV_ROOT_DIR=${BINARY_DIR}
-DLIBUV_INCLUDE_DIR=${SOURCE_DIR}/include
-DCASS_BUILD_STATIC=ON
-DCASS_BUILD_SHARED=OFF
INSTALL_COMMAND ""
BUILD_BYPRODUCTS <BINARY_DIR>/${CMAKE_STATIC_LIBRARY_PREFIX}cassandra_static.a
)

ExternalProject_Get_Property (cassandra_src SOURCE_DIR)
ExternalProject_Get_Property (cassandra_src BINARY_DIR)
set (cassandra_src_SOURCE_DIR "${SOURCE_DIR}")
file (MAKE_DIRECTORY ${cassandra_src_SOURCE_DIR}/include)

set_target_properties (cassandra PROPERTIES
IMPORTED_LOCATION
${BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}cassandra_static.a
INTERFACE_INCLUDE_DIRECTORIES
${SOURCE_DIR}/include)
message("cass dirs")
message(${BINARY_DIR})
message(${SOURCE_DIR})
message(${BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}cassandra_static.a)
add_dependencies(cassandra cassandra_src)

if(NOT libuv1)
ExternalProject_Add_StepDependencies(cassandra_src build libuv1)
target_link_libraries(cassandra INTERFACE libuv1)
else()
target_link_libraries(cassandra INTERFACE ${libuv1})
endif()
if(NOT krb5)

ExternalProject_Add_StepDependencies(cassandra_src build krb5)
target_link_libraries(cassandra INTERFACE krb5)
else()
target_link_libraries(cassandra INTERFACE ${krb5})
endif()

if(NOT zlib)
ExternalProject_Add_StepDependencies(cassandra_src build zlib)
target_link_libraries(cassandra INTERFACE zlib)
else()
target_link_libraries(cassandra INTERFACE ${zlib})
endif()
set(OPENSSL_USE_STATIC_LIBS TRUE)
find_package(OpenSSL REQUIRED)
target_link_libraries(cassandra INTERFACE OpenSSL::SSL)

file(TO_CMAKE_PATH "${cassandra_src_SOURCE_DIR}" cassandra_src_SOURCE_DIR)
target_link_libraries(clio PUBLIC cassandra)
else()
message("Found system installed cassandra cpp driver")
message(${cassandra})
find_path(cassandra_includes NAMES cassandra.h REQUIRED)
message(${cassandra_includes})
get_filename_component(CASSANDRA_HEADER ${cassandra_includes}/cassandra.h REALPATH)
get_filename_component(CASSANDRA_HEADER_DIR ${CASSANDRA_HEADER} DIRECTORY)
target_link_libraries (clio PUBLIC ${cassandra})
target_include_directories(clio PUBLIC ${CASSANDRA_HEADER_DIR})
endif()
find_package (cassandra-cpp-driver REQUIRED)
@@ -1,22 +1,4 @@
FetchContent_Declare(
googletest
URL https://github.com/google/googletest/archive/609281088cfefc76f9d0ce82e1ff6c30cc3591e5.zip
)
find_package (GTest REQUIRED)

FetchContent_GetProperties(googletest)

if(NOT googletest_POPULATED)
FetchContent_Populate(googletest)
add_subdirectory(${googletest_SOURCE_DIR} ${googletest_BINARY_DIR} EXCLUDE_FROM_ALL)
endif()

target_link_libraries(clio_tests PUBLIC clio gmock_main)
target_include_directories(clio_tests PRIVATE unittests)

enable_testing()

include(GoogleTest)

#increase timeout for tests discovery to 10 seconds, by default it is 5s. As more unittests added, we start to hit this issue
#https://github.com/google/googletest/issues/3475
gtest_discover_tests(clio_tests DISCOVERY_TIMEOUT 10)
enable_testing ()
include (GoogleTest)
@@ -1,14 +1 @@
FetchContent_Declare(
libfmt
URL https://github.com/fmtlib/fmt/releases/download/9.1.0/fmt-9.1.0.zip
)

FetchContent_GetProperties(libfmt)

if(NOT libfmt_POPULATED)
FetchContent_Populate(libfmt)
add_subdirectory(${libfmt_SOURCE_DIR} ${libfmt_BINARY_DIR} EXCLUDE_FROM_ALL)
endif()

target_link_libraries(clio PUBLIC fmt)

find_package (fmt REQUIRED)
CMake/deps/libxrpl.cmake (new file, 1 line)
@@ -0,0 +1 @@
find_package (xrpl REQUIRED)
@@ -1,20 +0,0 @@
set(RIPPLED_REPO "https://github.com/ripple/rippled.git")
set(RIPPLED_BRANCH "1.9.2")
set(NIH_CACHE_ROOT "${CMAKE_CURRENT_BINARY_DIR}" CACHE INTERNAL "")
set(patch_command ! grep operator!= src/ripple/protocol/Feature.h || git apply < ${CMAKE_CURRENT_SOURCE_DIR}/CMake/deps/Remove-bitset-operator.patch)
message(STATUS "Cloning ${RIPPLED_REPO} branch ${RIPPLED_BRANCH}")
FetchContent_Declare(rippled
GIT_REPOSITORY "${RIPPLED_REPO}"
GIT_TAG "${RIPPLED_BRANCH}"
GIT_SHALLOW ON
PATCH_COMMAND "${patch_command}"
)

FetchContent_GetProperties(rippled)
if(NOT rippled_POPULATED)
FetchContent_Populate(rippled)
add_subdirectory(${rippled_SOURCE_DIR} ${rippled_BINARY_DIR} EXCLUDE_FROM_ALL)
endif()

target_link_libraries(clio PUBLIC xrpl_core grpc_pbufs)
target_include_directories(clio PUBLIC ${rippled_SOURCE_DIR}/src ) # TODO: Seems like this shouldn't be needed?
@@ -1,16 +1,14 @@
set(CLIO_INSTALL_DIR "/opt/clio")
set(CMAKE_INSTALL_PREFIX ${CLIO_INSTALL_DIR})
set (CLIO_INSTALL_DIR "/opt/clio")
set (CMAKE_INSTALL_PREFIX ${CLIO_INSTALL_DIR})

install(TARGETS clio_server DESTINATION bin)
# install(TARGETS clio_tests DESTINATION bin) # NOTE: Do we want to install the tests?
install (TARGETS clio_server DESTINATION bin)

#install(FILES example-config.json DESTINATION etc RENAME config.json)
file(READ example-config.json config)
string(REGEX REPLACE "./clio_log" "/var/log/clio/" config "${config}")
file(WRITE ${CMAKE_BINARY_DIR}/install-config.json "${config}")
install(FILES ${CMAKE_BINARY_DIR}/install-config.json DESTINATION etc RENAME config.json)
file (READ example-config.json config)
string (REGEX REPLACE "./clio_log" "/var/log/clio/" config "${config}")
file (WRITE ${CMAKE_BINARY_DIR}/install-config.json "${config}")
install (FILES ${CMAKE_BINARY_DIR}/install-config.json DESTINATION etc RENAME config.json)

configure_file("${CMAKE_SOURCE_DIR}/CMake/install/clio.service.in" "${CMAKE_BINARY_DIR}/clio.service")
configure_file ("${CMAKE_SOURCE_DIR}/CMake/install/clio.service.in" "${CMAKE_BINARY_DIR}/clio.service")

install(FILES "${CMAKE_BINARY_DIR}/clio.service" DESTINATION /lib/systemd/system)
install (FILES "${CMAKE_BINARY_DIR}/clio.service" DESTINATION /lib/systemd/system)
@@ -1,6 +0,0 @@
target_compile_options(clio
  PUBLIC -Wall
  -Werror
  -Wno-narrowing
  -Wno-deprecated-declarations
  -Wno-dangling-else)
332 CMakeLists.txt
@@ -1,65 +1,103 @@
cmake_minimum_required(VERSION 3.16.3)

project(clio)

if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 11)
  message(FATAL_ERROR "GCC 11+ required for building clio")
endif()
# ========================================================================== #
#                                  Options                                   #
# ========================================================================== #
option (verbose "Verbose build" FALSE)
option (tests "Build tests" FALSE)
option (docs "Generate doxygen docs" FALSE)
option (coverage "Build test coverage report" FALSE)
option (packaging "Create distribution packages" FALSE)
option (lint "Run clang-tidy checks during compilation" FALSE)
# ========================================================================== #
set (san "" CACHE STRING "Add sanitizer instrumentation")
set (CMAKE_EXPORT_COMPILE_COMMANDS TRUE)
set_property (CACHE san PROPERTY STRINGS ";undefined;memory;address;thread")
# ========================================================================== #

option(BUILD_TESTS "Build tests" TRUE)
# Include required modules
include (CMake/Ccache.cmake)
include (CheckCXXCompilerFlag)
include (CMake/ClangTidy.cmake)

option(VERBOSE "Verbose build" TRUE)
if(VERBOSE)
  set(CMAKE_VERBOSE_MAKEFILE TRUE)
  set(FETCHCONTENT_QUIET FALSE CACHE STRING "Verbose FetchContent()")
endif()
if (verbose)
  set (CMAKE_VERBOSE_MAKEFILE TRUE)
endif ()

if (packaging)
  add_definitions (-DPKG=1)
endif ()

if(PACKAGING)
  add_definitions(-DPKG=1)
endif()
add_library (clio)

# C++20 removed std::result_of, but Boost 1.75 still uses it.
add_definitions(-DBOOST_ASIO_HAS_STD_INVOKE_RESULT=1)
# Clio tweaks and checks
include (CMake/CheckCompiler.cmake)
include (CMake/Settings.cmake)
include (CMake/ClioVersion.cmake)
include (CMake/SourceLocation.cmake)

add_library(clio)
target_compile_features(clio PUBLIC cxx_std_20)
target_include_directories(clio PUBLIC src)
# Clio deps
include (CMake/deps/libxrpl.cmake)
include (CMake/deps/Boost.cmake)
include (CMake/deps/OpenSSL.cmake)
include (CMake/deps/Threads.cmake)
include (CMake/deps/libfmt.cmake)
include (CMake/deps/cassandra.cmake)

include(FetchContent)
include(ExternalProject)
include(CMake/settings.cmake)
include(CMake/ClioVersion.cmake)
include(CMake/deps/rippled.cmake)
include(CMake/deps/libfmt.cmake)
include(CMake/deps/Boost.cmake)
include(CMake/deps/cassandra.cmake)
include(CMake/deps/SourceLocation.cmake)
# TODO: Include directory will be wrong when installed.
target_include_directories (clio PUBLIC src)
target_compile_features (clio PUBLIC cxx_std_20)

target_sources(clio PRIVATE
target_link_libraries (clio
  PUBLIC Boost::boost
  PUBLIC Boost::coroutine
  PUBLIC Boost::program_options
  PUBLIC Boost::system
  PUBLIC Boost::log
  PUBLIC Boost::log_setup
  PUBLIC cassandra-cpp-driver::cassandra-cpp-driver
  PUBLIC fmt::fmt
  PUBLIC OpenSSL::Crypto
  PUBLIC OpenSSL::SSL
  PUBLIC xrpl::libxrpl

  INTERFACE Threads::Threads
)

if (is_gcc)
  # FIXME: needed on gcc for now
  target_compile_definitions (clio PUBLIC BOOST_ASIO_DISABLE_CONCEPTS)
endif ()

target_sources (clio PRIVATE
  ## Main
  src/main/impl/Build.cpp
  ## Backend
  src/backend/BackendInterface.cpp
  src/backend/LedgerCache.cpp
  ## NextGen Backend
  src/backend/cassandra/impl/Future.cpp
  src/backend/cassandra/impl/Cluster.cpp
  src/backend/cassandra/impl/Batch.cpp
  src/backend/cassandra/impl/Result.cpp
  src/backend/cassandra/impl/Tuple.cpp
  src/backend/cassandra/impl/SslContext.cpp
  src/backend/cassandra/Handle.cpp
  src/backend/cassandra/SettingsProvider.cpp
  src/data/BackendCounters.cpp
  src/data/BackendInterface.cpp
  src/data/LedgerCache.cpp
  src/data/cassandra/impl/Future.cpp
  src/data/cassandra/impl/Cluster.cpp
  src/data/cassandra/impl/Batch.cpp
  src/data/cassandra/impl/Result.cpp
  src/data/cassandra/impl/Tuple.cpp
  src/data/cassandra/impl/SslContext.cpp
  src/data/cassandra/Handle.cpp
  src/data/cassandra/SettingsProvider.cpp
  ## ETL
  src/etl/Source.cpp
  src/etl/ProbingSource.cpp
  src/etl/NFTHelpers.cpp
  src/etl/ETLService.cpp
  src/etl/ETLState.cpp
  src/etl/LoadBalancer.cpp
  src/etl/impl/ForwardCache.cpp
  ## Subscriptions
  src/subscriptions/SubscriptionManager.cpp
  ## Feed
  src/feed/SubscriptionManager.cpp
  ## Web
  src/web/impl/AdminVerificationStrategy.cpp
  src/web/IntervalSweepHandler.cpp
  ## RPC
  src/rpc/Errors.cpp
  src/rpc/Factories.cpp
@@ -68,9 +106,10 @@ target_sources(clio PRIVATE
  src/rpc/WorkQueue.cpp
  src/rpc/common/Specs.cpp
  src/rpc/common/Validators.cpp
  # RPC impl
  src/rpc/common/MetaProcessors.cpp
  src/rpc/common/impl/APIVersionParser.cpp
  src/rpc/common/impl/HandlerProvider.cpp
  ## RPC handler
  ## RPC handlers
  src/rpc/handlers/AccountChannels.cpp
  src/rpc/handlers/AccountCurrencies.cpp
  src/rpc/handlers/AccountInfo.cpp
@@ -81,11 +120,13 @@ target_sources(clio PRIVATE
  src/rpc/handlers/AccountTx.cpp
  src/rpc/handlers/BookChanges.cpp
  src/rpc/handlers/BookOffers.cpp
  src/rpc/handlers/DepositAuthorized.cpp
  src/rpc/handlers/GatewayBalances.cpp
  src/rpc/handlers/Ledger.cpp
  src/rpc/handlers/LedgerData.cpp
  src/rpc/handlers/LedgerEntry.cpp
  src/rpc/handlers/LedgerRange.cpp
  src/rpc/handlers/NFTsByIssuer.cpp
  src/rpc/handlers/NFTBuyOffers.cpp
  src/rpc/handlers/NFTHistory.cpp
  src/rpc/handlers/NFTInfo.cpp
@@ -94,90 +135,157 @@ target_sources(clio PRIVATE
  src/rpc/handlers/NoRippleCheck.cpp
  src/rpc/handlers/Random.cpp
  src/rpc/handlers/TransactionEntry.cpp
  src/rpc/handlers/Tx.cpp
  ## Util
  src/config/Config.cpp
  src/log/Logger.cpp
  src/util/config/Config.cpp
  src/util/log/Logger.cpp
  src/util/prometheus/Http.cpp
  src/util/prometheus/Label.cpp
  src/util/prometheus/Metrics.cpp
  src/util/prometheus/Prometheus.cpp
  src/util/Random.cpp
  src/util/Taggable.cpp)

add_executable(clio_server src/main/main.cpp)
target_link_libraries(clio_server PUBLIC clio)
# Clio server
add_executable (clio_server src/main/Main.cpp)
target_link_libraries (clio_server PRIVATE clio)
target_link_options(clio_server
  PRIVATE
    $<$<AND:$<NOT:$<BOOL:${APPLE}>>,$<NOT:$<BOOL:${san}>>>:-static-libstdc++ -static-libgcc>
)

if(BUILD_TESTS)
  set(TEST_TARGET clio_tests)
  add_executable(${TEST_TARGET}
# Unittesting
if (tests)
  set (TEST_TARGET clio_tests)
  add_executable (${TEST_TARGET}
    # Common
    unittests/Main.cpp
    unittests/Playground.cpp
    unittests/Logger.cpp
    unittests/Config.cpp
    unittests/ProfilerTest.cpp
    unittests/DOSGuard.cpp
    unittests/SubscriptionTest.cpp
    unittests/SubscriptionManagerTest.cpp
    unittests/LoggerTests.cpp
    unittests/ConfigTests.cpp
    unittests/ProfilerTests.cpp
    unittests/JsonUtilTests.cpp
    unittests/DOSGuardTests.cpp
    unittests/SubscriptionTests.cpp
    unittests/SubscriptionManagerTests.cpp
    unittests/util/TestObject.cpp
    unittests/util/StringUtils.cpp
    unittests/util/prometheus/CounterTests.cpp
    unittests/util/prometheus/GaugeTests.cpp
    unittests/util/prometheus/HttpTests.cpp
    unittests/util/prometheus/LabelTests.cpp
    unittests/util/prometheus/MetricsTests.cpp
    # ETL
    unittests/etl/ExtractionDataPipeTest.cpp
    unittests/etl/ExtractorTest.cpp
    unittests/etl/TransformerTest.cpp
    unittests/etl/ExtractionDataPipeTests.cpp
    unittests/etl/ExtractorTests.cpp
    unittests/etl/TransformerTests.cpp
    unittests/etl/CacheLoaderTests.cpp
    unittests/etl/AmendmentBlockHandlerTests.cpp
    unittests/etl/LedgerPublisherTests.cpp
    unittests/etl/ETLStateTests.cpp
    # RPC
    unittests/rpc/ErrorTests.cpp
    unittests/rpc/BaseTests.cpp
    unittests/rpc/RPCHelpersTest.cpp
    unittests/rpc/CountersTest.cpp
    unittests/rpc/AdminVerificationTest.cpp
    unittests/rpc/RPCHelpersTests.cpp
    unittests/rpc/CountersTests.cpp
    unittests/rpc/APIVersionTests.cpp
    unittests/rpc/ForwardingProxyTests.cpp
    unittests/rpc/WorkQueueTests.cpp
    unittests/rpc/AmendmentsTests.cpp
    unittests/rpc/JsonBoolTests.cpp
    ## RPC handlers
    unittests/rpc/handlers/DefaultProcessorTests.cpp
    unittests/rpc/handlers/TestHandlerTests.cpp
    unittests/rpc/handlers/AccountCurrenciesTest.cpp
    unittests/rpc/handlers/AccountLinesTest.cpp
    unittests/rpc/handlers/AccountTxTest.cpp
    unittests/rpc/handlers/AccountOffersTest.cpp
    unittests/rpc/handlers/AccountInfoTest.cpp
    unittests/rpc/handlers/AccountChannelsTest.cpp
    unittests/rpc/handlers/AccountNFTsTest.cpp
    unittests/rpc/handlers/BookOffersTest.cpp
    unittests/rpc/handlers/GatewayBalancesTest.cpp
    unittests/rpc/handlers/TxTest.cpp
    unittests/rpc/handlers/TransactionEntryTest.cpp
    unittests/rpc/handlers/LedgerEntryTest.cpp
    unittests/rpc/handlers/LedgerRangeTest.cpp
    unittests/rpc/handlers/NoRippleCheckTest.cpp
    unittests/rpc/handlers/ServerInfoTest.cpp
    unittests/rpc/handlers/PingTest.cpp
    unittests/rpc/handlers/RandomTest.cpp
    unittests/rpc/handlers/NFTInfoTest.cpp
    unittests/rpc/handlers/NFTBuyOffersTest.cpp
    unittests/rpc/handlers/NFTSellOffersTest.cpp
    unittests/rpc/handlers/NFTHistoryTest.cpp
    unittests/rpc/handlers/SubscribeTest.cpp
    unittests/rpc/handlers/UnsubscribeTest.cpp
    unittests/rpc/handlers/LedgerDataTest.cpp
    unittests/rpc/handlers/AccountObjectsTest.cpp
    unittests/rpc/handlers/BookChangesTest.cpp
    unittests/rpc/handlers/LedgerTest.cpp
    unittests/rpc/handlers/AccountCurrenciesTests.cpp
    unittests/rpc/handlers/AccountLinesTests.cpp
    unittests/rpc/handlers/AccountTxTests.cpp
    unittests/rpc/handlers/AccountOffersTests.cpp
    unittests/rpc/handlers/AccountInfoTests.cpp
    unittests/rpc/handlers/AccountChannelsTests.cpp
    unittests/rpc/handlers/AccountNFTsTests.cpp
    unittests/rpc/handlers/BookOffersTests.cpp
    unittests/rpc/handlers/DepositAuthorizedTests.cpp
    unittests/rpc/handlers/GatewayBalancesTests.cpp
    unittests/rpc/handlers/TxTests.cpp
    unittests/rpc/handlers/TransactionEntryTests.cpp
    unittests/rpc/handlers/LedgerEntryTests.cpp
    unittests/rpc/handlers/LedgerRangeTests.cpp
    unittests/rpc/handlers/NoRippleCheckTests.cpp
    unittests/rpc/handlers/ServerInfoTests.cpp
    unittests/rpc/handlers/PingTests.cpp
    unittests/rpc/handlers/RandomTests.cpp
    unittests/rpc/handlers/NFTInfoTests.cpp
    unittests/rpc/handlers/NFTBuyOffersTests.cpp
    unittests/rpc/handlers/NFTsByIssuerTest.cpp
    unittests/rpc/handlers/NFTSellOffersTests.cpp
    unittests/rpc/handlers/NFTHistoryTests.cpp
    unittests/rpc/handlers/SubscribeTests.cpp
    unittests/rpc/handlers/UnsubscribeTests.cpp
    unittests/rpc/handlers/LedgerDataTests.cpp
    unittests/rpc/handlers/AccountObjectsTests.cpp
    unittests/rpc/handlers/BookChangesTests.cpp
    unittests/rpc/handlers/LedgerTests.cpp
    unittests/rpc/handlers/VersionHandlerTests.cpp
    # Backend
    unittests/backend/BackendFactoryTest.cpp
    unittests/backend/cassandra/BaseTests.cpp
    unittests/backend/cassandra/BackendTests.cpp
    unittests/backend/cassandra/RetryPolicyTests.cpp
    unittests/backend/cassandra/SettingsProviderTests.cpp
    unittests/backend/cassandra/ExecutionStrategyTests.cpp
    unittests/backend/cassandra/AsyncExecutorTests.cpp
    unittests/webserver/ServerTest.cpp
    unittests/webserver/RPCExecutorTest.cpp)
include(CMake/deps/gtest.cmake)
    unittests/data/BackendFactoryTests.cpp
    unittests/data/BackendCountersTests.cpp
    unittests/data/cassandra/BaseTests.cpp
    unittests/data/cassandra/BackendTests.cpp
    unittests/data/cassandra/RetryPolicyTests.cpp
    unittests/data/cassandra/SettingsProviderTests.cpp
    unittests/data/cassandra/ExecutionStrategyTests.cpp
    unittests/data/cassandra/AsyncExecutorTests.cpp
    # Webserver
    unittests/web/AdminVerificationTests.cpp
    unittests/web/ServerTests.cpp
    unittests/web/RPCServerHandlerTests.cpp
    unittests/web/WhitelistHandlerTests.cpp
    unittests/web/SweepHandlerTests.cpp)

  # Test for dwarf5 bug on CI
  target_compile_options(clio PUBLIC -gdwarf-4)
  include (CMake/deps/gtest.cmake)

  # If CODE_COVERAGE is enabled, add clio_tests-ccov
  if(CODE_COVERAGE)
    include(CMake/coverage.cmake)
    add_coverage(${TEST_TARGET})
  endif()
endif()
  # See https://github.com/google/googletest/issues/3475
  gtest_discover_tests (clio_tests DISCOVERY_TIMEOUT 10)

include(CMake/install/install.cmake)
if(PACKAGING)
  include(CMake/packaging.cmake)
endif()
  # Fix for dwarf5 bug on CI
  target_compile_options (clio PUBLIC -gdwarf-4)

  target_compile_definitions (${TEST_TARGET} PUBLIC UNITTEST_BUILD)
  target_include_directories (${TEST_TARGET} PRIVATE unittests)
  target_link_libraries (${TEST_TARGET} PUBLIC clio gtest::gtest)

  # Generate `clio_tests-ccov` if coverage is enabled
  # Note: use `make clio_tests-ccov` to generate the report
  if (coverage)
    target_compile_definitions(${TEST_TARGET} PRIVATE COVERAGE_ENABLED)
    include (CMake/Coverage.cmake)
    add_coverage (${TEST_TARGET})
  endif ()
endif ()

# Enable the selected sanitizer if set via `san`
if (san)
  target_compile_options (clio
    PUBLIC
      # Sanitizers recommend a minimum of -O1 for reasonable performance
      $<$<CONFIG:Debug>:-O1>
      ${SAN_FLAG}
      -fno-omit-frame-pointer)
  target_compile_definitions (clio
    PUBLIC
      $<$<STREQUAL:${san},address>:SANITIZER=ASAN>
      $<$<STREQUAL:${san},thread>:SANITIZER=TSAN>
      $<$<STREQUAL:${san},memory>:SANITIZER=MSAN>
      $<$<STREQUAL:${san},undefined>:SANITIZER=UBSAN>)
  target_link_libraries (clio INTERFACE ${SAN_FLAG} ${SAN_LIB})
endif ()

# Generate `docs` target for doxygen documentation if enabled
# Note: use `make docs` to generate the documentation
if (docs)
  include (CMake/Docs.cmake)
endif ()

include (CMake/install/install.cmake)
if (packaging)
  include (CMake/packaging.cmake) # This file exists only on the build runner
endif ()
@@ -91,7 +91,7 @@ The button for that is near the bottom of the PR's page on GitHub.
This is a non-exhaustive list of recommended style guidelines. These are not always strictly enforced and serve as a way to keep the codebase coherent.

## Formatting
Code must conform to `clang-format` version 10, unless the result would be unreasonably difficult to read or maintain.
Code must conform to `clang-format` version 16, unless the result would be unreasonably difficult to read or maintain.
To change your code to conform, use `clang-format -i <your changed files>`.

## Avoid
15 Doxyfile
@@ -1,3 +1,16 @@
PROJECT_NAME = "Clio"
INPUT = src
INPUT = ../src ../unittests
EXCLUDE_PATTERNS = *Test*.cpp *Test*.h
RECURSIVE = YES
HAVE_DOT = YES

QUIET = YES
WARNINGS = NO
WARN_NO_PARAMDOC = NO
WARN_IF_INCOMPLETE_DOC = NO
WARN_IF_UNDOCUMENTED = NO

GENERATE_LATEX = NO
GENERATE_HTML = YES

SORT_MEMBERS_CTORS_1ST = YES
162 README.md
@@ -1,51 +1,105 @@
# Clio
Clio is an XRP Ledger API server. Clio is optimized for RPC calls, over WebSocket or JSON-RPC. Validated
historical ledger and transaction data are stored in a more space-efficient format,

Clio is an XRP Ledger API server. Clio is optimized for RPC calls, over WebSocket or JSON-RPC.
Validated historical ledger and transaction data are stored in a more space-efficient format,
using up to 4 times less space than rippled. Clio can be configured to store data in Apache Cassandra or ScyllaDB,
allowing for scalable read throughput. Multiple Clio nodes can share
access to the same dataset, allowing for a highly available cluster of Clio nodes,
without the need for redundant data storage or computation.
allowing for scalable read throughput. Multiple Clio nodes can share access to the same dataset,
allowing for a highly available cluster of Clio nodes, without the need for redundant data storage or computation.

Clio offers the full rippled API, with the caveat that Clio by default only returns validated data.
This means that `ledger_index` defaults to `validated` instead of `current` for all requests.
Other non-validated data is also not returned, such as information about queued transactions.
For requests that require access to the p2p network, such as `fee` or `submit`, Clio automatically forwards the request to a rippled node and propagates the response back to the client. To access non-validated data for *any* request, simply add `ledger_index: "current"` to the request, and Clio will forward the request to rippled.
For requests that require access to the p2p network, such as `fee` or `submit`, Clio automatically forwards the request to a rippled node and propagates the response back to the client.
To access non-validated data for *any* request, simply add `ledger_index: "current"` to the request, and Clio will forward the request to rippled.
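For illustration, such a request might look like the sketch below (assumptions, not from the docs: a Clio server listening on `localhost:51233` as in `example-config.json`, and `<account>` as a placeholder for a real address):

```sh
# Hypothetical example: request account data against the current (non-validated) ledger.
# The `ledger_index: "current"` field makes Clio forward the request to rippled.
curl -s http://localhost:51233 \
  -d '{"method": "account_info", "params": [{"account": "<account>", "ledger_index": "current"}]}'
```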
Clio does not connect to the peer-to-peer network. Instead, Clio extracts data from a group of specified rippled nodes. Running Clio requires access to at least one rippled node
from which data can be extracted. The rippled node does not need to be running on the same machine as Clio.

## Help
Feel free to open an [issue](https://github.com/XRPLF/clio/issues) if you have a feature request or something doesn't work as expected.
If you have any questions about building, running, contributing to, or using clio, you can always start a new [discussion](https://github.com/XRPLF/clio/discussions).

## Requirements
1. Access to a Cassandra cluster or ScyllaDB cluster. Can be local or remote.

2. Access to one or more rippled nodes. Can be local or remote.

## Building

Clio is built with CMake. Clio requires at least GCC-11/clang-14.0.0 (C++20), and Boost 1.75.0.
Clio is built with CMake and uses Conan for managing dependencies.
It is written in C++20 and therefore requires a modern compiler.

Use these instructions to build a Clio executable from the source. These instructions were tested on Ubuntu 20.04 LTS.
## Prerequisites

```sh
# Install dependencies
sudo apt-get -y install git pkg-config protobuf-compiler libprotobuf-dev libssl-dev wget build-essential bison flex autoconf cmake clang-format
# Install gcovr to run code coverage
sudo apt-get -y install gcovr
### Minimum Requirements

# Compile Boost
wget -O $HOME/boost_1_75_0.tar.gz https://boostorg.jfrog.io/artifactory/main/release/1.75.0/source/boost_1_75_0.tar.gz
tar xvzf $HOME/boost_1_75_0.tar.gz
cd $HOME/boost_1_75_0
./bootstrap.sh
./b2 -j$(nproc)
echo "export BOOST_ROOT=$HOME/boost_1_75_0" >> $HOME/.profile && source $HOME/.profile
- [Python 3.7](https://www.python.org/downloads/)
- [Conan 1.55](https://conan.io/downloads.html)
- [CMake 3.16](https://cmake.org/download/)
- [**Optional**] [GCovr](https://gcc.gnu.org/onlinedocs/gcc/Gcov.html) (needed for code coverage generation)

# Clone the Clio Git repository & build Clio
cd $HOME
git clone https://github.com/XRPLF/clio.git
cd $HOME/clio
cmake -B build && cmake --build build --parallel $(nproc)
| Compiler    | Version |
|-------------|---------|
| GCC         | 11      |
| Clang       | 14      |
| Apple Clang | 14.0.3  |

### Conan configuration

Clio does not require anything but default settings in your Conan profile (`~/.conan/profiles/default`). It's best to have no extra flags specified.
> Mac example:
```
[settings]
os=Macos
os_build=Macos
arch=armv8
arch_build=armv8
compiler=apple-clang
compiler.version=14
compiler.libcxx=libc++
build_type=Release
compiler.cppstd=20
```
> Linux example:
```
[settings]
os=Linux
os_build=Linux
arch=x86_64
arch_build=x86_64
compiler=gcc
compiler.version=11
compiler.libcxx=libstdc++11
build_type=Release
compiler.cppstd=20
```

### Artifactory

1. Make sure Artifactory is set up with Conan:
```sh
conan remote add --insert 0 conan-non-prod http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
```
Now you should be able to download the prebuilt `xrpl` package on some platforms.

2. Remove old packages you may have cached:
```sh
conan remove -f xrpl
```

## Building Clio

Navigate to Clio's root directory and run:
```sh
mkdir build && cd build
conan install .. --output-folder . --build missing --settings build_type=Release -o tests=True
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release ..
cmake --build . --parallel 8 # or without the number if you feel extra adventurous
```
If all goes well, `conan install` will find the required packages and `cmake` will do the rest. You should end up with `clio_server` and `clio_tests` in the `build` directory (the current directory).

> **Tip:** You can omit the `-o tests=True` in the `conan install` command above if you don't want to build `clio_tests`.

> **Tip:** To generate a Code Coverage report, include `-o coverage=True` in the `conan install` command above, along with `-o tests=True` to enable tests. After running the `cmake` commands, execute `make clio_tests-ccov`. The coverage report will be found at `clio_tests-llvm-cov/index.html`.

## Running
```sh
@@ -96,12 +150,12 @@ The parameters `ssl_cert_file` and `ssl_key_file` can also be added to the top l
An example of how to specify `ssl_cert_file` and `ssl_key_file` in the config:

```json
"server":{
"server": {
    "ip": "0.0.0.0",
    "port": 51233
},
"ssl_cert_file" : "/full/path/to/cert.file",
"ssl_key_file" : "/full/path/to/key.file"
"ssl_cert_file": "/full/path/to/cert.file",
"ssl_key_file": "/full/path/to/key.file"
```

Once your config files are ready, start rippled and Clio. It doesn't matter which you
@@ -172,6 +226,56 @@ which can cause high latencies. A possible alternative to this is to just deploy
a database in each region, and the Clio nodes in each region use their region's database.
This is effectively two systems.

Clio supports API versioning as [described here](https://xrpl.org/request-formatting.html#api-versioning).
It's possible to configure `minimum`, `maximum` and `default` version like so:
```json
"api_version": {
    "min": 1,
    "max": 2,
    "default": 1
}
```
All of the above are optional.
Clio will fall back to hardcoded defaults when these are not specified in the config file, or when the configured values are outside
of the minimum and maximum supported versions hardcoded in `src/rpc/common/APIVersion.h`.
> **Note:** See `example-config.json` for more details.

## Admin rights for requests

By default, Clio checks admin privileges using the IP address of the request (only `127.0.0.1` is considered to be an admin).
This is not very secure, because the IP could be spoofed.
For better security, an `admin_password` can be provided in the `server` section of Clio's config:
```json
"server": {
    "admin_password": "secret"
}
```
If a password is present in the config, Clio will check the Authorization header (if any) of each request for the password.
The Authorization header should contain the type `Password` followed by the password from the config, e.g. `Password secret`.
An exactly matching password gains admin rights for the request or a websocket connection.
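As a minimal sketch (assuming the `admin_password` above and a Clio server on `localhost:51233`; neither is guaranteed by your setup), an admin request over JSON-RPC might be issued like this:

```sh
# Hypothetical example: send the admin password from the config in the Authorization header.
curl -s http://localhost:51233 \
  -H 'Authorization: Password secret' \
  -d '{"method": "server_info", "params": [{}]}'
```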
## Prometheus metrics collection

Clio natively supports Prometheus metrics collection. It accepts Prometheus requests on the port configured in the `server` section of the config.
Prometheus metrics are enabled by default. To disable them, add `"prometheus_enabled": false` to the config.
It is important to know that Clio responds to Prometheus requests only if they are admin requests, so Prometheus should be configured to send the admin password in a header.
There is an example docker-compose file, with Prometheus and Grafana configs, in [examples/infrastructure](examples/infrastructure).
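To sanity-check the metrics endpoint by hand, something like the following sketch may work (assumptions, not confirmed by the docs: the standard Prometheus scrape path `/metrics`, and the admin setup described above):

```sh
# Hypothetical example: fetch Prometheus metrics as an admin request.
curl -s http://localhost:51233/metrics \
  -H 'Authorization: Password secret'
```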
## Using clang-tidy for static analysis

The minimum clang-tidy version required is 16.0.
Clang-tidy can be run by CMake while building the project.
To do so, provide the option `-o lint=True` to the `conan install` command:
```sh
conan install .. --output-folder . --build missing --settings build_type=Release -o tests=True -o lint=True
```
By default, CMake will try to find clang-tidy automatically on your system.
To force CMake to use a particular binary, set the `CLIO_CLANG_TIDY_BIN` environment variable to the path of the clang-tidy binary.
E.g.:
```sh
export CLIO_CLANG_TIDY_BIN=/opt/homebrew/opt/llvm@16/bin/clang-tidy
```

## Developing against `rippled` in standalone mode

If you wish to develop against a `rippled` instance running in standalone
@@ -1,14 +1,22 @@
/*
 * This is an example configuration file. Please do not use without modifying to suit your needs.
 */
{
    "database": {
        "type": "cassandra",
        "cassandra": {
            // This option can be used to set up a secure connect bundle connection
            "secure_connect_bundle": "[path/to/zip. ignore if using contact_points]",
            // The following options are used only if using contact_points
            "contact_points": "[ip. ignore if using secure_connect_bundle]",
            "port": "[port. ignore if using secure_connect_bundle]",
            "keyspace": "clio",
            // Authentication settings
            "username": "[username, if any]",
            "password": "[password, if any]",
            "max_requests_outstanding": 25000,
            // Other common settings
            "keyspace": "clio",
            "max_write_requests_outstanding": 25000,
            "max_read_requests_outstanding": 30000,
            "threads": 8
        }
    },
91 conanfile.py (new file)
@@ -0,0 +1,91 @@
from conan import ConanFile
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
import re


class Clio(ConanFile):
    name = 'clio'
    license = 'ISC'
    author = 'Alex Kremer <akremer@ripple.com>, John Freeman <jfreeman@ripple.com>'
    url = 'https://github.com/xrplf/clio'
    description = 'Clio RPC server'
    settings = 'os', 'compiler', 'build_type', 'arch'
    options = {
        'fPIC': [True, False],
        'verbose': [True, False],
        'tests': [True, False],      # build unit tests; create `clio_tests` binary
        'docs': [True, False],       # doxygen API docs; create custom target 'docs'
        'packaging': [True, False],  # create distribution packages
        'coverage': [True, False],   # build for test coverage report; create custom target `clio_tests-ccov`
        'lint': [True, False],       # run clang-tidy checks during compilation
    }

    requires = [
        'boost/1.82.0',
        'cassandra-cpp-driver/2.17.0',
        'fmt/10.1.1',
        'protobuf/3.21.12',
        'grpc/1.50.1',
        'openssl/1.1.1u',
        'xrpl/2.0.0-rc1',
    ]

    default_options = {
        'fPIC': True,
        'verbose': False,
        'tests': False,
        'packaging': False,
        'coverage': False,
        'lint': False,
        'docs': False,

        'xrpl/*:tests': False,
        'cassandra-cpp-driver/*:shared': False,
        'date/*:header_only': True,
        'grpc/*:shared': False,
        'grpc/*:secure': True,
        'libpq/*:shared': False,
        'lz4/*:shared': False,
        'openssl/*:shared': False,
        'protobuf/*:shared': False,
        'protobuf/*:with_zlib': True,
        'snappy/*:shared': False,
        'gtest/*:no_main': True,
    }

    exports_sources = (
        'CMakeLists.txt', 'CMake/*', 'src/*'
    )

    def requirements(self):
        if self.options.tests:
            self.requires('gtest/1.14.0')

    def configure(self):
        if self.settings.compiler == 'apple-clang':
            self.options['boost'].visibility = 'global'

    def layout(self):
        cmake_layout(self)
        # Fix this setting to follow the default introduced in Conan 1.48
        # to align with our build instructions.
        self.folders.generators = 'build/generators'

    generators = 'CMakeDeps'

    def generate(self):
        tc = CMakeToolchain(self)
        tc.variables['verbose'] = self.options.verbose
        tc.variables['tests'] = self.options.tests
        tc.variables['coverage'] = self.options.coverage
        tc.variables['lint'] = self.options.lint
        tc.variables['docs'] = self.options.docs
        tc.variables['packaging'] = self.options.packaging
        tc.generate()

    def build(self):
        cmake = CMake(self)
        cmake.configure()
        cmake.build()

    def package(self):
        cmake = CMake(self)
        cmake.install()
@@ -1,3 +1,6 @@
/*
 * This is an example configuration file. Please do not use without modifying to suit your needs.
 */
{
    "database": {
        "type": "cassandra",
@@ -9,9 +12,21 @@
        "table_prefix": "",
        "max_write_requests_outstanding": 25000,
        "max_read_requests_outstanding": 30000,
        "threads": 8
        "threads": 8,
        //
        // Advanced options. USE AT OWN RISK:
        // ---
        "core_connections_per_host": 1 // Defaults to 1
        //
        // The below options will use defaults from the cassandra driver if left unspecified.
        // See https://docs.datastax.com/en/developer/cpp-driver/2.17/api/struct.CassCluster/ for details.
        //
        // "queue_size_io": 2
        //
        // ---
    }
},
"allow_no_etl": false, // Allow Clio to run without a valid ETL source; otherwise Clio will stop if the ETL check fails
"etl_sources": [
    {
        "ip": "127.0.0.1",
@@ -20,21 +35,24 @@
    }
],
"dos_guard": {
    // Comma-separated list of IPs to exclude from rate limiting
    "whitelist": [
        "127.0.0.1"
    ], // comma-separated list of ips to exclude from rate limiting
    /* The below values are the default values and are only specified here
     * for documentation purposes. The rate limiter currently limits
     * connections and bandwidth per ip. The rate limiter looks at the raw
     * ip of a client connection, and so requests routed through a load
     * balancer will all have the same ip and be treated as a single client
     */
    "max_fetches": 1000000, // max bytes per ip per sweep interval
    "max_connections": 20, // max connections per ip
    "max_requests": 20, // max requests per ip
    "sweep_interval": 1 // time in seconds before resetting bytes per ip count
],
    //
    // The below values are the default values and are only specified here
    // for documentation purposes. The rate limiter currently limits
    // connections and bandwidth per IP. The rate limiter looks at the raw
    // IP of a client connection, and so requests routed through a load
    // balancer will all have the same IP and be treated as a single client.
    //
    "max_fetches": 1000000, // Max bytes per IP per sweep interval
    "max_connections": 20, // Max connections per IP
    "max_requests": 20, // Max requests per IP per sweep interval
    "sweep_interval": 1 // Time in seconds before resetting max_fetches and max_requests
},
"cache": {
    // Comma-separated list of peer nodes that Clio can use to download the cache from at startup
    "peers": [
        {
            "ip": "127.0.0.1",
@@ -45,11 +63,18 @@
"server": {
    "ip": "0.0.0.0",
    "port": 51233,
    /* Max number of requests to queue up before rejecting further requests.
     * Defaults to 0, which disables the limit
     */
    "max_queue_size": 500
    // Max number of requests to queue up before rejecting further requests.
    // Defaults to 0, which disables the limit.
    "max_queue_size": 500,
    // If a request contains an Authorization header, Clio will check whether it matches the prefix 'Password ' + the sha256 hash of this value
    // If it matches, the request is considered an admin request
    "admin_password": "xrp",
    // If local_admin is true, Clio will consider requests coming from 127.0.0.1 as admin requests
    // It's true by default unless admin_password is set; 'local_admin': true and 'admin_password' cannot be set at the same time
    "local_admin": false
},
// Overrides the log level on a per logging channel basis.
// Defaults to the global "log_level" for each unspecified channel.
"log_channels": [
    {
        "channel": "Backend",
@@ -76,18 +101,26 @@
        "log_level": "trace"
    }
],
"prometheus_enabled": true,
"log_level": "info",
"log_format": "%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% %Message%", // This is the default format
// Log format (this is the default format)
"log_format": "%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% %Message%",
"log_to_console": true,
"log_directory": "./clio_log",
// Clio logs to file in the specified directory only if "log_directory" is set
// "log_directory": "./clio_log",
"log_rotation_size": 2048,
"log_directory_max_size": 51200,
"log_rotation_hour_interval": 12,
"log_tag_style": "uint",
"extractor_threads": 8,
"read_only": false,
//"start_sequence": [integer] the ledger index to start from,
//"finish_sequence": [integer] the ledger index to finish at,
//"ssl_cert_file" : "/full/path/to/cert.file",
//"ssl_key_file" : "/full/path/to/key.file"
// "start_sequence": [integer] the ledger index to start from,
// "finish_sequence": [integer] the ledger index to finish at,
// "ssl_cert_file" : "/full/path/to/cert.file",
// "ssl_key_file" : "/full/path/to/key.file"
"api_version": {
    "min": 1, // Minimum API version supported (could be 1 or 2)
    "max": 2, // Maximum API version supported (could be 1 or 2, but >= min)
    "default": 1 // Clio behaves the same as rippled by default
}
}
25 examples/infrastructure/README.md (new file)
@@ -0,0 +1,25 @@
# Example of clio monitoring infrastructure

This directory contains an example of Docker-based infrastructure to collect and visualise metrics from clio.

The structure of the directory:
- `compose.yaml`
  Docker-compose file with Prometheus and Grafana set up.
- `prometheus.yaml`
  Defines metrics collection from Clio and Prometheus itself.
  Demonstrates how to set up a Clio target and Clio's admin authorisation in Prometheus.
- `grafana/clio_dashboard.json`
  JSON file containing a preconfigured dashboard in Grafana format.
- `grafana/dashboard_local.yaml`
  Grafana configuration file defining the directory to search for dashboard JSON files.
- `grafana/datasources.yaml`
  Grafana configuration file defining Prometheus as a data source for Grafana.

## How to try

1. Make sure you have `docker` and `docker-compose` installed.
2. Run `docker-compose up -d` from this directory. It will start Docker containers with Prometheus and Grafana.
3. Open [http://localhost:3000/dashboards](http://localhost:3000/dashboards). The Grafana login is `admin`, the password is `grafana`.
   There will be a preconfigured Clio dashboard.

If Clio is not running yet, launch it to see metrics. Some of the metrics may appear only after requests to Clio.
20 examples/infrastructure/compose.yaml (new file)
@@ -0,0 +1,20 @@
services:
  prometheus:
    image: prom/prometheus
    ports:
      - 9090:9090
    volumes:
      - ./prometheus.yaml:/etc/prometheus/prometheus.yml
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
  grafana:
    image: grafana/grafana
    ports:
      - 3000:3000
    environment:
      - GF_SECURITY_ADMIN_USER=admin
      - GF_SECURITY_ADMIN_PASSWORD=grafana
    volumes:
      - ./grafana/datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml
      - ./grafana/dashboard_local.yaml:/etc/grafana/provisioning/dashboards/local.yaml
      - ./grafana/clio_dashboard.json:/var/lib/grafana/dashboards/clio_dashboard.json
1102 examples/infrastructure/grafana/clio_dashboard.json (new file; diff suppressed because it is too large)
23 examples/infrastructure/grafana/dashboard_local.yaml (new file)
@@ -0,0 +1,23 @@
apiVersion: 1

providers:
  - name: 'Clio dashboard'
    # <int> Org id. Default to 1
    orgId: 1
    # <string> name of the dashboard folder.
    folder: ''
    # <string> folder UID. will be automatically generated if not specified
    folderUid: ''
    # <string> provider type. Default to 'file'
    type: file
    # <bool> disable dashboard deletion
    disableDeletion: false
    # <int> how often Grafana will scan for changed dashboards
    updateIntervalSeconds: 10
    # <bool> allow updating provisioned dashboards from the UI
    allowUiUpdates: false
    options:
      # <string, required> path to dashboard files on disk. Required when using the 'file' type
      path: /var/lib/grafana/dashboards
      # <bool> use folder names from filesystem to create folders in Grafana
      foldersFromFilesStructure: true
8 examples/infrastructure/grafana/datasources.yaml (new file)
@@ -0,0 +1,8 @@
apiVersion: 1

datasources:
  - name: Prometheus
    type: prometheus
    url: http://prometheus:9090
    isDefault: true
    access: proxy
19 examples/infrastructure/prometheus.yaml (new file)
@@ -0,0 +1,19 @@
scrape_configs:
  - job_name: clio
    scrape_interval: 5s
    scrape_timeout: 5s
    authorization:
      type: Password
      # sha256sum of the password `xrp`
      # use `echo -n 'your_password' | shasum -a 256` to get the hash
      credentials: 0e1dcf1ff020cceabf8f4a60a32e814b5b46ee0bb8cd4af5c814e4071bd86a18
    static_configs:
      - targets:
          - host.docker.internal:51233
  - job_name: prometheus
    honor_timestamps: true
    scrape_interval: 15s
    scrape_timeout: 10s
    static_configs:
      - targets:
          - localhost:9090
@@ -1,583 +0,0 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2022, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <ripple/ledger/ReadView.h>
#include <backend/DBHelpers.h>
#include <backend/LedgerCache.h>
#include <backend/Types.h>
#include <config/Config.h>
#include <log/Logger.h>

#include <boost/asio/spawn.hpp>
#include <boost/json.hpp>

#include <thread>
#include <type_traits>

namespace Backend {

/**
 * @brief Throws an error when the database read time limit is exceeded.
 *
 * This class throws an error when the read time limit is exceeded, but
 * is also paired with a separate class to retry the connection.
 */
class DatabaseTimeout : public std::exception
{
public:
    const char*
    what() const throw() override
    {
        return "Database read timed out. Please retry the request";
    }
};

/**
 * @brief Separate class that reattempts connection after time limit.
 *
 * @tparam F Represents a class of handlers for Cassandra database.
 * @param func Instance of Cassandra database handler class.
 * @param waitMs The wait time between retries (500ms by default).
 * @return auto
 */
template <class F>
auto
retryOnTimeout(F func, size_t waitMs = 500)
{
    static clio::Logger log{"Backend"};

    while (true)
    {
        try
        {
            return func();
        }
        catch (DatabaseTimeout& t)
        {
            log.error() << "Database request timed out. Sleeping and retrying ... ";
            std::this_thread::sleep_for(std::chrono::milliseconds(waitMs));
        }
    }
}
/**
 * @brief Passes in serialized handlers in an asynchronous fashion.
 *
 * Note that the synchronous auto passes handlers critical to supporting
 * the Clio backend. The coroutine types are checked if same/different.
 *
 * @tparam F Represents a class of handlers for Cassandra database.
 * @param f R-value instance of Cassandra handler class.
 * @return auto
 */
template <class F>
auto
synchronous(F&& f)
{
    /** @brief Serialized handlers and their execution.
     *
     * The ctx class is converted into a serialized handler, also named
     * ctx, and is used to pass a stream of data into the method.
     */
    boost::asio::io_context ctx;
    boost::asio::io_context::strand strand(ctx);
    std::optional<boost::asio::io_context::work> work;

    /*! @brief Place the ctx within the vector of serialized handlers. */
    work.emplace(ctx);

    /**
     * @brief If/else statements regarding coroutine type matching.
     *
     * R is the currently executing coroutine that is about to get passed.
     * If coroutine types do not match, the current one's type is stored.
     */
    using R = typename boost::result_of<F(boost::asio::yield_context&)>::type;
    if constexpr (!std::is_same<R, void>::value)
    {
        /**
         * @brief When the coroutine type is the same.
         *
         * The spawn function enables programs to implement asynchronous logic
         * in a synchronous manner. res stores the instance of the currently
         * executing coroutine, yield. The different type is returned.
         */
        R res;
        boost::asio::spawn(strand, [&f, &work, &res](boost::asio::yield_context yield) {
            res = f(yield);
            work.reset();
        });

        ctx.run();
        return res;
    }
    else
    {
        /*! @brief When the coroutine type is different, run as normal. */
        boost::asio::spawn(strand, [&f, &work](boost::asio::yield_context yield) {
            f(yield);
            work.reset();
        });

        ctx.run();
    }
}

/**
 * @brief Reestablishes synchronous connection on timeout.
 *
 * @tparam F Represents a class of handlers for Cassandra database.
 * @param f R-value instance of Cassandra database handler class.
 * @return auto
 */
template <class F>
auto
synchronousAndRetryOnTimeout(F&& f)
{
    return retryOnTimeout([&]() { return synchronous(f); });
}
/*! @brief Handles ledger and transaction backend data. */
class BackendInterface
{
    /**
     * @brief Shared mutexes and a cache for the interface.
     *
     * rngMtx_ is a shared mutex. Shared mutexes prevent shared data
     * from being accessed by multiple threads and have two levels of
     * access: shared and exclusive.
     */
protected:
    mutable std::shared_mutex rngMtx_;
    std::optional<LedgerRange> range;
    LedgerCache cache_;

    /**
     * @brief Public read methods.
     *
     * All of these read methods can throw DatabaseTimeout. When writing
     * code in an RPC handler, this exception does not need to be caught:
     * when an RPC results in a timeout, an error is returned to the client.
     */

public:
    BackendInterface() = default;
    virtual ~BackendInterface() = default;

    /**
     * @brief Cache that holds states of the ledger.
     * @return Immutable cache
     */
    LedgerCache const&
    cache() const
    {
        return cache_;
    }

    /**
     * @brief Cache that holds states of the ledger.
     * @return Mutable cache
     */
    LedgerCache&
    cache()
    {
        return cache_;
    }

    /*! @brief Fetches a specific ledger by sequence number. */
    virtual std::optional<ripple::LedgerInfo>
    fetchLedgerBySequence(std::uint32_t const sequence, boost::asio::yield_context& yield) const = 0;

    /*! @brief Fetches a specific ledger by hash. */
    virtual std::optional<ripple::LedgerInfo>
    fetchLedgerByHash(ripple::uint256 const& hash, boost::asio::yield_context& yield) const = 0;

    /*! @brief Fetches the latest ledger sequence. */
    virtual std::optional<std::uint32_t>
    fetchLatestLedgerSequence(boost::asio::yield_context& yield) const = 0;

    /*! @brief Fetches the current ledger range while locking that process. */
    std::optional<LedgerRange>
    fetchLedgerRange() const
    {
        std::shared_lock lck(rngMtx_);
        return range;
    }

    /**
     * @brief Updates the range of sequences to be tracked.
     *
     * Function that continues updating the range sliding window or creates
     * a new sliding window once the maxSequence limit has been reached.
     *
     * @param newMax Unsigned 32-bit integer representing the new max of the range.
     */
    void
    updateRange(uint32_t newMax)
    {
        std::scoped_lock lck(rngMtx_);
        assert(!range || newMax >= range->maxSequence);
        if (!range)
            range = {newMax, newMax};
        else
            range->maxSequence = newMax;
    }
    /**
     * @brief Returns the fees for specific transactions.
     *
     * @param seq Unsigned 32-bit integer representing sequence.
     * @param yield The currently executing coroutine.
     * @return std::optional<ripple::Fees>
     */
    std::optional<ripple::Fees>
    fetchFees(std::uint32_t const seq, boost::asio::yield_context& yield) const;

    /*! @brief TRANSACTION METHODS */
    /**
     * @brief Fetches a specific transaction.
     *
     * @param hash Unsigned 256-bit integer representing hash.
     * @param yield The currently executing coroutine.
     * @return std::optional<TransactionAndMetadata>
     */
    virtual std::optional<TransactionAndMetadata>
    fetchTransaction(ripple::uint256 const& hash, boost::asio::yield_context& yield) const = 0;

    /**
     * @brief Fetches multiple transactions.
     *
     * @param hashes Unsigned integer values representing hashes.
     * @param yield The currently executing coroutine.
     * @return std::vector<TransactionAndMetadata>
     */
    virtual std::vector<TransactionAndMetadata>
    fetchTransactions(std::vector<ripple::uint256> const& hashes, boost::asio::yield_context& yield) const = 0;

    /**
     * @brief Fetches all transactions for a specific account.
     *
     * @param account A specific XRPL Account, specified by unique type
     * accountID.
     * @param limit Paging limit for how many transactions can be returned per
     * page.
     * @param forward Boolean whether paging happens forwards or backwards.
     * @param cursor Important metadata returned every time paging occurs.
     * @param yield Currently executing coroutine.
     * @return TransactionsAndCursor
     */
    virtual TransactionsAndCursor
    fetchAccountTransactions(
        ripple::AccountID const& account,
        std::uint32_t const limit,
        bool forward,
        std::optional<TransactionsCursor> const& cursor,
        boost::asio::yield_context& yield) const = 0;

    /**
     * @brief Fetches all transactions from a specific ledger.
     *
     * @param ledgerSequence Unsigned 32-bit integer for latest total
     * transactions.
     * @param yield Currently executing coroutine.
     * @return std::vector<TransactionAndMetadata>
     */
    virtual std::vector<TransactionAndMetadata>
    fetchAllTransactionsInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) const = 0;

    /**
     * @brief Fetches all transaction hashes from a specific ledger.
     *
     * @param ledgerSequence Standard unsigned integer.
     * @param yield Currently executing coroutine.
     * @return std::vector<ripple::uint256>
     */
    virtual std::vector<ripple::uint256>
    fetchAllTransactionHashesInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) const = 0;

    /*! @brief NFT methods */
    /**
     * @brief Fetches a specific NFT.
     *
     * @param tokenID Unsigned 256-bit integer.
     * @param ledgerSequence Standard unsigned integer.
     * @param yield Currently executing coroutine.
     * @return std::optional<NFT>
     */
    virtual std::optional<NFT>
    fetchNFT(ripple::uint256 const& tokenID, std::uint32_t const ledgerSequence, boost::asio::yield_context& yield)
        const = 0;

    /**
     * @brief Fetches all transactions for a specific NFT.
     *
     * @param tokenID Unsigned 256-bit integer.
     * @param limit Paging limit as to how many transactions are returned per page.
     * @param forward Boolean whether paging happens forwards or backwards.
     * @param cursorIn Represents transaction number and ledger sequence.
     * @param yield Currently executing coroutine is passed in as input.
     * @return TransactionsAndCursor
     */
    virtual TransactionsAndCursor
    fetchNFTTransactions(
        ripple::uint256 const& tokenID,
        std::uint32_t const limit,
        bool const forward,
        std::optional<TransactionsCursor> const& cursorIn,
        boost::asio::yield_context& yield) const = 0;

    /*! @brief STATE DATA METHODS */
    /**
     * @brief Fetches a specific ledger object: vector of unsigned chars.
     *
     * @param key Unsigned 256-bit integer.
     * @param sequence Unsigned 32-bit integer.
     * @param yield Currently executing coroutine.
     * @return std::optional<Blob>
     */
    std::optional<Blob>
    fetchLedgerObject(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context& yield)
        const;

    /**
     * @brief Fetches all ledger objects: a vector of vectors of unsigned chars.
     *
     * @param keys Unsigned 256-bit integers.
     * @param sequence Unsigned 32-bit integer.
     * @param yield Currently executing coroutine.
     * @return std::vector<Blob>
     */
    std::vector<Blob>
    fetchLedgerObjects(
        std::vector<ripple::uint256> const& keys,
        std::uint32_t const sequence,
        boost::asio::yield_context& yield) const;

    /*! @brief Virtual function version of fetchLedgerObject. */
    virtual std::optional<Blob>
    doFetchLedgerObject(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context& yield)
        const = 0;

    /*! @brief Virtual function version of fetchLedgerObjects. */
    virtual std::vector<Blob>
    doFetchLedgerObjects(
        std::vector<ripple::uint256> const& keys,
        std::uint32_t const sequence,
        boost::asio::yield_context& yield) const = 0;

    /**
     * @brief Returns the difference between ledgers: vector of objects.
     *
     * Objects are made of a key value, vector of unsigned chars (blob),
     * and a boolean detailing whether keys and blob match.
     *
     * @param ledgerSequence Standard unsigned integer.
     * @param yield Currently executing coroutine.
     * @return std::vector<LedgerObject>
     */
    virtual std::vector<LedgerObject>
    fetchLedgerDiff(std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) const = 0;

    /**
     * @brief Fetches a page of ledger objects, ordered by key/index.
     *
     * @param cursor Important metadata returned every time paging occurs.
     * @param ledgerSequence Standard unsigned integer.
     * @param limit Paging limit as to how many transactions are returned per page.
     * @param outOfOrder Boolean on whether the ledger page is out of order.
     * @param yield Currently executing coroutine.
     * @return LedgerPage
     */
    LedgerPage
    fetchLedgerPage(
        std::optional<ripple::uint256> const& cursor,
        std::uint32_t const ledgerSequence,
        std::uint32_t const limit,
        bool outOfOrder,
        boost::asio::yield_context& yield) const;

    /*! @brief Fetches successor object from key/index. */
    std::optional<LedgerObject>
    fetchSuccessorObject(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context& yield)
        const;

    /*! @brief Fetches successor key from key/index. */
    std::optional<ripple::uint256>
    fetchSuccessorKey(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) const;

    /*! @brief Virtual function version of fetchSuccessorKey. */
    virtual std::optional<ripple::uint256>
    doFetchSuccessorKey(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context& yield)
        const = 0;

    /**
     * @brief Fetches book offers.
     *
     * @param book Unsigned 256-bit integer.
     * @param ledgerSequence Standard unsigned integer.
     * @param limit Paging limit as to how many transactions are returned per page.
     * @param cursor Important metadata returned every time paging occurs.
     * @param yield Currently executing coroutine.
     * @return BookOffersPage
     */
    BookOffersPage
    fetchBookOffers(
        ripple::uint256 const& book,
        std::uint32_t const ledgerSequence,
|
||||
std::uint32_t const limit,
|
||||
boost::asio::yield_context& yield) const;
|
||||
|
||||
/**
|
||||
* @brief Returns a ledger range
|
||||
*
|
||||
* Ledger range is a struct of min and max sequence numbers). Due to
|
||||
* the use of [&], which denotes a special case of a lambda expression
|
||||
* where values found outside the scope are passed by reference, wrt the
|
||||
* currently executing coroutine.
|
||||
*
|
||||
* @return std::optional<LedgerRange>
|
||||
*/
|
||||
std::optional<LedgerRange>
|
||||
hardFetchLedgerRange() const
|
||||
{
|
||||
return synchronous([&](boost::asio::yield_context yield) { return hardFetchLedgerRange(yield); });
|
||||
}
|
||||
|
||||
/*! @brief Virtual function equivalent of hardFetchLedgerRange. */
|
||||
virtual std::optional<LedgerRange>
|
||||
hardFetchLedgerRange(boost::asio::yield_context& yield) const = 0;
|
||||
|
||||
/*! @brief Fetches ledger range but doesn't throw timeout. Use with care. */
|
||||
std::optional<LedgerRange>
|
||||
hardFetchLedgerRangeNoThrow() const;
|
||||
/*! @brief Fetches ledger range but doesn't throw timeout. Use with care. */
|
||||
std::optional<LedgerRange>
|
||||
hardFetchLedgerRangeNoThrow(boost::asio::yield_context& yield) const;
|
||||
|
||||
/**
|
||||
* @brief Writes to a specific ledger.
|
||||
*
|
||||
* @param ledgerInfo Const on ledger information.
|
||||
* @param ledgerHeader r-value string representing ledger header.
|
||||
*/
|
||||
virtual void
|
||||
writeLedger(ripple::LedgerInfo const& ledgerInfo, std::string&& ledgerHeader) = 0;
|
||||
|
||||
/**
|
||||
* @brief Writes a new ledger object.
|
||||
*
|
||||
* The key and blob are r-value references and do NOT have memory addresses.
|
||||
*
|
||||
* @param key String represented as an r-value.
|
||||
* @param seq Unsigned integer representing a sequence.
|
||||
* @param blob r-value vector of unsigned characters (blob).
|
||||
*/
|
||||
virtual void
|
||||
writeLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob);
|
||||
|
||||
/**
|
||||
* @brief Writes a new transaction.
|
||||
*
|
||||
* @param hash r-value reference. No memory address.
|
||||
* @param seq Unsigned 32-bit integer.
|
||||
* @param date Unsigned 32-bit integer.
|
||||
* @param transaction r-value reference. No memory address.
|
||||
* @param metadata r-value refrence. No memory address.
|
||||
*/
|
||||
virtual void
|
||||
writeTransaction(
|
||||
std::string&& hash,
|
||||
std::uint32_t const seq,
|
||||
std::uint32_t const date,
|
||||
std::string&& transaction,
|
||||
std::string&& metadata) = 0;
|
||||
|
||||
/**
|
||||
* @brief Write a new NFT.
|
||||
*
|
||||
* @param data Passed in as an r-value reference.
|
||||
*/
|
||||
virtual void
|
||||
writeNFTs(std::vector<NFTsData>&& data) = 0;
|
||||
|
||||
/**
|
||||
* @brief Write a new set of account transactions.
|
||||
*
|
||||
* @param data Passed in as an r-value reference.
|
||||
*/
|
||||
virtual void
|
||||
writeAccountTransactions(std::vector<AccountTransactionsData>&& data) = 0;
|
||||
|
||||
/**
|
||||
* @brief Write a new transaction for a specific NFT.
|
||||
*
|
||||
* @param data Passed in as an r-value reference.
|
||||
*/
|
||||
virtual void
|
||||
writeNFTTransactions(std::vector<NFTTransactionsData>&& data) = 0;
|
||||
|
||||
/**
|
||||
* @brief Write a new successor.
|
||||
*
|
||||
* @param key Passed in as an r-value reference.
|
||||
* @param seq Unsigned 32-bit integer.
|
||||
* @param successor Passed in as an r-value reference.
|
||||
*/
|
||||
virtual void
|
||||
writeSuccessor(std::string&& key, std::uint32_t const seq, std::string&& successor) = 0;
|
||||
|
||||
/*! @brief Tells database we will write data for a specific ledger. */
|
||||
virtual void
|
||||
startWrites() const = 0;
|
||||
|
||||
/**
|
||||
* @brief Tells database we finished writing all data for a specific ledger.
|
||||
*
|
||||
* TODO: change the return value to represent different results:
|
||||
* Committed, write conflict, errored, successful but not committed
|
||||
*
|
||||
* @param ledgerSequence Const unsigned 32-bit integer on ledger sequence.
|
||||
* @return true
|
||||
* @return false
|
||||
*/
|
||||
bool
|
||||
finishWrites(std::uint32_t const ledgerSequence);
|
||||
|
||||
virtual bool
|
||||
isTooBusy() const = 0;
|
||||
|
||||
private:
|
||||
/**
|
||||
* @brief Private helper method to write ledger object
|
||||
*
|
||||
* @param key r-value string representing key.
|
||||
* @param seq Unsigned 32-bit integer representing sequence.
|
||||
* @param blob r-value vector of unsigned chars.
|
||||
*/
|
||||
virtual void
|
||||
doWriteLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob) = 0;
|
||||
|
||||
virtual bool
|
||||
doFinishWrites() = 0;
|
||||
};
|
||||
|
||||
} // namespace Backend
|
||||
using BackendInterface = Backend::BackendInterface;
|
||||
@@ -1,98 +0,0 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2022, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <ripple/basics/base_uint.h>
#include <ripple/basics/hardened_hash.h>
#include <backend/Types.h>

#include <map>
#include <mutex>
#include <shared_mutex>
#include <utility>
#include <vector>

namespace Backend {

class LedgerCache
{
    struct CacheEntry
    {
        uint32_t seq = 0;
        Blob blob;
    };

    // counters for fetchLedgerObject(s) hit rate
    mutable std::atomic_uint32_t objectReqCounter_ = 0;
    mutable std::atomic_uint32_t objectHitCounter_ = 0;

    // counters for fetchSuccessorKey hit rate
    mutable std::atomic_uint32_t successorReqCounter_ = 0;
    mutable std::atomic_uint32_t successorHitCounter_ = 0;

    std::map<ripple::uint256, CacheEntry> map_;

    mutable std::shared_mutex mtx_;
    uint32_t latestSeq_ = 0;
    std::atomic_bool full_ = false;
    std::atomic_bool disabled_ = false;

    // temporary set to prevent the background thread from writing already deleted data; not used when cache is full
    std::unordered_set<ripple::uint256, ripple::hardened_hash<>> deletes_;

public:
    // Update the cache with new ledger objects. Set isBackground to true when writing old data from a background thread.
    void
    update(std::vector<LedgerObject> const& blobs, uint32_t seq, bool isBackground = false);

    std::optional<Blob>
    get(ripple::uint256 const& key, uint32_t seq) const;

    // always returns empty optional if isFull() is false
    std::optional<LedgerObject>
    getSuccessor(ripple::uint256 const& key, uint32_t seq) const;

    // always returns empty optional if isFull() is false
    std::optional<LedgerObject>
    getPredecessor(ripple::uint256 const& key, uint32_t seq) const;

    void
    setDisabled();

    void
    setFull();

    uint32_t
    latestLedgerSequence() const;

    // whether the cache has all data for the most recent ledger
    bool
    isFull() const;

    size_t
    size() const;

    float
    getObjectHitRate() const;

    float
    getSuccessorHitRate() const;
};

} // namespace Backend

@@ -1,79 +0,0 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <backend/cassandra/Types.h>

#include <boost/asio/spawn.hpp>

#include <chrono>
#include <concepts>
#include <optional>
#include <string>

namespace Backend::Cassandra {

// clang-format off
template <typename T>
concept SomeSettingsProvider = requires(T a) {
    { a.getSettings() } -> std::same_as<Settings>;
    { a.getKeyspace() } -> std::same_as<std::string>;
    { a.getTablePrefix() } -> std::same_as<std::optional<std::string>>;
    { a.getReplicationFactor() } -> std::same_as<uint16_t>;
    { a.getTtl() } -> std::same_as<uint16_t>;
};
// clang-format on
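
To make the requirements concrete, here is a minimal editorial sketch (not part of the diff) of a type that would satisfy SomeSettingsProvider; the constant values are placeholders, and Settings is assumed to be default-constructible:

// Hypothetical example, not from the repository: a fixed-value provider.
struct FixedSettingsProvider
{
    Settings settings;  // assumed default-constructible

    Settings getSettings() const { return settings; }
    std::string getKeyspace() const { return "clio"; }
    std::optional<std::string> getTablePrefix() const { return std::nullopt; }
    uint16_t getReplicationFactor() const { return 3; }
    uint16_t getTtl() const { return 0; }
};
static_assert(SomeSettingsProvider<FixedSettingsProvider>);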

// clang-format off
template <typename T>
concept SomeExecutionStrategy = requires(
    T a,
    Settings settings,
    Handle handle,
    Statement statement,
    std::vector<Statement> statements,
    PreparedStatement prepared,
    boost::asio::yield_context token
) {
    { T(settings, handle) };
    { a.sync() } -> std::same_as<void>;
    { a.isTooBusy() } -> std::same_as<bool>;
    { a.writeSync(statement) } -> std::same_as<ResultOrError>;
    { a.writeSync(prepared) } -> std::same_as<ResultOrError>;
    { a.write(prepared) } -> std::same_as<void>;
    { a.write(std::move(statements)) } -> std::same_as<void>;
    { a.read(token, prepared) } -> std::same_as<ResultOrError>;
    { a.read(token, statement) } -> std::same_as<ResultOrError>;
    { a.read(token, statements) } -> std::same_as<ResultOrError>;
    { a.readEach(token, statements) } -> std::same_as<std::vector<Result>>;
};
// clang-format on

// clang-format off
template <typename T>
concept SomeRetryPolicy = requires(T a, boost::asio::io_context ioc, CassandraError err, uint32_t attempt) {
    { T(ioc) };
    { a.shouldRetry(err) } -> std::same_as<bool>;
    { a.retry([](){}) } -> std::same_as<void>;
    { a.calculateDelay(attempt) } -> std::same_as<std::chrono::milliseconds>;
};
// clang-format on

} // namespace Backend::Cassandra

src/data/BackendCounters.cpp (new file, 193 lines)
@@ -0,0 +1,193 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <data/BackendCounters.h>

#include <util/prometheus/Prometheus.h>

namespace data {

using namespace util::prometheus;

BackendCounters::BackendCounters()
    : tooBusyCounter_(PrometheusService::counterInt(
          "backend_too_busy_total_number",
          Labels(),
          "The total number of times the backend was too busy to process a request"
      ))
    , writeSyncCounter_(PrometheusService::counterInt(
          "backend_operations_total_number",
          Labels({Label{"operation", "write_sync"}}),
          "The total number of times the backend had to write synchronously"
      ))
    , writeSyncRetryCounter_(PrometheusService::counterInt(
          "backend_operations_total_number",
          Labels({Label{"operation", "write_sync_retry"}}),
          "The total number of times the backend had to retry a synchronous write"
      ))
    , asyncWriteCounters_{"write_async"}
    , asyncReadCounters_{"read_async"}
{
}

BackendCounters::PtrType
BackendCounters::make()
{
    struct EnableMakeShared : public BackendCounters {};
    return std::make_shared<EnableMakeShared>();
}

void
BackendCounters::registerTooBusy()
{
    ++tooBusyCounter_.get();
}

void
BackendCounters::registerWriteSync()
{
    ++writeSyncCounter_.get();
}

void
BackendCounters::registerWriteSyncRetry()
{
    ++writeSyncRetryCounter_.get();
}

void
BackendCounters::registerWriteStarted()
{
    asyncWriteCounters_.registerStarted(1u);
}

void
BackendCounters::registerWriteFinished()
{
    asyncWriteCounters_.registerFinished(1u);
}

void
BackendCounters::registerWriteRetry()
{
    asyncWriteCounters_.registerRetry(1u);
}

void
BackendCounters::registerReadStarted(std::uint64_t const count)
{
    asyncReadCounters_.registerStarted(count);
}

void
BackendCounters::registerReadFinished(std::uint64_t const count)
{
    asyncReadCounters_.registerFinished(count);
}

void
BackendCounters::registerReadRetry(std::uint64_t const count)
{
    asyncReadCounters_.registerRetry(count);
}

void
BackendCounters::registerReadError(std::uint64_t const count)
{
    asyncReadCounters_.registerError(count);
}

boost::json::object
BackendCounters::report() const
{
    boost::json::object result;
    result["too_busy"] = tooBusyCounter_.get().value();
    result["write_sync"] = writeSyncCounter_.get().value();
    result["write_sync_retry"] = writeSyncRetryCounter_.get().value();
    for (auto const& [key, value] : asyncWriteCounters_.report())
        result[key] = value;
    for (auto const& [key, value] : asyncReadCounters_.report())
        result[key] = value;
    return result;
}

BackendCounters::AsyncOperationCounters::AsyncOperationCounters(std::string name)
    : name_(std::move(name))
    , pendingCounter_(PrometheusService::gaugeInt(
          "backend_operations_current_number",
          Labels({{"operation", name_}, {"status", "pending"}}),
          "The current number of pending " + name_ + " operations"
      ))
    , completedCounter_(PrometheusService::counterInt(
          "backend_operations_total_number",
          Labels({{"operation", name_}, {"status", "completed"}}),
          "The total number of completed " + name_ + " operations"
      ))
    , retryCounter_(PrometheusService::counterInt(
          "backend_operations_total_number",
          Labels({{"operation", name_}, {"status", "retry"}}),
          "The total number of retried " + name_ + " operations"
      ))
    , errorCounter_(PrometheusService::counterInt(
          "backend_operations_total_number",
          Labels({{"operation", name_}, {"status", "error"}}),
          "The total number of errored " + name_ + " operations"
      ))
{
}

void
BackendCounters::AsyncOperationCounters::registerStarted(std::uint64_t const count)
{
    pendingCounter_.get() += count;
}

void
BackendCounters::AsyncOperationCounters::registerFinished(std::uint64_t const count)
{
    assert(pendingCounter_.get().value() >= static_cast<std::int64_t>(count));
    pendingCounter_.get() -= count;
    completedCounter_.get() += count;
}

void
BackendCounters::AsyncOperationCounters::registerRetry(std::uint64_t count)
{
    retryCounter_.get() += count;
}

void
BackendCounters::AsyncOperationCounters::registerError(std::uint64_t count)
{
    assert(pendingCounter_.get().value() >= static_cast<std::int64_t>(count));
    pendingCounter_.get() -= count;
    errorCounter_.get() += count;
}

boost::json::object
BackendCounters::AsyncOperationCounters::report() const
{
    return boost::json::object{
        {name_ + "_pending", pendingCounter_.get().value()},
        {name_ + "_completed", completedCounter_.get().value()},
        {name_ + "_retry", retryCounter_.get().value()},
        {name_ + "_error", errorCounter_.get().value()}};
}

} // namespace data
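
As a quick orientation (editorial sketch, not part of the diff): a backend implementation is expected to bracket each asynchronous operation with the register* calls above, and report() flattens everything into one JSON object. `doRead()` below is a hypothetical stand-in for a real read operation:

// Illustrative only; doRead() is an assumed placeholder.
auto counters = data::BackendCounters::make();

counters->registerReadStarted();        // pending +1
if (doRead())                           // hypothetical read operation
    counters->registerReadFinished();   // pending -1, completed +1
else
    counters->registerReadError();      // pending -1, error +1

// Yields keys such as "too_busy", "write_sync", "write_sync_retry",
// "read_async_pending", "read_async_completed", "read_async_retry",
// and "read_async_error".
boost::json::object const stats = counters->report();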

src/data/BackendCounters.h (new file, 138 lines)
@@ -0,0 +1,138 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <util/prometheus/Prometheus.h>

#include <boost/json/object.hpp>

#include <atomic>
#include <functional>
#include <memory>
#include <utility>

namespace data {

/**
 * @brief A concept for a class that can be used to count backend operations.
 */
// clang-format off
template <typename T>
concept SomeBackendCounters = requires(T a) {
    typename T::PtrType;
    { a.registerTooBusy() } -> std::same_as<void>;
    { a.registerWriteSync() } -> std::same_as<void>;
    { a.registerWriteSyncRetry() } -> std::same_as<void>;
    { a.registerWriteStarted() } -> std::same_as<void>;
    { a.registerWriteFinished() } -> std::same_as<void>;
    { a.registerWriteRetry() } -> std::same_as<void>;
    { a.registerReadStarted(std::uint64_t{}) } -> std::same_as<void>;
    { a.registerReadFinished(std::uint64_t{}) } -> std::same_as<void>;
    { a.registerReadRetry(std::uint64_t{}) } -> std::same_as<void>;
    { a.registerReadError(std::uint64_t{}) } -> std::same_as<void>;
    { a.report() } -> std::same_as<boost::json::object>;
};
// clang-format on
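
Editorial note, not from the diff: the concept lets templated code accept any counters implementation. The BackendCounters class declared below is presumably meant to satisfy it, which could be verified at the end of the header (once the class is visible) with a one-line compile-time check:

// Illustrative compile-time check; would go after the class declaration.
static_assert(data::SomeBackendCounters<data::BackendCounters>);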

/**
 * @brief Holds statistics about the backend.
 *
 * @note This class is thread-safe.
 */
class BackendCounters {
public:
    using PtrType = std::shared_ptr<BackendCounters>;

    static PtrType
    make();

    void
    registerTooBusy();

    void
    registerWriteSync();

    void
    registerWriteSyncRetry();

    void
    registerWriteStarted();

    void
    registerWriteFinished();

    void
    registerWriteRetry();

    void
    registerReadStarted(std::uint64_t count = 1u);

    void
    registerReadFinished(std::uint64_t count = 1u);

    void
    registerReadRetry(std::uint64_t count = 1u);

    void
    registerReadError(std::uint64_t count = 1u);

    boost::json::object
    report() const;

private:
    BackendCounters();

    class AsyncOperationCounters {
    public:
        AsyncOperationCounters(std::string name);

        void
        registerStarted(std::uint64_t count);

        void
        registerFinished(std::uint64_t count);

        void
        registerRetry(std::uint64_t count);

        void
        registerError(std::uint64_t count);

        boost::json::object
        report() const;

    private:
        std::string name_;
        std::reference_wrapper<util::prometheus::GaugeInt> pendingCounter_;
        std::reference_wrapper<util::prometheus::CounterInt> completedCounter_;
        std::reference_wrapper<util::prometheus::CounterInt> retryCounter_;
        std::reference_wrapper<util::prometheus::CounterInt> errorCounter_;
    };

    std::reference_wrapper<util::prometheus::CounterInt> tooBusyCounter_;

    std::reference_wrapper<util::prometheus::CounterInt> writeSyncCounter_;
    std::reference_wrapper<util::prometheus::CounterInt> writeSyncRetryCounter_;

    AsyncOperationCounters asyncWriteCounters_{"write_async"};
    AsyncOperationCounters asyncReadCounters_{"read_async"};
};

} // namespace data

@@ -19,19 +19,26 @@

#pragma once

#include <backend/BackendInterface.h>
#include <backend/CassandraBackend.h>
#include <config/Config.h>
#include <log/Logger.h>
#include <data/BackendInterface.h>
#include <data/CassandraBackend.h>
#include <util/config/Config.h>
#include <util/log/Logger.h>

#include <boost/algorithm/string.hpp>

namespace Backend {
std::shared_ptr<BackendInterface>
make_Backend(boost::asio::io_context& ioc, clio::Config const& config)
namespace data {

/**
 * @brief A factory function that creates the backend based on a config.
 *
 * @param config The clio config to use
 * @return A shared_ptr<BackendInterface> with the selected implementation
 */
inline std::shared_ptr<BackendInterface>
make_Backend(util::Config const& config)
{
    static clio::Logger log{"Backend"};
    log.info() << "Constructing BackendInterface";
    static util::Logger const log{"Backend"};
    LOG(log.info()) << "Constructing BackendInterface";

    auto const readOnly = config.valueOr("read_only", false);

@@ -39,24 +46,21 @@ make_Backend(boost::asio::io_context& ioc, clio::Config const& config)
    std::shared_ptr<BackendInterface> backend = nullptr;

    // TODO: retire `cassandra-new` by next release after 2.0
    if (boost::iequals(type, "cassandra") or boost::iequals(type, "cassandra-new"))
    {
    if (boost::iequals(type, "cassandra") or boost::iequals(type, "cassandra-new")) {
        auto cfg = config.section("database." + type);
        backend =
            std::make_shared<Backend::Cassandra::CassandraBackend>(Backend::Cassandra::SettingsProvider{cfg}, readOnly);
        backend = std::make_shared<data::cassandra::CassandraBackend>(data::cassandra::SettingsProvider{cfg}, readOnly);
    }

    if (!backend)
        throw std::runtime_error("Invalid database type");

    auto const rng = backend->hardFetchLedgerRangeNoThrow();
    if (rng)
    {
    if (rng) {
        backend->updateRange(rng->minSequence);
        backend->updateRange(rng->maxSequence);
    }

    log.info() << "Constructed BackendInterface Successfully";
    LOG(log.info()) << "Constructed BackendInterface Successfully";
    return backend;
}
} // namespace Backend
} // namespace data
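
A hypothetical caller-side sketch (not part of the diff; it assumes util::Config can be built from parsed JSON, a detail outside this hunk) showing how the factory selects the Cassandra implementation from a config:

// Illustrative only. Assumes util::Config wraps a parsed boost::json value.
auto const json = boost::json::parse(R"({
    "database": {
        "type": "cassandra",
        "cassandra": {"contact_points": "127.0.0.1", "keyspace": "clio"}
    },
    "read_only": true
})");
util::Config const config{json};

auto backend = data::make_Backend(config);  // throws on unknown database type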

@@ -17,28 +17,25 @@
*/
//==============================================================================

#include <backend/BackendInterface.h>
#include <log/Logger.h>
#include <data/BackendInterface.h>
#include <util/log/Logger.h>

#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/STLedgerEntry.h>

using namespace clio;

// local to compilation unit loggers
namespace {
clio::Logger gLog{"Backend"};
util::Logger gLog{"Backend"};
} // namespace

namespace Backend {
namespace data {
bool
BackendInterface::finishWrites(std::uint32_t const ledgerSequence)
{
    gLog.debug() << "Want finish writes for " << ledgerSequence;
    LOG(gLog.debug()) << "Want finish writes for " << ledgerSequence;
    auto commitRes = doFinishWrites();
    if (commitRes)
    {
        gLog.debug() << "Successfully committed. Updating range now to " << ledgerSequence;
    if (commitRes) {
        LOG(gLog.debug()) << "Successfully committed. Updating range now to " << ledgerSequence;
        updateRange(ledgerSequence);
    }
    return commitRes;

@@ -50,27 +47,9 @@ BackendInterface::writeLedgerObject(std::string&& key, std::uint32_t const seq,
    doWriteLedgerObject(std::move(key), seq, std::move(blob));
}

std::optional<LedgerRange>
BackendInterface::hardFetchLedgerRangeNoThrow(boost::asio::yield_context& yield) const
{
    gLog.trace() << "called";
    while (true)
    {
        try
        {
            return hardFetchLedgerRange(yield);
        }
        catch (DatabaseTimeout& t)
        {
            ;
        }
    }
}

std::optional<LedgerRange>
BackendInterface::hardFetchLedgerRangeNoThrow() const
{
    gLog.trace() << "called";
    return retryOnTimeout([&]() { return hardFetchLedgerRange(); });
}

@@ -79,52 +58,49 @@ std::optional<Blob>
BackendInterface::fetchLedgerObject(
    ripple::uint256 const& key,
    std::uint32_t const sequence,
    boost::asio::yield_context& yield) const
    boost::asio::yield_context yield
) const
{
    auto obj = cache_.get(key, sequence);
    if (obj)
    {
        gLog.trace() << "Cache hit - " << ripple::strHex(key);
    if (obj) {
        LOG(gLog.trace()) << "Cache hit - " << ripple::strHex(key);
        return *obj;
    }
    else
    {
        gLog.trace() << "Cache miss - " << ripple::strHex(key);

    LOG(gLog.trace()) << "Cache miss - " << ripple::strHex(key);
    auto dbObj = doFetchLedgerObject(key, sequence, yield);
    if (!dbObj)
        gLog.trace() << "Missed cache and missed in db";
    else
        gLog.trace() << "Missed cache but found in db";
    return dbObj;
    if (!dbObj) {
        LOG(gLog.trace()) << "Missed cache and missed in db";
    } else {
        LOG(gLog.trace()) << "Missed cache but found in db";
    }
    return dbObj;
}

std::vector<Blob>
BackendInterface::fetchLedgerObjects(
    std::vector<ripple::uint256> const& keys,
    std::uint32_t const sequence,
    boost::asio::yield_context& yield) const
    boost::asio::yield_context yield
) const
{
    std::vector<Blob> results;
    results.resize(keys.size());
    std::vector<ripple::uint256> misses;
    for (size_t i = 0; i < keys.size(); ++i)
    {
    for (size_t i = 0; i < keys.size(); ++i) {
        auto obj = cache_.get(keys[i], sequence);
        if (obj)
        if (obj) {
            results[i] = *obj;
        else
        } else {
            misses.push_back(keys[i]);
        }
    gLog.trace() << "Cache hits = " << keys.size() - misses.size() << " - cache misses = " << misses.size();
    }
    LOG(gLog.trace()) << "Cache hits = " << keys.size() - misses.size() << " - cache misses = " << misses.size();

    if (misses.size())
    {
    if (!misses.empty()) {
        auto objs = doFetchLedgerObjects(misses, sequence, yield);
        for (size_t i = 0, j = 0; i < results.size(); ++i)
        {
            if (results[i].size() == 0)
            {
        for (size_t i = 0, j = 0; i < results.size(); ++i) {
            if (results[i].empty()) {
                results[i] = objs[j];
                ++j;
            }

@@ -138,13 +114,15 @@ std::optional<ripple::uint256>
BackendInterface::fetchSuccessorKey(
    ripple::uint256 key,
    std::uint32_t const ledgerSequence,
    boost::asio::yield_context& yield) const
    boost::asio::yield_context yield
) const
{
    auto succ = cache_.getSuccessor(key, ledgerSequence);
    if (succ)
        gLog.trace() << "Cache hit - " << ripple::strHex(key);
    else
        gLog.trace() << "Cache miss - " << ripple::strHex(key);
    if (succ) {
        LOG(gLog.trace()) << "Cache hit - " << ripple::strHex(key);
    } else {
        LOG(gLog.trace()) << "Cache miss - " << ripple::strHex(key);
    }
    return succ ? succ->key : doFetchSuccessorKey(key, ledgerSequence, yield);
}

@@ -152,11 +130,11 @@ std::optional<LedgerObject>
BackendInterface::fetchSuccessorObject(
    ripple::uint256 key,
    std::uint32_t const ledgerSequence,
    boost::asio::yield_context& yield) const
    boost::asio::yield_context yield
) const
{
    auto succ = fetchSuccessorKey(key, ledgerSequence, yield);
    if (succ)
    {
    if (succ) {
        auto obj = fetchLedgerObject(*succ, ledgerSequence, yield);
        if (!obj)
            return {{*succ, {}}};

@@ -171,7 +149,8 @@ BackendInterface::fetchBookOffers(
    ripple::uint256 const& book,
    std::uint32_t const ledgerSequence,
    std::uint32_t const limit,
    boost::asio::yield_context& yield) const
    boost::asio::yield_context yield
) const
{
    // TODO try to speed this up. This can take a few seconds. The goal is
    // to get it down to a few hundred milliseconds.

@@ -185,29 +164,26 @@ BackendInterface::fetchBookOffers(
    std::uint32_t numPages = 0;
    long succMillis = 0;
    long pageMillis = 0;
    while (keys.size() < limit)
    {
    while (keys.size() < limit) {
        auto mid1 = std::chrono::system_clock::now();
        auto offerDir = fetchSuccessorObject(uTipIndex, ledgerSequence, yield);
        auto mid2 = std::chrono::system_clock::now();
        numSucc++;
        succMillis += getMillis(mid2 - mid1);
        if (!offerDir || offerDir->key >= bookEnd)
        {
            gLog.trace() << "offerDir.has_value() " << offerDir.has_value() << " breaking";
        if (!offerDir || offerDir->key >= bookEnd) {
            LOG(gLog.trace()) << "offerDir.has_value() " << offerDir.has_value() << " breaking";
            break;
        }
        uTipIndex = offerDir->key;
        while (keys.size() < limit)
        {
        while (keys.size() < limit) {
            ++numPages;
            ripple::STLedgerEntry sle{ripple::SerialIter{offerDir->blob.data(), offerDir->blob.size()}, offerDir->key};
            ripple::STLedgerEntry const sle{
                ripple::SerialIter{offerDir->blob.data(), offerDir->blob.size()}, offerDir->key};
            auto indexes = sle.getFieldV256(ripple::sfIndexes);
            keys.insert(keys.end(), indexes.begin(), indexes.end());
            auto next = sle.getFieldU64(ripple::sfIndexNext);
            if (!next)
            {
                gLog.trace() << "Next is empty. breaking";
            if (next == 0u) {
                LOG(gLog.trace()) << "Next is empty. breaking";
                break;
            }
            auto nextKey = ripple::keylet::page(uTipIndex, next);

@@ -221,15 +197,14 @@ BackendInterface::fetchBookOffers(
    }
    auto mid = std::chrono::system_clock::now();
    auto objs = fetchLedgerObjects(keys, ledgerSequence, yield);
    for (size_t i = 0; i < keys.size() && i < limit; ++i)
    {
        gLog.trace() << "Key = " << ripple::strHex(keys[i]) << " blob = " << ripple::strHex(objs[i])
    for (size_t i = 0; i < keys.size() && i < limit; ++i) {
        LOG(gLog.trace()) << "Key = " << ripple::strHex(keys[i]) << " blob = " << ripple::strHex(objs[i])
                          << " ledgerSequence = " << ledgerSequence;
        assert(objs[i].size());
        assert(!objs[i].empty());
        page.offers.push_back({keys[i], objs[i]});
    }
    auto end = std::chrono::system_clock::now();
    gLog.debug() << "Fetching " << std::to_string(keys.size()) << " offers took "
    LOG(gLog.debug()) << "Fetching " << std::to_string(keys.size()) << " offers took "
                      << std::to_string(getMillis(mid - begin)) << " milliseconds. Fetching next dir took "
                      << std::to_string(succMillis) << " milliseconds. Fetched next dir " << std::to_string(numSucc)
                      << " times"

@@ -242,75 +217,94 @@ BackendInterface::fetchBookOffers(
    return page;
}

std::optional<LedgerRange>
BackendInterface::hardFetchLedgerRange() const
{
    return synchronous([this](auto yield) { return hardFetchLedgerRange(yield); });
}

std::optional<LedgerRange>
BackendInterface::fetchLedgerRange() const
{
    std::shared_lock const lck(rngMtx_);
    return range;
}

void
BackendInterface::updateRange(uint32_t newMax)
{
    std::scoped_lock const lck(rngMtx_);
    assert(!range || newMax >= range->maxSequence);
    if (!range) {
        range = {newMax, newMax};
    } else {
        range->maxSequence = newMax;
    }
}

LedgerPage
BackendInterface::fetchLedgerPage(
    std::optional<ripple::uint256> const& cursor,
    std::uint32_t const ledgerSequence,
    std::uint32_t const limit,
    bool outOfOrder,
    boost::asio::yield_context& yield) const
    boost::asio::yield_context yield
) const
{
    LedgerPage page;

    std::vector<ripple::uint256> keys;
    bool reachedEnd = false;
    while (keys.size() < limit && !reachedEnd)
    {
        ripple::uint256 const& curCursor = keys.size() ? keys.back() : cursor ? *cursor : firstKey;
    while (keys.size() < limit && !reachedEnd) {
        ripple::uint256 const& curCursor = !keys.empty() ? keys.back() : (cursor ? *cursor : firstKey);
        std::uint32_t const seq = outOfOrder ? range->maxSequence : ledgerSequence;
        auto succ = fetchSuccessorKey(curCursor, seq, yield);
        if (!succ)
        if (!succ) {
            reachedEnd = true;
        else
            keys.push_back(std::move(*succ));
        } else {
            keys.push_back(*succ);
        }
    }

    auto objects = fetchLedgerObjects(keys, ledgerSequence, yield);
    for (size_t i = 0; i < objects.size(); ++i)
    {
        if (objects[i].size())
            page.objects.push_back({std::move(keys[i]), std::move(objects[i])});
        else if (!outOfOrder)
        {
            gLog.error() << "Deleted or non-existent object in successor table. key = " << ripple::strHex(keys[i])
    for (size_t i = 0; i < objects.size(); ++i) {
        if (!objects[i].empty()) {
            page.objects.push_back({keys[i], std::move(objects[i])});
        } else if (!outOfOrder) {
            LOG(gLog.error()) << "Deleted or non-existent object in successor table. key = " << ripple::strHex(keys[i])
                              << " - seq = " << ledgerSequence;
            std::stringstream msg;
            for (size_t j = 0; j < objects.size(); ++j)
            {
            for (size_t j = 0; j < objects.size(); ++j) {
                msg << " - " << ripple::strHex(keys[j]);
            }
            gLog.error() << msg.str();
            LOG(gLog.error()) << msg.str();
        }
    }
    if (keys.size() && !reachedEnd)
    if (!keys.empty() && !reachedEnd)
        page.cursor = keys.back();

    return page;
}

std::optional<ripple::Fees>
BackendInterface::fetchFees(std::uint32_t const seq, boost::asio::yield_context& yield) const
BackendInterface::fetchFees(std::uint32_t const seq, boost::asio::yield_context yield) const
{
    ripple::Fees fees;

    auto key = ripple::keylet::fees().key;
    auto bytes = fetchLedgerObject(key, seq, yield);

    if (!bytes)
    {
        gLog.error() << "Could not find fees";
    if (!bytes) {
        LOG(gLog.error()) << "Could not find fees";
        return {};
    }

    ripple::SerialIter it(bytes->data(), bytes->size());
    ripple::SLE sle{it, key};
    ripple::SLE const sle{it, key};

    if (sle.getFieldIndex(ripple::sfBaseFee) != -1)
        fees.base = sle.getFieldU64(ripple::sfBaseFee);

    if (sle.getFieldIndex(ripple::sfReferenceFeeUnits) != -1)
        fees.units = sle.getFieldU32(ripple::sfReferenceFeeUnits);

    if (sle.getFieldIndex(ripple::sfReserveBase) != -1)
        fees.reserve = sle.getFieldU32(ripple::sfReserveBase);

@@ -320,4 +314,4 @@ BackendInterface::fetchFees(std::uint32_t const seq, boost::asio::yield_context&
    return fees;
}

} // namespace Backend
} // namespace data

src/data/BackendInterface.h (new file, 592 lines)
@@ -0,0 +1,592 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2022, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <data/DBHelpers.h>
#include <data/LedgerCache.h>
#include <data/Types.h>
#include <util/config/Config.h>
#include <util/log/Logger.h>

#include <ripple/protocol/Fees.h>
#include <ripple/protocol/LedgerHeader.h>
#include <boost/asio/spawn.hpp>
#include <boost/json.hpp>

#include <thread>
#include <type_traits>

namespace data {

/**
 * @brief Represents a database timeout error.
 */
class DatabaseTimeout : public std::exception {
public:
    char const*
    what() const throw() override
    {
        return "Database read timed out. Please retry the request";
    }
};

static constexpr std::size_t DEFAULT_WAIT_BETWEEN_RETRY = 500;

/**
 * @brief A helper function that catches DatabaseTimeout exceptions and retries indefinitely.
 *
 * @tparam FnType The type of function object to execute
 * @param func The function object to execute
 * @param waitMs Delay between retry attempts
 * @return auto The same as the return type of func
 */
template <class FnType>
auto
retryOnTimeout(FnType func, size_t waitMs = DEFAULT_WAIT_BETWEEN_RETRY)
{
    static util::Logger const log{"Backend"};

    while (true) {
        try {
            return func();
        } catch (DatabaseTimeout const&) {
            LOG(log.error()) << "Database request timed out. Sleeping and retrying ... ";
            std::this_thread::sleep_for(std::chrono::milliseconds(waitMs));
        }
    }
}
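
An illustrative use (not part of the diff; `fetchSomething` is a stand-in for any callable that may throw DatabaseTimeout):

// Retries every 250ms until fetchSomething() returns without throwing
// DatabaseTimeout; other exception types still propagate immediately.
auto const result = retryOnTimeout([&] { return fetchSomething(); }, 250);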

/**
 * @brief Synchronously executes the given function object inside a coroutine.
 *
 * @tparam FnType The type of function object to execute
 * @param func The function object to execute
 * @return auto The same as the return type of func
 */
template <class FnType>
auto
synchronous(FnType&& func)
{
    boost::asio::io_context ctx;

    using R = typename boost::result_of<FnType(boost::asio::yield_context)>::type;
    if constexpr (!std::is_same<R, void>::value) {
        R res;
        boost::asio::spawn(ctx, [_ = boost::asio::make_work_guard(ctx), &func, &res](auto yield) {
            res = func(yield);
        });

        ctx.run();
        return res;
    } else {
        boost::asio::spawn(ctx, [_ = boost::asio::make_work_guard(ctx), &func](auto yield) { func(yield); });
        ctx.run();
    }
}
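
An illustrative use (not part of the diff; `backend` is an assumed BackendInterface pointer), mirroring how hardFetchLedgerRange() wraps its coroutine overload:

// Spins up a private io_context, runs the coroutine to completion, and
// returns its result to the (non-coroutine) caller.
auto const range = synchronous([&](boost::asio::yield_context yield) {
    return backend->hardFetchLedgerRange(yield);
});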

/**
 * @brief Synchronously execute the given function object and retry until no DatabaseTimeout is thrown.
 *
 * @tparam FnType The type of function object to execute
 * @param func The function object to execute
 * @return auto The same as the return type of func
 */
template <class FnType>
auto
synchronousAndRetryOnTimeout(FnType&& func)
{
    return retryOnTimeout([&]() { return synchronous(func); });
}

/**
 * @brief The interface to the database used by Clio.
 */
class BackendInterface {
protected:
    mutable std::shared_mutex rngMtx_;
    std::optional<LedgerRange> range;
    LedgerCache cache_;

public:
    BackendInterface() = default;
    virtual ~BackendInterface() = default;

    // TODO: Remove this hack. Cache should not be exposed thru BackendInterface
    /**
     * @return Immutable cache
     */
    LedgerCache const&
    cache() const
    {
        return cache_;
    }

    /**
     * @return Mutable cache
     */
    LedgerCache&
    cache()
    {
        return cache_;
    }

    /**
     * @brief Fetches a specific ledger by sequence number.
     *
     * @param sequence The sequence number to fetch for
     * @param yield The coroutine context
     * @return The ripple::LedgerHeader if found; nullopt otherwise
     */
    virtual std::optional<ripple::LedgerHeader>
    fetchLedgerBySequence(std::uint32_t sequence, boost::asio::yield_context yield) const = 0;

    /**
     * @brief Fetches a specific ledger by hash.
     *
     * @param hash The hash to fetch for
     * @param yield The coroutine context
     * @return The ripple::LedgerHeader if found; nullopt otherwise
     */
    virtual std::optional<ripple::LedgerHeader>
    fetchLedgerByHash(ripple::uint256 const& hash, boost::asio::yield_context yield) const = 0;

    /**
     * @brief Fetches the latest ledger sequence.
     *
     * @param yield The coroutine context
     * @return Latest sequence wrapped in an optional if found; nullopt otherwise
     */
    virtual std::optional<std::uint32_t>
    fetchLatestLedgerSequence(boost::asio::yield_context yield) const = 0;

    /**
     * @brief Fetch the current ledger range.
     *
     * @return The current ledger range if populated; nullopt otherwise
     */
    std::optional<LedgerRange>
    fetchLedgerRange() const;

    /**
     * @brief Updates the range of sequences that are stored in the DB.
     *
     * @param newMax The new maximum sequence available
     */
    void
    updateRange(uint32_t newMax);

    /**
     * @brief Fetch the fees from a specific ledger sequence.
     *
     * @param seq The sequence to fetch for
     * @param yield The coroutine context
     * @return ripple::Fees if fees are found; nullopt otherwise
     */
    std::optional<ripple::Fees>
    fetchFees(std::uint32_t seq, boost::asio::yield_context yield) const;

    /**
     * @brief Fetches a specific transaction.
     *
     * @param hash The hash of the transaction to fetch
     * @param yield The coroutine context
     * @return TransactionAndMetadata if transaction is found; nullopt otherwise
     */
    virtual std::optional<TransactionAndMetadata>
    fetchTransaction(ripple::uint256 const& hash, boost::asio::yield_context yield) const = 0;

    /**
     * @brief Fetches multiple transactions.
     *
     * @param hashes A vector of hashes to fetch transactions for
     * @param yield The coroutine context
     * @return A vector of TransactionAndMetadata matching the given hashes
     */
    virtual std::vector<TransactionAndMetadata>
    fetchTransactions(std::vector<ripple::uint256> const& hashes, boost::asio::yield_context yield) const = 0;

    /**
     * @brief Fetches all transactions for a specific account.
     *
     * @param account The account to fetch transactions for
     * @param limit The maximum number of transactions per result page
     * @param forward Whether to fetch the page forwards or backwards from the given cursor
     * @param cursor The cursor to resume fetching from
     * @param yield The coroutine context
     * @return Results and a cursor to resume from
     */
    virtual TransactionsAndCursor
    fetchAccountTransactions(
        ripple::AccountID const& account,
        std::uint32_t limit,
        bool forward,
        std::optional<TransactionsCursor> const& cursor,
        boost::asio::yield_context yield
    ) const = 0;

    /**
     * @brief Fetches all transactions from a specific ledger.
     *
     * @param ledgerSequence The ledger sequence to fetch for
     * @param yield The coroutine context
     * @return Results as a vector of TransactionAndMetadata
     */
    virtual std::vector<TransactionAndMetadata>
    fetchAllTransactionsInLedger(std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;

    /**
     * @brief Fetches all transaction hashes from a specific ledger.
     *
     * @param ledgerSequence The ledger sequence to fetch for
     * @param yield The coroutine context
     * @return Hashes as ripple::uint256 in a vector
     */
    virtual std::vector<ripple::uint256>
    fetchAllTransactionHashesInLedger(std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;

    /**
     * @brief Fetches a specific NFT.
     *
     * @param tokenID The ID of the NFT
     * @param ledgerSequence The ledger sequence to fetch for
     * @param yield The coroutine context
     * @return NFT object on success; nullopt otherwise
     */
    virtual std::optional<NFT>
    fetchNFT(ripple::uint256 const& tokenID, std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;

    /**
     * @brief Fetches all transactions for a specific NFT.
     *
     * @param tokenID The ID of the NFT
     * @param limit The maximum number of transactions per result page
     * @param forward Whether to fetch the page forwards or backwards from the given cursor
     * @param cursorIn The cursor to resume fetching from
     * @param yield The coroutine context
     * @return Results and a cursor to resume from
     */
    virtual TransactionsAndCursor
    fetchNFTTransactions(
        ripple::uint256 const& tokenID,
        std::uint32_t limit,
        bool forward,
        std::optional<TransactionsCursor> const& cursorIn,
        boost::asio::yield_context yield
    ) const = 0;

    /**
     * @brief Fetches all NFTs issued by a given address.
     *
     * @param issuer AccountID of the issuer you wish to query.
     * @param taxon Optional taxon of NFTs by which you wish to filter.
     * @param ledgerSequence The ledger sequence to fetch for.
     * @param limit Paging limit.
     * @param cursorIn Optional cursor to allow us to pick up from where we
     * last left off.
     * @param yield Currently executing coroutine.
     * @return std::vector<NFT> of NFTs issued by this account (or by this
     * issuer/taxon combination if taxon is passed), and an optional marker
     */
    virtual NFTsAndCursor
    fetchNFTsByIssuer(
        ripple::AccountID const& issuer,
        std::optional<std::uint32_t> const& taxon,
        std::uint32_t ledgerSequence,
        std::uint32_t limit,
        std::optional<ripple::uint256> const& cursorIn,
        boost::asio::yield_context yield
    ) const = 0;

    /**
     * @brief Fetches a specific ledger object.
     *
     * Currently the real fetch happens in doFetchLedgerObject and fetchLedgerObject attempts to fetch from Cache first
     * and only calls out to the real DB if a cache miss occurred.
     *
     * @param key The key of the object
     * @param sequence The ledger sequence to fetch for
     * @param yield The coroutine context
     * @return The object as a Blob on success; nullopt otherwise
     */
    std::optional<Blob>
    fetchLedgerObject(ripple::uint256 const& key, std::uint32_t sequence, boost::asio::yield_context yield) const;

    /**
     * @brief Fetches all ledger objects by their keys.
     *
     * Currently the real fetch happens in doFetchLedgerObjects and fetchLedgerObjects attempts to fetch from Cache
     * first and only calls out to the real DB for each of the keys that was not found in the cache.
     *
     * @param keys A vector with the keys of the objects to fetch
     * @param sequence The ledger sequence to fetch for
     * @param yield The coroutine context
     * @return A vector of ledger objects as Blobs
     */
    std::vector<Blob>
    fetchLedgerObjects(
        std::vector<ripple::uint256> const& keys,
        std::uint32_t sequence,
        boost::asio::yield_context yield
    ) const;

    /**
     * @brief The database-specific implementation for fetching a ledger object.
     *
     * @param key The key to fetch for
     * @param sequence The ledger sequence to fetch for
     * @param yield The coroutine context
     * @return The object as a Blob on success; nullopt otherwise
     */
    virtual std::optional<Blob>
    doFetchLedgerObject(ripple::uint256 const& key, std::uint32_t sequence, boost::asio::yield_context yield) const = 0;

    /**
     * @brief The database-specific implementation for fetching ledger objects.
     *
     * @param keys The keys to fetch for
     * @param sequence The ledger sequence to fetch for
     * @param yield The coroutine context
     * @return A vector of Blobs representing each fetched object
     */
    virtual std::vector<Blob>
    doFetchLedgerObjects(
        std::vector<ripple::uint256> const& keys,
        std::uint32_t sequence,
        boost::asio::yield_context yield
    ) const = 0;

    /**
     * @brief Returns the difference between ledgers.
     *
     * @param ledgerSequence The ledger sequence to fetch for
     * @param yield The coroutine context
     * @return A vector of LedgerObject representing the diff
     */
    virtual std::vector<LedgerObject>
    fetchLedgerDiff(std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;

    /**
     * @brief Fetches a page of ledger objects, ordered by key/index.
     *
     * @param cursor The cursor to resume fetching from
     * @param ledgerSequence The ledger sequence to fetch for
     * @param limit The maximum number of transactions per result page
     * @param outOfOrder If set to true max available sequence is used instead of ledgerSequence
     * @param yield The coroutine context
     * @return The ledger page
     */
    LedgerPage
    fetchLedgerPage(
        std::optional<ripple::uint256> const& cursor,
        std::uint32_t ledgerSequence,
        std::uint32_t limit,
        bool outOfOrder,
        boost::asio::yield_context yield
    ) const;

    /**
     * @brief Fetches the successor object.
     *
     * @param key The key to fetch for
     * @param ledgerSequence The ledger sequence to fetch for
     * @param yield The coroutine context
     * @return The successor on success; nullopt otherwise
     */
    std::optional<LedgerObject>
    fetchSuccessorObject(ripple::uint256 key, std::uint32_t ledgerSequence, boost::asio::yield_context yield) const;

    /**
     * @brief Fetches the successor key.
     *
     * The real fetch happens in doFetchSuccessorKey. This function will attempt to lookup the successor in the cache
     * first and only if it's not found in the cache will it fetch from the actual DB.
     *
     * @param key The key to fetch for
     * @param ledgerSequence The ledger sequence to fetch for
     * @param yield The coroutine context
     * @return The successor key on success; nullopt otherwise
     */
    std::optional<ripple::uint256>
    fetchSuccessorKey(ripple::uint256 key, std::uint32_t ledgerSequence, boost::asio::yield_context yield) const;

    /**
     * @brief Database-specific implementation of fetching the successor key.
     *
     * @param key The key to fetch for
     * @param ledgerSequence The ledger sequence to fetch for
     * @param yield The coroutine context
     * @return The successor on success; nullopt otherwise
     */
    virtual std::optional<ripple::uint256>
    doFetchSuccessorKey(ripple::uint256 key, std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;

    /**
     * @brief Fetches book offers.
     *
     * @param book Unsigned 256-bit integer.
     * @param ledgerSequence The ledger sequence to fetch for
     * @param limit Paging limit as to how many offers are returned per page.
     * @param yield The coroutine context
     * @return The book offers page
     */
    BookOffersPage
    fetchBookOffers(
        ripple::uint256 const& book,
        std::uint32_t ledgerSequence,
        std::uint32_t limit,
        boost::asio::yield_context yield
    ) const;

    /**
     * @brief Synchronously fetches the ledger range from DB.
     *
     * This function just wraps hardFetchLedgerRange(boost::asio::yield_context) using synchronous(FnType&&).
     *
     * @return The ledger range if available; nullopt otherwise
     */
    std::optional<LedgerRange>
    hardFetchLedgerRange() const;

    /**
     * @brief Fetches the ledger range from DB.
     *
     * @return The ledger range if available; nullopt otherwise
     */
    virtual std::optional<LedgerRange>
    hardFetchLedgerRange(boost::asio::yield_context yield) const = 0;

    /**
     * @brief Fetches the ledger range from DB retrying until no DatabaseTimeout is thrown.
     *
     * @return The ledger range if available; nullopt otherwise
     */
    std::optional<LedgerRange>
    hardFetchLedgerRangeNoThrow() const;

    /**
     * @brief Writes to a specific ledger.
*
|
||||
* @param ledgerHeader Ledger header.
|
||||
* @param blob r-value string serialization of ledger header.
|
||||
*/
|
||||
virtual void
|
||||
writeLedger(ripple::LedgerHeader const& ledgerHeader, std::string&& blob) = 0;
|
||||
|
||||
/**
|
||||
* @brief Writes a new ledger object.
|
||||
*
|
||||
* @param key The key to write the ledger object under
|
||||
* @param seq The ledger sequence to write for
|
||||
* @param blob The data to write
|
||||
*/
|
||||
virtual void
|
||||
writeLedgerObject(std::string&& key, std::uint32_t seq, std::string&& blob);
|
||||
|
||||
/**
|
||||
* @brief Writes a new transaction.
|
||||
*
|
||||
* @param hash The hash of the transaction
|
||||
* @param seq The ledger sequence to write for
|
||||
* @param date The timestamp of the entry
|
||||
* @param transaction The transaction data to write
|
||||
* @param metadata The metadata to write
|
||||
*/
|
||||
virtual void
|
||||
writeTransaction(
|
||||
std::string&& hash,
|
||||
std::uint32_t seq,
|
||||
std::uint32_t date,
|
||||
std::string&& transaction,
|
||||
std::string&& metadata
|
||||
) = 0;
|
||||
|
||||
/**
|
||||
* @brief Writes NFTs to the database.
|
||||
*
|
||||
* @param data A vector of NFTsData objects representing the NFTs
|
||||
*/
|
||||
virtual void
|
||||
writeNFTs(std::vector<NFTsData>&& data) = 0;
|
||||
|
||||
/**
|
||||
* @brief Write a new set of account transactions.
|
||||
*
|
||||
* @param data A vector of AccountTransactionsData objects representing the account transactions
|
||||
*/
|
||||
virtual void
|
||||
writeAccountTransactions(std::vector<AccountTransactionsData>&& data) = 0;
|
||||
|
||||
/**
|
||||
* @brief Write NFTs transactions.
|
||||
*
|
||||
* @param data A vector of NFTTransactionsData objects
|
||||
*/
|
||||
virtual void
|
||||
writeNFTTransactions(std::vector<NFTTransactionsData>&& data) = 0;
|
||||
|
||||
/**
|
||||
* @brief Write a new successor.
|
||||
*
|
||||
* @param key Key of the object that the passed successor will be the successor for
|
||||
* @param seq The ledger sequence to write for
|
||||
* @param successor The successor data to write
|
||||
*/
|
||||
virtual void
|
||||
writeSuccessor(std::string&& key, std::uint32_t seq, std::string&& successor) = 0;
|
||||
|
||||
/**
|
||||
* @brief Starts a write transaction with the DB. No-op for cassandra.
|
||||
*
|
||||
* Note: Can potentially be deprecated and removed.
|
||||
*/
|
||||
virtual void
|
||||
startWrites() const = 0;
|
||||
|
||||
/**
|
||||
* @brief Tells database we finished writing all data for a specific ledger.
|
||||
*
|
||||
* Uses doFinishWrites to synchronize with the pending writes.
|
||||
*
|
||||
* @param ledgerSequence The ledger sequence to finish writing for
|
||||
* @return true on success; false otherwise
|
||||
*/
|
||||
bool
|
||||
finishWrites(std::uint32_t ledgerSequence);
|
||||
|
||||
/**
|
||||
* @return true if database is overwhelmed; false otherwise
|
||||
*/
|
||||
virtual bool
|
||||
isTooBusy() const = 0;
|
||||
|
||||
/**
|
||||
* @return json object containing backend usage statistics
|
||||
*/
|
||||
virtual boost::json::object
|
||||
stats() const = 0;
|
||||
|
||||
private:
|
||||
virtual void
|
||||
doWriteLedgerObject(std::string&& key, std::uint32_t seq, std::string&& blob) = 0;
|
||||
|
||||
virtual bool
|
||||
doFinishWrites() = 0;
|
||||
};
|
||||
|
||||
} // namespace data
|
||||
using BackendInterface = data::BackendInterface;
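For orientation (not part of the diff): a minimal sketch of the cache-first read path described in the comments above, assuming a connected backend implementation and an active coroutine; the function name and `consume` of the result are illustrative.

// Sketch only: `backend` is any BackendInterface implementation; the yield
// context comes from the surrounding boost::asio coroutine.
std::optional<Blob>
readObject(BackendInterface const& backend, ripple::uint256 const& key, boost::asio::yield_context yield)
{
    auto const range = backend.hardFetchLedgerRangeNoThrow();
    if (!range)
        return std::nullopt;  // nothing written to the DB yet

    // fetchLedgerObject consults the cache first and only falls back to
    // doFetchLedgerObject (the real DB read) on a cache miss.
    return backend.fetchLedgerObject(key, range->maxSequence, yield);
}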
File diff suppressed because it is too large
@@ -17,30 +17,29 @@
 */
//==============================================================================

/** @file */
#pragma once

#include <ripple/basics/Log.h>
#include <ripple/basics/StringUtilities.h>
#include <ripple/ledger/ReadView.h>
#include <ripple/protocol/SField.h>
#include <ripple/protocol/STAccount.h>
#include <ripple/protocol/TxMeta.h>

#include <boost/container/flat_set.hpp>

#include <backend/Types.h>
#include <data/Types.h>

/**
 * @brief Struct used to keep track of what to write to account_transactions/account_tx tables
 * @brief Struct used to keep track of what to write to account_transactions/account_tx tables.
 */
struct AccountTransactionsData
{
struct AccountTransactionsData {
    boost::container::flat_set<ripple::AccountID> accounts;
    std::uint32_t ledgerSequence;
    std::uint32_t transactionIndex;
    std::uint32_t ledgerSequence{};
    std::uint32_t transactionIndex{};
    ripple::uint256 txHash;

    AccountTransactionsData(ripple::TxMeta& meta, ripple::uint256 const& txHash, beast::Journal& j)
    AccountTransactionsData(ripple::TxMeta& meta, ripple::uint256 const& txHash)
        : accounts(meta.getAffectedAccounts())
        , ledgerSequence(meta.getLgrSeq())
        , transactionIndex(meta.getIndex())
@@ -52,12 +51,11 @@ struct AccountTransactionsData
};

/**
 * @brief Represents a link from a tx to an NFT that was targeted/modified/created by it
 * @brief Represents a link from a tx to an NFT that was targeted/modified/created by it.
 *
 * Gets written to nf_token_transactions table and the like.
 */
struct NFTTransactionsData
{
struct NFTTransactionsData {
    ripple::uint256 tokenID;
    std::uint32_t ledgerSequence;
    std::uint32_t transactionIndex;
@@ -74,8 +72,7 @@ struct NFTTransactionsData
 *
 * Gets written to nf_tokens table and the like.
 */
struct NFTsData
{
struct NFTsData {
    ripple::uint256 tokenID;
    std::uint32_t ledgerSequence;

@@ -107,7 +104,8 @@ struct NFTsData
        ripple::uint256 const& tokenID,
        ripple::AccountID const& owner,
        ripple::Blob const& uri,
        ripple::TxMeta const& meta)
        ripple::TxMeta const& meta
    )
        : tokenID(tokenID), ledgerSequence(meta.getLgrSeq()), transactionIndex(meta.getIndex()), owner(owner), uri(uri)
    {
    }
@@ -133,41 +131,68 @@ struct NFTsData
        ripple::uint256 const& tokenID,
        std::uint32_t const ledgerSequence,
        ripple::AccountID const& owner,
        ripple::Blob const& uri)
        ripple::Blob const& uri
    )
        : tokenID(tokenID), ledgerSequence(ledgerSequence), owner(owner), uri(uri)
    {
    }
};

/**
 * @brief Check whether the supplied object is an offer.
 *
 * @param object The object to check
 * @return true if the object is an offer; false otherwise
 */
template <class T>
inline bool
isOffer(T const& object)
{
    short offer_bytes = (object[1] << 8) | object[2];
    return offer_bytes == 0x006f;
    static constexpr short OFFER_OFFSET = 0x006f;
    static constexpr short SHIFT = 8;

    short offer_bytes = (object[1] << SHIFT) | object[2];
    return offer_bytes == OFFER_OFFSET;
}

/**
 * @brief Check whether the supplied hex represents an offer object.
 *
 * @param object The object to check
 * @return true if the object is an offer; false otherwise
 */
template <class T>
inline bool
isOfferHex(T const& object)
{
    auto blob = ripple::strUnHex(4, object.begin(), object.begin() + 4);
    if (blob)
    {
        short offer_bytes = ((*blob)[1] << 8) | (*blob)[2];
        return offer_bytes == 0x006f;
    }
        return isOffer(*blob);
    return false;
}

/**
 * @brief Check whether the supplied object is a dir node.
 *
 * @param object The object to check
 * @return true if the object is a dir node; false otherwise
 */
template <class T>
inline bool
isDirNode(T const& object)
{
    short spaceKey = (object.data()[1] << 8) | object.data()[2];
    return spaceKey == 0x0064;
    static constexpr short DIR_NODE_SPACE_KEY = 0x0064;
    short const spaceKey = (object.data()[1] << 8) | object.data()[2];
    return spaceKey == DIR_NODE_SPACE_KEY;
}

/**
 * @brief Check whether the supplied object is a book dir.
 *
 * @param key The key into the object
 * @param object The object to check
 * @return true if the object is a book dir; false otherwise
 */
template <class T, class R>
inline bool
isBookDir(T const& key, R const& object)
@@ -179,33 +204,55 @@ isBookDir(T const& key, R const& object)
    return !sle[~ripple::sfOwner].has_value();
}

/**
 * @brief Get the book out of an offer object.
 *
 * @param offer The offer to get the book for
 * @return Book as ripple::uint256
 */
template <class T>
inline ripple::uint256
getBook(T const& offer)
{
    ripple::SerialIter it{offer.data(), offer.size()};
    ripple::SLE sle{it, {}};
    ripple::SLE const sle{it, {}};
    ripple::uint256 book = sle.getFieldH256(ripple::sfBookDirectory);

    return book;
}

/**
 * @brief Get the book base.
 *
 * @param key The key to get the book base out of
 * @return Book base as ripple::uint256
 */
template <class T>
inline ripple::uint256
getBookBase(T const& key)
{
    static constexpr size_t KEY_SIZE = 24;

    assert(key.size() == ripple::uint256::size());

    ripple::uint256 ret;
    for (size_t i = 0; i < 24; ++i)
    {
    for (size_t i = 0; i < KEY_SIZE; ++i)
        ret.data()[i] = key.data()[i];
    }

    return ret;
}

/**
 * @brief Stringify a ripple::uint256.
 *
 * @param input The input value
 * @return The input value as a string
 */
inline std::string
uint256ToString(ripple::uint256 const& uint)
uint256ToString(ripple::uint256 const& input)
{
    return {reinterpret_cast<const char*>(uint.data()), uint.size()};
    return {reinterpret_cast<char const*>(input.data()), ripple::uint256::size()};
}

/** @brief The ripple epoch start timestamp. Midnight on 1st January 2000. */
static constexpr std::uint32_t rippleEpochStart = 946684800;
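A note on the magic numbers above (not part of the diff): the two bytes being compared are the ledger entry's space key, so 0x006f is ASCII 'o' (offer) and 0x0064 is 'd' (dir node). A small hedged example of using these helpers; the source of the bytes is hypothetical.

// Illustrative only: `rawObject` stands in for any serialized ledger object
// obtained elsewhere (e.g. from a backend fetch).
Blob const rawObject = loadObjectFromSomewhere();  // hypothetical source
if (isOffer(rawObject))
{
    // For offers, the book directory can be recovered from the SLE...
    ripple::uint256 const book = getBook(rawObject);
    // ...and the book base is just the first 24 bytes of that key.
    ripple::uint256 const base = getBookBase(book);
}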
@@ -17,14 +17,14 @@
 */
//==============================================================================

#include <backend/LedgerCache.h>
#include <data/LedgerCache.h>

namespace Backend {
namespace data {

uint32_t
LedgerCache::latestLedgerSequence() const
{
    std::shared_lock lck{mtx_};
    std::shared_lock const lck{mtx_};
    return latestSeq_;
}

@@ -35,27 +35,21 @@ LedgerCache::update(std::vector<LedgerObject> const& objs, uint32_t seq, bool is
        return;

    {
        std::scoped_lock lck{mtx_};
        if (seq > latestSeq_)
        {
        std::scoped_lock const lck{mtx_};
        if (seq > latestSeq_) {
            assert(seq == latestSeq_ + 1 || latestSeq_ == 0);
            latestSeq_ = seq;
        }
        for (auto const& obj : objs)
        {
            if (obj.blob.size())
            {
                if (isBackground && deletes_.count(obj.key))
        for (auto const& obj : objs) {
            if (!obj.blob.empty()) {
                if (isBackground && deletes_.contains(obj.key))
                    continue;

                auto& e = map_[obj.key];
                if (seq > e.seq)
                {
                if (seq > e.seq) {
                    e = {seq, obj.blob};
                }
            }
            else
            {
            } else {
                map_.erase(obj.key);
                if (!full_ && !isBackground)
                    deletes_.insert(obj.key);
@@ -69,14 +63,14 @@ LedgerCache::getSuccessor(ripple::uint256 const& key, uint32_t seq) const
{
    if (!full_)
        return {};
    std::shared_lock{mtx_};
    successorReqCounter_++;
    std::shared_lock const lck{mtx_};
    ++successorReqCounter_.get();
    if (seq != latestSeq_)
        return {};
    auto e = map_.upper_bound(key);
    if (e == map_.end())
        return {};
    successorHitCounter_++;
    ++successorHitCounter_.get();
    return {{e->first, e->second.blob}};
}

@@ -85,7 +79,7 @@ LedgerCache::getPredecessor(ripple::uint256 const& key, uint32_t seq) const
{
    if (!full_)
        return {};
    std::shared_lock lck{mtx_};
    std::shared_lock const lck{mtx_};
    if (seq != latestSeq_)
        return {};
    auto e = map_.lower_bound(key);
@@ -98,16 +92,16 @@ LedgerCache::getPredecessor(ripple::uint256 const& key, uint32_t seq) const
std::optional<Blob>
LedgerCache::get(ripple::uint256 const& key, uint32_t seq) const
{
    std::shared_lock lck{mtx_};
    std::shared_lock const lck{mtx_};
    if (seq > latestSeq_)
        return {};
    objectReqCounter_++;
    ++objectReqCounter_.get();
    auto e = map_.find(key);
    if (e == map_.end())
        return {};
    if (seq < e->second.seq)
        return {};
    objectHitCounter_++;
    ++objectHitCounter_.get();
    return {e->second.blob};
}

@@ -124,7 +118,7 @@ LedgerCache::setFull()
        return;

    full_ = true;
    std::scoped_lock lck{mtx_};
    std::scoped_lock const lck{mtx_};
    deletes_.clear();
}

@@ -137,24 +131,24 @@ LedgerCache::isFull() const
size_t
LedgerCache::size() const
{
    std::shared_lock lck{mtx_};
    std::shared_lock const lck{mtx_};
    return map_.size();
}

float
LedgerCache::getObjectHitRate() const
{
    if (!objectReqCounter_)
    if (objectReqCounter_.get().value() == 0u)
        return 1;
    return ((float)objectHitCounter_) / objectReqCounter_;
    return static_cast<float>(objectHitCounter_.get().value()) / objectReqCounter_.get().value();
}

float
LedgerCache::getSuccessorHitRate() const
{
    if (!successorReqCounter_)
    if (successorReqCounter_.get().value() == 0u)
        return 1;
    return ((float)successorHitCounter_) / successorReqCounter_;
    return static_cast<float>(successorHitCounter_.get().value()) / successorReqCounter_.get().value();
}

} // namespace Backend
} // namespace data
src/data/LedgerCache.h (new file, 167 lines)
@@ -0,0 +1,167 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2022, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <ripple/basics/base_uint.h>
#include <ripple/basics/hardened_hash.h>
#include <data/Types.h>
#include <map>
#include <mutex>
#include <shared_mutex>
#include <util/prometheus/Prometheus.h>
#include <utility>
#include <vector>

namespace data {

/**
 * @brief Cache for an entire ledger.
 */
class LedgerCache {
    struct CacheEntry {
        uint32_t seq = 0;
        Blob blob;
    };

    // counters for fetchLedgerObject(s) hit rate
    std::reference_wrapper<util::prometheus::CounterInt> objectReqCounter_{PrometheusService::counterInt(
        "ledger_cache_counter_total_number",
        util::prometheus::Labels({{"type", "request"}, {"fetch", "ledger_objects"}}),
        "LedgerCache statistics"
    )};
    std::reference_wrapper<util::prometheus::CounterInt> objectHitCounter_{PrometheusService::counterInt(
        "ledger_cache_counter_total_number",
        util::prometheus::Labels({{"type", "cache_hit"}, {"fetch", "ledger_objects"}})
    )};

    // counters for fetchSuccessorKey hit rate
    std::reference_wrapper<util::prometheus::CounterInt> successorReqCounter_{PrometheusService::counterInt(
        "ledger_cache_counter_total_number",
        util::prometheus::Labels({{"type", "request"}, {"fetch", "successor_key"}}),
        "ledgerCache"
    )};
    std::reference_wrapper<util::prometheus::CounterInt> successorHitCounter_{PrometheusService::counterInt(
        "ledger_cache_counter_total_number",
        util::prometheus::Labels({{"type", "cache_hit"}, {"fetch", "successor_key"}})
    )};

    std::map<ripple::uint256, CacheEntry> map_;

    mutable std::shared_mutex mtx_;
    uint32_t latestSeq_ = 0;
    std::atomic_bool full_ = false;
    std::atomic_bool disabled_ = false;

    // temporary set to prevent background thread from writing already deleted data. not used when cache is full
    std::unordered_set<ripple::uint256, ripple::hardened_hash<>> deletes_;

public:
    /**
     * @brief Update the cache with new ledger objects.
     *
     * @param objs The ledger objects to update cache with
     * @param seq The sequence to update cache for
     * @param isBackground Should be set to true when writing old data from a background thread
     */
    void
    update(std::vector<LedgerObject> const& objs, uint32_t seq, bool isBackground = false);

    /**
     * @brief Fetch a cached object by its key and sequence number.
     *
     * @param key The key to fetch for
     * @param seq The sequence to fetch for
     * @return If found in cache, will return the cached Blob; otherwise nullopt is returned
     */
    std::optional<Blob>
    get(ripple::uint256 const& key, uint32_t seq) const;

    /**
     * @brief Gets a cached successor.
     *
     * Note: This function always returns std::nullopt when @ref isFull() returns false.
     *
     * @param key The key to fetch for
     * @param seq The sequence to fetch for
     * @return If found in cache, will return the cached successor; otherwise nullopt is returned
     */
    std::optional<LedgerObject>
    getSuccessor(ripple::uint256 const& key, uint32_t seq) const;

    /**
     * @brief Gets a cached predecessor.
     *
     * Note: This function always returns std::nullopt when @ref isFull() returns false.
     *
     * @param key The key to fetch for
     * @param seq The sequence to fetch for
     * @return If found in cache, will return the cached predecessor; otherwise nullopt is returned
     */
    std::optional<LedgerObject>
    getPredecessor(ripple::uint256 const& key, uint32_t seq) const;

    /**
     * @brief Disables the cache.
     */
    void
    setDisabled();

    /**
     * @brief Sets the full flag to true.
     *
     * This is used when the cache is loaded in its entirety at startup of the application. The cache can either be
     * loaded from DB, populated together with the initial ledger download (on first run) or downloaded from a peer
     * node (specified in config).
     */
    void
    setFull();

    /**
     * @return The latest ledger sequence for which cache is available.
     */
    uint32_t
    latestLedgerSequence() const;

    /**
     * @return true if the cache has all data for the most recent ledger; false otherwise
     */
    bool
    isFull() const;

    /**
     * @return The total size of the cache.
     */
    size_t
    size() const;

    /**
     * @return A number representing the success rate of hitting an object in the cache versus missing it.
     */
    float
    getObjectHitRate() const;

    /**
     * @return A number representing the success rate of hitting a successor in the cache versus missing it.
     */
    float
    getSuccessorHitRate() const;
};

} // namespace data
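A quick usage sketch of this cache (not part of the diff); `objs`, `key`, `seq` and the consumer are illustrative, and the writer/reader split mirrors how the ETL writer and RPC readers would share the instance.

// Sketch only, assuming a cache shared between one writer and many readers.
data::LedgerCache cache;
cache.update(objs, seq);   // writer: publish all objects for ledger `seq`
cache.setFull();           // once the entire ledger has been loaded

if (auto const blob = cache.get(key, seq))           // reader: point lookup
    consume(*blob);                                  // illustrative consumer
if (auto const succ = cache.getSuccessor(key, seq))  // only answers when isFull()
    consume(succ->blob);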
@@ -1,4 +1,5 @@
# Clio Backend
# Backend

## Background
The backend of Clio is responsible for handling the proper reading and writing of past ledger data from and to a given database. Currently, Cassandra and ScyllaDB are the only supported production-ready databases. Support for other database types can be added by creating new implementations that implement the virtual methods of `BackendInterface.h`. Then, following the factory object design pattern, add logic to `BackendFactory.h` that returns the new database interface for the corresponding `type` in Clio's configuration file.
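To make that concrete, a hypothetical sketch of what such a factory branch could look like; `make_Backend`, `NewBackend` and the exact config accessors are illustrative and not taken from this diff.

// Hypothetical sketch only; names and config access are illustrative.
std::shared_ptr<BackendInterface>
make_Backend(util::Config const& config)
{
    auto const type = config.value<std::string>("database.type");

    if (type == "cassandra")  // existing production-ready path
        return std::make_shared<data::cassandra::CassandraBackend>(/* settings */);

    if (type == "newdb")      // added branch for the new implementation
        return std::make_shared<NewBackend>(/* settings */);

    throw std::runtime_error("Unknown database type: " + type);
}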
@@ -21,51 +21,58 @@

#include <ripple/basics/base_uint.h>
#include <ripple/protocol/AccountID.h>

#include <optional>
#include <string>
#include <utility>
#include <vector>

namespace Backend {

// *** return types
namespace data {

using Blob = std::vector<unsigned char>;

struct LedgerObject
{
/**
 * @brief Represents an object in the ledger.
 */
struct LedgerObject {
    ripple::uint256 key;
    Blob blob;

    bool
    operator==(const LedgerObject& other) const
    operator==(LedgerObject const& other) const
    {
        return key == other.key && blob == other.blob;
    }
};

struct LedgerPage
{
/**
 * @brief Represents a page of LedgerObjects.
 */
struct LedgerPage {
    std::vector<LedgerObject> objects;
    std::optional<ripple::uint256> cursor;
};
struct BookOffersPage
{

/**
 * @brief Represents a page of book offer objects.
 */
struct BookOffersPage {
    std::vector<LedgerObject> offers;
    std::optional<ripple::uint256> cursor;
};
struct TransactionAndMetadata
{

/**
 * @brief Represents a transaction and its metadata bundled together.
 */
struct TransactionAndMetadata {
    Blob transaction;
    Blob metadata;
    std::uint32_t ledgerSequence = 0;
    std::uint32_t date = 0;

    TransactionAndMetadata() = default;
    TransactionAndMetadata(
        Blob const& transaction,
        Blob const& metadata,
        std::uint32_t ledgerSequence,
        std::uint32_t date)
        : transaction{transaction}, metadata{metadata}, ledgerSequence{ledgerSequence}, date{date}
    TransactionAndMetadata(Blob transaction, Blob metadata, std::uint32_t ledgerSequence, std::uint32_t date)
        : transaction{std::move(transaction)}, metadata{std::move(metadata)}, ledgerSequence{ledgerSequence}, date{date}
    {
    }

@@ -78,17 +85,19 @@ struct TransactionAndMetadata
    }

    bool
    operator==(const TransactionAndMetadata& other) const
    operator==(TransactionAndMetadata const& other) const
    {
        return transaction == other.transaction && metadata == other.metadata &&
            ledgerSequence == other.ledgerSequence && date == other.date;
    }
};

struct TransactionsCursor
{
    std::uint32_t ledgerSequence;
    std::uint32_t transactionIndex;
/**
 * @brief Represents a cursor into the transactions table.
 */
struct TransactionsCursor {
    std::uint32_t ledgerSequence = 0;
    std::uint32_t transactionIndex = 0;

    TransactionsCursor() = default;
    TransactionsCursor(std::uint32_t ledgerSequence, std::uint32_t transactionIndex)
@@ -101,9 +110,6 @@ struct TransactionsCursor
    {
    }

    TransactionsCursor&
    operator=(TransactionsCursor const&) = default;

    bool
    operator==(TransactionsCursor const& other) const = default;

@@ -114,27 +120,31 @@ struct TransactionsCursor
    }
};

struct TransactionsAndCursor
{
/**
 * @brief Represents a bundle of transactions with metadata and a cursor to the next page.
 */
struct TransactionsAndCursor {
    std::vector<TransactionAndMetadata> txns;
    std::optional<TransactionsCursor> cursor;
};

struct NFT
{
/**
 * @brief Represents an NFToken.
 */
struct NFT {
    ripple::uint256 tokenID;
    std::uint32_t ledgerSequence;
    std::uint32_t ledgerSequence{};
    ripple::AccountID owner;
    Blob uri;
    bool isBurned;
    bool isBurned{};

    NFT() = default;
    NFT(ripple::uint256 const& tokenID,
        std::uint32_t ledgerSequence,
        ripple::AccountID const& owner,
        Blob const& uri,
        Blob uri,
        bool isBurned)
        : tokenID{tokenID}, ledgerSequence{ledgerSequence}, owner{owner}, uri{uri}, isBurned{isBurned}
        : tokenID{tokenID}, ledgerSequence{ledgerSequence}, owner{owner}, uri{std::move(uri)}, isBurned{isBurned}
    {
    }

@@ -143,9 +153,8 @@ struct NFT
    {
    }

    // clearly two tokens are the same if they have the same ID, but this
    // struct stores the state of a given token at a given ledger sequence, so
    // we also need to compare with ledgerSequence
    // clearly two tokens are the same if they have the same ID, but this struct stores the state of a given token at a
    // given ledger sequence, so we also need to compare with ledgerSequence.
    bool
    operator==(NFT const& other) const
    {
@@ -153,12 +162,21 @@ struct NFT
    }
};

struct LedgerRange
{
    std::uint32_t minSequence;
    std::uint32_t maxSequence;
struct NFTsAndCursor {
    std::vector<NFT> nfts;
    std::optional<ripple::uint256> cursor;
};

/**
 * @brief Stores a range of sequences as a min and max pair.
 */
struct LedgerRange {
    std::uint32_t minSequence = 0;
    std::uint32_t maxSequence = 0;
};

constexpr ripple::uint256 firstKey{"0000000000000000000000000000000000000000000000000000000000000000"};
constexpr ripple::uint256 lastKey{"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"};
constexpr ripple::uint256 hi192{"0000000000000000000000000000000000000000000000001111111111111111"};
} // namespace Backend

} // namespace data
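A brief sketch of the cursor-driven paging that NFTsAndCursor enables (not part of the diff); `backend`, `issuer`, `seq`, `yield` and the consumer are assumed to exist, and the page size of 100 is arbitrary.

// Sketch only: page through all NFTs of an issuer using NFTsAndCursor.
std::optional<ripple::uint256> cursor;
do {
    auto const page = backend.fetchNFTsByIssuer(issuer, std::nullopt, seq, 100, cursor, yield);
    for (NFT const& nft : page.nfts)
        consume(nft);          // illustrative consumer
    cursor = page.cursor;      // becomes nullopt once the final page was returned
} while (cursor);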
src/data/cassandra/Concepts.h (new file, 126 lines)
@@ -0,0 +1,126 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <data/cassandra/Types.h>

#include <boost/asio/spawn.hpp>
#include <boost/json.hpp>

#include <chrono>
#include <concepts>
#include <optional>
#include <string>

namespace data::cassandra {

/**
 * @brief The requirements of a settings provider.
 */
template <typename T>
concept SomeSettingsProvider = requires(T a) {
    {
        a.getSettings()
    } -> std::same_as<Settings>;
    {
        a.getKeyspace()
    } -> std::same_as<std::string>;
    {
        a.getTablePrefix()
    } -> std::same_as<std::optional<std::string>>;
    {
        a.getReplicationFactor()
    } -> std::same_as<uint16_t>;
    {
        a.getTtl()
    } -> std::same_as<uint16_t>;
};

/**
 * @brief The requirements of an execution strategy.
 */
template <typename T>
concept SomeExecutionStrategy = requires(
    T a,
    Settings settings,
    Handle handle,
    Statement statement,
    std::vector<Statement> statements,
    PreparedStatement prepared,
    boost::asio::yield_context token
) {
    {
        T(settings, handle)
    };
    {
        a.sync()
    } -> std::same_as<void>;
    {
        a.isTooBusy()
    } -> std::same_as<bool>;
    {
        a.writeSync(statement)
    } -> std::same_as<ResultOrError>;
    {
        a.writeSync(prepared)
    } -> std::same_as<ResultOrError>;
    {
        a.write(prepared)
    } -> std::same_as<void>;
    {
        a.write(std::move(statements))
    } -> std::same_as<void>;
    {
        a.read(token, prepared)
    } -> std::same_as<ResultOrError>;
    {
        a.read(token, statement)
    } -> std::same_as<ResultOrError>;
    {
        a.read(token, statements)
    } -> std::same_as<ResultOrError>;
    {
        a.readEach(token, statements)
    } -> std::same_as<std::vector<Result>>;
    {
        a.stats()
    } -> std::same_as<boost::json::object>;
};

/**
 * @brief The requirements of a retry policy.
 */
template <typename T>
concept SomeRetryPolicy = requires(T a, boost::asio::io_context ioc, CassandraError err, uint32_t attempt) {
    {
        T(ioc)
    };
    {
        a.shouldRetry(err)
    } -> std::same_as<bool>;
    {
        a.retry([]() {})
    } -> std::same_as<void>;
    {
        a.calculateDelay(attempt)
    } -> std::same_as<std::chrono::milliseconds>;
};

} // namespace data::cassandra
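An illustrative (not from the diff) minimal type that would satisfy SomeSettingsProvider, e.g. as a test mock; all returned values are placeholders, and Settings is assumed to be default-constructible here.

// Sketch only: a fake provider for tests; values are placeholders.
struct FakeSettingsProvider {
    data::cassandra::Settings getSettings() const { return {}; }
    std::string getKeyspace() const { return "clio"; }
    std::optional<std::string> getTablePrefix() const { return std::nullopt; }
    uint16_t getReplicationFactor() const { return 1; }
    uint16_t getTtl() const { return 0; }
};
static_assert(data::cassandra::SomeSettingsProvider<FakeSettingsProvider>);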
@@ -22,33 +22,35 @@
#include <cassandra.h>

#include <string>
#include <utility>

namespace Backend::Cassandra {
namespace data::cassandra {

/**
 * @brief A simple container for both error message and error code
 * @brief A simple container for both error message and error code.
 */
class CassandraError
{
class CassandraError {
    std::string message_;
    uint32_t code_;
    uint32_t code_{};

public:
    CassandraError() = default;  // default constructible required by Expected
    CassandraError(std::string message, uint32_t code) : message_{message}, code_{code}
    CassandraError(std::string message, uint32_t code) : message_{std::move(message)}, code_{code}
    {
    }

    template <typename T>
    friend std::string
    operator+(T const& lhs, CassandraError const& rhs) requires std::is_convertible_v<T, std::string>
    operator+(T const& lhs, CassandraError const& rhs)
        requires std::is_convertible_v<T, std::string>
    {
        return lhs + rhs.message();
    }

    template <typename T>
    friend bool
    operator==(T const& lhs, CassandraError const& rhs) requires std::is_convertible_v<T, std::string>
    operator==(T const& lhs, CassandraError const& rhs)
        requires std::is_convertible_v<T, std::string>
    {
        return lhs == rhs.message();
    }
@@ -67,28 +69,38 @@ public:
        return os;
    }

    /**
     * @return The final error message as a std::string
     */
    std::string
    message() const
    {
        return message_;
    }

    /**
     * @return The error code
     */
    uint32_t
    code() const
    {
        return code_;
    }

    /**
     * @return true if the wrapped error is considered a timeout; false otherwise
     */
    bool
    isTimeout() const
    {
        if (code_ == CASS_ERROR_LIB_NO_HOSTS_AVAILABLE or code_ == CASS_ERROR_LIB_REQUEST_TIMED_OUT or
        return code_ == CASS_ERROR_LIB_NO_HOSTS_AVAILABLE or code_ == CASS_ERROR_LIB_REQUEST_TIMED_OUT or
            code_ == CASS_ERROR_SERVER_UNAVAILABLE or code_ == CASS_ERROR_SERVER_OVERLOADED or
            code_ == CASS_ERROR_SERVER_READ_TIMEOUT)
            return true;
        return false;
            code_ == CASS_ERROR_SERVER_READ_TIMEOUT;
    }

    /**
     * @return true if the wrapped error is an invalid query; false otherwise
     */
    bool
    isInvalidQuery() const
    {
@@ -96,4 +108,4 @@ public:
    }
};

} // namespace Backend::Cassandra
} // namespace data::cassandra
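A hedged usage sketch of this wrapper (not part of the diff); `res` is assumed to be a ResultOrError returned from an execute call, with Expected-style access, and the backoff hook is hypothetical.

// Sketch only: classify a failed query result.
if (not res) {
    auto const& err = res.error();
    if (err.isTimeout())
        retryLater();  // hypothetical backoff hook
    else
        throw std::runtime_error("query failed: " + err);  // uses the friend operator+
}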
@@ -17,9 +17,9 @@
 */
//==============================================================================

#include <backend/cassandra/Handle.h>
#include <data/cassandra/Handle.h>

namespace Backend::Cassandra {
namespace data::cassandra {

Handle::Handle(Settings clusterSettings) : cluster_{clusterSettings}
{
@@ -88,17 +88,17 @@ std::vector<Handle::FutureType>
Handle::asyncExecuteEach(std::vector<Statement> const& statements) const
{
    std::vector<Handle::FutureType> futures;
    futures.reserve(statements.size());
    for (auto const& statement : statements)
        futures.push_back(cass_session_execute(session_, statement));
        futures.emplace_back(cass_session_execute(session_, statement));
    return futures;
}

Handle::MaybeErrorType
Handle::executeEach(std::vector<Statement> const& statements) const
{
    for (auto futures = asyncExecuteEach(statements); auto const& future : futures)
    {
        if (auto const rc = future.await(); not rc)
    for (auto futures = asyncExecuteEach(statements); auto const& future : futures) {
        if (auto rc = future.await(); not rc)
            return rc;
    }

@@ -145,11 +145,12 @@ Handle::asyncExecute(std::vector<Statement> const& statements, std::function<voi
Handle::PreparedStatementType
Handle::prepare(std::string_view query) const
{
    Handle::FutureType future = cass_session_prepare(session_, query.data());
    if (auto const rc = future.await(); rc)
    Handle::FutureType const future = cass_session_prepare(session_, query.data());
    auto const rc = future.await();
    if (rc)
        return cass_future_get_prepared(future);
    else

    throw std::runtime_error(rc.error().message());
}

} // namespace Backend::Cassandra
} // namespace data::cassandra
@@ -19,15 +19,15 @@

#pragma once

#include <backend/cassandra/Error.h>
#include <backend/cassandra/Types.h>
#include <backend/cassandra/impl/Batch.h>
#include <backend/cassandra/impl/Cluster.h>
#include <backend/cassandra/impl/Future.h>
#include <backend/cassandra/impl/ManagedObject.h>
#include <backend/cassandra/impl/Result.h>
#include <backend/cassandra/impl/Session.h>
#include <backend/cassandra/impl/Statement.h>
#include <data/cassandra/Error.h>
#include <data/cassandra/Types.h>
#include <data/cassandra/impl/Batch.h>
#include <data/cassandra/impl/Cluster.h>
#include <data/cassandra/impl/Future.h>
#include <data/cassandra/impl/ManagedObject.h>
#include <data/cassandra/impl/Result.h>
#include <data/cassandra/impl/Session.h>
#include <data/cassandra/impl/Statement.h>
#include <util/Expected.h>

#include <cassandra.h>
@@ -37,13 +37,12 @@
#include <iterator>
#include <vector>

namespace Backend::Cassandra {
namespace data::cassandra {

/**
 * @brief Represents a handle to the cassandra database cluster
 */
class Handle
{
class Handle {
    detail::Cluster cluster_;
    detail::Session session_;

@@ -57,28 +56,31 @@ public:
    using ResultType = Result;

    /**
     * @brief Construct a new handle from a @ref Settings object
     * @brief Construct a new handle from a @ref detail::Settings object.
     *
     * @param clusterSettings The settings to use
     */
    explicit Handle(Settings clusterSettings = Settings::defaultSettings());

    /**
     * @brief Construct a new handle with default settings and only by setting
     * the contact points
     * @brief Construct a new handle with default settings and only by setting the contact points.
     *
     * @param contactPoints The contact points to use instead of settings
     */
    explicit Handle(std::string_view contactPoints);

    /**
     * @brief Disconnects gracefully if possible
     * @brief Disconnects gracefully if possible.
     */
    ~Handle();

    /**
     * @brief Move is supported
     * @brief Move is supported.
     */
    Handle(Handle&&) = default;

    /**
     * @brief Connect to the cluster asynchronously
     * @brief Connect to the cluster asynchronously.
     *
     * @return A future
     */
@@ -86,31 +88,37 @@ public:
    asyncConnect() const;

    /**
     * @brief Synchronous version of the above
     * @brief Synchronous version of the above.
     *
     * See @ref asyncConnect() const for how this works.
     *
     * @return Possibly an error
     */
    [[nodiscard]] MaybeErrorType
    connect() const;

    /**
     * @brief Connect to the specified keyspace asynchronously
     * @brief Connect to the specified keyspace asynchronously.
     *
     * @param keyspace The keyspace to use
     * @return A future
     */
    [[nodiscard]] FutureType
    asyncConnect(std::string_view keyspace) const;

    /**
     * @brief Synchronous version of the above
     * @brief Synchronous version of the above.
     *
     * See @ref asyncConnect(std::string_view) const for how this works.
     *
     * @param keyspace The keyspace to use
     * @return Possibly an error
     */
    [[nodiscard]] MaybeErrorType
    connect(std::string_view keyspace) const;

    /**
     * @brief Disconnect from the cluster asynchronously
     * @brief Disconnect from the cluster asynchronously.
     *
     * @return A future
     */
@@ -118,32 +126,40 @@ public:
    asyncDisconnect() const;

    /**
     * @brief Synchronous version of the above
     * @brief Synchronous version of the above.
     *
     * See @ref asyncDisconnect() const for how this works.
     *
     * @return Possibly an error
     */
    [[maybe_unused]] MaybeErrorType
    disconnect() const;

    /**
     * @brief Reconnect to the specified keyspace asynchronously
     * @brief Reconnect to the specified keyspace asynchronously.
     *
     * @param keyspace The keyspace to use
     * @return A future
     */
    [[nodiscard]] FutureType
    asyncReconnect(std::string_view keyspace) const;

    /**
     * @brief Synchronous version of the above
     * @brief Synchronous version of the above.
     *
     * See @ref asyncReconnect(std::string_view) const for how this works.
     *
     * @param keyspace The keyspace to use
     * @return Possibly an error
     */
    [[nodiscard]] MaybeErrorType
    reconnect(std::string_view keyspace) const;

    /**
     * @brief Execute a simple query with optional args asynchronously
     * @brief Execute a simple query with optional args asynchronously.
     *
     * @param query The query to execute
     * @param args The arguments to bind for execution
     * @return A future
     */
    template <typename... Args>
@@ -155,10 +171,13 @@ public:
    }

    /**
     * @brief Synchronous version of the above
     * @brief Synchronous version of the above.
     *
     * See @ref asyncExecute(std::string_view, Args&&...) const for how this
     * works.
     * See asyncExecute(std::string_view, Args&&...) const for how this works.
     *
     * @param query The query to execute
     * @param args The arguments to bind for execution
     * @return The result or an error
     */
    template <typename... Args>
    [[maybe_unused]] ResultOrErrorType
@@ -168,30 +187,34 @@ public:
    }

    /**
     * @brief Execute each of the statements asynchronously
     * @brief Execute each of the statements asynchronously.
     *
     * Batched version is not always the right option. Especially since it only
     * supports INSERT, UPDATE and DELETE statements.
     * This can be used as an alternative when statements need to execute in
     * bulk.
     * Batched version is not always the right option.
     * Especially since it only supports INSERT, UPDATE and DELETE statements.
     * This can be used as an alternative when statements need to execute in bulk.
     *
     * @param statements The statements to execute
     * @return A vector of future objects
     */
    [[nodiscard]] std::vector<FutureType>
    asyncExecuteEach(std::vector<StatementType> const& statements) const;

    /**
     * @brief Synchronous version of the above
     * @brief Synchronous version of the above.
     *
     * See @ref asyncExecuteEach(std::vector<StatementType> const&) const for
     * how this works.
     * See @ref asyncExecuteEach(std::vector<StatementType> const&) const for how this works.
     *
     * @param statements The statements to execute
     * @return Possibly an error
     */
    [[maybe_unused]] MaybeErrorType
    executeEach(std::vector<StatementType> const& statements) const;

    /**
     * @brief Execute a prepared statement with optional args asynchronously
     * @brief Execute a prepared statement with optional args asynchronously.
     *
     * @param statement The prepared statement to execute
     * @param args The arguments to bind for execution
     * @return A future
     */
    template <typename... Args>
@@ -203,10 +226,13 @@ public:
    }

    /**
     * @brief Synchronous version of the above
     * @brief Synchronous version of the above.
     *
     * See @ref asyncExecute(std::vector<StatementType> const&, Args&&...) const
     * for how this works.
     * See asyncExecute(std::vector<StatementType> const&, Args&&...) const for how this works.
     *
     * @param statement The prepared statement to bind and execute
     * @param args The arguments to bind for execution
     * @return The result or an error
     */
    template <typename... Args>
    [[maybe_unused]] ResultOrErrorType
@@ -216,61 +242,70 @@ public:
    }

    /**
     * @brief Execute one (bound or simple) statement asynchronously
     * @brief Execute one (bound or simple) statement asynchronously.
     *
     * @param statement The statement to execute
     * @return A future
     */
    [[nodiscard]] FutureType
    asyncExecute(StatementType const& statement) const;

    /**
     * @brief Execute one (bound or simple) statement asynchronously with a
     * callback
     * @brief Execute one (bound or simple) statement asynchronously with a callback.
     *
     * @param statement The statement to execute
     * @param cb The callback to execute when data is ready
     * @return A future that holds onto the callback provided
     */
    [[nodiscard]] FutureWithCallbackType
    asyncExecute(StatementType const& statement, std::function<void(ResultOrErrorType)>&& cb) const;

    /**
     * @brief Synchronous version of the above
     * @brief Synchronous version of the above.
     *
     * See @ref asyncExecute(StatementType const&) const for how this
     * works.
     * See @ref asyncExecute(StatementType const&) const for how this works.
     *
     * @param statement The statement to execute
     * @return The result or an error
     */
    [[maybe_unused]] ResultOrErrorType
    execute(StatementType const& statement) const;

    /**
     * @brief Execute a batch of (bound or simple) statements asynchronously
     * @brief Execute a batch of (bound or simple) statements asynchronously.
     *
     * @param statements The statements to execute
     * @return A future
     */
    [[nodiscard]] FutureType
    asyncExecute(std::vector<StatementType> const& statements) const;

    /**
     * @brief Synchronous version of the above
     * @brief Synchronous version of the above.
     *
     * See @ref asyncExecute(std::vector<StatementType> const&) const for how
     * this works.
     * See @ref asyncExecute(std::vector<StatementType> const&) const for how this works.
     *
     * @param statements The statements to execute
     * @return Possibly an error
     */
    [[maybe_unused]] MaybeErrorType
    execute(std::vector<StatementType> const& statements) const;

    /**
     * @brief Execute a batch of (bound or simple) statements asynchronously
     * with a completion callback
     * @brief Execute a batch of (bound or simple) statements asynchronously with a completion callback.
     *
     * @param statements The statements to execute
     * @param cb The callback to execute when data is ready
     * @return A future that holds onto the callback provided
     */
    [[nodiscard]] FutureWithCallbackType
    asyncExecute(std::vector<StatementType> const& statements, std::function<void(ResultOrErrorType)>&& cb) const;

    /**
     * @brief Prepare a statement
     * @brief Prepare a statement.
     *
     * @return A @ref PreparedStatementType
     * @param query
     * @return A prepared statement
     * @throws std::runtime_error with underlying error description on failure
     */
    [[nodiscard]] PreparedStatementType
@@ -278,12 +313,13 @@ public:
};

/**
 * @brief Extracts the results into series of std::tuple<Types...> by creating a
 * simple wrapper with an STL input iterator inside.
 * @brief Extracts the results into series of std::tuple<Types...> by creating a simple wrapper with an STL input
 * iterator inside.
 *
 * You can call .begin() and .end() in order to iterate as usual.
 * This also means that you can use it in a range-based for or with some
 * algorithms.
 * This also means that you can use it in a range-based for or with some algorithms.
 *
 * @param result The result to iterate
 */
template <typename... Types>
[[nodiscard]] detail::ResultExtractor<Types...>
@@ -292,4 +328,4 @@ extract(Handle::ResultType const& result)
    return {result};
}

} // namespace Backend::Cassandra
} // namespace data::cassandra
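For orientation, a sketch of driving the Handle API end to end (not part of the diff); the contact point, keyspace, query text, column types and Expected-style result access are all illustrative assumptions.

// Sketch only: connect, run a query, and iterate typed rows via extract.
data::cassandra::Handle handle{"127.0.0.1"};
if (auto const rc = handle.connect("clio"); not rc)
    throw std::runtime_error(rc.error().message());

auto res = handle.execute("SELECT hash, ledger_sequence FROM transactions LIMIT 10");
if (res) {
    for (auto [hash, seq] : extract<Blob, int64_t>(res.value()))  // range-based iteration
        consume(hash, seq);  // illustrative consumer
}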
@@ -19,17 +19,17 @@

#pragma once

#include <backend/cassandra/Concepts.h>
#include <backend/cassandra/Handle.h>
#include <backend/cassandra/SettingsProvider.h>
#include <backend/cassandra/Types.h>
#include <config/Config.h>
#include <log/Logger.h>
#include <data/cassandra/Concepts.h>
#include <data/cassandra/Handle.h>
#include <data/cassandra/SettingsProvider.h>
#include <data/cassandra/Types.h>
#include <util/Expected.h>
#include <util/config/Config.h>
#include <util/log/Logger.h>

#include <fmt/compile.h>

namespace Backend::Cassandra {
namespace data::cassandra {

template <SomeSettingsProvider SettingsProviderType>
[[nodiscard]] std::string inline qualifiedTableName(SettingsProviderType const& provider, std::string_view name)
@@ -38,17 +38,11 @@ template <SomeSettingsProvider SettingsProviderType>
}

/**
 * @brief Manages the DB schema and provides access to prepared statements
 * @brief Manages the DB schema and provides access to prepared statements.
 */
template <SomeSettingsProvider SettingsProviderType>
class Schema
{
    // Current schema version.
    // Update this every time you update the schema.
    // Migrations will be run automatically based on this value.
    static constexpr uint16_t version = 1u;

    clio::Logger log_{"Backend"};
class Schema {
    util::Logger log_{"Backend"};
    std::reference_wrapper<SettingsProviderType const> settingsProvider_;

public:
@@ -67,7 +61,8 @@ public:
            AND durable_writes = true
        )",
        settingsProvider_.get().getKeyspace(),
        settingsProvider_.get().getReplicationFactor());
        settingsProvider_.get().getReplicationFactor()
    );
    }();

    // =======================
@@ -90,7 +85,8 @@ public:
                AND default_time_to_live = {}
            )",
            qualifiedTableName(settingsProvider_.get(), "objects"),
            settingsProvider_.get().getTtl()));
            settingsProvider_.get().getTtl()
        ));

        statements.emplace_back(fmt::format(
            R"(
@@ -105,7 +101,8 @@ public:
                WITH default_time_to_live = {}
            )",
            qualifiedTableName(settingsProvider_.get(), "transactions"),
            settingsProvider_.get().getTtl()));
            settingsProvider_.get().getTtl()
        ));

        statements.emplace_back(fmt::format(
            R"(
@@ -118,7 +115,8 @@ public:
                WITH default_time_to_live = {}
            )",
            qualifiedTableName(settingsProvider_.get(), "ledger_transactions"),
            settingsProvider_.get().getTtl()));
            settingsProvider_.get().getTtl()
        ));

        statements.emplace_back(fmt::format(
            R"(
@@ -132,7 +130,8 @@ public:
                WITH default_time_to_live = {}
            )",
            qualifiedTableName(settingsProvider_.get(), "successor"),
            settingsProvider_.get().getTtl()));
            settingsProvider_.get().getTtl()
        ));

        statements.emplace_back(fmt::format(
            R"(
@@ -145,7 +144,8 @@ public:
                WITH default_time_to_live = {}
            )",
            qualifiedTableName(settingsProvider_.get(), "diff"),
            settingsProvider_.get().getTtl()));
            settingsProvider_.get().getTtl()
        ));

        statements.emplace_back(fmt::format(
            R"(
@@ -160,7 +160,8 @@ public:
                AND default_time_to_live = {}
            )",
            qualifiedTableName(settingsProvider_.get(), "account_tx"),
            settingsProvider_.get().getTtl()));
            settingsProvider_.get().getTtl()
        ));

        statements.emplace_back(fmt::format(
            R"(
@@ -172,7 +173,8 @@ public:
                WITH default_time_to_live = {}
            )",
            qualifiedTableName(settingsProvider_.get(), "ledgers"),
            settingsProvider_.get().getTtl()));
            settingsProvider_.get().getTtl()
        ));

        statements.emplace_back(fmt::format(
            R"(
@@ -184,7 +186,8 @@ public:
                WITH default_time_to_live = {}
            )",
            qualifiedTableName(settingsProvider_.get(), "ledger_hashes"),
            settingsProvider_.get().getTtl()));
            settingsProvider_.get().getTtl()
        ));

        statements.emplace_back(fmt::format(
            R"(
@@ -194,7 +197,8 @@ public:
                sequence bigint
                )
            )",
            qualifiedTableName(settingsProvider_.get(), "ledger_range")));
            qualifiedTableName(settingsProvider_.get(), "ledger_range")
        ));

        statements.emplace_back(fmt::format(
            R"(
@@ -210,7 +214,8 @@ public:
                AND default_time_to_live = {}
            )",
            qualifiedTableName(settingsProvider_.get(), "nf_tokens"),
            settingsProvider_.get().getTtl()));
            settingsProvider_.get().getTtl()
        ));

        statements.emplace_back(fmt::format(
            R"(
@@ -225,7 +230,8 @@ public:
                AND default_time_to_live = {}
            )",
            qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2"),
            settingsProvider_.get().getTtl()));
            settingsProvider_.get().getTtl()
        ));

        statements.emplace_back(fmt::format(
            R"(
@@ -240,7 +246,8 @@ public:
                AND default_time_to_live = {}
            )",
            qualifiedTableName(settingsProvider_.get(), "nf_token_uris"),
            settingsProvider_.get().getTtl()));
            settingsProvider_.get().getTtl()
        ));

        statements.emplace_back(fmt::format(
            R"(
@@ -255,16 +262,16 @@ public:
                AND default_time_to_live = {}
            )",
            qualifiedTableName(settingsProvider_.get(), "nf_token_transactions"),
            settingsProvider_.get().getTtl()));
            settingsProvider_.get().getTtl()
        ));

        return statements;
    }();

    /**
     * @brief Prepared statements holder
     * @brief Prepared statements holder.
     */
    class Statements
    {
    class Statements {
        std::reference_wrapper<SettingsProviderType const> settingsProvider_;
        std::reference_wrapper<Handle const> handle_;

@@ -285,7 +292,8 @@ public:
                    (key, sequence, object)
                    VALUES (?, ?, ?)
                )",
                qualifiedTableName(settingsProvider_.get(), "objects")));
                qualifiedTableName(settingsProvider_.get(), "objects")
            ));
        }();

        PreparedStatement insertTransaction = [this]() {
@@ -295,7 +303,8 @@ public:
                    (hash, ledger_sequence, date, transaction, metadata)
                    VALUES (?, ?, ?, ?, ?)
                )",
                qualifiedTableName(settingsProvider_.get(), "transactions")));
                qualifiedTableName(settingsProvider_.get(), "transactions")
            ));
        }();

        PreparedStatement insertLedgerTransaction = [this]() {
@@ -305,7 +314,8 @@ public:
                    (ledger_sequence, hash)
                    VALUES (?, ?)
                )",
                qualifiedTableName(settingsProvider_.get(), "ledger_transactions")));
                qualifiedTableName(settingsProvider_.get(), "ledger_transactions")
            ));
        }();

        PreparedStatement insertSuccessor = [this]() {
@@ -315,7 +325,8 @@ public:
                    (key, seq, next)
                    VALUES (?, ?, ?)
                )",
                qualifiedTableName(settingsProvider_.get(), "successor")));
                qualifiedTableName(settingsProvider_.get(), "successor")
            ));
        }();

        PreparedStatement insertDiff = [this]() {
@@ -325,7 +336,8 @@ public:
                    (seq, key)
                    VALUES (?, ?)
                )",
                qualifiedTableName(settingsProvider_.get(), "diff")));
                qualifiedTableName(settingsProvider_.get(), "diff")
            ));
        }();

        PreparedStatement insertAccountTx = [this]() {
@@ -335,7 +347,8 @@ public:
                    (account, seq_idx, hash)
                    VALUES (?, ?, ?)
                )",
                qualifiedTableName(settingsProvider_.get(), "account_tx")));
                qualifiedTableName(settingsProvider_.get(), "account_tx")
            ));
        }();

        PreparedStatement insertNFT = [this]() {
@@ -345,7 +358,8 @@ public:
                    (token_id, sequence, owner, is_burned)
VALUES (?, ?, ?, ?)
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "nf_tokens")));
|
||||
qualifiedTableName(settingsProvider_.get(), "nf_tokens")
|
||||
));
|
||||
}();
|
||||
|
||||
PreparedStatement insertIssuerNFT = [this]() {
|
||||
@@ -355,7 +369,8 @@ public:
|
||||
(issuer, taxon, token_id)
|
||||
VALUES (?, ?, ?)
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")));
|
||||
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")
|
||||
));
|
||||
}();
|
||||
|
||||
PreparedStatement insertNFTURI = [this]() {
|
||||
@@ -365,7 +380,8 @@ public:
|
||||
(token_id, sequence, uri)
|
||||
VALUES (?, ?, ?)
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "nf_token_uris")));
|
||||
qualifiedTableName(settingsProvider_.get(), "nf_token_uris")
|
||||
));
|
||||
}();
|
||||
|
||||
PreparedStatement insertNFTTx = [this]() {
|
||||
@@ -375,7 +391,8 @@ public:
|
||||
(token_id, seq_idx, hash)
|
||||
VALUES (?, ?, ?)
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions")));
|
||||
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions")
|
||||
));
|
||||
}();
|
||||
|
||||
PreparedStatement insertLedgerHeader = [this]() {
|
||||
@@ -385,7 +402,8 @@ public:
|
||||
(sequence, header)
|
||||
VALUES (?, ?)
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "ledgers")));
|
||||
qualifiedTableName(settingsProvider_.get(), "ledgers")
|
||||
));
|
||||
}();
|
||||
|
||||
PreparedStatement insertLedgerHash = [this]() {
|
||||
@@ -395,7 +413,8 @@ public:
|
||||
(hash, sequence)
|
||||
VALUES (?, ?)
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "ledger_hashes")));
|
||||
qualifiedTableName(settingsProvider_.get(), "ledger_hashes")
|
||||
));
|
||||
}();
|
||||
|
||||
//
|
||||
@@ -410,7 +429,8 @@ public:
|
||||
WHERE is_latest = ?
|
||||
IF sequence IN (?, null)
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "ledger_range")));
|
||||
qualifiedTableName(settingsProvider_.get(), "ledger_range")
|
||||
));
|
||||
}();
|
||||
|
||||
PreparedStatement deleteLedgerRange = [this]() {
|
||||
@@ -420,7 +440,8 @@ public:
|
||||
SET sequence = ?
|
||||
WHERE is_latest = false
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "ledger_range")));
|
||||
qualifiedTableName(settingsProvider_.get(), "ledger_range")
|
||||
));
|
||||
}();
|
||||
|
||||
//
|
||||
@@ -437,7 +458,8 @@ public:
|
||||
ORDER BY seq DESC
|
||||
LIMIT 1
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "successor")));
|
||||
qualifiedTableName(settingsProvider_.get(), "successor")
|
||||
));
|
||||
}();
|
||||
|
||||
PreparedStatement selectDiff = [this]() {
|
||||
@@ -447,7 +469,8 @@ public:
|
||||
FROM {}
|
||||
WHERE seq = ?
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "diff")));
|
||||
qualifiedTableName(settingsProvider_.get(), "diff")
|
||||
));
|
||||
}();
|
||||
|
||||
PreparedStatement selectObject = [this]() {
|
||||
@@ -460,7 +483,8 @@ public:
|
||||
ORDER BY sequence DESC
|
||||
LIMIT 1
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "objects")));
|
||||
qualifiedTableName(settingsProvider_.get(), "objects")
|
||||
));
|
||||
}();
|
||||
|
||||
PreparedStatement selectTransaction = [this]() {
|
||||
@@ -470,7 +494,8 @@ public:
|
||||
FROM {}
|
||||
WHERE hash = ?
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "transactions")));
|
||||
qualifiedTableName(settingsProvider_.get(), "transactions")
|
||||
));
|
||||
}();
|
||||
|
||||
PreparedStatement selectAllTransactionHashesInLedger = [this]() {
|
||||
@@ -480,7 +505,8 @@ public:
|
||||
FROM {}
|
||||
WHERE ledger_sequence = ?
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "ledger_transactions")));
|
||||
qualifiedTableName(settingsProvider_.get(), "ledger_transactions")
|
||||
));
|
||||
}();
|
||||
|
||||
PreparedStatement selectLedgerPageKeys = [this]() {
|
||||
@@ -494,7 +520,8 @@ public:
|
||||
LIMIT ?
|
||||
ALLOW FILTERING
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "objects")));
|
||||
qualifiedTableName(settingsProvider_.get(), "objects")
|
||||
));
|
||||
}();
|
||||
|
||||
PreparedStatement selectLedgerPage = [this]() {
|
||||
@@ -508,7 +535,8 @@ public:
|
||||
LIMIT ?
|
||||
ALLOW FILTERING
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "objects")));
|
||||
qualifiedTableName(settingsProvider_.get(), "objects")
|
||||
));
|
||||
}();
|
||||
|
||||
PreparedStatement getToken = [this]() {
|
||||
@@ -519,7 +547,8 @@ public:
|
||||
WHERE key = ?
|
||||
LIMIT 1
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "objects")));
|
||||
qualifiedTableName(settingsProvider_.get(), "objects")
|
||||
));
|
||||
}();
|
||||
|
||||
PreparedStatement selectAccountTx = [this]() {
|
||||
@@ -528,25 +557,25 @@ public:
|
||||
SELECT hash, seq_idx
|
||||
FROM {}
|
||||
WHERE account = ?
|
||||
AND seq_idx <= ?
|
||||
AND seq_idx < ?
|
||||
LIMIT ?
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "account_tx")));
|
||||
qualifiedTableName(settingsProvider_.get(), "account_tx")
|
||||
));
|
||||
}();
|
||||
|
||||
// the smallest transaction idx is 0, we use uint to store the transaction index, so we shall use ">=" to
|
||||
// include it(the transaction with idx 0) in the result
|
||||
PreparedStatement selectAccountTxForward = [this]() {
|
||||
return handle_.get().prepare(fmt::format(
|
||||
R"(
|
||||
SELECT hash, seq_idx
|
||||
FROM {}
|
||||
WHERE account = ?
|
||||
AND seq_idx >= ?
|
||||
AND seq_idx > ?
|
||||
ORDER BY seq_idx ASC
|
||||
LIMIT ?
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "account_tx")));
|
||||
qualifiedTableName(settingsProvider_.get(), "account_tx")
|
||||
));
|
||||
}();
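
For context, a hedged sketch of how this forward scan might be driven (the bind call, variable names, and page size are assumptions for illustration; only the CQL above is from this diff). The account plus the last-seen seq_idx act as the paging cursor, and Limit carries the int32 page size:

// hypothetical usage; bind API assumed
auto stmt = statements->selectAccountTxForward.bind(account, lastSeqIdx, Limit{200});
auto page = handle.execute(stmt);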

PreparedStatement selectNFT = [this]() {
@@ -559,7 +588,22 @@ public:
ORDER BY sequence DESC
LIMIT 1
)",
qualifiedTableName(settingsProvider_.get(), "nf_tokens")));
qualifiedTableName(settingsProvider_.get(), "nf_tokens")
));
}();

PreparedStatement selectNFTBulk = [this]() {
return handle_.get().prepare(fmt::format(
R"(
SELECT token_id, sequence, owner, is_burned
FROM {}
WHERE token_id IN ?
AND sequence <= ?
ORDER BY sequence DESC
PER PARTITION LIMIT 1
)",
qualifiedTableName(settingsProvider_.get(), "nf_tokens")
));
}();

PreparedStatement selectNFTURI = [this]() {
@@ -572,7 +616,22 @@ public:
ORDER BY sequence DESC
LIMIT 1
)",
qualifiedTableName(settingsProvider_.get(), "nf_token_uris")));
qualifiedTableName(settingsProvider_.get(), "nf_token_uris")
));
}();

PreparedStatement selectNFTURIBulk = [this]() {
return handle_.get().prepare(fmt::format(
R"(
SELECT token_id, uri
FROM {}
WHERE token_id IN ?
AND sequence <= ?
ORDER BY sequence DESC
PER PARTITION LIMIT 1
)",
qualifiedTableName(settingsProvider_.get(), "nf_token_uris")
));
}();

PreparedStatement selectNFTTx = [this]() {
@@ -585,7 +644,8 @@ public:
ORDER BY seq_idx DESC
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions")));
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions")
));
}();

PreparedStatement selectNFTTxForward = [this]() {
@@ -598,7 +658,37 @@ public:
ORDER BY seq_idx ASC
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions")));
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions")
));
}();

PreparedStatement selectNFTIDsByIssuer = [this]() {
return handle_.get().prepare(fmt::format(
R"(
SELECT token_id
FROM {}
WHERE issuer = ?
AND (taxon, token_id) > ?
ORDER BY taxon ASC, token_id ASC
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")
));
}();

PreparedStatement selectNFTIDsByIssuerTaxon = [this]() {
return handle_.get().prepare(fmt::format(
R"(
SELECT token_id
FROM {}
WHERE issuer = ?
AND taxon = ?
AND token_id > ?
ORDER BY taxon ASC, token_id ASC
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")
));
}();

PreparedStatement selectLedgerByHash = [this]() {
@@ -609,7 +699,8 @@ public:
WHERE hash = ?
LIMIT 1
)",
qualifiedTableName(settingsProvider_.get(), "ledger_hashes")));
qualifiedTableName(settingsProvider_.get(), "ledger_hashes")
));
}();

PreparedStatement selectLedgerBySeq = [this]() {
@@ -619,7 +710,8 @@ public:
FROM {}
WHERE sequence = ?
)",
qualifiedTableName(settingsProvider_.get(), "ledgers")));
qualifiedTableName(settingsProvider_.get(), "ledgers")
));
}();

PreparedStatement selectLatestLedger = [this]() {
@@ -629,7 +721,8 @@ public:
FROM {}
WHERE is_latest = true
)",
qualifiedTableName(settingsProvider_.get(), "ledger_range")));
qualifiedTableName(settingsProvider_.get(), "ledger_range")
));
}();

PreparedStatement selectLedgerRange = [this]() {
@@ -638,23 +731,24 @@ public:
SELECT sequence
FROM {}
)",
qualifiedTableName(settingsProvider_.get(), "ledger_range")));
qualifiedTableName(settingsProvider_.get(), "ledger_range")
));
}();
};

/**
* @brief Recreates the prepared statements
* @brief Recreates the prepared statements.
*/
void
prepareStatements(Handle const& handle)
{
log_.info() << "Preparing cassandra statements";
LOG(log_.info()) << "Preparing cassandra statements";
statements_ = std::make_unique<Statements>(settingsProvider_, handle);
log_.info() << "Finished preparing statements";
LOG(log_.info()) << "Finished preparing statements";
}

/**
* @brief Provides access to statements
* @brief Provides access to statements.
*/
std::unique_ptr<Statements> const&
operator->() const
@@ -666,4 +760,4 @@ private:
std::unique_ptr<Statements> statements_{nullptr};
};

} // namespace Backend::Cassandra
} // namespace data::cassandra
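
Tying the pieces together, a hedged end-to-end sketch (Schema, prepareStatements and operator-> are visible in the hunks above; the constructor shape, Handle, and execute call are assumptions for illustration):

SettingsProvider const provider{config};                      // defined in SettingsProvider.h below
Schema<SettingsProvider> schema{provider};                    // constructor shape assumed
schema.prepareStatements(handle);                             // (re)creates the Statements block
auto const res = handle.execute(schema->selectLatestLedger);  // operator-> exposes the prepared statements
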
@@ -17,28 +17,29 @@
*/
//==============================================================================

#include <backend/cassandra/SettingsProvider.h>
#include <backend/cassandra/impl/Cluster.h>
#include <backend/cassandra/impl/Statement.h>
#include <config/Config.h>
#include <data/cassandra/SettingsProvider.h>
#include <data/cassandra/impl/Cluster.h>
#include <data/cassandra/impl/Statement.h>
#include <util/Constants.h>
#include <util/config/Config.h>

#include <boost/json.hpp>

#include <fstream>
#include <string>
#include <thread>

namespace Backend::Cassandra {
namespace data::cassandra {

namespace detail {
inline Settings::ContactPoints
tag_invoke(boost::json::value_to_tag<Settings::ContactPoints>, boost::json::value const& value)
{
if (not value.is_object())
throw std::runtime_error(
"Feed entire Cassandra section to parse "
"Settings::ContactPoints instead");
if (not value.is_object()) {
throw std::runtime_error("Feed entire Cassandra section to parse Settings::ContactPoints instead");
}

clio::Config obj{value};
util::Config const obj{value};
Settings::ContactPoints out;

out.contactPoints = obj.valueOrThrow<std::string>("contact_points", "`contact_points` must be a string");
@@ -56,7 +57,7 @@ tag_invoke(boost::json::value_to_tag<Settings::SecureConnectionBundle>, boost::j
}
} // namespace detail
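
This tag_invoke overload is Boost.JSON's customization point for value_to; a minimal sketch of the conversion it enables (the JSON payload here is illustrative):

auto const jv = boost::json::parse(R"({"contact_points": "127.0.0.1", "port": 9042})");
auto const points = boost::json::value_to<Settings::ContactPoints>(jv);  // dispatches to the tag_invoke above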

SettingsProvider::SettingsProvider(clio::Config const& cfg, uint16_t ttl)
SettingsProvider::SettingsProvider(util::Config const& cfg, uint16_t ttl)
: config_{cfg}
, keyspace_{cfg.valueOr<std::string>("keyspace", "clio")}
, tablePrefix_{cfg.maybeValue<std::string>("table_prefix")}
@@ -75,18 +76,15 @@ SettingsProvider::getSettings() const
std::optional<std::string>
SettingsProvider::parseOptionalCertificate() const
{
if (auto const certPath = config_.maybeValue<std::string>("certfile"); certPath)
{
if (auto const certPath = config_.maybeValue<std::string>("certfile"); certPath) {
auto const path = std::filesystem::path(*certPath);
std::ifstream fileStream(path.string(), std::ios::in);
if (!fileStream)
{
if (!fileStream) {
throw std::system_error(errno, std::generic_category(), "Opening certificate " + path.string());
}

std::string contents(std::istreambuf_iterator<char>{fileStream}, std::istreambuf_iterator<char>{});
if (fileStream.bad())
{
if (fileStream.bad()) {
throw std::system_error(errno, std::generic_category(), "Reading certificate " + path.string());
}

@@ -100,12 +98,9 @@ Settings
SettingsProvider::parseSettings() const
{
auto settings = Settings::defaultSettings();
if (auto const bundle = config_.maybeValue<Settings::SecureConnectionBundle>("secure_connect_bundle"); bundle)
{
if (auto const bundle = config_.maybeValue<Settings::SecureConnectionBundle>("secure_connect_bundle"); bundle) {
settings.connectionInfo = *bundle;
}
else
{
} else {
settings.connectionInfo =
config_.valueOrThrow<Settings::ContactPoints>("Missing contact_points in Cassandra config");
}
@@ -115,6 +110,19 @@ SettingsProvider::parseSettings() const
config_.valueOr<uint32_t>("max_write_requests_outstanding", settings.maxWriteRequestsOutstanding);
settings.maxReadRequestsOutstanding =
config_.valueOr<uint32_t>("max_read_requests_outstanding", settings.maxReadRequestsOutstanding);
settings.coreConnectionsPerHost =
config_.valueOr<uint32_t>("core_connections_per_host", settings.coreConnectionsPerHost);

settings.queueSizeIO = config_.maybeValue<uint32_t>("queue_size_io");

auto const connectTimeoutSecond = config_.maybeValue<uint32_t>("connect_timeout");
if (connectTimeoutSecond)
settings.connectionTimeout = std::chrono::milliseconds{*connectTimeoutSecond * util::MILLISECONDS_PER_SECOND};

auto const requestTimeoutSecond = config_.maybeValue<uint32_t>("request_timeout");
if (requestTimeoutSecond)
settings.requestTimeout = std::chrono::milliseconds{*requestTimeoutSecond * util::MILLISECONDS_PER_SECOND};

settings.certificate = parseOptionalCertificate();
settings.username = config_.maybeValue<std::string>("username");
settings.password = config_.maybeValue<std::string>("password");
@@ -122,4 +130,4 @@ SettingsProvider::parseSettings() const
return settings;
}

} // namespace Backend::Cassandra
} // namespace data::cassandra
@@ -19,20 +19,19 @@

#pragma once

#include <backend/cassandra/Handle.h>
#include <backend/cassandra/Types.h>
#include <config/Config.h>
#include <log/Logger.h>
#include <data/cassandra/Handle.h>
#include <data/cassandra/Types.h>
#include <util/Expected.h>
#include <util/config/Config.h>
#include <util/log/Logger.h>

namespace Backend::Cassandra {
namespace data::cassandra {

/**
* @brief Provides settings for @ref CassandraBackend
* @brief Provides settings for @ref BasicCassandraBackend.
*/
class SettingsProvider
{
clio::Config config_;
class SettingsProvider {
util::Config config_;

std::string keyspace_;
std::optional<std::string> tablePrefix_;
@@ -41,34 +40,50 @@ class SettingsProvider
Settings settings_;

public:
explicit SettingsProvider(clio::Config const& cfg, uint16_t ttl = 0);
/**
* @brief Create a settings provider from the specified config.
*
* @param cfg The config of Clio to use
* @param ttl Time to live setting
*/
explicit SettingsProvider(util::Config const& cfg, uint16_t ttl = 0);

/*! Get the cluster settings */
/**
* @return The cluster settings
*/
[[nodiscard]] Settings
getSettings() const;

/*! Get the specified keyspace */
/**
* @return The specified keyspace
*/
[[nodiscard]] inline std::string
getKeyspace() const
{
return keyspace_;
}

/*! Get an optional table prefix to use in all queries */
/**
* @return The optional table prefix to use in all queries
*/
[[nodiscard]] inline std::optional<std::string>
getTablePrefix() const
{
return tablePrefix_;
}

/*! Get the replication factor */
/**
* @return The replication factor
*/
[[nodiscard]] inline uint16_t
getReplicationFactor() const
{
return replicationFactor_;
}

/*! Get the default time to live to use in all `create` queries */
/**
* @return The default time to live to use in all `create` queries
*/
[[nodiscard]] inline uint16_t
getTtl() const
{
@@ -83,4 +98,4 @@ private:
parseSettings() const;
};

} // namespace Backend::Cassandra
} // namespace data::cassandra
@@ -23,7 +23,7 @@

#include <string>

namespace Backend::Cassandra {
namespace data::cassandra {

namespace detail {
struct Settings;
@@ -52,8 +52,7 @@ using Batch = detail::Batch;
* because clio uses bigint (int64) everywhere except for when one needs
* to specify LIMIT, which needs an int32 :-/
*/
struct Limit
{
struct Limit {
int32_t limit;
};
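
A hedged illustration of why this wrapper exists (only the struct itself is in this diff; the bind call is an assumption): wrapping the page size in Limit lets a caller pass it as int32 while every other bound integer stays int64.

auto stmt = statements->selectNFTTx.bind(tokenID, cursorIdx, Limit{50});  // hypothetical bind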

@@ -64,4 +63,4 @@ using MaybeError = util::Expected<void, CassandraError>;
using ResultOrError = util::Expected<Result, CassandraError>;
using Error = util::Unexpected<CassandraError>;

} // namespace Backend::Cassandra
} // namespace data::cassandra
@@ -19,19 +19,19 @@

#pragma once

#include <backend/cassandra/Concepts.h>
#include <backend/cassandra/Handle.h>
#include <backend/cassandra/Types.h>
#include <backend/cassandra/impl/RetryPolicy.h>
#include <log/Logger.h>
#include <data/cassandra/Concepts.h>
#include <data/cassandra/Handle.h>
#include <data/cassandra/Types.h>
#include <data/cassandra/impl/RetryPolicy.h>
#include <util/Expected.h>
#include <util/log/Logger.h>

#include <boost/asio.hpp>

#include <functional>
#include <memory>

namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {

/**
* @brief A query executor with a changeable retry policy
@@ -48,16 +48,17 @@ template <
typename StatementType,
typename HandleType = Handle,
SomeRetryPolicy RetryPolicyType = ExponentialBackoffRetryPolicy>
class AsyncExecutor : public std::enable_shared_from_this<AsyncExecutor<StatementType, HandleType, RetryPolicyType>>
{
class AsyncExecutor : public std::enable_shared_from_this<AsyncExecutor<StatementType, HandleType, RetryPolicyType>> {
using FutureWithCallbackType = typename HandleType::FutureWithCallbackType;
using CallbackType = std::function<void(typename HandleType::ResultOrErrorType)>;
using RetryCallbackType = std::function<void()>;

clio::Logger log_{"Backend"};
util::Logger log_{"Backend"};

StatementType data_;
RetryPolicyType retryPolicy_;
CallbackType onComplete_;
RetryCallbackType onRetry_;

// does not exist during initial construction, hence optional
std::optional<FutureWithCallbackType> future_;
@@ -68,24 +69,37 @@ public:
* @brief Create a new instance of the AsyncExecutor and execute it.
*/
static void
run(boost::asio::io_context& ioc, HandleType const& handle, StatementType&& data, CallbackType&& onComplete)
run(boost::asio::io_context& ioc,
HandleType const& handle,
StatementType&& data,
CallbackType&& onComplete,
RetryCallbackType&& onRetry)
{
// this is a helper that allows us to use std::make_shared below
struct EnableMakeShared : public AsyncExecutor<StatementType, HandleType, RetryPolicyType>
{
EnableMakeShared(boost::asio::io_context& ioc, StatementType&& data, CallbackType&& onComplete)
: AsyncExecutor(ioc, std::move(data), std::move(onComplete))
struct EnableMakeShared : public AsyncExecutor<StatementType, HandleType, RetryPolicyType> {
EnableMakeShared(
boost::asio::io_context& ioc,
StatementType&& data,
CallbackType&& onComplete,
RetryCallbackType&& onRetry
)
: AsyncExecutor(ioc, std::move(data), std::move(onComplete), std::move(onRetry))
{
}
};

auto ptr = std::make_shared<EnableMakeShared>(ioc, std::move(data), std::move(onComplete));
auto ptr = std::make_shared<EnableMakeShared>(ioc, std::move(data), std::move(onComplete), std::move(onRetry));
ptr->execute(handle);
}

private:
AsyncExecutor(boost::asio::io_context& ioc, StatementType&& data, CallbackType&& onComplete)
: data_{std::move(data)}, retryPolicy_{ioc}, onComplete_{std::move(onComplete)}
AsyncExecutor(
boost::asio::io_context& ioc,
StatementType&& data,
CallbackType&& onComplete,
RetryCallbackType&& onRetry
)
: data_{std::move(data)}, retryPolicy_{ioc}, onComplete_{std::move(onComplete)}, onRetry_{std::move(onRetry)}
{
}

@@ -96,24 +110,23 @@ private:

// lifetime is extended by capturing self ptr
auto handler = [this, &handle, self](auto&& res) mutable {
if (res)
{
onComplete_(std::move(res));
}
else
{
if (retryPolicy_.shouldRetry(res.error()))
if (res) {
onComplete_(std::forward<decltype(res)>(res));
} else {
if (retryPolicy_.shouldRetry(res.error())) {
onRetry_();
retryPolicy_.retry([self, &handle]() { self->execute(handle); });
else
onComplete_(std::move(res)); // report error
} else {
onComplete_(std::forward<decltype(res)>(res)); // report error
}
}

self = nullptr; // explicitly decrement refcount
};

std::scoped_lock lck{mtx_};
std::scoped_lock const lck{mtx_};
future_.emplace(handle.asyncExecute(data_, std::move(handler)));
}
};

} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
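
The EnableMakeShared helper above is the usual idiom for combining std::make_shared with a private constructor; in isolation it looks like this (a generic sketch, not code from this diff):

#include <memory>

class Widget {
    Widget() = default;  // private: forces creation through create()

public:
    static std::shared_ptr<Widget>
    create()
    {
        // A local class inside a member function shares that function's access
        // rights, so it can reach the private constructor while still letting
        // std::make_shared allocate the object and control block in one go.
        struct EnableMakeShared : Widget {
            EnableMakeShared() : Widget() {}
        };
        return std::make_shared<EnableMakeShared>();
    }
};
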
@@ -17,40 +17,39 @@
*/
//==============================================================================

#include <backend/cassandra/Error.h>
#include <backend/cassandra/impl/Batch.h>
#include <backend/cassandra/impl/Statement.h>
#include <data/cassandra/Error.h>
#include <data/cassandra/impl/Batch.h>
#include <data/cassandra/impl/Statement.h>
#include <util/Expected.h>

#include <exception>
#include <vector>

namespace {
static constexpr auto batchDeleter = [](CassBatch* ptr) { cass_batch_free(ptr); };
};
constexpr auto batchDeleter = [](CassBatch* ptr) { cass_batch_free(ptr); };
} // namespace

namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {

// todo: use an appropritae value instead of CASS_BATCH_TYPE_LOGGED for
// different use cases
// TODO: Use an appropriate value instead of CASS_BATCH_TYPE_LOGGED for different use cases
Batch::Batch(std::vector<Statement> const& statements)
: ManagedObject{cass_batch_new(CASS_BATCH_TYPE_LOGGED), batchDeleter}
{
cass_batch_set_is_idempotent(*this, cass_true);

for (auto const& statement : statements)
for (auto const& statement : statements) {
if (auto const res = add(statement); not res)
throw std::runtime_error("Failed to add statement to batch: " + res.error());
}
}

MaybeError
Batch::add(Statement const& statement)
{
if (auto const rc = cass_batch_add_statement(*this, statement); rc != CASS_OK)
{
if (auto const rc = cass_batch_add_statement(*this, statement); rc != CASS_OK) {
return Error{CassandraError{cass_error_desc(rc), rc}};
}
return {};
}

} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
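
A short usage sketch grounded in the constructor above (the handle and statement plumbing are assumptions):

std::vector<Statement> statements = buildInsertStatements();  // hypothetical helper
Batch const batch{statements};  // throws std::runtime_error if any statement fails to add
auto const res = handle.execute(batch);  // execute API assumed
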
@@ -19,19 +19,18 @@

#pragma once

#include <backend/cassandra/Types.h>
#include <backend/cassandra/impl/ManagedObject.h>
#include <data/cassandra/Types.h>
#include <data/cassandra/impl/ManagedObject.h>

#include <cassandra.h>

namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {

struct Batch : public ManagedObject<CassBatch>
{
struct Batch : public ManagedObject<CassBatch> {
Batch(std::vector<Statement> const& statements);

MaybeError
add(Statement const& statement);
};

} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
@@ -17,20 +17,21 @@
*/
//==============================================================================

#include <backend/cassandra/impl/Cluster.h>
#include <backend/cassandra/impl/SslContext.h>
#include <backend/cassandra/impl/Statement.h>
#include <data/cassandra/impl/Cluster.h>
#include <data/cassandra/impl/SslContext.h>
#include <data/cassandra/impl/Statement.h>
#include <util/Expected.h>

#include <fmt/core.h>

#include <exception>
#include <vector>

namespace {
static constexpr auto clusterDeleter = [](CassCluster* ptr) { cass_cluster_free(ptr); };
constexpr auto clusterDeleter = [](CassCluster* ptr) { cass_cluster_free(ptr); };

template <class... Ts>
struct overloadSet : Ts...
{
struct overloadSet : Ts... {
using Ts::operator()...;
};

@@ -39,53 +40,46 @@ template <class... Ts>
overloadSet(Ts...) -> overloadSet<Ts...>;
}; // namespace

namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {

Cluster::Cluster(Settings const& settings) : ManagedObject{cass_cluster_new(), clusterDeleter}
{
using std::to_string;

cass_cluster_set_token_aware_routing(*this, cass_true);
if (auto const rc = cass_cluster_set_protocol_version(*this, CASS_PROTOCOL_VERSION_V4); rc != CASS_OK)
{
throw std::runtime_error(std::string{"Error setting cassandra protocol version to v4: "} + cass_error_desc(rc));
if (auto const rc = cass_cluster_set_protocol_version(*this, CASS_PROTOCOL_VERSION_V4); rc != CASS_OK) {
throw std::runtime_error(fmt::format("Error setting cassandra protocol version to v4: {}", cass_error_desc(rc))
);
}

if (auto const rc = cass_cluster_set_num_threads_io(*this, settings.threads); rc != CASS_OK)
{
if (auto const rc = cass_cluster_set_num_threads_io(*this, settings.threads); rc != CASS_OK) {
throw std::runtime_error(
std::string{"Error setting cassandra io threads to "} + to_string(settings.threads) + ": " +
cass_error_desc(rc));
fmt::format("Error setting cassandra io threads to {}: {}", settings.threads, cass_error_desc(rc))
);
}

cass_log_set_level(settings.enableLog ? CASS_LOG_TRACE : CASS_LOG_DISABLED);
cass_cluster_set_connect_timeout(*this, settings.connectionTimeout.count());
cass_cluster_set_request_timeout(*this, settings.requestTimeout.count());

// TODO: other options to experiment with and consider later:
// cass_cluster_set_max_concurrent_requests_threshold(*this, 10000);
// cass_cluster_set_queue_size_event(*this, 100000);
// cass_cluster_set_queue_size_io(*this, 100000);
// cass_cluster_set_write_bytes_high_water_mark(*this, 16 * 1024 * 1024); // 16mb
// cass_cluster_set_write_bytes_low_water_mark(*this, 8 * 1024 * 1024); // half of allowance
// cass_cluster_set_pending_requests_high_water_mark(*this, 5000);
// cass_cluster_set_pending_requests_low_water_mark(*this, 2500); // half
// cass_cluster_set_max_requests_per_flush(*this, 1000);
// cass_cluster_set_max_concurrent_creation(*this, 8);
// cass_cluster_set_max_connections_per_host(*this, 6);
// cass_cluster_set_core_connections_per_host(*this, 4);
// cass_cluster_set_constant_speculative_execution_policy(*this, 1000, 1024);
if (auto const rc = cass_cluster_set_core_connections_per_host(*this, settings.coreConnectionsPerHost);
rc != CASS_OK) {
throw std::runtime_error(fmt::format("Could not set core connections per host: {}", cass_error_desc(rc)));
}

if (auto const rc = cass_cluster_set_queue_size_io(
*this, settings.maxWriteRequestsOutstanding + settings.maxReadRequestsOutstanding);
rc != CASS_OK)
{
throw std::runtime_error(std::string{"Could not set queue size for IO per host: "} + cass_error_desc(rc));
auto const queueSize =
settings.queueSizeIO.value_or(settings.maxWriteRequestsOutstanding + settings.maxReadRequestsOutstanding);
if (auto const rc = cass_cluster_set_queue_size_io(*this, queueSize); rc != CASS_OK) {
throw std::runtime_error(fmt::format("Could not set queue size for IO per host: {}", cass_error_desc(rc)));
}

setupConnection(settings);
setupCertificate(settings);
setupCredentials(settings);

LOG(log_.info()) << "Threads: " << settings.threads;
LOG(log_.info()) << "Core connections per host: " << settings.coreConnectionsPerHost;
LOG(log_.info()) << "IO queue size: " << queueSize;
}

void
@@ -95,7 +89,8 @@ Cluster::setupConnection(Settings const& settings)
overloadSet{
[this](Settings::ContactPoints const& points) { setupContactPoints(points); },
[this](Settings::SecureConnectionBundle const& bundle) { setupSecureBundle(bundle); }},
settings.connectionInfo);
settings.connectionInfo
);
}
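
overloadSet is the standard "overloaded lambdas" idiom for std::visit, which setupConnection uses on connectionInfo above; a self-contained sketch of the same mechanism:

#include <iostream>
#include <string>
#include <variant>

template <class... Ts>
struct overloadSet : Ts... {
    using Ts::operator()...;
};
template <class... Ts>
overloadSet(Ts...) -> overloadSet<Ts...>;

void
dispatch(std::variant<int, std::string> const& v)
{
    // one callable per alternative; the deduction guide picks the right overload
    std::visit(
        overloadSet{
            [](int i) { std::cout << "int: " << i << '\n'; },
            [](std::string const& s) { std::cout << "string: " << s << '\n'; },
        },
        v
    );
}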

void
@@ -103,18 +98,20 @@ Cluster::setupContactPoints(Settings::ContactPoints const& points)
{
using std::to_string;
auto throwErrorIfNeeded = [](CassError rc, std::string const& label, std::string const& value) {
if (rc != CASS_OK)
throw std::runtime_error("Cassandra: Error setting " + label + " [" + value + "]: " + cass_error_desc(rc));
if (rc != CASS_OK) {
throw std::runtime_error(
fmt::format("Cassandra: Error setting {} [{}]: {}", label, value, cass_error_desc(rc))
);
}
};

{
log_.debug() << "Attempt connection using contact points: " << points.contactPoints;
LOG(log_.debug()) << "Attempt connection using contact points: " << points.contactPoints;
auto const rc = cass_cluster_set_contact_points(*this, points.contactPoints.data());
throwErrorIfNeeded(rc, "contact_points", points.contactPoints);
}

if (points.port)
{
if (points.port) {
auto const rc = cass_cluster_set_port(*this, points.port.value());
throwErrorIfNeeded(rc, "port", to_string(points.port.value()));
}
@@ -123,10 +120,9 @@ Cluster::setupContactPoints(Settings::ContactPoints const& points)
void
Cluster::setupSecureBundle(Settings::SecureConnectionBundle const& bundle)
{
log_.debug() << "Attempt connection using secure bundle";
if (auto const rc = cass_cluster_set_cloud_secure_connection_bundle(*this, bundle.bundle.data()); rc != CASS_OK)
{
throw std::runtime_error("Failed to connect using secure connection bundle" + bundle.bundle);
LOG(log_.debug()) << "Attempt connection using secure bundle";
if (auto const rc = cass_cluster_set_cloud_secure_connection_bundle(*this, bundle.bundle.data()); rc != CASS_OK) {
throw std::runtime_error("Failed to connect using secure connection bundle " + bundle.bundle);
}
}

@@ -136,8 +132,8 @@ Cluster::setupCertificate(Settings const& settings)
if (not settings.certificate)
return;

log_.debug() << "Configure SSL context";
SslContext context = SslContext(*settings.certificate);
LOG(log_.debug()) << "Configure SSL context";
SslContext const context = SslContext(*settings.certificate);
cass_cluster_set_ssl(*this, context);
}

@@ -147,8 +143,8 @@ Cluster::setupCredentials(Settings const& settings)
if (not settings.username || not settings.password)
return;

log_.debug() << "Set credentials; username: " << settings.username.value();
LOG(log_.debug()) << "Set credentials; username: " << settings.username.value();
cass_cluster_set_credentials(*this, settings.username.value().c_str(), settings.password.value().c_str());
}

} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
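
A hedged sketch of the new queue_size_io fallback seen in the constructor above (values follow the defaults in Cluster.h below):

Settings settings = Settings::defaultSettings();
settings.queueSizeIO = std::nullopt;  // unset: falls back to maxWrite + maxRead outstanding
Cluster const cluster{settings};      // with the defaults this sets an IO queue size of 110'000
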
@@ -19,8 +19,8 @@

#pragma once

#include <backend/cassandra/impl/ManagedObject.h>
#include <log/Logger.h>
#include <data/cassandra/impl/ManagedObject.h>
#include <util/log/Logger.h>

#include <cassandra.h>

@@ -31,32 +31,71 @@
#include <thread>
#include <variant>

namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {

struct Settings
{
struct ContactPoints
{
// TODO: move Settings to public interface, not detail

/**
* @brief Bundles all cassandra settings in one place.
*/
struct Settings {
static constexpr std::size_t DEFAULT_CONNECTION_TIMEOUT = 10000;
static constexpr uint32_t DEFAULT_MAX_WRITE_REQUESTS_OUTSTANDING = 10'000;
static constexpr uint32_t DEFAULT_MAX_READ_REQUESTS_OUTSTANDING = 100'000;
/**
* @brief Represents the configuration of contact points for cassandra.
*/
struct ContactPoints {
std::string contactPoints = "127.0.0.1"; // defaults to localhost
std::optional<uint16_t> port;
std::optional<uint16_t> port = {};
};

struct SecureConnectionBundle
{
/**
* @brief Represents the configuration of a secure connection bundle.
*/
struct SecureConnectionBundle {
std::string bundle; // no meaningful default
};

/** @brief Enables or disables cassandra driver logger */
bool enableLog = false;
std::chrono::milliseconds connectionTimeout = std::chrono::milliseconds{10000};
std::chrono::milliseconds requestTimeout = std::chrono::milliseconds{0}; // no timeout at all
std::variant<ContactPoints, SecureConnectionBundle> connectionInfo = ContactPoints{};
uint32_t threads = std::thread::hardware_concurrency();
uint32_t maxWriteRequestsOutstanding = 10'000;
uint32_t maxReadRequestsOutstanding = 100'000;
std::optional<std::string> certificate; // ssl context
std::optional<std::string> username;
std::optional<std::string> password;

/** @brief Connect timeout specified in milliseconds */
std::chrono::milliseconds connectionTimeout = std::chrono::milliseconds{DEFAULT_CONNECTION_TIMEOUT};

/** @brief Request timeout specified in milliseconds */
std::chrono::milliseconds requestTimeout = std::chrono::milliseconds{0}; // no timeout at all

/** @brief Connection information; either ContactPoints or SecureConnectionBundle */
std::variant<ContactPoints, SecureConnectionBundle> connectionInfo = ContactPoints{};

/** @brief The number of threads for the driver to pool */
uint32_t threads = std::thread::hardware_concurrency();

/** @brief The maximum number of outstanding write requests at any given moment */
uint32_t maxWriteRequestsOutstanding = DEFAULT_MAX_WRITE_REQUESTS_OUTSTANDING;

/** @brief The maximum number of outstanding read requests at any given moment */
uint32_t maxReadRequestsOutstanding = DEFAULT_MAX_READ_REQUESTS_OUTSTANDING;

/** @brief The number of connections per host to always have active */
uint32_t coreConnectionsPerHost = 1u;

/** @brief Size of the IO queue */
std::optional<uint32_t> queueSizeIO{};

/** @brief SSL certificate */
std::optional<std::string> certificate{}; // ssl context

/** @brief Username/login */
std::optional<std::string> username{};

/** @brief Password to match the `username` */
std::optional<std::string> password{};

/**
* @brief Creates a new Settings object as a copy of the current one with overridden contact points.
*/
Settings
withContactPoints(std::string_view contactPoints)
{
@@ -65,6 +104,9 @@ struct Settings
return tmp;
}

/**
* @brief Returns the default settings.
*/
static Settings
defaultSettings()
{
@@ -72,9 +114,8 @@ struct Settings
}
};
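
Both helpers visible above compose naturally; a small sketch:

auto const settings = Settings::defaultSettings().withContactPoints("10.0.0.5");
// settings.connectionInfo now holds ContactPoints{"10.0.0.5"}; every other field keeps its default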

class Cluster : public ManagedObject<CassCluster>
{
clio::Logger log_{"Backend"};
class Cluster : public ManagedObject<CassCluster> {
util::Logger log_{"Backend"};

public:
Cluster(Settings const& settings);
@@ -96,4 +137,4 @@ private:
setupCredentials(Settings const& settings);
};

} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
src/data/cassandra/impl/Collection.h (new file, 87 lines)
@@ -0,0 +1,87 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <data/cassandra/impl/ManagedObject.h>

#include <ripple/basics/base_uint.h>
#include <cassandra.h>

#include <string>
#include <string_view>

namespace data::cassandra::detail {

class Collection : public ManagedObject<CassCollection> {
static constexpr auto deleter = [](CassCollection* ptr) { cass_collection_free(ptr); };

static void
throwErrorIfNeeded(CassError const rc, std::string_view const label)
{
if (rc == CASS_OK)
return;
auto const tag = '[' + std::string{label} + ']';
throw std::logic_error(tag + ": " + cass_error_desc(rc));
}

public:
/* implicit */ Collection(CassCollection* ptr);

template <typename Type>
explicit Collection(std::vector<Type> const& value)
: ManagedObject{cass_collection_new(CASS_COLLECTION_TYPE_LIST, value.size()), deleter}
{
bind(value);
}

template <typename Type>
void
bind(std::vector<Type> const& values) const
{
for (auto const& value : values)
append(value);
}

void
append(bool const value) const
{
auto const rc = cass_collection_append_bool(*this, value ? cass_true : cass_false);
throwErrorIfNeeded(rc, "Bind bool");
}

void
append(int64_t const value) const
{
auto const rc = cass_collection_append_int64(*this, value);
throwErrorIfNeeded(rc, "Bind int64");
}

void
append(ripple::uint256 const& value) const
{
auto const rc = cass_collection_append_bytes(
*this,
static_cast<cass_byte_t const*>(static_cast<unsigned char const*>(value.data())),
ripple::uint256::size()
);
throwErrorIfNeeded(rc, "Bind ripple::uint256");
}
};
} // namespace data::cassandra::detail
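
A brief usage sketch of the new Collection wrapper (the constructor and append overloads are from the file above; how a statement consumes the collection is assumed):

std::vector<ripple::uint256> tokenIDs = gatherTokenIDs();  // hypothetical source of IDs
Collection const ids{tokenIDs};  // builds a CQL list via append(ripple::uint256)
auto stmt = statements->selectNFTBulk.bind(ids, ledgerSequence);  // for `WHERE token_id IN ?`; bind API assumed
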
|
||||
@@ -19,13 +19,15 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <backend/cassandra/Handle.h>
|
||||
#include <backend/cassandra/Types.h>
|
||||
#include <backend/cassandra/impl/AsyncExecutor.h>
|
||||
#include <log/Logger.h>
|
||||
#include <data/BackendCounters.h>
|
||||
#include <data/BackendInterface.h>
|
||||
#include <data/cassandra/Handle.h>
|
||||
#include <data/cassandra/Types.h>
|
||||
#include <data/cassandra/impl/AsyncExecutor.h>
|
||||
#include <util/Expected.h>
|
||||
#include <util/log/Logger.h>
|
||||
|
||||
#include <boost/asio/async_result.hpp>
|
||||
#include <boost/asio.hpp>
|
||||
#include <boost/asio/spawn.hpp>
|
||||
|
||||
#include <atomic>
|
||||
@@ -36,19 +38,19 @@
|
||||
#include <optional>
|
||||
#include <thread>
|
||||
|
||||
namespace Backend::Cassandra::detail {
|
||||
namespace data::cassandra::detail {
|
||||
|
||||
// TODO: this could probably be also moved out of detail and into the main cassandra namespace.
|
||||
|
||||
/**
|
||||
* @brief Implements async and sync querying against the cassandra DB with
|
||||
* support for throttling.
|
||||
* @brief Implements async and sync querying against the cassandra DB with support for throttling.
|
||||
*
|
||||
* Note: A lot of the code that uses yield is repeated below. This is ok for now
|
||||
* because we are hopefully going to be getting rid of it entirely later on.
|
||||
* Note: A lot of the code that uses yield is repeated below.
|
||||
* This is ok for now because we are hopefully going to be getting rid of it entirely later on.
|
||||
*/
|
||||
template <typename HandleType = Handle>
|
||||
class DefaultExecutionStrategy
|
||||
{
|
||||
clio::Logger log_{"Backend"};
|
||||
template <typename HandleType = Handle, SomeBackendCounters BackendCountersType = BackendCounters>
|
||||
class DefaultExecutionStrategy {
|
||||
util::Logger log_{"Backend"};
|
||||
|
||||
std::uint32_t maxWriteRequestsOutstanding_;
|
||||
std::atomic_uint32_t numWriteRequestsOutstanding_ = 0;
|
||||
@@ -68,6 +70,8 @@ class DefaultExecutionStrategy
|
||||
std::reference_wrapper<HandleType const> handle_;
|
||||
std::thread thread_;
|
||||
|
||||
typename BackendCountersType::PtrType counters_;
|
||||
|
||||
public:
|
||||
using ResultOrErrorType = typename HandleType::ResultOrErrorType;
|
||||
using StatementType = typename HandleType::StatementType;
|
||||
@@ -75,20 +79,25 @@ public:
|
||||
using FutureType = typename HandleType::FutureType;
|
||||
using FutureWithCallbackType = typename HandleType::FutureWithCallbackType;
|
||||
using ResultType = typename HandleType::ResultType;
|
||||
|
||||
using CompletionTokenType = boost::asio::yield_context;
|
||||
using FunctionType = void(boost::system::error_code);
|
||||
using AsyncResultType = boost::asio::async_result<CompletionTokenType, FunctionType>;
|
||||
using HandlerType = typename AsyncResultType::completion_handler_type;
|
||||
|
||||
DefaultExecutionStrategy(Settings settings, HandleType const& handle)
|
||||
/**
|
||||
* @param settings The settings to use
|
||||
* @param handle A handle to the cassandra database
|
||||
*/
|
||||
DefaultExecutionStrategy(
|
||||
Settings const& settings,
|
||||
HandleType const& handle,
|
||||
typename BackendCountersType::PtrType counters = BackendCountersType::make()
|
||||
)
|
||||
: maxWriteRequestsOutstanding_{settings.maxWriteRequestsOutstanding}
|
||||
, maxReadRequestsOutstanding_{settings.maxReadRequestsOutstanding}
|
||||
, work_{ioc_}
|
||||
, handle_{std::cref(handle)}
|
||||
, thread_{[this]() { ioc_.run(); }}
|
||||
, counters_{std::move(counters)}
|
||||
{
|
||||
log_.info() << "Max write requests outstanding is " << maxWriteRequestsOutstanding_
|
||||
LOG(log_.info()) << "Max write requests outstanding is " << maxWriteRequestsOutstanding_
|
||||
<< "; Max read requests outstanding is " << maxReadRequestsOutstanding_;
|
||||
}
|
||||
|
||||
@@ -100,47 +109,52 @@ public:
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Wait for all async writes to finish before unblocking
|
||||
* @brief Wait for all async writes to finish before unblocking.
|
||||
*/
|
||||
void
|
||||
sync()
|
||||
{
|
||||
log_.debug() << "Waiting to sync all writes...";
|
||||
LOG(log_.debug()) << "Waiting to sync all writes...";
|
||||
std::unique_lock<std::mutex> lck(syncMutex_);
|
||||
syncCv_.wait(lck, [this]() { return finishedAllWriteRequests(); });
|
||||
log_.debug() << "Sync done.";
|
||||
}
|
||||
|
||||
bool
|
||||
isTooBusy() const
|
||||
{
|
||||
return numReadRequestsOutstanding_ >= maxReadRequestsOutstanding_;
|
||||
LOG(log_.debug()) << "Sync done.";
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Blocking query execution used for writing data
|
||||
* @return true if outstanding read requests allowance is exhausted; false otherwise
|
||||
*/
|
||||
bool
|
||||
isTooBusy() const
|
||||
{
|
||||
bool const result = numReadRequestsOutstanding_ >= maxReadRequestsOutstanding_;
|
||||
if (result)
|
||||
counters_->registerTooBusy();
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Blocking query execution used for writing data.
|
||||
*
|
||||
* Retries forever sleeping for 5 milliseconds between attempts.
|
||||
*/
|
||||
ResultOrErrorType
|
||||
writeSync(StatementType const& statement)
|
||||
{
|
||||
while (true)
|
||||
{
|
||||
if (auto res = handle_.get().execute(statement); res)
|
||||
{
|
||||
counters_->registerWriteSync();
|
||||
while (true) {
|
||||
auto res = handle_.get().execute(statement);
|
||||
if (res) {
|
||||
return res;
|
||||
}
|
||||
else
|
||||
{
|
||||
log_.warn() << "Cassandra sync write error, retrying: " << res.error();
|
||||
|
||||
counters_->registerWriteSyncRetry();
|
||||
LOG(log_.warn()) << "Cassandra sync write error, retrying: " << res.error();
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(5));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Blocking query execution used for writing data
|
||||
* @brief Blocking query execution used for writing data.
|
||||
*
|
||||
* Retries forever sleeping for 5 milliseconds between attempts.
|
||||
*/
|
||||
@@ -152,11 +166,11 @@ public:
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Non-blocking query execution used for writing data
|
||||
* @brief Non-blocking query execution used for writing data.
|
||||
*
|
||||
* Retries forever with retry policy specified by @ref AsyncExecutor
|
||||
*
|
||||
* @param prepradeStatement Statement to prepare and execute
|
||||
* @param preparedStatement Statement to prepare and execute
|
||||
* @param args Args to bind to the prepared statement
|
||||
* @throw DatabaseTimeout on timeout
|
||||
*/
|
||||
@@ -167,13 +181,23 @@ public:
|
||||
auto statement = preparedStatement.bind(std::forward<Args>(args)...);
|
||||
incrementOutstandingRequestCount();
|
||||
|
||||
counters_->registerWriteStarted();
|
||||
// Note: lifetime is controlled by std::shared_from_this internally
|
||||
AsyncExecutor<std::decay_t<decltype(statement)>, HandleType>::run(
|
||||
ioc_, handle_, std::move(statement), [this](auto const&) { decrementOutstandingRequestCount(); });
ioc_,
handle_,
std::move(statement),
[this](auto const&) {
decrementOutstandingRequestCount();

counters_->registerWriteFinished();
},
[this]() { counters_->registerWriteRetry(); }
);
}

/**
* @brief Non-blocking batched query execution used for writing data
* @brief Non-blocking batched query execution used for writing data.
*
* Retries forever with retry policy specified by @ref AsyncExecutor.
*
@@ -188,9 +212,18 @@ public:

incrementOutstandingRequestCount();

counters_->registerWriteStarted();
// Note: lifetime is controlled by std::shared_from_this internally
AsyncExecutor<std::decay_t<decltype(statements)>, HandleType>::run(
ioc_, handle_, std::move(statements), [this](auto const&) { decrementOutstandingRequestCount(); });
ioc_,
handle_,
std::move(statements),
[this](auto const&) {
decrementOutstandingRequestCount();
counters_->registerWriteFinished();
},
[this]() { counters_->registerWriteRetry(); }
);
}

/**
@@ -199,7 +232,7 @@ public:
* Retries forever until successful or throws an exception on timeout.
*
* @param token Completion token (yield_context)
* @param prepradeStatement Statement to prepare and execute
* @param preparedStatement Statement to prepare and execute
* @param args Args to bind to the prepared statement
* @throw DatabaseTimeout on timeout
* @return ResultType or error wrapped in Expected
@@ -224,37 +257,43 @@ public:
[[maybe_unused]] ResultOrErrorType
read(CompletionTokenType token, std::vector<StatementType> const& statements)
{
auto handler = HandlerType{token};
auto result = AsyncResultType{handler};
auto const numStatements = statements.size();
std::optional<FutureWithCallbackType> future;
counters_->registerReadStarted(numStatements);

// todo: perhaps use policy instead
while (true)
{
while (true) {
numReadRequestsOutstanding_ += numStatements;

auto const future = handle_.get().asyncExecute(statements, [handler](auto&&) mutable {
boost::asio::post(boost::asio::get_associated_executor(handler), [handler]() mutable {
handler(boost::system::error_code{});
});
});
auto init = [this, &statements, &future]<typename Self>(Self& self) {
auto sself = std::make_shared<Self>(std::move(self));

// suspend coroutine until completion handler is called
result.get();
future.emplace(handle_.get().asyncExecute(statements, [sself](auto&& res) mutable {
boost::asio::post(
boost::asio::get_associated_executor(*sself),
[sself, res = std::forward<decltype(res)>(res)]() mutable { sself->complete(std::move(res)); }
);
}));
};

auto res = boost::asio::async_compose<CompletionTokenType, void(ResultOrErrorType)>(
init, token, boost::asio::get_associated_executor(token)
);
numReadRequestsOutstanding_ -= numStatements;

// it's safe to call blocking get on future here as we already
// waited for the coroutine to resume above.
if (auto res = future.get(); res)
{
if (res) {
counters_->registerReadFinished(numStatements);
return res;
}
else
{
log_.error() << "Failed batch read in coroutine: " << res.error();

LOG(log_.error()) << "Failed batch read in coroutine: " << res.error();
try {
throwErrorIfNeeded(res.error());
} catch (...) {
counters_->registerReadError(numStatements);
throw;
}
counters_->registerReadRetry(numStatements);
}
}

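The rework above trades the hand-rolled handler/result pair for boost::asio::async_compose: the composed operation is moved into a shared_ptr so the driver callback can keep it alive, and completion is posted back to the operation's associated executor. A minimal standalone sketch of the same pattern follows; asyncUpper and the spawn-based caller are illustrative names, and it assumes Boost.Asio with stackful coroutine support (boost/asio/spawn.hpp), not the clio codebase itself.

#include <boost/asio.hpp>
#include <boost/asio/spawn.hpp>

#include <cctype>
#include <iostream>
#include <memory>
#include <string>

template <typename CompletionToken>
auto
asyncUpper(boost::asio::io_context& ioc, std::string input, CompletionToken&& token)
{
    auto init = [&ioc, input = std::move(input)]<typename Self>(Self& self) {
        // keep the composed operation alive until the async work completes
        auto sself = std::make_shared<Self>(std::move(self));
        boost::asio::post(ioc, [sself, input]() mutable {
            for (auto& c : input)
                c = static_cast<char>(std::toupper(static_cast<unsigned char>(c)));
            sself->complete(std::move(input));  // resumes the suspended caller
        });
    };
    return boost::asio::async_compose<CompletionToken, void(std::string)>(init, token, ioc);
}

int
main()
{
    boost::asio::io_context ioc;
    boost::asio::spawn(ioc, [&ioc](boost::asio::yield_context yield) {
        // suspends here until complete() is called, just like read() above
        std::string const res = asyncUpper(ioc, "clio", yield);
        std::cout << res << '\n';  // prints CLIO
    });
    ioc.run();
}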
@@ -271,36 +310,41 @@ public:
[[maybe_unused]] ResultOrErrorType
read(CompletionTokenType token, StatementType const& statement)
{
auto handler = HandlerType{token};
auto result = AsyncResultType{handler};
std::optional<FutureWithCallbackType> future;
counters_->registerReadStarted();

// todo: perhaps use policy instead
while (true)
{
while (true) {
++numReadRequestsOutstanding_;
auto init = [this, &statement, &future]<typename Self>(Self& self) {
auto sself = std::make_shared<Self>(std::move(self));

auto const future = handle_.get().asyncExecute(statement, [handler](auto const&) mutable {
boost::asio::post(boost::asio::get_associated_executor(handler), [handler]() mutable {
handler(boost::system::error_code{});
});
});

// suspend coroutine until completion handler is called
result.get();
future.emplace(handle_.get().asyncExecute(statement, [sself](auto&& res) mutable {
boost::asio::post(
boost::asio::get_associated_executor(*sself),
[sself, res = std::forward<decltype(res)>(res)]() mutable { sself->complete(std::move(res)); }
);
}));
};

auto res = boost::asio::async_compose<CompletionTokenType, void(ResultOrErrorType)>(
init, token, boost::asio::get_associated_executor(token)
);
--numReadRequestsOutstanding_;

// it's safe to call blocking get on future here as we already
// waited for the coroutine to resume above.
if (auto res = future.get(); res)
{
if (res) {
counters_->registerReadFinished();
return res;
}
else
{
log_.error() << "Failed read in coroutine: " << res.error();

LOG(log_.error()) << "Failed read in coroutine: " << res.error();
try {
throwErrorIfNeeded(res.error());
} catch (...) {
counters_->registerReadError();
throw;
}
counters_->registerReadRetry();
}
}

@@ -318,26 +362,26 @@ public:
std::vector<ResultType>
readEach(CompletionTokenType token, std::vector<StatementType> const& statements)
{
auto handler = HandlerType{token};
auto result = AsyncResultType{handler};

std::atomic_bool hadError = false;
std::atomic_uint64_t errorsCount = 0u;
std::atomic_int numOutstanding = statements.size();
numReadRequestsOutstanding_ += statements.size();

auto futures = std::vector<FutureWithCallbackType>{};
futures.reserve(numOutstanding);
counters_->registerReadStarted(statements.size());

// used as the handler for each async statement individually
auto executionHandler = [handler, &hadError, &numOutstanding](auto const& res) mutable {
auto init = [this, &statements, &futures, &errorsCount, &numOutstanding]<typename Self>(Self& self) {
auto sself = std::make_shared<Self>(std::move(self));
auto executionHandler = [&errorsCount, &numOutstanding, sself](auto const& res) mutable {
if (not res)
hadError = true;
++errorsCount;

// when all async operations complete unblock the result
if (--numOutstanding == 0)
boost::asio::post(boost::asio::get_associated_executor(handler), [handler]() mutable {
handler(boost::system::error_code{});
if (--numOutstanding == 0) {
boost::asio::post(boost::asio::get_associated_executor(*sself), [sself]() mutable {
sself->complete();
});
}
};

std::transform(
@@ -346,21 +390,27 @@ public:
std::back_inserter(futures),
[this, &executionHandler](auto const& statement) {
return handle_.get().asyncExecute(statement, executionHandler);
});

// suspend coroutine until completion handler is called
result.get();
}
);
};

boost::asio::async_compose<CompletionTokenType, void()>(
init, token, boost::asio::get_associated_executor(token)
);
numReadRequestsOutstanding_ -= statements.size();

if (hadError)
if (errorsCount > 0) {
assert(errorsCount <= statements.size());
counters_->registerReadError(errorsCount);
counters_->registerReadFinished(statements.size() - errorsCount);
throw DatabaseTimeout{};
}
counters_->registerReadFinished(statements.size());

std::vector<ResultType> results;
results.reserve(futures.size());

// it's safe to call blocking get on futures here as we already
// waited for the coroutine to resume above.
// it's safe to call blocking get on futures here as we already waited for the coroutine to resume above.
std::transform(
std::make_move_iterator(std::begin(futures)),
std::make_move_iterator(std::end(futures)),
@@ -369,22 +419,31 @@ public:
auto entry = future.get();
auto&& res = entry.value();
return std::move(res);
});
}
);

assert(futures.size() == statements.size());
assert(results.size() == statements.size());
return results;
}

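readEach above fans a batch of statements out to the driver and completes exactly once, when the last outstanding callback decrements the shared counter to zero; errors are only tallied along the way and surfaced after the coroutine resumes. A condensed, self-contained sketch of that last-one-out idea, using plain threads in place of driver callbacks (all names here are illustrative):

#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <mutex>
#include <thread>
#include <vector>

int
main()
{
    constexpr int jobs = 8;
    std::atomic_int numOutstanding{jobs};
    std::atomic_uint64_t errorsCount{0u};

    std::mutex m;
    std::condition_variable cv;
    bool done = false;

    // stands in for the executionHandler that each driver callback runs
    auto onFinished = [&](bool ok) {
        if (not ok)
            ++errorsCount;

        // only the last finisher signals completion, like sself->complete() above
        if (--numOutstanding == 0) {
            std::lock_guard const lck(m);
            done = true;
            cv.notify_one();
        }
    };

    std::vector<std::thread> workers;
    workers.reserve(jobs);
    for (int i = 0; i < jobs; ++i)
        workers.emplace_back([&onFinished, i] { onFinished(i % 4 != 0); });

    std::unique_lock lck(m);
    cv.wait(lck, [&done] { return done; });
    lck.unlock();

    for (auto& t : workers)
        t.join();
    // errorsCount is now 2: jobs 0 and 4 reported failure
}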
/**
* @brief Get statistics about the backend.
*/
boost::json::object
stats() const
{
return counters_->report();
}

private:
void
incrementOutstandingRequestCount()
{
{
std::unique_lock<std::mutex> lck(throttleMutex_);
if (!canAddWriteRequest())
{
log_.trace() << "Max outstanding requests reached. "
if (!canAddWriteRequest()) {
LOG(log_.trace()) << "Max outstanding requests reached. "
<< "Waiting for other requests to finish";
throttleCv_.wait(lck, [this]() { return canAddWriteRequest(); });
}
@@ -396,23 +455,21 @@ private:
decrementOutstandingRequestCount()
{
// sanity check
if (numWriteRequestsOutstanding_ == 0)
{
if (numWriteRequestsOutstanding_ == 0) {
assert(false);
throw std::runtime_error("decrementing num outstanding below 0");
}
size_t cur = (--numWriteRequestsOutstanding_);
size_t const cur = (--numWriteRequestsOutstanding_);
{
// mutex lock required to prevent race condition around spurious
// wakeup
std::lock_guard lck(throttleMutex_);
std::lock_guard const lck(throttleMutex_);
throttleCv_.notify_one();
}
if (cur == 0)
{
if (cur == 0) {
// mutex lock required to prevent race condition around spurious
// wakeup
std::lock_guard lck(syncMutex_);
std::lock_guard const lck(syncMutex_);
syncCv_.notify_one();
}
}
@@ -440,4 +497,4 @@ private:
}
};

} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
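The increment/decrement pair above implements a simple write throttle: a writer blocks on throttleCv_ once the cap is hit, and each finished write wakes one waiter under the same mutex so the wakeup cannot race the predicate check. The core of that pattern, extracted into a minimal sketch (class and member names are illustrative, not clio's):

#include <condition_variable>
#include <cstddef>
#include <mutex>

class WriteThrottle {
    std::size_t outstanding_ = 0;
    std::size_t const max_;
    std::mutex mtx_;
    std::condition_variable cv_;

public:
    explicit WriteThrottle(std::size_t max) : max_{max}
    {
    }

    void
    acquire()  // mirrors incrementOutstandingRequestCount()
    {
        std::unique_lock lck(mtx_);
        cv_.wait(lck, [this] { return outstanding_ < max_; });
        ++outstanding_;
    }

    void
    release()  // mirrors decrementOutstandingRequestCount()
    {
        {
            // lock prevents a race with the waiter's predicate check
            std::lock_guard const lck(mtx_);
            --outstanding_;
        }
        cv_.notify_one();
    }
};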
@@ -17,18 +17,18 @@
*/
//==============================================================================

#include <backend/cassandra/Error.h>
#include <backend/cassandra/impl/Future.h>
#include <backend/cassandra/impl/Result.h>
#include <data/cassandra/Error.h>
#include <data/cassandra/impl/Future.h>
#include <data/cassandra/impl/Result.h>

#include <exception>
#include <vector>

namespace {
static constexpr auto futureDeleter = [](CassFuture* ptr) { cass_future_free(ptr); };
constexpr auto futureDeleter = [](CassFuture* ptr) { cass_future_free(ptr); };
} // namespace

namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {

/* implicit */ Future::Future(CassFuture* ptr) : ManagedObject{ptr, futureDeleter}
{
@@ -37,11 +37,10 @@ namespace Backend::Cassandra::detail {
MaybeError
Future::await() const
{
if (auto const rc = cass_future_error_code(*this); rc)
{
if (auto const rc = cass_future_error_code(*this); rc) {
auto errMsg = [this](std::string const& label) {
char const* message;
std::size_t len;
char const* message = nullptr;
std::size_t len = 0;
cass_future_error_message(*this, &message, &len);
return label + ": " + std::string{message, len};
}(cass_error_desc(rc));
@@ -53,45 +52,42 @@ Future::await() const
ResultOrError
Future::get() const
{
if (auto const rc = cass_future_error_code(*this); rc)
{
if (auto const rc = cass_future_error_code(*this); rc) {
auto const errMsg = [this](std::string const& label) {
char const* message;
std::size_t len;
char const* message = nullptr;
std::size_t len = 0;
cass_future_error_message(*this, &message, &len);
return label + ": " + std::string{message, len};
}("future::get()");
return Error{CassandraError{errMsg, rc}};
}
else
{

return Result{cass_future_get_result(*this)};
}
}

void
invokeHelper(CassFuture* ptr, void* cbPtr)
{
// Note: can't use Future{ptr}.get() because double free will occur :/
auto* cb = static_cast<FutureWithCallback::fn_t*>(cbPtr);
if (auto const rc = cass_future_error_code(ptr); rc)
{
// Note2: we are moving/copying it locally as a workaround for an issue we are seeing from asio recently.
// stackoverflow.com/questions/77004137/boost-asio-async-compose-gets-stuck-under-load
auto* cb = static_cast<FutureWithCallback::FnType*>(cbPtr);
auto local = std::make_unique<FutureWithCallback::FnType>(std::move(*cb));
if (auto const rc = cass_future_error_code(ptr); rc) {
auto const errMsg = [&ptr](std::string const& label) {
char const* message;
std::size_t len;
char const* message = nullptr;
std::size_t len = 0;
cass_future_error_message(ptr, &message, &len);
return label + ": " + std::string{message, len};
}("invokeHelper");
(*cb)(Error{CassandraError{errMsg, rc}});
}
else
{
(*cb)(Result{cass_future_get_result(ptr)});
(*local)(Error{CassandraError{errMsg, rc}});
} else {
(*local)(Result{cass_future_get_result(ptr)});
}
}

/* implicit */ FutureWithCallback::FutureWithCallback(CassFuture* ptr, fn_t&& cb)
: Future{ptr}, cb_{std::make_unique<fn_t>(std::move(cb))}
/* implicit */ FutureWithCallback::FutureWithCallback(CassFuture* ptr, FnType&& cb)
: Future{ptr}, cb_{std::make_unique<FnType>(std::move(cb))}
{
// Instead of passing `this` as the userdata void*, we pass the address of
// the callback itself which will survive std::move of the
@@ -99,4 +95,4 @@ invokeHelper(CassFuture* ptr, void* cbPtr)
cass_future_set_callback(*this, &invokeHelper, cb_.get());
}

} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
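invokeHelper and FutureWithCallback rest on one trick: the std::function is heap-allocated and its address, rather than this, is handed to the C driver as userdata, so the registered pointer stays valid when the wrapping object is moved. A self-contained sketch of the same technique against a stand-in C-style API; c_api_set_callback, trampoline and FutureLike are assumptions for illustration, not driver functions:

#include <functional>
#include <iostream>
#include <memory>

using Fn = std::function<void(int)>;

// stand-in for a C API like cass_future_set_callback (illustrative only)
static void (*g_cb)(int, void*) = nullptr;
static void* g_userdata = nullptr;

void
c_api_set_callback(void (*cb)(int, void*), void* userdata)
{
    g_cb = cb;
    g_userdata = userdata;
}

void
trampoline(int result, void* cbPtr)
{
    auto* cb = static_cast<Fn*>(cbPtr);  // same cast as invokeHelper above
    (*cb)(result);
}

class FutureLike {
    std::unique_ptr<Fn> cb_;  // heap-allocated: its address is stable across moves

public:
    explicit FutureLike(Fn&& cb) : cb_{std::make_unique<Fn>(std::move(cb))}
    {
        // register the callback's own address, not `this`
        c_api_set_callback(&trampoline, cb_.get());
    }
    FutureLike(FutureLike&&) = default;
};

int
main()
{
    FutureLike f{[](int r) { std::cout << "got " << r << '\n'; }};
    FutureLike moved = std::move(f);  // the unique_ptr target keeps its address
    g_cb(42, g_userdata);             // prints "got 42"
}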
@@ -19,15 +19,14 @@

#pragma once

#include <backend/cassandra/Types.h>
#include <backend/cassandra/impl/ManagedObject.h>
#include <data/cassandra/Types.h>
#include <data/cassandra/impl/ManagedObject.h>

#include <cassandra.h>

namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {

struct Future : public ManagedObject<CassFuture>
{
struct Future : public ManagedObject<CassFuture> {
/* implicit */ Future(CassFuture* ptr);

MaybeError
@@ -38,21 +37,20 @@ struct Future : public ManagedObject<CassFuture>
};

void
invokeHelper(CassFuture* ptr, void* self);
invokeHelper(CassFuture* ptr, void* cbPtr);

class FutureWithCallback : public Future
{
class FutureWithCallback : public Future {
public:
using fn_t = std::function<void(ResultOrError)>;
using fn_ptr_t = std::unique_ptr<fn_t>;
using FnType = std::function<void(ResultOrError)>;
using FnPtrType = std::unique_ptr<FnType>;

/* implicit */ FutureWithCallback(CassFuture* ptr, fn_t&& cb);
/* implicit */ FutureWithCallback(CassFuture* ptr, FnType&& cb);
FutureWithCallback(FutureWithCallback const&) = delete;
FutureWithCallback(FutureWithCallback&&) = default;

private:
/*! Wrapped in a unique_ptr so it can survive std::move :/ */
fn_ptr_t cb_;
/** Wrapped in a unique_ptr so it can survive std::move :/ */
FnPtrType cb_;
};

} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
@@ -21,11 +21,10 @@

#include <memory>

namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {

template <typename Managed>
class ManagedObject
{
class ManagedObject {
protected:
std::unique_ptr<Managed, void (*)(Managed*)> ptr_;

@@ -36,12 +35,11 @@ public:
if (rawPtr == nullptr)
throw std::runtime_error("Could not create DB object - got nullptr");
}
ManagedObject(ManagedObject&&) = default;

operator Managed* const() const
operator Managed*() const
{
return ptr_.get();
}
};

} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
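ManagedObject is the RAII backbone for every driver handle touched in this diff: a unique_ptr with a function-pointer deleter, plus an implicit conversion back to the raw pointer so the wrapper can be passed straight into cass_* calls. A minimal sketch of the idea; Handle, FakeObj and fake_free are illustrative stand-ins:

#include <memory>
#include <stdexcept>

template <typename Managed>
class Handle {
protected:
    std::unique_ptr<Managed, void (*)(Managed*)> ptr_;

public:
    Handle(Managed* rawPtr, void (*deleter)(Managed*)) : ptr_{rawPtr, deleter}
    {
        if (rawPtr == nullptr)
            throw std::runtime_error("Could not create DB object - got nullptr");
    }

    Handle(Handle&&) = default;

    operator Managed*() const  // lets the wrapper be passed straight to C functions
    {
        return ptr_.get();
    }
};

// e.g., wrapping a heap object the way the real code wraps driver handles:
struct FakeObj {
    int x;
};

inline void
fake_free(FakeObj* ptr)
{
    delete ptr;
}

inline Handle<FakeObj>
makeFake()
{
    return {new FakeObj{42}, &fake_free};  // freed automatically on destruction
}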
@@ -17,14 +17,14 @@
*/
//==============================================================================

#include <backend/cassandra/impl/Result.h>
#include <data/cassandra/impl/Result.h>

namespace {
static constexpr auto resultDeleter = [](CassResult const* ptr) { cass_result_free(ptr); };
static constexpr auto resultIteratorDeleter = [](CassIterator* ptr) { cass_iterator_free(ptr); };
constexpr auto resultDeleter = [](CassResult const* ptr) { cass_result_free(ptr); };
constexpr auto resultIteratorDeleter = [](CassIterator* ptr) { cass_iterator_free(ptr); };
} // namespace

namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {

/* implicit */ Result::Result(CassResult const* ptr) : ManagedObject{ptr, resultDeleter}
{
@@ -43,7 +43,7 @@ Result::hasRows() const
}

/* implicit */ ResultIterator::ResultIterator(CassIterator* ptr)
: ManagedObject{ptr, resultIteratorDeleter}, hasMore_{cass_iterator_next(ptr)}
: ManagedObject{ptr, resultIteratorDeleter}, hasMore_{cass_iterator_next(ptr) != 0u}
{
}

@@ -56,7 +56,7 @@ ResultIterator::fromResult(Result const& result)
[[maybe_unused]] bool
ResultIterator::moveForward()
{
hasMore_ = cass_iterator_next(*this);
hasMore_ = (cass_iterator_next(*this) != 0u);
return hasMore_;
}

@@ -66,4 +66,4 @@ ResultIterator::hasMore() const
return hasMore_;
}

} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
@@ -19,8 +19,8 @@

#pragma once

#include <backend/cassandra/impl/ManagedObject.h>
#include <backend/cassandra/impl/Tuple.h>
#include <data/cassandra/impl/ManagedObject.h>
#include <data/cassandra/impl/Tuple.h>
#include <util/Expected.h>

#include <ripple/basics/base_uint.h>
@@ -31,7 +31,7 @@
#include <iterator>
#include <tuple>

namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {

template <typename>
static constexpr bool unsupported_v = false;
@@ -44,80 +44,64 @@ extractColumn(CassRow const* row, std::size_t idx)
Type output;

auto throwErrorIfNeeded = [](CassError rc, std::string_view label) {
if (rc != CASS_OK)
{
if (rc != CASS_OK) {
auto const tag = '[' + std::string{label} + ']';
throw std::logic_error(tag + ": " + cass_error_desc(rc));
}
};

using decayed_t = std::decay_t<Type>;
using uint_tuple_t = std::tuple<uint32_t, uint32_t>;
using uchar_vector_t = std::vector<unsigned char>;
using DecayedType = std::decay_t<Type>;
using UintTupleType = std::tuple<uint32_t, uint32_t>;
using UCharVectorType = std::vector<unsigned char>;

if constexpr (std::is_same_v<decayed_t, ripple::uint256>)
{
cass_byte_t const* buf;
std::size_t bufSize;
if constexpr (std::is_same_v<DecayedType, ripple::uint256>) {
cass_byte_t const* buf = nullptr;
std::size_t bufSize = 0;
auto const rc = cass_value_get_bytes(cass_row_get_column(row, idx), &buf, &bufSize);
throwErrorIfNeeded(rc, "Extract ripple::uint256");
output = ripple::uint256::fromVoid(buf);
}
else if constexpr (std::is_same_v<decayed_t, ripple::AccountID>)
{
cass_byte_t const* buf;
std::size_t bufSize;
} else if constexpr (std::is_same_v<DecayedType, ripple::AccountID>) {
cass_byte_t const* buf = nullptr;
std::size_t bufSize = 0;
auto const rc = cass_value_get_bytes(cass_row_get_column(row, idx), &buf, &bufSize);
throwErrorIfNeeded(rc, "Extract ripple::AccountID");
output = ripple::AccountID::fromVoid(buf);
}
else if constexpr (std::is_same_v<decayed_t, uchar_vector_t>)
{
cass_byte_t const* buf;
std::size_t bufSize;
} else if constexpr (std::is_same_v<DecayedType, UCharVectorType>) {
cass_byte_t const* buf = nullptr;
std::size_t bufSize = 0;
auto const rc = cass_value_get_bytes(cass_row_get_column(row, idx), &buf, &bufSize);
throwErrorIfNeeded(rc, "Extract vector<unsigned char>");
output = uchar_vector_t{buf, buf + bufSize};
}
else if constexpr (std::is_same_v<decayed_t, uint_tuple_t>)
{
output = UCharVectorType{buf, buf + bufSize};
} else if constexpr (std::is_same_v<DecayedType, UintTupleType>) {
auto const* tuple = cass_row_get_column(row, idx);
output = TupleIterator::fromTuple(tuple).extract<uint32_t, uint32_t>();
}
else if constexpr (std::is_convertible_v<decayed_t, std::string>)
{
char const* value;
std::size_t len;
} else if constexpr (std::is_convertible_v<DecayedType, std::string>) {
char const* value = nullptr;
std::size_t len = 0;
auto const rc = cass_value_get_string(cass_row_get_column(row, idx), &value, &len);
throwErrorIfNeeded(rc, "Extract string");
output = std::string{value, len};
}
else if constexpr (std::is_same_v<decayed_t, bool>)
{
cass_bool_t flag;
} else if constexpr (std::is_same_v<DecayedType, bool>) {
cass_bool_t flag = cass_bool_t::cass_false;
auto const rc = cass_value_get_bool(cass_row_get_column(row, idx), &flag);
throwErrorIfNeeded(rc, "Extract bool");
output = flag ? true : false;
output = flag != cass_bool_t::cass_false;
}
// clio only uses bigint (int64_t) so we convert any incoming type
else if constexpr (std::is_convertible_v<decayed_t, int64_t>)
{
int64_t out;
else if constexpr (std::is_convertible_v<DecayedType, int64_t>) {
int64_t out = 0;
auto const rc = cass_value_get_int64(cass_row_get_column(row, idx), &out);
throwErrorIfNeeded(rc, "Extract int64");
output = static_cast<decayed_t>(out);
}
else
{
output = static_cast<DecayedType>(out);
} else {
// type not supported for extraction
static_assert(unsupported_v<decayed_t>);
static_assert(unsupported_v<DecayedType>);
}

return output;
}

struct Result : public ManagedObject<CassResult const>
{
struct Result : public ManagedObject<CassResult const> {
/* implicit */ Result(CassResult const* ptr);

[[nodiscard]] std::size_t
@@ -128,7 +112,8 @@ struct Result : public ManagedObject<CassResult const>

template <typename... RowTypes>
std::optional<std::tuple<RowTypes...>>
get() const requires(std::tuple_size<std::tuple<RowTypes...>>{} > 1)
get() const
requires(std::tuple_size<std::tuple<RowTypes...>>{} > 1)
{
// row managed internally by cassandra driver, hence no ManagedObject.
auto const* row = cass_result_first_row(*this);
@@ -153,8 +138,7 @@ struct Result : public ManagedObject<CassResult const>
}
};

class ResultIterator : public ManagedObject<CassIterator>
{
class ResultIterator : public ManagedObject<CassIterator> {
bool hasMore_ = false;

public:
@@ -185,17 +169,13 @@ public:
};

template <typename... Types>
class ResultExtractor
{
class ResultExtractor {
std::reference_wrapper<Result const> ref_;

public:
struct Sentinel
{
};
struct Sentinel {};

struct Iterator
{
struct Iterator {
using iterator_category = std::input_iterator_tag;
using difference_type = std::size_t; // rows count
using value_type = std::tuple<Types...>;
@@ -254,4 +234,4 @@ public:
}
};

} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
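extractColumn above selects the driver getter at compile time from the requested C++ type; anything outside the supported set trips the unsupported_v static_assert. Illustrative call sites, assuming row points at a CassRow fetched from a result:

// All dispatch happens at compile time via if constexpr:
auto hash = extractColumn<ripple::uint256>(row, 0);             // bytes column
auto blob = extractColumn<std::vector<unsigned char>>(row, 1);  // bytes column
auto seq = extractColumn<uint32_t>(row, 2);                     // stored as bigint, cast down
// extractColumn<float>(row, 3);  // would not compile: static_assert(unsupported_v<float>)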
@@ -19,10 +19,10 @@

#pragma once

#include <backend/cassandra/Handle.h>
#include <backend/cassandra/Types.h>
#include <log/Logger.h>
#include <data/cassandra/Handle.h>
#include <data/cassandra/Types.h>
#include <util/Expected.h>
#include <util/log/Logger.h>

#include <boost/asio.hpp>

@@ -30,14 +30,13 @@
#include <chrono>
#include <cmath>

namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {

/**
* @brief A retry policy that employs exponential backoff
*/
class ExponentialBackoffRetryPolicy
{
clio::Logger log_{"Backend"};
class ExponentialBackoffRetryPolicy {
util::Logger log_{"Backend"};

boost::asio::steady_timer timer_;
uint32_t attempt_ = 0u;
@@ -46,7 +45,7 @@ public:
/**
* @brief Create a new retry policy instance with the io_context provided
*/
ExponentialBackoffRetryPolicy(boost::asio::io_context& ioc) : timer_{ioc}
ExponentialBackoffRetryPolicy(boost::asio::io_context& ioc) : timer_{boost::asio::make_strand(ioc)}
{
}

@@ -59,7 +58,7 @@ public:
shouldRetry([[maybe_unused]] CassandraError err)
{
auto const delay = calculateDelay(attempt_);
log_.error() << "Cassandra write error: " << err << ", current retries " << attempt_ << ", retrying in "
LOG(log_.error()) << "Cassandra write error: " << err << ", current retries " << attempt_ << ", retrying in "
<< delay.count() << " milliseconds";

return true; // keep retrying forever
@@ -75,7 +74,7 @@ public:
retry(Fn&& fn)
{
timer_.expires_after(calculateDelay(attempt_++));
timer_.async_wait([fn = std::forward<Fn>(fn)]([[maybe_unused]] const auto& err) {
timer_.async_wait([fn = std::forward<Fn>(fn)]([[maybe_unused]] auto const& err) {
// todo: deal with cancellation (thru err)
fn();
});
@@ -84,11 +83,11 @@ public:
/**
* @brief Calculates the wait time before attempting another retry
*/
std::chrono::milliseconds
static std::chrono::milliseconds
calculateDelay(uint32_t attempt)
{
return std::chrono::milliseconds{lround(std::pow(2, std::min(10u, attempt)))};
}
};

} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
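calculateDelay doubles the wait per attempt and clamps the exponent at 10, so the backoff runs 1, 2, 4, 8, ... milliseconds and settles at 1024 ms. A standalone check of that schedule, mirroring the formula above:

#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstdint>
#include <iostream>

static std::chrono::milliseconds
calculateDelay(uint32_t attempt)
{
    return std::chrono::milliseconds{std::lround(std::pow(2, std::min(10u, attempt)))};
}

int
main()
{
    for (uint32_t const attempt : {0u, 1u, 2u, 3u, 10u, 11u, 42u})
        std::cout << "attempt " << attempt << " -> " << calculateDelay(attempt).count() << "ms\n";
    // prints 1, 2, 4, 8, 1024, 1024, 1024: the delay never exceeds 2^10 milliseconds
}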
@@ -19,14 +19,13 @@

#pragma once

#include <backend/cassandra/impl/ManagedObject.h>
#include <data/cassandra/impl/ManagedObject.h>

#include <cassandra.h>

namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {

class Session : public ManagedObject<CassSession>
{
class Session : public ManagedObject<CassSession> {
static constexpr auto deleter = [](CassSession* ptr) { cass_session_free(ptr); };

public:
@@ -35,4 +34,4 @@ public:
}
};

} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
@@ -17,21 +17,20 @@
*/
//==============================================================================

#include <backend/cassandra/impl/SslContext.h>
#include <data/cassandra/impl/SslContext.h>

namespace {
static constexpr auto contextDeleter = [](CassSsl* ptr) { cass_ssl_free(ptr); };
constexpr auto contextDeleter = [](CassSsl* ptr) { cass_ssl_free(ptr); };
} // namespace

namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {

SslContext::SslContext(std::string const& certificate) : ManagedObject{cass_ssl_new(), contextDeleter}
{
cass_ssl_set_verify_flags(*this, CASS_SSL_VERIFY_NONE);
if (auto const rc = cass_ssl_add_trusted_cert(*this, certificate.c_str()); rc != CASS_OK)
{
if (auto const rc = cass_ssl_add_trusted_cert(*this, certificate.c_str()); rc != CASS_OK) {
throw std::runtime_error(std::string{"Error setting Cassandra SSL Context: "} + cass_error_desc(rc));
}
}

} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
@@ -19,17 +19,16 @@

#pragma once

#include <backend/cassandra/impl/ManagedObject.h>
#include <data/cassandra/impl/ManagedObject.h>

#include <cassandra.h>

#include <string>

namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {

struct SslContext : public ManagedObject<CassSsl>
{
struct SslContext : public ManagedObject<CassSsl> {
explicit SslContext(std::string const& certificate);
};

} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
@@ -19,9 +19,10 @@

#pragma once

#include <backend/cassandra/Types.h>
#include <backend/cassandra/impl/ManagedObject.h>
#include <backend/cassandra/impl/Tuple.h>
#include <data/cassandra/Types.h>
#include <data/cassandra/impl/Collection.h>
#include <data/cassandra/impl/ManagedObject.h>
#include <data/cassandra/impl/Tuple.h>
#include <util/Expected.h>

#include <ripple/basics/base_uint.h>
@@ -33,10 +34,9 @@
#include <compare>
#include <iterator>

namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {

class Statement : public ManagedObject<CassStatement>
{
class Statement : public ManagedObject<CassStatement> {
static constexpr auto deleter = [](CassStatement* ptr) { cass_statement_free(ptr); };

template <typename>
@@ -44,7 +44,7 @@ class Statement : public ManagedObject<CassStatement>

public:
/**
* @brief Construct a new statement with optionally provided arguments
* @brief Construct a new statement with optionally provided arguments.
*
* Note: it's up to the user to make sure the bound parameters match
* the format of the query (e.g. amount of '?' matches count of args).
@@ -64,16 +64,25 @@ public:
cass_statement_set_is_idempotent(*this, cass_true);
}

Statement(Statement&&) = default;

/**
* @brief Binds the given arguments to the statement.
*
* @param args Arguments to bind
*/
template <typename... Args>
void
bind(Args&&... args) const
{
std::size_t idx = 0;
std::size_t idx = 0; // NOLINT(misc-const-correctness)
(this->bindAt<Args>(idx++, std::forward<Args>(args)), ...);
}

/**
* @brief Binds an argument to a specific index.
*
* @param idx The index of the argument
* @param value The value to bind it to
*/
template <typename Type>
void
bindAt(std::size_t const idx, Type&& value) const
@@ -88,62 +97,55 @@ public:
return cass_statement_bind_bytes(*this, idx, static_cast<cass_byte_t const*>(data), size);
};

using decayed_t = std::decay_t<Type>;
using uchar_vec_t = std::vector<unsigned char>;
using uint_tuple_t = std::tuple<uint32_t, uint32_t>;
using DecayedType = std::decay_t<Type>;
using UCharVectorType = std::vector<unsigned char>;
using UintTupleType = std::tuple<uint32_t, uint32_t>;
using UintByteTupleType = std::tuple<uint32_t, ripple::uint256>;
using ByteVectorType = std::vector<ripple::uint256>;

if constexpr (std::is_same_v<decayed_t, ripple::uint256>)
{
if constexpr (std::is_same_v<DecayedType, ripple::uint256>) {
auto const rc = bindBytes(value.data(), value.size());
throwErrorIfNeeded(rc, "Bind ripple::uint256");
}
else if constexpr (std::is_same_v<decayed_t, ripple::AccountID>)
{
} else if constexpr (std::is_same_v<DecayedType, ripple::AccountID>) {
auto const rc = bindBytes(value.data(), value.size());
throwErrorIfNeeded(rc, "Bind ripple::AccountID");
}
else if constexpr (std::is_same_v<decayed_t, uchar_vec_t>)
{
} else if constexpr (std::is_same_v<DecayedType, UCharVectorType>) {
auto const rc = bindBytes(value.data(), value.size());
throwErrorIfNeeded(rc, "Bind vector<unsigned char>");
}
else if constexpr (std::is_convertible_v<decayed_t, std::string>)
{
} else if constexpr (std::is_convertible_v<DecayedType, std::string>) {
// reinterpret_cast is needed here :'(
auto const rc = bindBytes(reinterpret_cast<unsigned char const*>(value.data()), value.size());
throwErrorIfNeeded(rc, "Bind string (as bytes)");
}
else if constexpr (std::is_same_v<decayed_t, uint_tuple_t>)
{
auto const rc = cass_statement_bind_tuple(*this, idx, Tuple{std::move(value)});
throwErrorIfNeeded(rc, "Bind tuple<uint32, uint32>");
}
else if constexpr (std::is_same_v<decayed_t, bool>)
{
} else if constexpr (std::is_same_v<DecayedType, UintTupleType> || std::is_same_v<DecayedType, UintByteTupleType>) {
auto const rc = cass_statement_bind_tuple(*this, idx, Tuple{std::forward<Type>(value)});
throwErrorIfNeeded(rc, "Bind tuple<uint32, uint32> or <uint32_t, ripple::uint256>");
} else if constexpr (std::is_same_v<DecayedType, ByteVectorType>) {
auto const rc = cass_statement_bind_collection(*this, idx, Collection{std::forward<Type>(value)});
throwErrorIfNeeded(rc, "Bind collection");
} else if constexpr (std::is_same_v<DecayedType, bool>) {
auto const rc = cass_statement_bind_bool(*this, idx, value ? cass_true : cass_false);
throwErrorIfNeeded(rc, "Bind bool");
}
else if constexpr (std::is_same_v<decayed_t, Limit>)
{
} else if constexpr (std::is_same_v<DecayedType, Limit>) {
auto const rc = cass_statement_bind_int32(*this, idx, value.limit);
throwErrorIfNeeded(rc, "Bind limit (int32)");
}
// clio only uses bigint (int64_t) so we convert any incoming type
else if constexpr (std::is_convertible_v<decayed_t, int64_t>)
{
else if constexpr (std::is_convertible_v<DecayedType, int64_t>) {
auto const rc = cass_statement_bind_int64(*this, idx, value);
throwErrorIfNeeded(rc, "Bind int64");
}
else
{
} else {
// type not supported for binding
static_assert(unsupported_v<decayed_t>);
static_assert(unsupported_v<DecayedType>);
}
}
};

class PreparedStatement : public ManagedObject<CassPrepared const>
{
/**
* @brief Represents a prepared statement on the DB side.
*
* This is used to produce Statement objects that can be executed.
*/
class PreparedStatement : public ManagedObject<CassPrepared const> {
static constexpr auto deleter = [](CassPrepared const* ptr) { cass_prepared_free(ptr); };

public:
@@ -151,6 +153,12 @@ public:
{
}

/**
* @brief Bind the given arguments and produce a ready to execute Statement.
*
* @param args The arguments to bind
* @return A bound and ready to execute Statement object
*/
template <typename... Args>
Statement
bind(Args&&... args) const
@@ -161,4 +169,4 @@ public:
}
};

} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
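PreparedStatement::bind forwards each argument to Statement::bindAt, which picks the matching cass_statement_bind_* call from the decayed type, now including the tuple and collection overloads added in this hunk. An illustrative call site; the query text, the variable names and the prepare/execute methods on the surrounding Handle are assumptions for illustration:

auto const prepared = handle.prepare("SELECT * FROM objects WHERE key = ? AND sequence <= ? LIMIT ?");
auto const statement = prepared.bind(
    key,       // ripple::uint256, bound as bytes
    sequence,  // uint32_t, converted and bound as bigint (int64)
    Limit{1}   // Limit, bound as int32
);
auto const res = handle.execute(statement);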
@@ -17,14 +17,14 @@
*/
//==============================================================================

#include <backend/cassandra/impl/Tuple.h>
#include <data/cassandra/impl/Tuple.h>

namespace {
static constexpr auto tupleDeleter = [](CassTuple* ptr) { cass_tuple_free(ptr); };
static constexpr auto tupleIteratorDeleter = [](CassIterator* ptr) { cass_iterator_free(ptr); };
constexpr auto tupleDeleter = [](CassTuple* ptr) { cass_tuple_free(ptr); };
constexpr auto tupleIteratorDeleter = [](CassIterator* ptr) { cass_iterator_free(ptr); };
} // namespace

namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {

/* implicit */ Tuple::Tuple(CassTuple* ptr) : ManagedObject{ptr, tupleDeleter}
{
@@ -40,4 +40,4 @@ TupleIterator::fromTuple(CassValue const* value)
return {cass_iterator_from_tuple(value)};
}

} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
@@ -19,8 +19,9 @@

#pragma once

#include <backend/cassandra/impl/ManagedObject.h>
#include <data/cassandra/impl/ManagedObject.h>

#include <ripple/basics/base_uint.h>
#include <cassandra.h>

#include <functional>
@@ -28,10 +29,9 @@
#include <string_view>
#include <tuple>

namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {

class Tuple : public ManagedObject<CassTuple>
{
class Tuple : public ManagedObject<CassTuple> {
static constexpr auto deleter = [](CassTuple* ptr) { cass_tuple_free(ptr); };

template <typename>
@@ -61,36 +61,38 @@ public:
{
using std::to_string;
auto throwErrorIfNeeded = [idx](CassError rc, std::string_view label) {
if (rc != CASS_OK)
{
if (rc != CASS_OK) {
auto const tag = '[' + std::string{label} + ']';
throw std::logic_error(tag + " at idx " + to_string(idx) + ": " + cass_error_desc(rc));
}
};

using decayed_t = std::decay_t<Type>;
using DecayedType = std::decay_t<Type>;

if constexpr (std::is_same_v<decayed_t, bool>)
{
if constexpr (std::is_same_v<DecayedType, bool>) {
auto const rc = cass_tuple_set_bool(*this, idx, value ? cass_true : cass_false);
throwErrorIfNeeded(rc, "Bind bool");
}
// clio only uses bigint (int64_t) so we convert any incoming type
else if constexpr (std::is_convertible_v<decayed_t, int64_t>)
{
else if constexpr (std::is_convertible_v<DecayedType, int64_t>) {
auto const rc = cass_tuple_set_int64(*this, idx, value);
throwErrorIfNeeded(rc, "Bind int64");
}
else
{
} else if constexpr (std::is_same_v<DecayedType, ripple::uint256>) {
auto const rc = cass_tuple_set_bytes(
*this,
idx,
static_cast<cass_byte_t const*>(static_cast<unsigned char const*>(value.data())),
value.size()
);
throwErrorIfNeeded(rc, "Bind ripple::uint256");
} else {
// type not supported for binding
static_assert(unsupported_v<decayed_t>);
static_assert(unsupported_v<DecayedType>);
}
}
};

class TupleIterator : public ManagedObject<CassIterator>
{
class TupleIterator : public ManagedObject<CassIterator> {
template <typename>
static constexpr bool unsupported_v = false;

@@ -119,31 +121,27 @@ private:
throw std::logic_error("Could not extract next value from tuple iterator");

auto throwErrorIfNeeded = [](CassError rc, std::string_view label) {
if (rc != CASS_OK)
{
if (rc != CASS_OK) {
auto const tag = '[' + std::string{label} + ']';
throw std::logic_error(tag + ": " + cass_error_desc(rc));
}
};

using decayed_t = std::decay_t<Type>;
using DecayedType = std::decay_t<Type>;

// clio only uses bigint (int64_t) so we convert any incoming type
if constexpr (std::is_convertible_v<decayed_t, int64_t>)
{
int64_t out;
if constexpr (std::is_convertible_v<DecayedType, int64_t>) {
int64_t out = 0;
auto const rc = cass_value_get_int64(cass_iterator_get_value(*this), &out);
throwErrorIfNeeded(rc, "Extract int64 from tuple");
output = static_cast<decayed_t>(out);
}
else
{
output = static_cast<DecayedType>(out);
} else {
// type not supported for extraction
static_assert(unsupported_v<decayed_t>);
static_assert(unsupported_v<DecayedType>);
}

return output;
}
};

} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
@@ -17,6 +17,7 @@
*/
//==============================================================================

/** @file */
#pragma once

#include <ripple/basics/base_uint.h>
@@ -26,6 +27,7 @@
#include <queue>
#include <sstream>

namespace etl {
/**
* @brief This datastructure is used to keep track of the sequence of the most recent ledger validated by the network.
*
@@ -34,8 +36,7 @@
* Any later calls to methods of this datastructure will not wait. Once the datastructure is stopped, the datastructure
* remains stopped for the rest of its lifetime.
*/
class NetworkValidatedLedgers
{
class NetworkValidatedLedgers {
// max sequence validated by network
std::optional<uint32_t> max_;

@@ -43,6 +44,9 @@ class NetworkValidatedLedgers
std::condition_variable cv_;

public:
/**
* @brief A factory function for NetworkValidatedLedgers.
*/
static std::shared_ptr<NetworkValidatedLedgers>
make_ValidatedLedgers()
{
@@ -50,14 +54,14 @@ public:
}

/**
* @brief Notify the datastructure that idx has been validated by the network
* @brief Notify the datastructure that idx has been validated by the network.
*
* @param idx sequence validated by network
* @param idx Sequence validated by network
*/
void
push(uint32_t idx)
{
std::lock_guard lck(m_);
std::lock_guard const lck(m_);
if (!max_ || idx > *max_)
max_ = idx;
cv_.notify_all();
@@ -68,7 +72,7 @@ public:
*
* If no ledgers are known to have been validated, this function waits until the next ledger is validated
*
* @return sequence of most recently validated ledger. empty optional if the datastructure has been stopped
* @return Sequence of most recently validated ledger. empty optional if the datastructure has been stopped
*/
std::optional<uint32_t>
getMostRecent()
@@ -79,9 +83,9 @@ public:
}

/**
* @brief Waits for the sequence to be validated by the network
* @brief Waits for the sequence to be validated by the network.
*
* @param sequence to wait for
* @param sequence The sequence to wait for
* @return true if sequence was validated, false otherwise a return value of false means the datastructure has been
* stopped
*/
@@ -90,24 +94,24 @@ public:
{
std::unique_lock lck(m_);
auto pred = [sequence, this]() -> bool { return (max_ && sequence <= *max_); };
if (maxWaitMs)
if (maxWaitMs) {
cv_.wait_for(lck, std::chrono::milliseconds(*maxWaitMs));
else
} else {
cv_.wait(lck, pred);
}
return pred();
}
};

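NetworkValidatedLedgers is a one-value rendezvous: push publishes the highest validated sequence under the mutex and wakes all waiters, while waitUntilValidatedByNetwork re-checks the predicate after waking, so a timed-out wait still reports the truth. A minimal usage sketch; the sequence number, the threading and the header path are illustrative assumptions:

#include <etl/ETLHelpers.h>  // assumed location of the classes above

#include <thread>

void
ledgersExample(etl::NetworkValidatedLedgers& ledgers)
{
    std::thread producer([&ledgers] { ledgers.push(12345u); });  // e.g. fed by a rippled subscription
    // Block up to one second for sequence 12345; false means timeout or a stopped datastructure.
    bool const validated = ledgers.waitUntilValidatedByNetwork(12345u, 1000);
    producer.join();
    (void)validated;
}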
// TODO: does the note make sense? lockfree queues provide the same blocking behaviour just without mutex, don't they?
/**
* @brief Generic thread-safe queue with a max capacity
* @brief Generic thread-safe queue with a max capacity.
*
* @note (original note) We can't use a lockfree queue here, since we need the ability to wait for an element to be
* added or removed from the queue. These waits are blocking calls.
*/
template <class T>
class ThreadSafeQueue
{
class ThreadSafeQueue {
std::queue<T> queue_;

mutable std::mutex m_;
@@ -116,21 +120,21 @@ class ThreadSafeQueue

public:
/**
* @brief Create an instance of the queue
* @brief Create an instance of the queue.
*
* @param maxSize maximum size of the queue. Calls that would cause the queue to exceed this size will block until
* free space is available
* free space is available.
*/
ThreadSafeQueue(uint32_t maxSize) : maxSize_(maxSize)
{
}

/**
* @brief Push element onto the queue
* @brief Push element onto the queue.
*
* Note: This method will block until free space is available
* Note: This method will block until free space is available.
*
* @param elt element to push onto queue
* @param elt Element to push onto queue
*/
void
push(T const& elt)
@@ -142,11 +146,11 @@ public:
}

/**
* @brief Push element onto the queue
* @brief Push element onto the queue.
*
* Note: This method will block until free space is available
*
* @param elt element to push onto queue. elt is moved from
* @param elt Element to push onto queue. Ownership is transferred
*/
void
push(T&& elt)
@@ -158,11 +162,11 @@ public:
}

/**
* @brief Pop element from the queue
* @brief Pop element from the queue.
*
* Note: Will block until queue is non-empty
* Note: Will block until queue is non-empty.
*
* @return element popped from queue
* @return Element popped from queue
*/
T
pop()
@@ -178,14 +182,14 @@ public:
}

/**
* @brief Attempt to pop an element
* @brief Attempt to pop an element.
*
* @return element popped from queue or empty optional if queue was empty
* @return Element popped from queue or empty optional if queue was empty
*/
std::optional<T>
tryPop()
{
std::scoped_lock lck(m_);
std::scoped_lock const lck(m_);
if (queue_.empty())
return {};

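ThreadSafeQueue blocks on both ends: push waits while the queue sits at maxSize and pop waits while it is empty, with tryPop as the non-blocking escape hatch. A small usage sketch; capacity, values and the header path are illustrative assumptions:

#include <etl/ETLHelpers.h>  // assumed location of ThreadSafeQueue

#include <thread>

void
queueExample()
{
    etl::ThreadSafeQueue<int> queue{2};  // capacity 2: a third push would block until a pop
    std::thread consumer([&queue] {
        int const first = queue.pop();            // blocks until an element arrives
        auto const maybeSecond = queue.tryPop();  // non-blocking; empty optional when queue is empty
        (void)first;
        (void)maybeSecond;
    });
    queue.push(1);
    queue.push(2);
    consumer.join();
}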
@@ -200,22 +204,22 @@ public:
/**
* @brief Parititions the uint256 keyspace into numMarkers partitions, each of equal size.
*
* @param numMarkers total markers to partition for
* @param numMarkers Total markers to partition for
*/
inline std::vector<ripple::uint256>
getMarkers(size_t numMarkers)
{
assert(numMarkers <= 256);

unsigned char incr = 256 / numMarkers;
unsigned char const incr = 256 / numMarkers;

std::vector<ripple::uint256> markers;
markers.reserve(numMarkers);
ripple::uint256 base{0};
for (size_t i = 0; i < numMarkers; ++i)
{
for (size_t i = 0; i < numMarkers; ++i) {
markers.push_back(base);
base.data()[0] += incr;
}
return markers;
}
} // namespace etl
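getMarkers slices the uint256 keyspace by stepping the most significant byte, so each marker is an equal-width lower bound for one partition. Worked example for numMarkers = 4, where incr = 256 / 4 = 64 (0x40):

// getMarkers(4) yields four prefixes that split the keyspace on the first byte:
//   marker[0] = 0000...0000
//   marker[1] = 4000...0000
//   marker[2] = 8000...0000
//   marker[3] = C000...0000
// Each extractor can then walk [marker[i], marker[i+1]) independently.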
@@ -18,22 +18,25 @@
//==============================================================================

#include <etl/ETLService.h>
#include <util/Constants.h>

using namespace clio;
#include <ripple/protocol/LedgerHeader.h>

#include <utility>

namespace etl {
// Database must be populated when this starts
std::optional<uint32_t>
ETLService::runETLPipeline(uint32_t startSequence, int numExtractors)
ETLService::runETLPipeline(uint32_t startSequence, uint32_t numExtractors)
{
if (finishSequence_ && startSequence > *finishSequence_)
return {};

log_.debug() << "Starting etl pipeline";
LOG(log_.debug()) << "Starting etl pipeline";
state_.isWriting = true;

auto rng = backend_->hardFetchLedgerRangeNoThrow();
if (!rng || rng->maxSequence < startSequence - 1)
{
if (!rng || rng->maxSequence < startSequence - 1) {
assert(false);
throw std::runtime_error("runETLPipeline: parent ledger is null");
}
@@ -42,11 +45,14 @@ ETLService::runETLPipeline(uint32_t startSequence, int numExtractors)
auto extractors = std::vector<std::unique_ptr<ExtractorType>>{};
auto pipe = DataPipeType{numExtractors, startSequence};

for (auto i = 0u; i < numExtractors; ++i)
for (auto i = 0u; i < numExtractors; ++i) {
extractors.push_back(std::make_unique<ExtractorType>(
pipe, networkValidatedLedgers_, ledgerFetcher_, startSequence + i, finishSequence_, state_));
pipe, networkValidatedLedgers_, ledgerFetcher_, startSequence + i, finishSequence_, state_
));
}

auto transformer = TransformerType{pipe, backend_, ledgerLoader_, ledgerPublisher_, startSequence, state_};
auto transformer =
TransformerType{pipe, backend_, ledgerLoader_, ledgerPublisher_, amendmentBlockHandler_, startSequence, state_};
transformer.waitTillFinished(); // suspend current thread until exit condition is met
pipe.cleanup(); // TODO: this should probably happen automatically using destructor

@@ -56,12 +62,13 @@ ETLService::runETLPipeline(uint32_t startSequence, int numExtractors)

auto const end = std::chrono::system_clock::now();
auto const lastPublishedSeq = ledgerPublisher_.getLastPublishedSequence();
log_.debug() << "Extracted and wrote " << lastPublishedSeq.value_or(startSequence) - startSequence << " in "
<< ((end - begin).count()) / 1000000000.0;
static constexpr auto NANOSECONDS_PER_SECOND = 1'000'000'000.0;
LOG(log_.debug()) << "Extracted and wrote " << lastPublishedSeq.value_or(startSequence) - startSequence << " in "
<< ((end - begin).count()) / NANOSECONDS_PER_SECOND;

state_.isWriting = false;

log_.debug() << "Stopping etl pipeline";
LOG(log_.debug()) << "Stopping etl pipeline";
return lastPublishedSeq;
}

@@ -77,73 +84,66 @@ void
ETLService::monitor()
{
auto rng = backend_->hardFetchLedgerRangeNoThrow();
if (!rng)
{
log_.info() << "Database is empty. Will download a ledger "
"from the network.";
std::optional<ripple::LedgerInfo> ledger;
if (!rng) {
LOG(log_.info()) << "Database is empty. Will download a ledger from the network.";
std::optional<ripple::LedgerHeader> ledger;

if (startSequence_)
{
log_.info() << "ledger sequence specified in config. "
try {
if (startSequence_) {
LOG(log_.info()) << "ledger sequence specified in config. "
<< "Will begin ETL process starting with ledger " << *startSequence_;
ledger = ledgerLoader_.loadInitialLedger(*startSequence_);
}
else
{
log_.info() << "Waiting for next ledger to be validated by network...";
} else {
LOG(log_.info()) << "Waiting for next ledger to be validated by network...";
std::optional<uint32_t> mostRecentValidated = networkValidatedLedgers_->getMostRecent();

if (mostRecentValidated)
{
log_.info() << "Ledger " << *mostRecentValidated << " has been validated. "
<< "Downloading...";
if (mostRecentValidated) {
LOG(log_.info()) << "Ledger " << *mostRecentValidated << " has been validated. Downloading...";
ledger = ledgerLoader_.loadInitialLedger(*mostRecentValidated);
}
else
{
log_.info() << "The wait for the next validated "
<< "ledger has been aborted. "
<< "Exiting monitor loop";
} else {
LOG(log_.info()) << "The wait for the next validated ledger has been aborted. "
"Exiting monitor loop";
return;
}
}
} catch (std::runtime_error const& e) {
LOG(log_.fatal()) << "Failed to load initial ledger: " << e.what();
return amendmentBlockHandler_.onAmendmentBlock();
}

if (ledger)
{
if (ledger) {
rng = backend_->hardFetchLedgerRangeNoThrow();
}
else
{
log_.error() << "Failed to load initial ledger. Exiting monitor loop";
} else {
LOG(log_.error()) << "Failed to load initial ledger. Exiting monitor loop";
return;
}
}
else
{
} else {
if (startSequence_)
log_.warn() << "start sequence specified but db is already populated";
LOG(log_.warn()) << "start sequence specified but db is already populated";

log_.info() << "Database already populated. Picking up from the tip of history";
LOG(log_.info()) << "Database already populated. Picking up from the tip of history";
cacheLoader_.load(rng->maxSequence);
}

assert(rng);
uint32_t nextSequence = rng->maxSequence + 1;

log_.debug() << "Database is populated. "
LOG(log_.debug()) << "Database is populated. "
<< "Starting monitor loop. sequence = " << nextSequence;

while (true)
{
if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); rng && rng->maxSequence >= nextSequence)
{
while (not isStopping()) {
nextSequence = publishNextSequence(nextSequence);
}
}

uint32_t
ETLService::publishNextSequence(uint32_t nextSequence)
{
if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); rng && rng->maxSequence >= nextSequence) {
ledgerPublisher_.publish(nextSequence, {});
++nextSequence;
}
else if (networkValidatedLedgers_->waitUntilValidatedByNetwork(nextSequence, 1000))
{
log_.info() << "Ledger with sequence = " << nextSequence << " has been validated by the network. "
} else if (networkValidatedLedgers_->waitUntilValidatedByNetwork(nextSequence, util::MILLISECONDS_PER_SECOND)) {
LOG(log_.info()) << "Ledger with sequence = " << nextSequence << " has been validated by the network. "
<< "Attempting to find in database and publish";

// Attempt to take over responsibility of ETL writer after 10 failed
@@ -153,63 +153,61 @@ ETLService::monitor()
// waits one second between each attempt to read the ledger from the
// database
constexpr size_t timeoutSeconds = 10;
bool success = ledgerPublisher_.publish(nextSequence, timeoutSeconds);
bool const success = ledgerPublisher_.publish(nextSequence, timeoutSeconds);

if (!success)
{
log_.warn() << "Failed to publish ledger with sequence = " << nextSequence << " . Beginning ETL";
if (!success) {
LOG(log_.warn()) << "Failed to publish ledger with sequence = " << nextSequence << " . Beginning ETL";

// returns the most recent sequence published empty optional if no sequence was published
std::optional<uint32_t> lastPublished = runETLPipeline(nextSequence, extractorThreads_);
log_.info() << "Aborting ETL. Falling back to publishing";
LOG(log_.info()) << "Aborting ETL. Falling back to publishing";

// if no ledger was published, don't increment nextSequence
if (lastPublished)
nextSequence = *lastPublished + 1;
}
else
{
} else {
++nextSequence;
}
}
}
return nextSequence;
}

void
ETLService::monitorReadOnly()
{
log_.debug() << "Starting reporting in strict read only mode";
LOG(log_.debug()) << "Starting reporting in strict read only mode";

auto const latestSequenceOpt = [this]() -> std::optional<uint32_t> {
auto rng = backend_->hardFetchLedgerRangeNoThrow();
uint32_t latestSequence;

if (!rng)
{
if (auto net = networkValidatedLedgers_->getMostRecent())
latestSequence = *net;
else
if (!rng) {
if (auto net = networkValidatedLedgers_->getMostRecent()) {
return *net;
}
return std::nullopt;
}

return rng->maxSequence;
}();

if (!latestSequenceOpt.has_value()) {
return;
}
else
{
latestSequence = rng->maxSequence;
}

uint32_t latestSequence = *latestSequenceOpt;

cacheLoader_.load(latestSequence);
latestSequence++;

while (true)
{
if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); rng && rng->maxSequence >= latestSequence)
{
while (not isStopping()) {
if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); rng && rng->maxSequence >= latestSequence) {
ledgerPublisher_.publish(latestSequence, {});
latestSequence = latestSequence + 1;
}
else
{
// if we can't, wait until it's validated by the network, or 1 second passes, whichever occurs first.
// Even if we don't hear from rippled, if ledgers are being written to the db, we publish them.
networkValidatedLedgers_->waitUntilValidatedByNetwork(latestSequence, 1000);
} else {
// if we can't, wait until it's validated by the network, or 1 second passes, whichever occurs
// first. Even if we don't hear from rippled, if ledgers are being written to the db, we publish
// them.
networkValidatedLedgers_->waitUntilValidatedByNetwork(latestSequence, util::MILLISECONDS_PER_SECOND);
}
}
}
@@ -217,7 +215,7 @@ ETLService::monitorReadOnly()
void
ETLService::run()
{
log_.info() << "Starting reporting etl";
LOG(log_.info()) << "Starting reporting etl";
state_.isStopping = false;

doWork();
@@ -227,29 +225,32 @@ void
ETLService::doWork()
{
worker_ = std::thread([this]() {
beast::setCurrentThreadName("rippled: ETLService worker");
beast::setCurrentThreadName("ETLService worker");

if (state_.isReadOnly)
if (state_.isReadOnly) {
monitorReadOnly();
else
} else {
monitor();
}
});
}

ETLService::ETLService(
clio::Config const& config,
util::Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManagerType> subscriptions,
std::shared_ptr<LoadBalancerType> balancer,
std::shared_ptr<NetworkValidatedLedgersType> ledgers)
std::shared_ptr<NetworkValidatedLedgersType> ledgers
)
: backend_(backend)
, loadBalancer_(balancer)
, networkValidatedLedgers_(ledgers)
, networkValidatedLedgers_(std::move(ledgers))
, cacheLoader_(config, ioc, backend, backend->cache())
, ledgerFetcher_(backend, balancer)
, ledgerLoader_(backend, balancer, ledgerFetcher_, state_)
, ledgerPublisher_(ioc, backend, subscriptions, state_)
, ledgerPublisher_(ioc, backend, backend->cache(), subscriptions, state_)
, amendmentBlockHandler_(ioc, state_)
{
startSequence_ = config.maybeValue<uint32_t>("start_sequence");
finishSequence_ = config.maybeValue<uint32_t>("finish_sequence");
@@ -257,3 +258,4 @@ ETLService::ETLService(
extractorThreads_ = config.valueOr<uint32_t>("extractor_threads", extractorThreads_);
txnThreshold_ = config.valueOr<size_t>("txn_threshold", txnThreshold_);
}
} // namespace etl

@@ -19,11 +19,12 @@

#pragma once

#include <backend/BackendInterface.h>
#include <backend/LedgerCache.h>
#include <data/BackendInterface.h>
#include <data/LedgerCache.h>
#include <etl/LoadBalancer.h>
#include <etl/Source.h>
#include <etl/SystemState.h>
#include <etl/impl/AmendmentBlock.h>
#include <etl/impl/CacheLoader.h>
#include <etl/impl/ExtractionDataPipe.h>
#include <etl/impl/Extractor.h>
@@ -31,10 +32,11 @@
#include <etl/impl/LedgerLoader.h>
#include <etl/impl/LedgerPublisher.h>
#include <etl/impl/Transformer.h>
#include <log/Logger.h>
#include <subscriptions/SubscriptionManager.h>
#include <feed/SubscriptionManager.h>
#include <util/log/Logger.h>

#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h"
#include <ripple/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>
#include <boost/asio/steady_timer.hpp>
#include <grpcpp/grpcpp.h>

#include <memory>
@@ -42,7 +44,14 @@
struct AccountTransactionsData;
struct NFTTransactionsData;
struct NFTsData;
namespace feed {
class SubscriptionManager;
} // namespace feed

/**
* @brief This namespace contains everything to do with the ETL and ETL sources.
*/
namespace etl {

/**
* @brief This class is responsible for continuously extracting data from a p2p node, and writing that data to the
@@ -57,21 +66,23 @@ class SubscriptionManager;
* the others will fall back to monitoring/publishing. In this sense, this class dynamically transitions from monitoring
* to writing and from writing to monitoring, based on the activity of other processes running on different machines.
*/
class ETLService
{
class ETLService {
// TODO: make these template parameters in ETLService
using SubscriptionManagerType = SubscriptionManager;
using SubscriptionManagerType = feed::SubscriptionManager;
using LoadBalancerType = LoadBalancer;
using NetworkValidatedLedgersType = NetworkValidatedLedgers;
using DataPipeType = clio::detail::ExtractionDataPipe<org::xrpl::rpc::v1::GetLedgerResponse>;
using CacheLoaderType = clio::detail::CacheLoader<Backend::LedgerCache>;
using LedgerFetcherType = clio::detail::LedgerFetcher<LoadBalancerType>;
using ExtractorType = clio::detail::Extractor<DataPipeType, NetworkValidatedLedgersType, LedgerFetcherType>;
using LedgerLoaderType = clio::detail::LedgerLoader<LoadBalancerType, LedgerFetcherType>;
using LedgerPublisherType = clio::detail::LedgerPublisher<SubscriptionManagerType>;
using TransformerType = clio::detail::Transformer<DataPipeType, LedgerLoaderType, LedgerPublisherType>;
using DataPipeType = etl::detail::ExtractionDataPipe<org::xrpl::rpc::v1::GetLedgerResponse>;
using CacheType = data::LedgerCache;
using CacheLoaderType = etl::detail::CacheLoader<CacheType>;
using LedgerFetcherType = etl::detail::LedgerFetcher<LoadBalancerType>;
using ExtractorType = etl::detail::Extractor<DataPipeType, NetworkValidatedLedgersType, LedgerFetcherType>;
using LedgerLoaderType = etl::detail::LedgerLoader<LoadBalancerType, LedgerFetcherType>;
using LedgerPublisherType = etl::detail::LedgerPublisher<SubscriptionManagerType, CacheType>;
using AmendmentBlockHandlerType = etl::detail::AmendmentBlockHandler<>;
using TransformerType =
etl::detail::Transformer<DataPipeType, LedgerLoaderType, LedgerPublisherType, AmendmentBlockHandlerType>;

clio::Logger log_{"ETL"};
util::Logger log_{"ETL"};

std::shared_ptr<BackendInterface> backend_;
std::shared_ptr<LoadBalancerType> loadBalancer_;
@@ -84,6 +95,7 @@ class ETLService
LedgerFetcherType ledgerFetcher_;
LedgerLoaderType ledgerLoader_;
LedgerPublisherType ledgerPublisher_;
AmendmentBlockHandlerType amendmentBlockHandler_;

SystemState state_;

@@ -94,7 +106,7 @@ class ETLService

public:
/**
* @brief Create an instance of ETLService
* @brief Create an instance of ETLService.
*
* @param config The configuration to use
* @param ioc io context to run on
@@ -104,21 +116,35 @@ public:
* @param ledgers The network validated ledgers datastructure
*/
ETLService(
clio::Config const& config,
util::Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManagerType> subscriptions,
std::shared_ptr<LoadBalancerType> balancer,
std::shared_ptr<NetworkValidatedLedgersType> ledgers);
std::shared_ptr<NetworkValidatedLedgersType> ledgers
);

/**
* @brief A factory function to spawn new ETLService instances.
*
* Creates and runs the ETL service.
*
* @param config The configuration to use
* @param ioc io context to run on
* @param backend BackendInterface implementation
* @param subscriptions Subscription manager
* @param balancer Load balancer to use
* @param ledgers The network validated ledgers datastructure
*/
static std::shared_ptr<ETLService>
make_ETLService(
clio::Config const& config,
util::Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManagerType> subscriptions,
std::shared_ptr<LoadBalancerType> balancer,
std::shared_ptr<NetworkValidatedLedgersType> ledgers)
std::shared_ptr<NetworkValidatedLedgersType> ledgers
)
{
auto etl = std::make_shared<ETLService>(config, ioc, backend, subscriptions, balancer, ledgers);
etl->run();
@@ -127,12 +153,12 @@ public:
}
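For reference, the factory above is typically all a caller needs. A hedged usage sketch; the surrounding objects (config, ioc, backend, subscriptions, balancer, ledgers) are assumed to be constructed as elsewhere in the codebase:

```cpp
auto etl = etl::ETLService::make_ETLService(config, ioc, backend, subscriptions, balancer, ledgers);
// make_ETLService already invoked run(), so the worker thread is now
// monitoring, or extracting if this node wins the writer race.
```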

/**
* @brief Stops components and joins worker thread
* @brief Stops components and joins worker thread.
*/
~ETLService()
{
log_.info() << "onStop called";
log_.debug() << "Stopping Reporting ETL";
LOG(log_.info()) << "onStop called";
LOG(log_.debug()) << "Stopping Reporting ETL";

state_.isStopping = true;
cacheLoader_.stop();
@@ -140,11 +166,11 @@ public:
if (worker_.joinable())
worker_.join();

log_.debug() << "Joined ETLService worker thread";
LOG(log_.debug()) << "Joined ETLService worker thread";
}

/**
* @brief Get time passed since last ledger close, in seconds
* @brief Get time passed since last ledger close, in seconds.
*/
std::uint32_t
lastCloseAgeSeconds() const
@@ -152,6 +178,17 @@ public:
return ledgerPublisher_.lastCloseAgeSeconds();
}

/**
* @brief Check for the amendment blocked state.
*
* @return true if currently amendment blocked; false otherwise
*/
bool
isAmendmentBlocked() const
{
return state_.isAmendmentBlocked;
}

/**
* @brief Get state of ETL as a JSON object
*/
@@ -169,6 +206,16 @@ public:
return result;
}

/**
* @brief Get the etl nodes' state
* @return the etl nodes' state, nullopt if etl nodes are not connected
*/
std::optional<etl::ETLState>
getETLState() const noexcept
{
return loadBalancer_->getETLState();
}

private:
/**
* @brief Run the ETL pipeline.
@@ -177,10 +224,11 @@ private:
* @note database must already be populated when this function is called
*
* @param startSequence the first ledger to extract
* @param numExtractors number of extractors to use
* @return the last ledger written to the database, if any
*/
std::optional<uint32_t>
runETLPipeline(uint32_t startSequence, int offset);
runETLPipeline(uint32_t startSequence, uint32_t numExtractors);

/**
* @brief Monitor the network for newly validated ledgers.
@@ -194,6 +242,15 @@ private:
void
monitor();

/**
* @brief Monitor the network for newly validated ledgers and publish them to the ledgers stream
*
* @param nextSequence the ledger sequence to publish
* @return the next ledger sequence to publish
*/
uint32_t
publishNextSequence(uint32_t nextSequence);

/**
* @brief Monitor the database for newly written ledgers.
*
@@ -207,33 +264,34 @@ private:
* @return true if stopping; false otherwise
*/
bool
isStopping()
isStopping() const
{
return state_.isStopping;
}

/**
* @brief Get the number of markers to use during the initial ledger download
* @brief Get the number of markers to use during the initial ledger download.
*
* This is equivalent to the degree of parallelism during the initial ledger download.
*
* @return the number of markers
*/
std::uint32_t
getNumMarkers()
getNumMarkers() const
{
return numMarkers_;
}

/**
* @brief Start all components to run ETL service
* @brief Start all components to run ETL service.
*/
void
run();

/**
* @brief Spawn the worker thread and start monitoring
* @brief Spawn the worker thread and start monitoring.
*/
void
doWork();
};
} // namespace etl

src/etl/ETLState.cpp (new file, 42 lines)
@@ -0,0 +1,42 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <etl/ETLState.h>
#include <rpc/JS.h>

namespace etl {

ETLState
tag_invoke(boost::json::value_to_tag<ETLState>, boost::json::value const& jv)
{
ETLState state;
auto const& jsonObject = jv.as_object();

if (!jsonObject.contains(JS(error))) {
if (jsonObject.contains(JS(result)) && jsonObject.at(JS(result)).as_object().contains(JS(info))) {
auto const rippledInfo = jsonObject.at(JS(result)).as_object().at(JS(info)).as_object();
if (rippledInfo.contains(JS(network_id)))
state.networkID.emplace(boost::json::value_to<int64_t>(rippledInfo.at(JS(network_id))));
}
}

return state;
}

} // namespace etl
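The `tag_invoke` overload above is what lets `boost::json::value_to` deserialize an `ETLState` directly. A minimal sketch of that conversion; the sample payload mirrors the shape of a rippled `server_info` response and is illustrative only:

```cpp
#include <boost/json.hpp>
#include <etl/ETLState.h>

void example()
{
    auto const jv = boost::json::parse(R"({
        "result": { "info": { "network_id": 1 } }
    })");

    // tag_invoke is found via ADL, so value_to just works for ETLState.
    auto const state = boost::json::value_to<etl::ETLState>(jv);
    // state.networkID now holds 1; an "error" field or missing "info"
    // object would have left it empty.
}
```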
src/etl/ETLState.h (new file, 60 lines)
@@ -0,0 +1,60 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include <data/BackendInterface.h>

#include <boost/json.hpp>

#include <cstdint>
#include <optional>

namespace etl {

/**
* @brief This class is responsible for fetching and storing the state of the ETL information, such as the network id
*/
struct ETLState {
std::optional<uint32_t> networkID;

/**
* @brief Fetch the ETL state from the rippled server
* @param source The source to fetch the state from
* @return The ETL state, nullopt if source not available
*/
template <typename Forward>
static std::optional<ETLState>
fetchETLStateFromSource(Forward const& source) noexcept
{
auto const serverInfoRippled = data::synchronous([&source](auto yield) {
return source.forwardToRippled({{"command", "server_info"}}, std::nullopt, yield);
});

if (serverInfoRippled)
return boost::json::value_to<ETLState>(boost::json::value(*serverInfoRippled));

return std::nullopt;
}
};

ETLState
tag_invoke(boost::json::value_to_tag<ETLState>, boost::json::value const& jv);

} // namespace etl
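Because `fetchETLStateFromSource` only requires a matching `forwardToRippled` member, both a single `Source` and the whole `LoadBalancer` can be probed with it. A hedged usage sketch:

```cpp
// `source` is assumed to be a connected etl::Source (or the LoadBalancer).
auto const state = etl::ETLState::fetchETLStateFromSource(*source);
if (state && state->networkID)
    std::cout << "source is on network " << *state->networkID << "\n";
```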
@@ -17,14 +17,15 @@
*/
//==============================================================================

#include <backend/DBHelpers.h>
#include <data/DBHelpers.h>
#include <etl/ETLService.h>
#include <etl/NFTHelpers.h>
#include <etl/ProbingSource.h>
#include <etl/Source.h>
#include <log/Logger.h>
#include <rpc/RPCHelpers.h>
#include <util/Profiler.h>
#include <util/Random.h>
#include <util/log/Logger.h>

#include <ripple/beast/net/IPEndpoint.h>
#include <ripple/protocol/STLedgerEntry.h>
@@ -36,20 +37,21 @@

#include <thread>

using namespace clio;
using namespace util;

namespace etl {

std::unique_ptr<Source>
LoadBalancer::make_Source(
clio::Config const& config,
boost::asio::io_context& ioContext,
Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> networkValidatedLedgers,
LoadBalancer& balancer)
std::shared_ptr<feed::SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers,
LoadBalancer& balancer
)
{
auto src =
std::make_unique<ProbingSource>(config, ioContext, backend, subscriptions, networkValidatedLedgers, balancer);

auto src = std::make_unique<ProbingSource>(config, ioc, backend, subscriptions, validatedLedgers, balancer);
src->run();

return src;
@@ -57,34 +59,74 @@ LoadBalancer::make_Source(

std::shared_ptr<LoadBalancer>
LoadBalancer::make_LoadBalancer(
clio::Config const& config,
Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers)
std::shared_ptr<feed::SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers
)
{
return std::make_shared<LoadBalancer>(config, ioc, backend, subscriptions, validatedLedgers);
}

LoadBalancer::LoadBalancer(
clio::Config const& config,
boost::asio::io_context& ioContext,
Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> nwvl)
std::shared_ptr<feed::SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers
)
{
if (auto value = config.maybeValue<uint32_t>("num_markers"); value)
downloadRanges_ = std::clamp(*value, 1u, 256u);
else if (backend->fetchLedgerRange())
static constexpr std::uint32_t MAX_DOWNLOAD = 256;
if (auto value = config.maybeValue<uint32_t>("num_markers"); value) {
downloadRanges_ = std::clamp(*value, 1u, MAX_DOWNLOAD);
} else if (backend->fetchLedgerRange()) {
downloadRanges_ = 4;
}

for (auto const& entry : config.array("etl_sources"))
{
std::unique_ptr<Source> source = make_Source(entry, ioContext, backend, subscriptions, nwvl, *this);
auto const allowNoEtl = config.valueOr("allow_no_etl", false);

auto const checkOnETLFailure = [this, allowNoEtl](std::string const& log) {
LOG(log_.error()) << log;

if (!allowNoEtl) {
LOG(log_.error()) << "Set allow_no_etl as true in config to allow clio run without valid ETL sources.";
throw std::logic_error("ETL configuration error.");
}
};

for (auto const& entry : config.array("etl_sources")) {
std::unique_ptr<Source> source = make_Source(entry, ioc, backend, subscriptions, validatedLedgers, *this);

// checking etl node validity
auto const stateOpt = ETLState::fetchETLStateFromSource(*source);

if (!stateOpt) {
checkOnETLFailure(fmt::format(
"Failed to fetch ETL state from source = {} Please check the configuration and network",
source->toString()
));
} else if (etlState_ && etlState_->networkID && stateOpt->networkID && etlState_->networkID != stateOpt->networkID) {
checkOnETLFailure(fmt::format(
"ETL sources must be on the same network. Source network id = {} does not match others network id = {}",
*(stateOpt->networkID),
*(etlState_->networkID)
));
} else {
etlState_ = stateOpt;
}

sources_.push_back(std::move(source));
log_.info() << "Added etl source - " << sources_.back()->toString();
LOG(log_.info()) << "Added etl source - " << sources_.back()->toString();
}

if (sources_.empty())
checkOnETLFailure("No ETL sources configured. Please check the configuration");
}
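The constructor reads three knobs from the config: `etl_sources`, `num_markers` (clamped to [1, 256]), and the new `allow_no_etl` escape hatch. A sketch of the relevant slice of a clio config file; the per-source keys (`ip`, `ws_port`, `grpc_port`) follow clio's conventional layout and are assumed here rather than quoted from this diff:

```json
{
    "etl_sources": [
        { "ip": "127.0.0.1", "ws_port": "6006", "grpc_port": "50051" }
    ],
    "num_markers": 16,
    "allow_no_etl": false
}
```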

LoadBalancer::~LoadBalancer()
{
sources_.clear();
}

std::pair<std::vector<std::string>, bool>
@@ -95,15 +137,17 @@ LoadBalancer::loadInitialLedger(uint32_t sequence, bool cacheOnly)
[this, &response, &sequence, cacheOnly](auto& source) {
auto [data, res] = source->loadInitialLedger(sequence, downloadRanges_, cacheOnly);

if (!res)
log_.error() << "Failed to download initial ledger."
if (!res) {
LOG(log_.error()) << "Failed to download initial ledger."
<< " Sequence = " << sequence << " source = " << source->toString();
else
} else {
response = std::move(data);
}

return res;
},
sequence);
sequence
);
return {std::move(response), success};
}

@@ -111,43 +155,43 @@ LoadBalancer::OptionalGetLedgerResponseType
LoadBalancer::fetchLedger(uint32_t ledgerSequence, bool getObjects, bool getObjectNeighbors)
{
GetLedgerResponseType response;
bool success = execute(
bool const success = execute(
[&response, ledgerSequence, getObjects, getObjectNeighbors, log = log_](auto& source) {
auto [status, data] = source->fetchLedger(ledgerSequence, getObjects, getObjectNeighbors);
response = std::move(data);
if (status.ok() && response.validated())
{
log.info() << "Successfully fetched ledger = " << ledgerSequence
if (status.ok() && response.validated()) {
LOG(log.info()) << "Successfully fetched ledger = " << ledgerSequence
<< " from source = " << source->toString();
return true;
}
else
{
log.warn() << "Could not fetch ledger " << ledgerSequence << ", Reply: " << response.DebugString()

LOG(log.warn()) << "Could not fetch ledger " << ledgerSequence << ", Reply: " << response.DebugString()
<< ", error_code: " << status.error_code() << ", error_msg: " << status.error_message()
<< ", source = " << source->toString();
return false;
}
},
ledgerSequence);
if (success)
ledgerSequence
);
if (success) {
return response;
else
}
return {};
}

std::optional<boost::json::object>
LoadBalancer::forwardToRippled(
boost::json::object const& request,
std::string const& clientIp,
boost::asio::yield_context& yield) const
std::optional<std::string> const& clientIp,
boost::asio::yield_context yield
) const
{
srand((unsigned)time(0));
auto sourceIdx = rand() % sources_.size();
auto numAttempts = 0;
std::size_t sourceIdx = 0;
if (!sources_.empty())
sourceIdx = util::Random::uniform(0ul, sources_.size() - 1);

while (numAttempts < sources_.size())
{
auto numAttempts = 0u;

while (numAttempts < sources_.size()) {
if (auto res = sources_[sourceIdx]->forwardToRippled(request, clientIp, yield))
return res;

@@ -161,8 +205,7 @@ LoadBalancer::forwardToRippled(
bool
LoadBalancer::shouldPropagateTxnStream(Source* in) const
{
for (auto& src : sources_)
{
for (auto& src : sources_) {
assert(src);

// We pick the first Source encountered that is connected
@@ -188,48 +231,55 @@ template <class Func>
bool
LoadBalancer::execute(Func f, uint32_t ledgerSequence)
{
srand((unsigned)time(0));
auto sourceIdx = rand() % sources_.size();
std::size_t sourceIdx = 0;
if (!sources_.empty())
sourceIdx = util::Random::uniform(0ul, sources_.size() - 1);

auto numAttempts = 0;

while (true)
{
while (true) {
auto& source = sources_[sourceIdx];

log_.debug() << "Attempting to execute func. ledger sequence = " << ledgerSequence
LOG(log_.debug()) << "Attempting to execute func. ledger sequence = " << ledgerSequence
<< " - source = " << source->toString();
// Originally, it was (source->hasLedger(ledgerSequence) || true)
/* Sometimes rippled has ledger but doesn't actually know. However,
but this does NOT happen in the normal case and is safe to remove
This || true is only needed when loading full history standalone */
if (source->hasLedger(ledgerSequence))
{
bool res = f(source);
if (res)
{
log_.debug() << "Successfully executed func at source = " << source->toString()
if (source->hasLedger(ledgerSequence)) {
bool const res = f(source);
if (res) {
LOG(log_.debug()) << "Successfully executed func at source = " << source->toString()
<< " - ledger sequence = " << ledgerSequence;
break;
}
else
{
log_.warn() << "Failed to execute func at source = " << source->toString()

LOG(log_.warn()) << "Failed to execute func at source = " << source->toString()
<< " - ledger sequence = " << ledgerSequence;
}
}
else
{
log_.warn() << "Ledger not present at source = " << source->toString()
} else {
LOG(log_.warn()) << "Ledger not present at source = " << source->toString()
<< " - ledger sequence = " << ledgerSequence;
}
sourceIdx = (sourceIdx + 1) % sources_.size();
numAttempts++;
if (numAttempts % sources_.size() == 0)
{
log_.info() << "Ledger sequence " << ledgerSequence << " is not yet available from any configured sources. "
if (numAttempts % sources_.size() == 0) {
LOG(log_.info()) << "Ledger sequence " << ledgerSequence
<< " is not yet available from any configured sources. "
<< "Sleeping and trying again";
std::this_thread::sleep_for(std::chrono::seconds(2));
}
}
return true;
}

std::optional<ETLState>
LoadBalancer::getETLState() noexcept
{
if (!etlState_) {
// retry ETLState fetch
etlState_ = ETLState::fetchETLStateFromSource(*this);
}
return etlState_;
}

} // namespace etl

@@ -19,95 +19,127 @@

#pragma once

#include <backend/BackendInterface.h>
#include <config/Config.h>
#include <data/BackendInterface.h>
#include <etl/ETLHelpers.h>
#include <log/Logger.h>
#include <subscriptions/SubscriptionManager.h>
#include <etl/ETLState.h>
#include <feed/SubscriptionManager.h>
#include <util/config/Config.h>
#include <util/log/Logger.h>

#include <ripple/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>
#include <boost/asio.hpp>
#include "org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h"
#include <grpcpp/grpcpp.h>

namespace etl {
class Source;
class ProbingSource;
} // namespace etl

namespace feed {
class SubscriptionManager;
} // namespace feed

namespace etl {

/**
* @brief This class is used to manage connections to transaction processing processes
* @brief This class is used to manage connections to transaction processing processes.
*
* This class spawns a listener for each etl source, which listens to messages on the ledgers stream (to keep track of
* which ledgers have been validated by the network, and the range of ledgers each etl source has). This class also
* allows requests for ledger data to be load balanced across all possible ETL sources.
*/
class LoadBalancer
{
class LoadBalancer {
public:
using RawLedgerObjectType = org::xrpl::rpc::v1::RawLedgerObject;
using GetLedgerResponseType = org::xrpl::rpc::v1::GetLedgerResponse;
using OptionalGetLedgerResponseType = std::optional<GetLedgerResponseType>;

private:
clio::Logger log_{"ETL"};
static constexpr std::uint32_t DEFAULT_DOWNLOAD_RANGES = 16;

util::Logger log_{"ETL"};
std::vector<std::unique_ptr<Source>> sources_;
std::uint32_t downloadRanges_ = 16;
std::optional<ETLState> etlState_;
std::uint32_t downloadRanges_ =
DEFAULT_DOWNLOAD_RANGES; /*< The number of markers to use when downloading initial ledger */

public:
/**
* @brief Create an instance of the load balancer
* @brief Create an instance of the load balancer.
*
* @param config The configuration to use
* @param ioContext io context to run on
* @param ioc The io_context to run on
* @param backend BackendInterface implementation
* @param subscriptions Subscription manager
* @param nwvl The network validated ledgers datastructure
* @param validatedLedgers The network validated ledgers datastructure
*/
LoadBalancer(
clio::Config const& config,
boost::asio::io_context& ioContext,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> nwvl);

static std::shared_ptr<LoadBalancer>
make_LoadBalancer(
clio::Config const& config,
util::Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers);

static std::unique_ptr<Source>
make_Source(
clio::Config const& config,
boost::asio::io_context& ioContext,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> networkValidatedLedgers,
LoadBalancer& balancer);

~LoadBalancer()
{
sources_.clear();
}
std::shared_ptr<feed::SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers
);

/**
* @brief Load the initial ledger, writing data to the queue
* @brief A factory function for the load balancer.
*
* @param sequence sequence of ledger to download
* @param config The configuration to use
* @param ioc The io_context to run on
* @param backend BackendInterface implementation
* @param subscriptions Subscription manager
* @param validatedLedgers The network validated ledgers datastructure
*/
static std::shared_ptr<LoadBalancer>
make_LoadBalancer(
util::Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<feed::SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers
);

/**
* @brief A factory function for the ETL source.
*
* @param config The configuration to use
* @param ioc The io_context to run on
* @param backend BackendInterface implementation
* @param subscriptions Subscription manager
* @param validatedLedgers The network validated ledgers datastructure
* @param balancer The load balancer
*/
static std::unique_ptr<Source>
make_Source(
util::Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<feed::SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> validatedLedgers,
LoadBalancer& balancer
);

~LoadBalancer();

/**
* @brief Load the initial ledger, writing data to the queue.
*
* @param sequence Sequence of ledger to download
* @param cacheOnly Whether to only write to cache and not to the DB; defaults to false
*/
std::pair<std::vector<std::string>, bool>
loadInitialLedger(uint32_t sequence, bool cacheOnly = false);

/**
* @brief Fetch data for a specific ledger
* @brief Fetch data for a specific ledger.
*
* This function will continuously try to fetch data for the specified ledger until the fetch succeeds, the ledger
* is found in the database, or the server is shutting down.
*
* @param ledgerSequence sequence of ledger to fetch data for
* @param getObjects if true, fetch diff between specified ledger and previous
* @return the extracted data, if extraction was successful. If the ledger was found in the database or the server
* @param ledgerSequence Sequence of the ledger to fetch
* @param getObjects Whether to get the account state diff between this ledger and the prior one
* @param getObjectNeighbors Whether to request object neighbors
* @return The extracted data, if extraction was successful. If the ledger was found in the database or the server
* is shutting down, the optional will be empty
*/
OptionalGetLedgerResponseType
@@ -127,30 +159,42 @@ public:
shouldPropagateTxnStream(Source* in) const;

/**
* @return JSON representation of the state of this load balancer
* @return JSON representation of the state of this load balancer.
*/
boost::json::value
toJson() const;

/**
* @brief Forward a JSON RPC request to a randomly selected rippled node
* @brief Forward a JSON RPC request to a randomly selected rippled node.
*
* @param request JSON-RPC request
* @return response received from rippled node
* @param request JSON-RPC request to forward
* @param clientIp The IP address of the peer, if known
* @param yield The coroutine context
* @return Response received from rippled node as JSON object on success; nullopt on failure
*/
std::optional<boost::json::object>
forwardToRippled(boost::json::object const& request, std::string const& clientIp, boost::asio::yield_context& yield)
const;
forwardToRippled(
boost::json::object const& request,
std::optional<std::string> const& clientIp,
boost::asio::yield_context yield
) const;

/**
* @brief Return state of ETL nodes.
* @return ETL state, nullopt if etl nodes not available
*/
std::optional<ETLState>
getETLState() noexcept;

private:
/**
* @brief Execute a function on a randomly selected source
* @brief Execute a function on a randomly selected source.
*
* @note f is a function that takes a Source as an argument and returns a bool.
* Attempt to execute f for one randomly chosen Source that has the specified ledger. If f returns false, another
* randomly chosen Source is used. The process repeats until f returns true.
*
* @param f function to execute. This function takes the ETL source as an argument, and returns a bool.
* @param f Function to execute. This function takes the ETL source as an argument, and returns a bool
* @param ledgerSequence f is executed for each Source that has this ledger
* @return true if f was eventually executed successfully. false if the ledger was found in the database or the
* server is shutting down
@@ -159,3 +203,4 @@ private:
bool
execute(Func f, uint32_t ledgerSequence);
};
} // namespace etl

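The `execute` contract above is easiest to see from a call site: the callable returns true to stop and false to make the balancer rotate to another source. A hedged internal-usage sketch, mirroring how `fetchLedger` drives it (this is a private member, not a public API; `seq` is an assumed variable):

```cpp
// Assumed to live inside a LoadBalancer member function.
bool const ok = execute(
    [&](auto& source) {
        auto [status, data] = source->fetchLedger(seq, true, false);
        return status.ok();  // false => rotate to the next source and retry
    },
    seq
);
```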
@@ -22,9 +22,12 @@
#include <ripple/protocol/TxMeta.h>
#include <vector>

#include <backend/BackendInterface.h>
#include <backend/DBHelpers.h>
#include <backend/Types.h>
#include <data/BackendInterface.h>
#include <data/DBHelpers.h>
#include <data/Types.h>
#include <fmt/core.h>

namespace etl {

std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
@@ -42,27 +45,26 @@ getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
// that were changed.
std::optional<ripple::AccountID> owner;

for (ripple::STObject const& node : txMeta.getNodes())
{
for (ripple::STObject const& node : txMeta.getNodes()) {
if (node.getFieldU16(ripple::sfLedgerEntryType) != ripple::ltNFTOKEN_PAGE)
continue;

if (!owner)
owner = ripple::AccountID::fromVoid(node.getFieldH256(ripple::sfLedgerIndex).data());

if (node.getFName() == ripple::sfCreatedNode)
{
if (node.getFName() == ripple::sfCreatedNode) {
ripple::STArray const& toAddNFTs =
node.peekAtField(ripple::sfNewFields).downcast<ripple::STObject>().getFieldArray(ripple::sfNFTokens);
std::transform(
toAddNFTs.begin(), toAddNFTs.end(), std::back_inserter(finalIDs), [](ripple::STObject const& nft) {
return nft.getFieldH256(ripple::sfNFTokenID);
});
toAddNFTs.begin(),
toAddNFTs.end(),
std::back_inserter(finalIDs),
[](ripple::STObject const& nft) { return nft.getFieldH256(ripple::sfNFTokenID); }
);
}
// Else it's modified, as there should never be a deleted NFToken page
// as a result of a mint.
else
{
else {
// When a mint results in splitting an existing page,
// it results in a created page and a modified node. Sometimes,
// the created node needs to be linked to a third page, resulting
@@ -79,9 +81,11 @@ getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)

ripple::STArray const& toAddNFTs = previousFields.getFieldArray(ripple::sfNFTokens);
std::transform(
toAddNFTs.begin(), toAddNFTs.end(), std::back_inserter(prevIDs), [](ripple::STObject const& nft) {
return nft.getFieldH256(ripple::sfNFTokenID);
});
toAddNFTs.begin(),
toAddNFTs.end(),
std::back_inserter(prevIDs),
[](ripple::STObject const& nft) { return nft.getFieldH256(ripple::sfNFTokenID); }
);

ripple::STArray const& toAddFinalNFTs =
node.peekAtField(ripple::sfFinalFields).downcast<ripple::STObject>().getFieldArray(ripple::sfNFTokens);
@@ -89,27 +93,28 @@ getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
toAddFinalNFTs.begin(),
toAddFinalNFTs.end(),
std::back_inserter(finalIDs),
[](ripple::STObject const& nft) { return nft.getFieldH256(ripple::sfNFTokenID); });
[](ripple::STObject const& nft) { return nft.getFieldH256(ripple::sfNFTokenID); }
);
}
}

std::sort(finalIDs.begin(), finalIDs.end());
std::sort(prevIDs.begin(), prevIDs.end());
std::vector<ripple::uint256> tokenIDResult;
std::set_difference(
finalIDs.begin(),
finalIDs.end(),
prevIDs.begin(),
prevIDs.end(),
std::inserter(tokenIDResult, tokenIDResult.begin()));
if (tokenIDResult.size() == 1 && owner)
return {
{NFTTransactionsData(tokenIDResult.front(), txMeta, sttx.getTransactionID())},
NFTsData(tokenIDResult.front(), *owner, sttx.getFieldVL(ripple::sfURI), txMeta)};

std::stringstream msg;
msg << " - unexpected NFTokenMint data in tx " << sttx.getTransactionID();
throw std::runtime_error(msg.str());
// Find the first NFT ID that doesn't match. We're looking for an
// added NFT, so the one we want will be the mismatch in finalIDs.
auto const diff = std::mismatch(finalIDs.begin(), finalIDs.end(), prevIDs.begin(), prevIDs.end());

// There should always be a difference so the returned finalIDs
// iterator should never be end(). But better safe than sorry.
if (finalIDs.size() != prevIDs.size() + 1 || diff.first == finalIDs.end() || !owner) {
throw std::runtime_error(fmt::format(" - unexpected NFTokenMint data in tx {}", strHex(sttx.getTransactionID()))
);
}

return {
{NFTTransactionsData(*diff.first, txMeta, sttx.getTransactionID())},
NFTsData(*diff.first, *owner, sttx.getFieldVL(ripple::sfURI), txMeta)};
}
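The rewrite above replaces the sort-plus-`std::set_difference` pass with a single `std::mismatch` over the two sorted ID vectors: when exactly one ID was inserted, the first position where the sequences diverge is that ID. A standalone illustration of the same trick with plain integers:

```cpp
#include <algorithm>
#include <cassert>
#include <vector>

int main()
{
    std::vector<int> const prevIDs{1, 3, 5};
    std::vector<int> const finalIDs{1, 2, 3, 5};  // one freshly inserted element

    // Both ranges are sorted; the first mismatch in finalIDs is the addition.
    auto const diff = std::mismatch(finalIDs.begin(), finalIDs.end(), prevIDs.begin(), prevIDs.end());

    assert(finalIDs.size() == prevIDs.size() + 1 && diff.first != finalIDs.end());
    assert(*diff.first == 2);  // the added element
}
```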

std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
@@ -121,8 +126,7 @@ getNFTokenBurnData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
// Determine who owned the token when it was burned by finding an
// NFTokenPage that was deleted or modified that contains this
// tokenID.
for (ripple::STObject const& node : txMeta.getNodes())
{
for (ripple::STObject const& node : txMeta.getNodes()) {
if (node.getFieldU16(ripple::sfLedgerEntryType) != ripple::ltNFTOKEN_PAGE ||
node.getFName() == ripple::sfCreatedNode)
continue;
@@ -137,16 +141,15 @@ getNFTokenBurnData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
// need to look in the FinalFields.
std::optional<ripple::STArray> prevNFTs;

if (node.isFieldPresent(ripple::sfPreviousFields))
{
if (node.isFieldPresent(ripple::sfPreviousFields)) {
ripple::STObject const& previousFields =
node.peekAtField(ripple::sfPreviousFields).downcast<ripple::STObject>();
if (previousFields.isFieldPresent(ripple::sfNFTokens))
prevNFTs = previousFields.getFieldArray(ripple::sfNFTokens);
}
else if (!prevNFTs && node.getFName() == ripple::sfDeletedNode)
} else if (!prevNFTs && node.getFName() == ripple::sfDeletedNode) {
prevNFTs =
node.peekAtField(ripple::sfFinalFields).downcast<ripple::STObject>().getFieldArray(ripple::sfNFTokens);
}

if (!prevNFTs)
continue;
@@ -155,14 +158,14 @@ getNFTokenBurnData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
std::find_if(prevNFTs->begin(), prevNFTs->end(), [&tokenID](ripple::STObject const& candidate) {
return candidate.getFieldH256(ripple::sfNFTokenID) == tokenID;
});
if (nft != prevNFTs->end())
if (nft != prevNFTs->end()) {
return std::make_pair(
txs,
NFTsData(
tokenID,
ripple::AccountID::fromVoid(node.getFieldH256(ripple::sfLedgerIndex).data()),
txMeta,
true));
tokenID, ripple::AccountID::fromVoid(node.getFieldH256(ripple::sfLedgerIndex).data()), txMeta, true
)
);
}
}

std::stringstream msg;
@@ -176,14 +179,12 @@ getNFTokenAcceptOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx
// If we have the buy offer from this tx, we can determine the owner
// more easily by just looking at the owner of the accepted NFTokenOffer
// object.
if (sttx.isFieldPresent(ripple::sfNFTokenBuyOffer))
{
if (sttx.isFieldPresent(ripple::sfNFTokenBuyOffer)) {
auto const affectedBuyOffer =
std::find_if(txMeta.getNodes().begin(), txMeta.getNodes().end(), [&sttx](ripple::STObject const& node) {
return node.getFieldH256(ripple::sfLedgerIndex) == sttx.getFieldH256(ripple::sfNFTokenBuyOffer);
});
if (affectedBuyOffer == txMeta.getNodes().end())
{
if (affectedBuyOffer == txMeta.getNodes().end()) {
std::stringstream msg;
msg << " - unexpected NFTokenAcceptOffer data in tx " << sttx.getTransactionID();
throw std::runtime_error(msg.str());
@@ -205,8 +206,7 @@ getNFTokenAcceptOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx
std::find_if(txMeta.getNodes().begin(), txMeta.getNodes().end(), [&sttx](ripple::STObject const& node) {
return node.getFieldH256(ripple::sfLedgerIndex) == sttx.getFieldH256(ripple::sfNFTokenSellOffer);
});
if (affectedSellOffer == txMeta.getNodes().end())
{
if (affectedSellOffer == txMeta.getNodes().end()) {
std::stringstream msg;
msg << " - unexpected NFTokenAcceptOffer data in tx " << sttx.getTransactionID();
throw std::runtime_error(msg.str());
@@ -220,8 +220,7 @@ getNFTokenAcceptOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx
.downcast<ripple::STObject>()
.getAccountID(ripple::sfOwner);

for (ripple::STObject const& node : txMeta.getNodes())
{
for (ripple::STObject const& node : txMeta.getNodes()) {
if (node.getFieldU16(ripple::sfLedgerEntryType) != ripple::ltNFTOKEN_PAGE ||
node.getFName() == ripple::sfDeletedNode)
continue;
@@ -232,10 +231,11 @@ getNFTokenAcceptOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx
continue;

ripple::STArray const& nfts = [&node] {
if (node.getFName() == ripple::sfCreatedNode)
if (node.getFName() == ripple::sfCreatedNode) {
return node.peekAtField(ripple::sfNewFields)
.downcast<ripple::STObject>()
.getFieldArray(ripple::sfNFTokens);
}
return node.peekAtField(ripple::sfFinalFields)
.downcast<ripple::STObject>()
.getFieldArray(ripple::sfNFTokens);
@@ -244,11 +244,12 @@ getNFTokenAcceptOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx
auto const nft = std::find_if(nfts.begin(), nfts.end(), [&tokenID](ripple::STObject const& candidate) {
return candidate.getFieldH256(ripple::sfNFTokenID) == tokenID;
});
if (nft != nfts.end())
if (nft != nfts.end()) {
return {
{NFTTransactionsData(tokenID, txMeta, sttx.getTransactionID())},
NFTsData(tokenID, nodeOwner, txMeta, false)};
}
}

std::stringstream msg;
msg << " - unexpected NFTokenAcceptOffer data in tx " << sttx.getTransactionID();
@@ -263,8 +264,7 @@ std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
getNFTokenCancelOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
{
std::vector<NFTTransactionsData> txs;
for (ripple::STObject const& node : txMeta.getNodes())
{
for (ripple::STObject const& node : txMeta.getNodes()) {
if (node.getFieldU16(ripple::sfLedgerEntryType) != ripple::ltNFTOKEN_OFFER)
continue;

@@ -300,8 +300,7 @@ getNFTDataFromTx(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
if (txMeta.getResultTER() != ripple::tesSUCCESS)
return {{}, {}};

switch (sttx.getTxnType())
{
switch (sttx.getTxnType()) {
case ripple::TxType::ttNFTOKEN_MINT:
return getNFTokenMintData(txMeta, sttx);

@@ -338,3 +337,4 @@ getNFTDataFromObj(std::uint32_t const seq, std::string const& key, std::string c

return nfts;
}
} // namespace etl
@@ -17,21 +17,35 @@
*/
//==============================================================================

/** @file */
#pragma once

#include <backend/DBHelpers.h>
#include <data/DBHelpers.h>

#include <ripple/protocol/STTx.h>
#include <ripple/protocol/TxMeta.h>

namespace etl {

/**
* @brief Pull NFT data from TX via ETLService
* @brief Pull NFT data from TX via ETLService.
*
* @param txMeta Transaction metadata
* @param sttx The transaction
* @return NFT transactions data as a pair of transactions and optional NFTsData
*/
std::pair<std::vector<NFTTransactionsData>, std::optional<NFTsData>>
getNFTDataFromTx(ripple::TxMeta const& txMeta, ripple::STTx const& sttx);

/**
* @brief Pull NFT data from ledger object via loadInitialLedger
* @brief Pull NFT data from ledger object via loadInitialLedger.
*
* @param seq The ledger sequence to pull for
* @param key The owner key
* @param blob Object data as blob
* @return The NFT data as a vector
*/
std::vector<NFTsData>
getNFTDataFromObj(std::uint32_t const seq, std::string const& key, std::string const& blob);
getNFTDataFromObj(std::uint32_t seq, std::string const& key, std::string const& blob);

} // namespace etl
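A hedged sketch of how the transformer side might consume these helpers for one transaction; the write steps are placeholders, not the real database calls:

```cpp
// txMeta and sttx are assumed to come from a freshly fetched ledger.
auto const [nftTxs, maybeNFT] = etl::getNFTDataFromTx(txMeta, sttx);
for (auto const& record : nftTxs) {
    (void)record;  // placeholder: write one NFT-by-transaction index row
}
if (maybeNFT) {
    // placeholder: upsert the token's latest owner/URI/burn state
}
```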
@@ -18,18 +18,18 @@
//==============================================================================

#include <etl/ProbingSource.h>
#include <log/Logger.h>

using namespace clio;
namespace etl {

ProbingSource::ProbingSource(
clio::Config const& config,
util::Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<SubscriptionManager> subscriptions,
std::shared_ptr<feed::SubscriptionManager> subscriptions,
std::shared_ptr<NetworkValidatedLedgers> nwvl,
LoadBalancer& balancer,
boost::asio::ssl::context sslCtx)
boost::asio::ssl::context sslCtx
)
: sslCtx_{std::move(sslCtx)}
, sslSrc_{make_shared<
SslSource>(config, ioc, std::ref(sslCtx_), backend, subscriptions, nwvl, balancer, make_SSLHooks())}
@@ -75,8 +75,7 @@ ProbingSource::hasLedger(uint32_t sequence) const
boost::json::object
ProbingSource::toJson() const
{
if (!currentSrc_)
{
if (!currentSrc_) {
boost::json::object sourcesJson = {
{"ws", plainSrc_->toJson()},
{"wss", sslSrc_->toJson()},
@@ -106,37 +105,44 @@ ProbingSource::token() const
}

std::pair<std::vector<std::string>, bool>
ProbingSource::loadInitialLedger(std::uint32_t ledgerSequence, std::uint32_t numMarkers, bool cacheOnly)
ProbingSource::loadInitialLedger(std::uint32_t sequence, std::uint32_t numMarkers, bool cacheOnly)
{
if (!currentSrc_)
return {{}, false};
return currentSrc_->loadInitialLedger(ledgerSequence, numMarkers, cacheOnly);
return currentSrc_->loadInitialLedger(sequence, numMarkers, cacheOnly);
}

std::pair<grpc::Status, ProbingSource::GetLedgerResponseType>
ProbingSource::fetchLedger(uint32_t ledgerSequence, bool getObjects, bool getObjectNeighbors)
ProbingSource::fetchLedger(uint32_t sequence, bool getObjects, bool getObjectNeighbors)
{
if (!currentSrc_)
return {};
return currentSrc_->fetchLedger(ledgerSequence, getObjects, getObjectNeighbors);
return currentSrc_->fetchLedger(sequence, getObjects, getObjectNeighbors);
}

std::optional<boost::json::object>
ProbingSource::forwardToRippled(
boost::json::object const& request,
std::string const& clientIp,
boost::asio::yield_context& yield) const
std::optional<std::string> const& clientIp,
boost::asio::yield_context yield
) const
{
if (!currentSrc_)
return {};
if (!currentSrc_) // Source may need to reach rippled before the probed connection is established, e.g. to check validity
{
if (auto res = plainSrc_->forwardToRippled(request, clientIp, yield))
return res;

return sslSrc_->forwardToRippled(request, clientIp, yield);
}
return currentSrc_->forwardToRippled(request, clientIp, yield);
}

std::optional<boost::json::object>
ProbingSource::requestFromRippled(
boost::json::object const& request,
std::string const& clientIp,
boost::asio::yield_context& yield) const
std::optional<std::string> const& clientIp,
boost::asio::yield_context yield
) const
{
if (!currentSrc_)
return {};
@@ -148,23 +154,21 @@ ProbingSource::make_SSLHooks() noexcept
{
return {// onConnected
[this](auto ec) {
std::lock_guard lck(mtx_);
std::lock_guard const lck(mtx_);
if (currentSrc_)
return SourceHooks::Action::STOP;

if (!ec)
{
if (!ec) {
plainSrc_->pause();
currentSrc_ = sslSrc_;
log_.info() << "Selected WSS as the main source: " << currentSrc_->toString();
LOG(log_.info()) << "Selected WSS as the main source: " << currentSrc_->toString();
}
return SourceHooks::Action::PROCEED;
},
// onDisconnected
[this](auto ec) {
std::lock_guard lck(mtx_);
if (currentSrc_)
{
[this](auto /* ec */) {
std::lock_guard const lck(mtx_);
if (currentSrc_) {
currentSrc_ = nullptr;
plainSrc_->resume();
}
@@ -177,26 +181,25 @@ ProbingSource::make_PlainHooks() noexcept
{
return {// onConnected
[this](auto ec) {
std::lock_guard lck(mtx_);
std::lock_guard const lck(mtx_);
if (currentSrc_)
return SourceHooks::Action::STOP;

if (!ec)
{
if (!ec) {
sslSrc_->pause();
currentSrc_ = plainSrc_;
log_.info() << "Selected Plain WS as the main source: " << currentSrc_->toString();
LOG(log_.info()) << "Selected Plain WS as the main source: " << currentSrc_->toString();
}
return SourceHooks::Action::PROCEED;
},
// onDisconnected
[this](auto ec) {
std::lock_guard lck(mtx_);
if (currentSrc_)
{
[this](auto /* ec */) {
std::lock_guard const lck(mtx_);
if (currentSrc_) {
currentSrc_ = nullptr;
sslSrc_->resume();
}
return SourceHooks::Action::STOP;
}};
}
};
} // namespace etl
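The two hook pairs above implement a small race: both transports probe, the first successful connection pauses the other, and a disconnect restarts the race. A distilled, compilable sketch of that state machine; the names here are illustrative, not the real ProbingSource API:

```cpp
struct Candidate {
    void pause() {}   // stop probing this transport
    void resume() {}  // restart probing this transport
};

struct Prober {
    Candidate plain, ssl;
    Candidate* current = nullptr;

    void onConnected(Candidate& winner, Candidate& loser)
    {
        if (current != nullptr)
            return;     // race already decided
        loser.pause();  // the slower transport stops probing
        current = &winner;
    }

    void onDisconnected()
    {
        if (current == nullptr)
            return;
        Candidate& other = (current == &plain) ? ssl : plain;
        current = nullptr;
        other.resume();  // kick the race off again
    }
};
```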
|
||||
@@ -19,9 +19,9 @@

#pragma once

#include <config/Config.h>
#include <etl/Source.h>
#include <log/Logger.h>
#include <util/config/Config.h>
#include <util/log/Logger.h>

#include <boost/asio.hpp>
#include <boost/beast/core.hpp>
@@ -31,20 +31,21 @@

#include <mutex>

namespace etl {

/**
 * @brief This Source implementation attempts to connect over both secure websocket and plain websocket.
 *
 * First to connect pauses the other and the probing is considered done at this point.
 * If however the connected source loses connection the probing is kickstarted again.
 */
class ProbingSource : public Source
{
class ProbingSource : public Source {
public:
    // TODO: inject when unit tests will be written for ProbingSource
    using GetLedgerResponseType = org::xrpl::rpc::v1::GetLedgerResponse;

private:
    clio::Logger log_{"ETL"};
    util::Logger log_{"ETL"};

    std::mutex mtx_;
    boost::asio::ssl::context sslCtx_;
@@ -54,7 +55,7 @@ private:

public:
    /**
     * @brief Create an instance of the probing source
     * @brief Create an instance of the probing source.
     *
     * @param config The configuration to use
     * @param ioc io context to run on
@@ -65,15 +66,16 @@ public:
     * @param sslCtx The SSL context to use; defaults to tlsv12
     */
    ProbingSource(
        clio::Config const& config,
        util::Config const& config,
        boost::asio::io_context& ioc,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<SubscriptionManager> subscriptions,
        std::shared_ptr<feed::SubscriptionManager> subscriptions,
        std::shared_ptr<NetworkValidatedLedgers> nwvl,
        LoadBalancer& balancer,
        boost::asio::ssl::context sslCtx = boost::asio::ssl::context{boost::asio::ssl::context::tlsv12});
        boost::asio::ssl::context sslCtx = boost::asio::ssl::context{boost::asio::ssl::context::tlsv12}
    );

    ~ProbingSource() = default;
    ~ProbingSource() override = default;

    void
    run() override;
@@ -97,14 +99,17 @@ public:
    toString() const override;

    std::pair<std::vector<std::string>, bool>
    loadInitialLedger(std::uint32_t ledgerSequence, std::uint32_t numMarkers, bool cacheOnly = false) override;
    loadInitialLedger(std::uint32_t sequence, std::uint32_t numMarkers, bool cacheOnly = false) override;

    std::pair<grpc::Status, GetLedgerResponseType>
    fetchLedger(uint32_t ledgerSequence, bool getObjects = true, bool getObjectNeighbors = false) override;
    fetchLedger(uint32_t sequence, bool getObjects = true, bool getObjectNeighbors = false) override;

    std::optional<boost::json::object>
    forwardToRippled(boost::json::object const& request, std::string const& clientIp, boost::asio::yield_context& yield)
        const override;
    forwardToRippled(
        boost::json::object const& request,
        std::optional<std::string> const& clientIp,
        boost::asio::yield_context yield
    ) const override;

    boost::uuids::uuid
    token() const override;
@@ -113,8 +118,9 @@ private:
    std::optional<boost::json::object>
    requestFromRippled(
        boost::json::object const& request,
        std::string const& clientIp,
        boost::asio::yield_context& yield) const override;
        std::optional<std::string> const& clientIp,
        boost::asio::yield_context yield
    ) const override;

    SourceHooks
    make_SSLHooks() noexcept;
@@ -122,3 +128,4 @@ private:
    SourceHooks
    make_PlainHooks() noexcept;
};
} // namespace etl

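The doc comment above describes the probing contract: whichever transport connects first pauses the other, and a disconnect restarts the race. A minimal self-contained sketch of that pattern, using hypothetical stand-in types (DummySource and Prober are illustrative only, not clio's API):

// Sketch of the probing pattern: two transports race; the winner pauses the
// loser, and a disconnect resumes the loser so probing can start over.
#include <iostream>
#include <mutex>

struct DummySource {
    bool paused = false;
    void pause() { paused = true; }
    void resume() { paused = false; }
};

struct Prober {
    std::mutex mtx;
    DummySource plain, ssl;
    DummySource* current = nullptr;

    // Called by whichever transport finishes its handshake first.
    void onConnected(DummySource& winner, DummySource& loser) {
        std::lock_guard const lck(mtx);
        if (current != nullptr)
            return;      // probing already settled (the STOP case above)
        loser.pause();   // first to connect pauses the other
        current = &winner;
    }

    // Losing the connection kicks probing off again (the onDisconnected hook).
    void onDisconnected() {
        std::lock_guard const lck(mtx);
        if (current != nullptr) {
            DummySource& other = (current == &plain) ? ssl : plain;
            current = nullptr;
            other.resume();
        }
    }
};

int main() {
    Prober p;
    p.onConnected(p.ssl, p.plain);  // WSS wins the race; plain WS is paused
    std::cout << "plain paused: " << p.plain.paused << '\n';  // 1
    p.onDisconnected();             // connection lost; plain WS resumes probing
    std::cout << "plain paused: " << p.plain.paused << '\n';  // 0
}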
@@ -1,3 +1,5 @@
# ETL subsystem

A single clio node has one or more ETL sources, specified in the config
file. clio will subscribe to the `ledgers` stream of each of the ETL
sources. This stream sends a message whenever a new ledger is validated. Upon

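For reference, the stream subscription described here is the subscribe message that SourceImpl::onHandshake builds further down in Source.cpp. A minimal sketch of just that message construction, using boost::json as the codebase does:

// Sketch: the subscribe command a source sends once its websocket handshake
// completes (mirrors the boost::json object built in SourceImpl::onHandshake).
#include <boost/json.hpp>
#include <iostream>

int main()
{
    boost::json::object jv{
        {"command", "subscribe"},
        {"streams", {"ledger", "manifests", "validations", "transactions_proposed"}}};
    std::cout << boost::json::serialize(jv) << '\n';
    // {"command":"subscribe","streams":["ledger","manifests","validations","transactions_proposed"]}
}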
@@ -17,13 +17,11 @@
 */
//==============================================================================

#include <backend/DBHelpers.h>
#include <data/DBHelpers.h>
#include <etl/ETLService.h>
#include <etl/LoadBalancer.h>
#include <etl/NFTHelpers.h>
#include <etl/ProbingSource.h>
#include <etl/Source.h>
#include <log/Logger.h>
#include <rpc/RPCHelpers.h>
#include <util/Profiler.h>

@@ -36,112 +34,39 @@

#include <thread>

using namespace clio;
namespace etl {

static boost::beast::websocket::stream_base::timeout
make_TimeoutOption()
{
    // See #289 for details.
    // TODO: investigate the issue and find if there is a solution other than
    // introducing artificial timeouts.
    if (true)
    {
        // The only difference between this and the suggested client role is
        // that idle_timeout is set to 20 instead of none()
        auto opt = boost::beast::websocket::stream_base::timeout{};
        opt.handshake_timeout = std::chrono::seconds(30);
        opt.idle_timeout = std::chrono::seconds(20);
        opt.keep_alive_pings = false;
        return opt;
    }
    else
    {
        return boost::beast::websocket::stream_base::timeout::suggested(boost::beast::role_type::client);
    }
}

template <class Derived>
void
SourceImpl<Derived>::reconnect(boost::beast::error_code ec)
{
    if (paused_)
        return;

    if (connected_)
        hooks_.onDisconnected(ec);

    connected_ = false;
    // These are somewhat normal errors. operation_aborted occurs on shutdown,
    // when the timer is cancelled. connection_refused will occur repeatedly
    std::string err = ec.message();
    // if we cannot connect to the transaction processing process
    if (ec.category() == boost::asio::error::get_ssl_category())
    {
        err = std::string(" (") + boost::lexical_cast<std::string>(ERR_GET_LIB(ec.value())) + "," +
            boost::lexical_cast<std::string>(ERR_GET_REASON(ec.value())) + ") ";
        // ERR_PACK /* crypto/err/err.h */
        char buf[128];
        ::ERR_error_string_n(ec.value(), buf, sizeof(buf));
        err += buf;

        std::cout << err << std::endl;
    }

    if (ec != boost::asio::error::operation_aborted && ec != boost::asio::error::connection_refused)
    {
        log_.error() << "error code = " << ec << " - " << toString();
    }
    else
    {
        log_.warn() << "error code = " << ec << " - " << toString();
    }

    // exponentially increasing timeouts, with a max of 30 seconds
    size_t waitTime = std::min(pow(2, numFailures_), 30.0);
    numFailures_++;
    timer_.expires_after(boost::asio::chrono::seconds(waitTime));
    timer_.async_wait([this](auto ec) {
        bool startAgain = (ec != boost::asio::error::operation_aborted);
        log_.trace() << "async_wait : ec = " << ec;
        derived().close(startAgain);
    });
}

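The retry delay in reconnect() doubles on every consecutive failure and is capped at 30 seconds. A standalone sketch of that schedule, using the same formula as above:

// Backoff schedule: 2^numFailures seconds, capped at 30, so the waits go
// 1, 2, 4, 8, 16, 30, 30, ... for failures 0, 1, 2, ...
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <iostream>

int main()
{
    for (size_t numFailures = 0; numFailures < 8; ++numFailures) {
        auto const waitTime = static_cast<size_t>(std::min(std::pow(2.0, numFailures), 30.0));
        std::cout << "failure " << numFailures << " -> wait " << waitTime << "s\n";
    }
}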
void
PlainSource::close(bool startAgain)
{
    timer_.cancel();
    ioc_.post([this, startAgain]() {
    boost::asio::post(strand_, [this, startAgain]() {
        if (closing_)
            return;

        if (derived().ws().is_open())
        {
        if (derived().ws().is_open()) {
            // onStop() also calls close(). If the async_close is called twice,
            // an assertion fails. Using closing_ makes sure async_close is only
            // called once
            closing_ = true;
            derived().ws().async_close(boost::beast::websocket::close_code::normal, [this, startAgain](auto ec) {
                if (ec)
                {
                    log_.error() << " async_close : "
                                 << "error code = " << ec << " - " << toString();
                if (ec) {
                    LOG(log_.error()) << "async_close: error code = " << ec << " - " << toString();
                }
                closing_ = false;
                if (startAgain)
                {
                    ws_ = std::make_unique<boost::beast::websocket::stream<boost::beast::tcp_stream>>(
                        boost::asio::make_strand(ioc_));

                if (startAgain) {
                    ws_ = std::make_unique<StreamType>(strand_);
                    run();
                }
            });
        }
        else if (startAgain)
        {
            ws_ = std::make_unique<boost::beast::websocket::stream<boost::beast::tcp_stream>>(
                boost::asio::make_strand(ioc_));

        } else if (startAgain) {
            ws_ = std::make_unique<StreamType>(strand_);
            run();
        }
    });
@@ -151,96 +76,59 @@ void
SslSource::close(bool startAgain)
{
    timer_.cancel();
    ioc_.post([this, startAgain]() {
    boost::asio::post(strand_, [this, startAgain]() {
        if (closing_)
            return;

        if (derived().ws().is_open())
        {
            // onStop() also calls close(). If the async_close is called twice,
            // an assertion fails. Using closing_ makes sure async_close is only
            // called once
        if (derived().ws().is_open()) {
            // onStop() also calls close(). If the async_close is called twice, an assertion fails. Using closing_
            // makes sure async_close is only called once
            closing_ = true;
            derived().ws().async_close(boost::beast::websocket::close_code::normal, [this, startAgain](auto ec) {
                if (ec)
                {
                    log_.error() << " async_close : "
                                 << "error code = " << ec << " - " << toString();
                if (ec) {
                    LOG(log_.error()) << "async_close: error code = " << ec << " - " << toString();
                }
                closing_ = false;
                if (startAgain)
                {
                    ws_ = std::make_unique<
                        boost::beast::websocket::stream<boost::beast::ssl_stream<boost::beast::tcp_stream>>>(
                        boost::asio::make_strand(ioc_), *sslCtx_);

                if (startAgain) {
                    ws_ = std::make_unique<StreamType>(strand_, *sslCtx_);
                    run();
                }
            });
        }
        else if (startAgain)
        {
            ws_ = std::make_unique<boost::beast::websocket::stream<boost::beast::ssl_stream<boost::beast::tcp_stream>>>(
                boost::asio::make_strand(ioc_), *sslCtx_);

        } else if (startAgain) {
            ws_ = std::make_unique<StreamType>(strand_, *sslCtx_);
            run();
        }
    });
}

template <class Derived>
void
SourceImpl<Derived>::onResolve(boost::beast::error_code ec, boost::asio::ip::tcp::resolver::results_type results)
{
    log_.trace() << "ec = " << ec << " - " << toString();
    if (ec)
    {
        // try again
        reconnect(ec);
    }
    else
    {
        boost::beast::get_lowest_layer(derived().ws()).expires_after(std::chrono::seconds(30));
        boost::beast::get_lowest_layer(derived().ws()).async_connect(results, [this](auto ec, auto ep) {
            derived().onConnect(ec, ep);
        });
    }
}

void
PlainSource::onConnect(
    boost::beast::error_code ec,
    boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint)
    boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint
)
{
    log_.trace() << "ec = " << ec << " - " << toString();
    if (ec)
    {
    if (ec) {
        // start over
        reconnect(ec);
    }
    else
    {
    } else {
        connected_ = true;
        numFailures_ = 0;
        // Turn off timeout on the tcp stream, because websocket stream has its
        // own timeout system

        // Websocket stream has its own timeout system
        boost::beast::get_lowest_layer(derived().ws()).expires_never();

        // Set a desired timeout for the websocket stream
        derived().ws().set_option(make_TimeoutOption());

        // Set a decorator to change the User-Agent of the handshake
        derived().ws().set_option(
            boost::beast::websocket::stream_base::decorator([](boost::beast::websocket::request_type& req) {
                req.set(boost::beast::http::field::user_agent, "clio-client");

                req.set("X-User", "clio-client");
            }));
            })
        );

        // Update the host_ string. This will provide the value of the
        // Host HTTP header during the WebSocket handshake.
        // See https://tools.ietf.org/html/rfc7230#section-5.4
        auto host = ip_ + ':' + std::to_string(endpoint.port());
        // Perform the websocket handshake
        derived().ws().async_handshake(host, "/", [this](auto ec) { onHandshake(ec); });
    }
}
@@ -248,572 +136,45 @@ PlainSource::onConnect(
void
SslSource::onConnect(boost::beast::error_code ec, boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint)
{
    log_.trace() << "ec = " << ec << " - " << toString();
    if (ec)
    {
    if (ec) {
        // start over
        reconnect(ec);
    }
    else
    {
    } else {
        connected_ = true;
        numFailures_ = 0;
        // Turn off timeout on the tcp stream, because websocket stream has its
        // own timeout system

        // Websocket stream has its own timeout system
        boost::beast::get_lowest_layer(derived().ws()).expires_never();

        // Set a desired timeout for the websocket stream
        derived().ws().set_option(make_TimeoutOption());

        // Set a decorator to change the User-Agent of the handshake
        derived().ws().set_option(
            boost::beast::websocket::stream_base::decorator([](boost::beast::websocket::request_type& req) {
                req.set(boost::beast::http::field::user_agent, "clio-client");

                req.set("X-User", "clio-client");
            }));
            })
        );

        // Update the host_ string. This will provide the value of the
        // Host HTTP header during the WebSocket handshake.
        // See https://tools.ietf.org/html/rfc7230#section-5.4
        auto host = ip_ + ':' + std::to_string(endpoint.port());
        // Perform the websocket handshake
        ws().next_layer().async_handshake(
            boost::asio::ssl::stream_base::client, [this, endpoint](auto ec) { onSslHandshake(ec, endpoint); });
        ws().next_layer().async_handshake(boost::asio::ssl::stream_base::client, [this, endpoint](auto ec) {
            onSslHandshake(ec, endpoint);
        });
    }
}

void
SslSource::onSslHandshake(
    boost::beast::error_code ec,
    boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint)
    boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint
)
{
    if (ec)
    {
    if (ec) {
        reconnect(ec);
    }
    else
    {
        // Perform the websocket handshake
    } else {
        auto host = ip_ + ':' + std::to_string(endpoint.port());
        // Perform the websocket handshake
        ws().async_handshake(host, "/", [this](auto ec) { onHandshake(ec); });
    }
}

template <class Derived>
void
SourceImpl<Derived>::onHandshake(boost::beast::error_code ec)
{
    log_.trace() << "ec = " << ec << " - " << toString();
    if (auto action = hooks_.onConnected(ec); action == SourceHooks::Action::STOP)
        return;

    if (ec)
    {
        // start over
        reconnect(ec);
    }
    else
    {
        boost::json::object jv{
            {"command", "subscribe"}, {"streams", {"ledger", "manifests", "validations", "transactions_proposed"}}};
        std::string s = boost::json::serialize(jv);
        log_.trace() << "Sending subscribe stream message";

        derived().ws().set_option(
            boost::beast::websocket::stream_base::decorator([](boost::beast::websocket::request_type& req) {
                req.set(
                    boost::beast::http::field::user_agent, std::string(BOOST_BEAST_VERSION_STRING) + " clio-client");

                req.set("X-User", "coro-client");
            }));

        // Send the message
        derived().ws().async_write(boost::asio::buffer(s), [this](auto ec, size_t size) { onWrite(ec, size); });
    }
}

template <class Derived>
void
SourceImpl<Derived>::onWrite(boost::beast::error_code ec, size_t bytesWritten)
{
    log_.trace() << "ec = " << ec << " - " << toString();
    if (ec)
    {
        // start over
        reconnect(ec);
    }
    else
    {
        derived().ws().async_read(readBuffer_, [this](auto ec, size_t size) { onRead(ec, size); });
    }
}

template <class Derived>
void
SourceImpl<Derived>::onRead(boost::beast::error_code ec, size_t size)
{
    log_.trace() << "ec = " << ec << " - " << toString();
    // if error or error reading message, start over
    if (ec)
    {
        reconnect(ec);
    }
    else
    {
        handleMessage();
        boost::beast::flat_buffer buffer;
        swap(readBuffer_, buffer);

        log_.trace() << "calling async_read - " << toString();
        derived().ws().async_read(readBuffer_, [this](auto ec, size_t size) { onRead(ec, size); });
    }
}

template <class Derived>
bool
SourceImpl<Derived>::handleMessage()
{
    log_.trace() << toString();

    setLastMsgTime();
    connected_ = true;
    try
    {
        std::string msg{static_cast<char const*>(readBuffer_.data().data()), readBuffer_.size()};
        log_.trace() << msg;
        boost::json::value raw = boost::json::parse(msg);
        log_.trace() << "parsed";
        boost::json::object response = raw.as_object();

        uint32_t ledgerIndex = 0;
        if (response.contains("result"))
        {
            boost::json::object result = response["result"].as_object();
            if (result.contains("ledger_index"))
            {
                ledgerIndex = result["ledger_index"].as_int64();
            }
            if (result.contains("validated_ledgers"))
            {
                boost::json::string const& validatedLedgers = result["validated_ledgers"].as_string();

                setValidatedRange({validatedLedgers.c_str(), validatedLedgers.size()});
            }
            log_.info() << "Received a message on ledger "
                        << " subscription stream. Message : " << response << " - " << toString();
        }
        else if (response.contains("type") && response["type"] == "ledgerClosed")
        {
            log_.info() << "Received a message on ledger "
                        << " subscription stream. Message : " << response << " - " << toString();
            if (response.contains("ledger_index"))
            {
                ledgerIndex = response["ledger_index"].as_int64();
            }
            if (response.contains("validated_ledgers"))
            {
                boost::json::string const& validatedLedgers = response["validated_ledgers"].as_string();
                setValidatedRange({validatedLedgers.c_str(), validatedLedgers.size()});
            }
        }
        else
        {
            if (balancer_.shouldPropagateTxnStream(this))
            {
                if (response.contains("transaction"))
                {
                    forwardCache_.freshen();
                    subscriptions_->forwardProposedTransaction(response);
                }
                else if (response.contains("type") && response["type"] == "validationReceived")
                {
                    subscriptions_->forwardValidation(response);
                }
                else if (response.contains("type") && response["type"] == "manifestReceived")
                {
                    subscriptions_->forwardManifest(response);
                }
            }
        }

        if (ledgerIndex != 0)
        {
            log_.trace() << "Pushing ledger sequence = " << ledgerIndex << " - " << toString();
            networkValidatedLedgers_->push(ledgerIndex);
        }
        return true;
    }
    catch (std::exception const& e)
    {
        log_.error() << "Exception in handleMessage : " << e.what();
        return false;
    }
}
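handleMessage() above dispatches on the shape of the incoming JSON: command responses nest fields under "result", while ledgerClosed stream notifications carry them at the top level. A hedged sketch of just that extraction logic (the field values are illustrative; real messages carry many more fields):

// Sketch: the two ledger-stream message shapes handleMessage() inspects.
#include <boost/json.hpp>
#include <cstdint>
#include <iostream>

static std::int64_t
extractLedgerIndex(boost::json::object const& response)
{
    // Command responses nest the fields under "result"...
    if (response.contains("result")) {
        auto const& result = response.at("result").as_object();
        if (result.contains("ledger_index"))
            return result.at("ledger_index").as_int64();
    }
    // ...while ledgerClosed stream notifications carry them at the top level.
    else if (response.contains("type") && response.at("type") == "ledgerClosed") {
        if (response.contains("ledger_index"))
            return response.at("ledger_index").as_int64();
    }
    return 0;  // no new validated ledger in this message
}

int main()
{
    auto streamMsg = boost::json::parse(
        R"({"type":"ledgerClosed","ledger_index":81000000,"validated_ledgers":"32570-81000000"})");
    auto cmdResponse = boost::json::parse(
        R"({"result":{"ledger_index":81000000,"validated_ledgers":"32570-81000000"}})");

    std::cout << extractLedgerIndex(streamMsg.as_object()) << '\n';    // 81000000
    std::cout << extractLedgerIndex(cmdResponse.as_object()) << '\n';  // 81000000
}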

// TODO: move to detail
class AsyncCallData
{
    clio::Logger log_{"ETL"};

    std::unique_ptr<org::xrpl::rpc::v1::GetLedgerDataResponse> cur_;
    std::unique_ptr<org::xrpl::rpc::v1::GetLedgerDataResponse> next_;

    org::xrpl::rpc::v1::GetLedgerDataRequest request_;
    std::unique_ptr<grpc::ClientContext> context_;

    grpc::Status status_;
    unsigned char nextPrefix_;

    std::string lastKey_;

public:
    AsyncCallData(uint32_t seq, ripple::uint256 const& marker, std::optional<ripple::uint256> const& nextMarker)
    {
        request_.mutable_ledger()->set_sequence(seq);
        if (marker.isNonZero())
        {
            request_.set_marker(marker.data(), marker.size());
        }
        request_.set_user("ETL");
        nextPrefix_ = 0x00;
        if (nextMarker)
            nextPrefix_ = nextMarker->data()[0];

        unsigned char prefix = marker.data()[0];

        log_.debug() << "Setting up AsyncCallData. marker = " << ripple::strHex(marker)
                     << " . prefix = " << ripple::strHex(std::string(1, prefix))
                     << " . nextPrefix_ = " << ripple::strHex(std::string(1, nextPrefix_));

        assert(nextPrefix_ > prefix || nextPrefix_ == 0x00);

        cur_ = std::make_unique<org::xrpl::rpc::v1::GetLedgerDataResponse>();
        next_ = std::make_unique<org::xrpl::rpc::v1::GetLedgerDataResponse>();
        context_ = std::make_unique<grpc::ClientContext>();
    }

    enum class CallStatus { MORE, DONE, ERRORED };

    CallStatus
    process(
        std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub>& stub,
        grpc::CompletionQueue& cq,
        BackendInterface& backend,
        bool abort,
        bool cacheOnly = false)
    {
        log_.trace() << "Processing response. "
                     << "Marker prefix = " << getMarkerPrefix();
        if (abort)
        {
            log_.error() << "AsyncCallData aborted";
            return CallStatus::ERRORED;
        }
        if (!status_.ok())
        {
            log_.error() << "AsyncCallData status_ not ok: "
                         << " code = " << status_.error_code() << " message = " << status_.error_message();
            return CallStatus::ERRORED;
        }
        if (!next_->is_unlimited())
        {
            log_.warn() << "AsyncCallData is_unlimited is false. Make sure "
                           "secure_gateway is set correctly at the ETL source";
        }

        std::swap(cur_, next_);

        bool more = true;

        // if no marker returned, we are done
        if (cur_->marker().size() == 0)
            more = false;

        // if returned marker is greater than our end, we are done
        unsigned char prefix = cur_->marker()[0];
        if (nextPrefix_ != 0x00 && prefix >= nextPrefix_)
            more = false;

        // if we are not done, make the next async call
        if (more)
        {
            request_.set_marker(std::move(cur_->marker()));
            call(stub, cq);
        }

        auto const numObjects = cur_->ledger_objects().objects_size();
        log_.debug() << "Writing " << numObjects << " objects";

        std::vector<Backend::LedgerObject> cacheUpdates;
        cacheUpdates.reserve(numObjects);

        for (int i = 0; i < numObjects; ++i)
        {
            auto& obj = *(cur_->mutable_ledger_objects()->mutable_objects(i));
            if (!more && nextPrefix_ != 0x00)
            {
                if (((unsigned char)obj.key()[0]) >= nextPrefix_)
                    continue;
            }
            cacheUpdates.push_back(
                {*ripple::uint256::fromVoidChecked(obj.key()),
                 {obj.mutable_data()->begin(), obj.mutable_data()->end()}});
            if (!cacheOnly)
            {
                if (lastKey_.size())
                    backend.writeSuccessor(std::move(lastKey_), request_.ledger().sequence(), std::string{obj.key()});
                lastKey_ = obj.key();
                backend.writeNFTs(getNFTDataFromObj(request_.ledger().sequence(), obj.key(), obj.data()));
                backend.writeLedgerObject(
                    std::move(*obj.mutable_key()), request_.ledger().sequence(), std::move(*obj.mutable_data()));
            }
        }
        backend.cache().update(cacheUpdates, request_.ledger().sequence(), cacheOnly);
        log_.debug() << "Wrote " << numObjects << " objects. Got more: " << (more ? "YES" : "NO");

        return more ? CallStatus::MORE : CallStatus::DONE;
    }

    void
    call(std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub>& stub, grpc::CompletionQueue& cq)
    {
        context_ = std::make_unique<grpc::ClientContext>();

        std::unique_ptr<grpc::ClientAsyncResponseReader<org::xrpl::rpc::v1::GetLedgerDataResponse>> rpc(
            stub->PrepareAsyncGetLedgerData(context_.get(), request_, &cq));

        rpc->StartCall();

        rpc->Finish(next_.get(), &status_, this);
    }

    std::string
    getMarkerPrefix()
    {
        if (next_->marker().size() == 0)
            return "";
        else
            return ripple::strHex(std::string{next_->marker().data()[0]});
    }

    std::string
    getLastKey()
    {
        return lastKey_;
    }
};

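loadInitialLedger below fans the download out across numMarkers partitions of the key space; each AsyncCallData owns the half-open range [marker, nextMarker), which the nextPrefix_ bookkeeping above enforces by leading byte. A hedged sketch of how evenly spaced first-byte prefixes would carve up that space (hypothetical helper; the real getMarkers() returns full ripple::uint256 markers):

// Sketch: carving the 256 possible leading key bytes into numMarkers ranges.
#include <cstdint>
#include <iostream>
#include <vector>

int main()
{
    std::uint32_t const numMarkers = 4;
    std::vector<unsigned char> prefixes;
    for (std::uint32_t i = 0; i < numMarkers; ++i)
        prefixes.push_back(static_cast<unsigned char>(i * 256 / numMarkers));

    for (std::uint32_t i = 0; i < numMarkers; ++i) {
        unsigned int const lo = prefixes[i];
        // The last range runs to the end of the key space (nextPrefix_ == 0x00).
        unsigned int const hi = (i + 1 < numMarkers) ? prefixes[i + 1] : 256;
        std::cout << "range " << i << ": first byte [" << lo << ", " << hi << ")\n";
    }
}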
template <class Derived>
std::pair<std::vector<std::string>, bool>
SourceImpl<Derived>::loadInitialLedger(uint32_t sequence, uint32_t numMarkers, bool cacheOnly)
{
    if (!stub_)
        return {{}, false};

    grpc::CompletionQueue cq;
    void* tag;
    bool ok = false;
    std::vector<AsyncCallData> calls;
    auto markers = getMarkers(numMarkers);

    for (size_t i = 0; i < markers.size(); ++i)
    {
        std::optional<ripple::uint256> nextMarker;

        if (i + 1 < markers.size())
            nextMarker = markers[i + 1];

        calls.emplace_back(sequence, markers[i], nextMarker);
    }

    log_.debug() << "Starting data download for ledger " << sequence << ". Using source = " << toString();

    for (auto& c : calls)
        c.call(stub_, cq);

    size_t numFinished = 0;
    bool abort = false;
    size_t incr = 500000;
    size_t progress = incr;
    std::vector<std::string> edgeKeys;

    while (numFinished < calls.size() && cq.Next(&tag, &ok))
    {
        assert(tag);
        auto ptr = static_cast<AsyncCallData*>(tag);

        if (!ok)
        {
            log_.error() << "loadInitialLedger - ok is false";
            return {{}, false}; // handle cancelled
        }
        else
        {
            log_.trace() << "Marker prefix = " << ptr->getMarkerPrefix();

            auto result = ptr->process(stub_, cq, *backend_, abort, cacheOnly);
            if (result != AsyncCallData::CallStatus::MORE)
            {
                numFinished++;
                log_.debug() << "Finished a marker. "
                             << "Current number of finished = " << numFinished;

                std::string lastKey = ptr->getLastKey();

                if (lastKey.size())
                    edgeKeys.push_back(ptr->getLastKey());
            }

            if (result == AsyncCallData::CallStatus::ERRORED)
                abort = true;

            if (backend_->cache().size() > progress)
            {
                log_.info() << "Downloaded " << backend_->cache().size() << " records from rippled";
                progress += incr;
            }
        }
    }

    log_.info() << "Finished loadInitialLedger. cache size = " << backend_->cache().size();
    return {std::move(edgeKeys), !abort};
}

template <class Derived>
std::pair<grpc::Status, org::xrpl::rpc::v1::GetLedgerResponse>
SourceImpl<Derived>::fetchLedger(uint32_t ledgerSequence, bool getObjects, bool getObjectNeighbors)
{
    org::xrpl::rpc::v1::GetLedgerResponse response;
    if (!stub_)
        return {{grpc::StatusCode::INTERNAL, "No Stub"}, response};

    // ledger header with txns and metadata
    org::xrpl::rpc::v1::GetLedgerRequest request;
    grpc::ClientContext context;
    request.mutable_ledger()->set_sequence(ledgerSequence);
    request.set_transactions(true);
    request.set_expand(true);
    request.set_get_objects(getObjects);
    request.set_get_object_neighbors(getObjectNeighbors);
    request.set_user("ETL");
    grpc::Status status = stub_->GetLedger(&context, request, &response);
    if (status.ok() && !response.is_unlimited())
    {
        log_.warn() << "SourceImpl::fetchLedger - is_unlimited is "
                       "false. Make sure secure_gateway is set "
                       "correctly on the ETL source. source = "
                    << toString() << " status = " << status.error_message();
    }
    return {status, std::move(response)};
}

template <class Derived>
std::optional<boost::json::object>
SourceImpl<Derived>::forwardToRippled(
    boost::json::object const& request,
    std::string const& clientIp,
    boost::asio::yield_context& yield) const
{
    if (auto resp = forwardCache_.get(request); resp)
    {
        log_.debug() << "request hit forwardCache";
        return resp;
    }

    return requestFromRippled(request, clientIp, yield);
}

template <class Derived>
std::optional<boost::json::object>
SourceImpl<Derived>::requestFromRippled(
    boost::json::object const& request,
    std::string const& clientIp,
    boost::asio::yield_context& yield) const
{
    log_.trace() << "Attempting to forward request to tx. "
                 << "request = " << boost::json::serialize(request);

    boost::json::object response;
    if (!connected_)
    {
        log_.error() << "Attempted to proxy but failed to connect to tx";
        return {};
    }
    namespace beast = boost::beast;         // from <boost/beast.hpp>
    namespace http = beast::http;           // from <boost/beast/http.hpp>
    namespace websocket = beast::websocket; // from
    namespace net = boost::asio;            // from
    using tcp = boost::asio::ip::tcp;       // from
    try
    {
        boost::beast::error_code ec;
        // These objects perform our I/O
        tcp::resolver resolver{ioc_};

        log_.trace() << "Creating websocket";
        auto ws = std::make_unique<websocket::stream<beast::tcp_stream>>(ioc_);

        // Look up the domain name
        auto const results = resolver.async_resolve(ip_, wsPort_, yield[ec]);
        if (ec)
            return {};

        ws->next_layer().expires_after(std::chrono::seconds(3));

        log_.trace() << "Connecting websocket";
        // Make the connection on the IP address we get from a lookup
        ws->next_layer().async_connect(results, yield[ec]);
        if (ec)
            return {};

        // Set a decorator to change the User-Agent of the handshake
        // and to tell rippled to charge the client IP for RPC
        // resources. See "secure_gateway" in
        //
        // https://github.com/ripple/rippled/blob/develop/cfg/rippled-example.cfg
        ws->set_option(websocket::stream_base::decorator([&clientIp](websocket::request_type& req) {
            req.set(http::field::user_agent, std::string(BOOST_BEAST_VERSION_STRING) + " websocket-client-coro");
            req.set(http::field::forwarded, "for=" + clientIp);
        }));
        log_.trace() << "client ip: " << clientIp;

        log_.trace() << "Performing websocket handshake";
        // Perform the websocket handshake
        ws->async_handshake(ip_, "/", yield[ec]);
        if (ec)
            return {};

        log_.trace() << "Sending request";
        // Send the message
        ws->async_write(net::buffer(boost::json::serialize(request)), yield[ec]);
        if (ec)
            return {};

        beast::flat_buffer buffer;
        ws->async_read(buffer, yield[ec]);
        if (ec)
            return {};

        auto begin = static_cast<char const*>(buffer.data().data());
        auto end = begin + buffer.data().size();
        auto parsed = boost::json::parse(std::string(begin, end));

        if (!parsed.is_object())
        {
            log_.error() << "Error parsing response: " << std::string{begin, end};
            return {};
        }
        log_.trace() << "Successfully forwarded request";

        response = parsed.as_object();

        response["forwarded"] = true;
        return response;
    }
    catch (std::exception const& e)
    {
        log_.error() << "Encountered exception : " << e.what();
        return {};
    }
}
} // namespace etl

855  src/etl/Source.h (file diff suppressed because it is too large)
Some files were not shown because too many files have changed in this diff.