Mirror of https://github.com/XRPLF/clio.git (synced 2025-11-19 19:25:53 +00:00)

Compare commits

6 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | 2e2740d4c5 |  |
|  | 5004dc4e15 |  |
|  | 665fab183a |  |
|  | b65ac67d17 |  |
|  | 7b18e28c47 |  |
|  | 4940d463dc |  |
47 .clang-tidy

@@ -8,7 +8,6 @@ Checks: '-*,
bugprone-chained-comparison,
bugprone-compare-pointer-to-member-virtual-function,
bugprone-copy-constructor-init,
bugprone-crtp-constructor-accessibility,
bugprone-dangling-handle,
bugprone-dynamic-static-initializers,
bugprone-empty-catch,
@@ -34,11 +33,9 @@ Checks: '-*,
bugprone-non-zero-enum-to-bool-conversion,
bugprone-optional-value-conversion,
bugprone-parent-virtual-call,
bugprone-pointer-arithmetic-on-polymorphic-object,
bugprone-posix-return,
bugprone-redundant-branch-condition,
bugprone-reserved-identifier,
bugprone-return-const-ref-from-parameter,
bugprone-shared-ptr-array-mismatch,
bugprone-signal-handler,
bugprone-signed-char-misuse,
@@ -58,7 +55,6 @@ Checks: '-*,
bugprone-suspicious-realloc-usage,
bugprone-suspicious-semicolon,
bugprone-suspicious-string-compare,
bugprone-suspicious-stringview-data-usage,
bugprone-swapped-arguments,
bugprone-switch-missing-default-case,
bugprone-terminating-continue,
@@ -101,12 +97,10 @@ Checks: '-*,
modernize-make-unique,
modernize-pass-by-value,
modernize-type-traits,
modernize-use-designated-initializers,
modernize-use-emplace,
modernize-use-equals-default,
modernize-use-equals-delete,
modernize-use-override,
modernize-use-ranges,
modernize-use-starts-ends-with,
modernize-use-std-numbers,
modernize-use-using,
@@ -127,12 +121,9 @@ Checks: '-*,
readability-convert-member-functions-to-static,
readability-duplicate-include,
readability-else-after-return,
readability-enum-initial-value,
readability-implicit-bool-conversion,
readability-inconsistent-declaration-parameter-name,
readability-identifier-naming,
readability-make-member-function-const,
readability-math-missing-parentheses,
readability-misleading-indentation,
readability-non-const-parameter,
readability-redundant-casting,
@@ -144,48 +135,14 @@ Checks: '-*,
readability-simplify-boolean-expr,
readability-static-accessed-through-instance,
readability-static-definition-in-anonymous-namespace,
readability-suspicious-call-argument,
readability-use-std-min-max
readability-suspicious-call-argument
'

CheckOptions:
readability-braces-around-statements.ShortStatementLines: 2
readability-identifier-naming.MacroDefinitionCase: UPPER_CASE
readability-identifier-naming.ClassCase: CamelCase
readability-identifier-naming.StructCase: CamelCase
readability-identifier-naming.UnionCase: CamelCase
readability-identifier-naming.EnumCase: CamelCase
readability-identifier-naming.EnumConstantCase: CamelCase
readability-identifier-naming.ScopedEnumConstantCase: CamelCase
readability-identifier-naming.GlobalConstantCase: UPPER_CASE
readability-identifier-naming.GlobalConstantPrefix: 'k'
readability-identifier-naming.GlobalVariableCase: CamelCase
readability-identifier-naming.GlobalVariablePrefix: 'g'
readability-identifier-naming.ConstexprFunctionCase: camelBack
readability-identifier-naming.ConstexprMethodCase: camelBack
readability-identifier-naming.ClassMethodCase: camelBack
readability-identifier-naming.ClassMemberCase: camelBack
readability-identifier-naming.ClassConstantCase: UPPER_CASE
readability-identifier-naming.ClassConstantPrefix: 'k'
readability-identifier-naming.StaticConstantCase: UPPER_CASE
readability-identifier-naming.StaticConstantPrefix: 'k'
readability-identifier-naming.StaticVariableCase: UPPER_CASE
readability-identifier-naming.StaticVariablePrefix: 'k'
readability-identifier-naming.ConstexprVariableCase: UPPER_CASE
readability-identifier-naming.ConstexprVariablePrefix: 'k'
readability-identifier-naming.LocalConstantCase: camelBack
readability-identifier-naming.LocalVariableCase: camelBack
readability-identifier-naming.TemplateParameterCase: CamelCase
readability-identifier-naming.ParameterCase: camelBack
readability-identifier-naming.FunctionCase: camelBack
readability-identifier-naming.MemberCase: camelBack
readability-identifier-naming.PrivateMemberSuffix: _
readability-identifier-naming.ProtectedMemberSuffix: _
readability-identifier-naming.PublicMemberSuffix: ''
readability-identifier-naming.FunctionIgnoredRegexp: '.*tag_invoke.*'
bugprone-unsafe-functions.ReportMoreUnsafeFunctions: true
bugprone-unused-return-value.CheckedReturnTypes: ::std::error_code;::std::error_condition;::std::errc
misc-include-cleaner.IgnoreHeaders: '.*/(detail|impl)/.*;.*(expected|unexpected).*;.*ranges_lower_bound\.h;time.h;stdlib.h'
misc-include-cleaner.IgnoreHeaders: '.*/(detail|impl)/.*;.*(expected|unexpected).*'

HeaderFilterRegex: '^.*/(src|tests)/.*\.(h|hpp)$'
WarningsAsErrors: '*'
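For orientation, the `readability-identifier-naming` options above translate into code roughly as follows. This is a minimal, hypothetical C++ sketch written for this comparison (it is not taken from the Clio sources): `k`-prefixed UPPER_CASE constants, CamelCase types, camelBack functions and parameters, and a trailing underscore on private members.

```cpp
#include <cstdint>
#include <string>
#include <utility>

// Hypothetical names; only the naming style mirrors the .clang-tidy options above.
constexpr std::int64_t kMAX_RETRIES = 3;  // ConstexprVariableCase: UPPER_CASE, prefix 'k'

class RequestTracker {                    // ClassCase: CamelCase
public:
    void
    recordRequest(std::string clientIp)   // ClassMethodCase/ParameterCase: camelBack
    {
        lastClientIp_ = std::move(clientIp);
    }

private:
    std::string lastClientIp_;            // PrivateMemberSuffix: '_'
};
```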
3 .clangd

@@ -8,6 +8,3 @@ Diagnostics:
IgnoreHeader:
- ".*/(detail|impl)/.*"
- ".*expected.*"
- ".*ranges_lower_bound.h"
- "time.h"
- "stdlib.h"
@@ -13,36 +13,14 @@ TMPDIR=${ROOT}/.cache/doxygen
TMPFILE=${TMPDIR}/docs.log
DOCDIR=${TMPDIR}/out

# Check doxygen is at all installed
if [ -z "$DOXYGEN" ]; then
# No hard error if doxygen is not installed yet
cat <<EOF

WARNING
-----------------------------------------------------------------------------
'doxygen' is required to check documentation.
Please install it for next time.

Your changes may fail to pass CI once pushed.
-----------------------------------------------------------------------------

EOF
exit 0
fi

# Check version of doxygen is at least 1.12
version=$($DOXYGEN --version | grep -o '[0-9\.]*')

if [[ "1.12.0" > "$version" ]]; then
# No hard error if doxygen version is not the one we want - let CI deal with it
cat <<EOF

ERROR
-----------------------------------------------------------------------------
A minimum of version 1.12 of `which doxygen` is required.
Your version is $version. Please upgrade it for next time.

Your changes may fail to pass CI once pushed.
'doxygen' is required to check documentation.
Please install it for next time. For the time being it's on CI.
-----------------------------------------------------------------------------

EOF
@@ -5,20 +5,6 @@
# This script checks the format of the code and cmake files.
# In many cases it will automatically fix the issues and abort the commit.

no_formatted_directories_staged() {
staged_directories=$(git diff-index --cached --name-only HEAD | awk -F/ '{print $1}')
for sd in $staged_directories; do
if [[ "$sd" =~ ^(benchmark|cmake|src|tests)$ ]]; then
return 1
fi
done
return 0
}

if no_formatted_directories_staged ; then
exit 0
fi

echo "+ Checking code format..."

# paths to check and re-format
@@ -26,12 +12,12 @@ sources="src tests"
formatter="clang-format -i"
version=$($formatter --version | grep -o '[0-9\.]*')

if [[ "19.0.0" > "$version" ]]; then
if [[ "18.0.0" > "$version" ]]; then
cat <<EOF

ERROR
-----------------------------------------------------------------------------
A minimum of version 19 of `which clang-format` is required.
A minimum of version 18 of `which clang-format` is required.
Your version is $version.
Please fix paths and run again.
-----------------------------------------------------------------------------

@@ -3,5 +3,6 @@
# This script is intended to be run from the root of the repository.

source .githooks/check-format
source .githooks/check-docs
#source .githooks/check-docs

# TODO: Fix Doxygen issue with reference links. See https://github.com/XRPLF/clio/issues/1431
@@ -1,58 +1,3 @@
#!/bin/sh

# git for-each-ref refs/tags # see which tags are annotated and which are lightweight. Annotated tags are "tag" objects.
# # Set these so your commits and tags are always signed
# git config commit.gpgsign true
# git config tag.gpgsign true

verify_commit_signed() {
if git verify-commit HEAD &> /dev/null; then
:
# echo "HEAD commit seems signed..."
else
echo "HEAD commit isn't signed!"
exit 1
fi
}

verify_tag() {
if git describe --exact-match --tags HEAD &> /dev/null; then
: # You might be ok to push
# echo "Tag is annotated."
return 0
else
echo "Tag for [$version] not an annotated tag."
exit 1
fi
}

verify_tag_signed() {
if git verify-tag "$version" &> /dev/null ; then
: # ok, I guess we'll let you push
# echo "Tag appears signed"
return 0
else
echo "$version tag isn't signed"
echo "Sign it with [git tag -ams\"$version\" $version]"
exit 1
fi
}

while read local_ref local_oid remote_ref remote_oid; do
# Check some things if we're pushing a branch called "release/"
if echo "$remote_ref" | grep ^refs\/heads\/release\/ &> /dev/null ; then
version=$(git tag --points-at HEAD)
echo "Looks like you're trying to push a $version release..."
echo "Making sure you've signed and tagged it."
if verify_commit_signed && verify_tag && verify_tag_signed ; then
: # Ok, I guess you can push
else
exit 1
fi
fi
done

command -v git-lfs >/dev/null 2>&1 || { echo >&2 "\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'pre-push' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\n"; exit 2; }

git lfs pre-push "$@"
66 .github/actions/build_docker_image/action.yml (vendored)

@@ -1,66 +0,0 @@
name: Build and push Docker image
description: Build and push Docker image to DockerHub and GitHub Container Registry
inputs:
image_name:
description: Name of the image to build
required: true
push_image:
description: Whether to push the image to the registry (true/false)
required: true
directory:
description: The directory containing the Dockerfile
required: true
tags:
description: Comma separated tags to apply to the image
required: true
platforms:
description: Platforms to build the image for (e.g. linux/amd64,linux/arm64)
required: true
description:
description: Short description of the image
required: true
runs:
using: composite
steps:
- name: Login to DockerHub
if: ${{ inputs.push_image == 'true' }}
uses: docker/login-action@v3
with:
username: ${{ env.DOCKERHUB_USER }}
password: ${{ env.DOCKERHUB_PW }}

- name: Login to GitHub Container Registry
if: ${{ inputs.push_image == 'true' }}
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ env.GITHUB_TOKEN }}

- uses: docker/setup-qemu-action@v3
- uses: docker/setup-buildx-action@v3

- uses: docker/metadata-action@v5
id: meta
with:
images: ${{ inputs.image_name }}
tags: ${{ inputs.tags }}

- name: Build and push
uses: docker/build-push-action@v5
with:
context: ${{ inputs.directory }}
platforms: ${{ inputs.platforms }}
push: ${{ inputs.push_image == 'true' }}
tags: ${{ steps.meta.outputs.tags }}

- name: Update DockerHub description
if: ${{ inputs.push_image == 'true' }}
uses: peter-evans/dockerhub-description@v4
with:
username: ${{ env.DOCKERHUB_USER }}
password: ${{ env.DOCKERHUB_PW }}
repository: ${{ inputs.image_name }}
short-description: ${{ inputs.description }}
readme-filepath: ${{ inputs.directory }}/README.md
2 .github/actions/prepare_runner/action.yml (vendored)

@@ -11,7 +11,7 @@ runs:
if: ${{ runner.os == 'macOS' }}
shell: bash
run: |
brew install llvm@14 pkg-config ninja bison cmake ccache jq gh conan@1 ca-certificates
brew install llvm@14 pkg-config ninja bison cmake ccache jq gh conan@1
echo "/opt/homebrew/opt/conan@1/bin" >> $GITHUB_PATH

- name: Fix git permissions on Linux
4 .github/actions/setup_conan/action.yml (vendored)

@@ -15,10 +15,10 @@ runs:
if: ${{ runner.os == 'macOS' }}
shell: bash
env:
CONAN_PROFILE: apple_clang_16
CONAN_PROFILE: apple_clang_15
id: conan_setup_mac
run: |
echo "Creating $CONAN_PROFILE conan profile"
echo "Creating $CONAN_PROFILE conan profile";
conan profile new $CONAN_PROFILE --detect --force
conan profile update settings.compiler.libcxx=libc++ $CONAN_PROFILE
conan profile update settings.compiler.cppstd=20 $CONAN_PROFILE
16 .github/dependabot.yml (vendored)

@@ -1,16 +0,0 @@
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
day: "monday"
time: "04:00"
timezone: "Etc/GMT"
reviewers:
- "cindyyan317"
- "godexsoft"
- "kuznetsss"
commit-message:
prefix: "[CI] "
target-branch: "develop"
9 .github/workflows/build.yml (vendored)

@@ -23,7 +23,7 @@ jobs:
run: |
./.githooks/check-format --diff
shell: bash

check_docs:
name: Check documentation
runs-on: ubuntu-20.04
@@ -74,7 +74,7 @@ jobs:
conan_profile: clang
code_coverage: false
static: true
- os: macos15
- os: macos14
build_type: Release
code_coverage: false
static: false
@@ -197,8 +197,8 @@ jobs:
image: rippleci/clio_ci:latest
conan_profile: clang
build_type: Debug
- os: macos15
conan_profile: apple_clang_16
- os: macos14
conan_profile: apple_clang_15
build_type: Release
runs-on: [self-hosted, "${{ matrix.os }}"]
container: ${{ matrix.container }}
@@ -211,7 +211,6 @@ jobs:
- uses: actions/download-artifact@v4
with:
name: clio_tests_${{ runner.os }}_${{ matrix.build_type }}_${{ matrix.conan_profile }}

- name: Run clio_tests
run: |
chmod +x ./clio_tests
95 .github/workflows/build_clio_docker_image.yml (vendored)

@@ -1,95 +0,0 @@
name: Build and publish Clio docker image
on:
workflow_call:
inputs:
tags:
required: true
type: string
description: Comma separated tags for docker image
artifact_name:
type: string
description: Name of Github artifact to put into docker image
strip_binary:
type: boolean
description: Whether to strip clio binary
default: true
publish_image:
type: boolean
description: Whether to publish docker image
required: true

workflow_dispatch:
inputs:
tags:
required: true
type: string
description: Comma separated tags for docker image
clio_server_binary_url:
required: true
type: string
description: Url to download clio_server binary from
binary_sha256:
required: true
type: string
description: sha256 hash of the binary
strip_binary:
type: boolean
description: Whether to strip clio binary
default: true

jobs:
build_and_publish_image:
name: Build and publish image
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v4

- name: Download Clio binary from artifact
if: ${{ inputs.artifact_name != null }}
uses: actions/download-artifact@v4
with:
name: ${{ inputs.artifact_name }}
path: ./docker/clio/artifact/

- name: Download Clio binary from url
if: ${{ inputs.clio_server_binary_url != null }}
shell: bash
run: |
wget ${{inputs.clio_server_binary_url}} -P ./docker/clio/artifact/
if [ "$(sha256sum ./docker/clio/clio_server | awk '{print $1}')" != "${{inputs.binary_sha256}}" ]; then
echo "Binary sha256 sum doesn't match"
exit 1
fi
- name: Unpack binary
shell: bash
run: |
sudo apt update && sudo apt install -y tar unzip
cd docker/clio/artifact
artifact=$(find . -type f)
if [[ $artifact == *.zip ]]; then
unzip $artifact
elif [[ $artifact == *.tar.gz ]]; then
tar -xvf $artifact
fi
mv clio_server ../
cd ../
rm -rf ./artifact

- name: Strip binary
if: ${{ inputs.strip_binary }}
shell: bash
run: strip ./docker/clio/clio_server

- name: Build Docker image
uses: ./.github/actions/build_docker_image
env:
DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
DOCKERHUB_PW: ${{ secrets.DOCKERHUB_PW }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
image_name: rippleci/clio
push_image: ${{ inputs.publish_image }}
directory: docker/clio
tags: ${{ inputs.tags }}
platforms: linux/amd64
description: Clio is an XRP Ledger API server.
4 .github/workflows/check_libxrpl.yml (vendored)

@@ -47,7 +47,7 @@ jobs:
- name: Upload clio_tests
uses: actions/upload-artifact@v4
with:
name: clio_tests_check_libxrpl
name: clio_tests_libxrpl-${{ github.event.client_payload.version }}
path: build/clio_tests

run_tests:
@@ -60,7 +60,7 @@ jobs:
steps:
- uses: actions/download-artifact@v4
with:
name: clio_tests_check_libxrpl
name: clio_tests_libxrpl-${{ github.event.client_payload.version }}

- name: Run clio_tests
run: |
18 .github/workflows/check_pr_title.yml (vendored)

@@ -1,18 +0,0 @@
name: Check PR title
on:
pull_request:
types: [opened, edited, reopened, synchronize]
branches: [develop]

jobs:
check_title:
runs-on: ubuntu-20.04
# permissions:
#   pull-requests: write
steps:
- uses: ytanikin/PRConventionalCommits@1.3.0
with:
task_types: '["build","feat","fix","docs","test","ci","style","refactor","perf","chore"]'
add_label: false
# Turned off labelling because it leads to an error, see https://github.com/ytanikin/PRConventionalCommits/issues/19
# custom_labels: '{"build":"build", "feat":"enhancement", "fix":"bug", "docs":"documentation", "test":"testability", "ci":"ci", "style":"refactoring", "refactor":"refactoring", "perf":"performance", "chore":"tooling"}'
8 .github/workflows/clang-tidy.yml (vendored)

@@ -60,7 +60,7 @@ jobs:
shell: bash
id: run_clang_tidy
run: |
run-clang-tidy-19 -p build -j ${{ steps.number_of_threads.outputs.threads_number }} -fix -quiet 1>output.txt
run-clang-tidy-18 -p build -j ${{ steps.number_of_threads.outputs.threads_number }} -fix -quiet 1>output.txt

- name: Check format
if: ${{ steps.run_clang_tidy.outcome != 'success' }}
@@ -89,7 +89,7 @@ jobs:

List of the issues found: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}/

- uses: crazy-max/ghaction-import-gpg@v6
- uses: crazy-max/ghaction-import-gpg@v5
if: ${{ steps.run_clang_tidy.outcome != 'success' }}
with:
gpg_private_key: ${{ secrets.ACTIONS_GPG_PRIVATE_KEY }}
@@ -99,7 +99,7 @@ jobs:

- name: Create PR with fixes
if: ${{ steps.run_clang_tidy.outcome != 'success' }}
uses: peter-evans/create-pull-request@v7
uses: peter-evans/create-pull-request@v5
env:
GH_REPO: ${{ github.repository }}
GH_TOKEN: ${{ github.token }}
@@ -109,7 +109,7 @@ jobs:
branch: "clang_tidy/autofix"
branch-suffix: timestamp
delete-branch: true
title: "style: clang-tidy auto fixes"
title: "[CI] clang-tidy auto fixes"
body: "Fixes #${{ steps.create_issue.outputs.created_issue_id }}. Please review and commit clang-tidy fixes."
reviewers: "cindyyan317,godexsoft,kuznetsss"

@@ -17,7 +17,7 @@ jobs:
id: check
shell: bash
run: |
passed=$(if [[ $(git log -1 --pretty=format:%s | grep 'style: clang-tidy auto fixes') ]]; then echo 'true' ; else echo 'false' ; fi)
passed=$(if [[ $(git log -1 --pretty=format:%s | grep '\[CI\] clang-tidy auto fixes') ]]; then echo 'true' ; else echo 'false' ; fi)
echo "passed=$passed" >> $GITHUB_OUTPUT

- name: Run clang-tidy workflow
2 .github/workflows/docs.yml (vendored)

@@ -34,7 +34,7 @@ jobs:
cmake ../docs && cmake --build . --target docs

- name: Setup Pages
uses: actions/configure-pages@v5
uses: actions/configure-pages@v4

- name: Upload artifact
uses: actions/upload-pages-artifact@v3
31 .github/workflows/nightly.yml (vendored)

@@ -3,10 +3,6 @@ on:
schedule:
- cron: '0 5 * * 1-5'
workflow_dispatch:
pull_request:
paths:
- '.github/workflows/nightly.yml'
- '.github/workflows/build_clio_docker_image.yml'

jobs:
build:
@@ -15,17 +11,14 @@ jobs:
fail-fast: false
matrix:
include:
- os: macos15
- os: macos14
build_type: Release
static: false
- os: heavy
build_type: Release
static: true
container:
image: rippleci/clio_ci:latest
- os: heavy
build_type: Debug
static: true
container:
image: rippleci/clio_ci:latest
runs-on: [self-hosted, "${{ matrix.os }}"]
@@ -57,7 +50,6 @@ jobs:
conan_profile: ${{ steps.conan.outputs.conan_profile }}
conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
build_type: ${{ matrix.build_type }}
static: ${{ matrix.static }}

- name: Build Clio
uses: ./.github/actions/build_clio
@@ -90,7 +82,7 @@ jobs:
fail-fast: false
matrix:
include:
- os: macos15
- os: macos14
build_type: Release
integration_tests: false
- os: heavy
@@ -138,7 +130,6 @@ jobs:
./clio_integration_tests --backend_host=scylladb

nightly_release:
if: ${{ github.event_name != 'pull_request' }}
needs: run_tests
runs-on: ubuntu-20.04
env:
@@ -152,13 +143,13 @@ jobs:
- uses: actions/download-artifact@v4
with:
path: nightly_release
pattern: clio_server_*

- name: Prepare files
shell: bash
run: |
cp ${{ github.workspace }}/.github/workflows/nightly_notes.md "${RUNNER_TEMP}/nightly_notes.md"
cd nightly_release
rm -r clio_*tests*
for d in $(ls); do
archive_name=$(ls $d)
mv ${d}/${archive_name} ./
@@ -181,21 +172,9 @@ jobs:
--target $GITHUB_SHA --notes-file "${RUNNER_TEMP}/nightly_notes.md" \
./nightly_release/clio_server*

build_and_publish_docker_image:
uses: ./.github/workflows/build_clio_docker_image.yml
needs: run_tests
secrets: inherit
with:
tags: |
type=raw,value=nightly
type=raw,value=${{ github.sha }}
artifact_name: clio_server_Linux_Release
strip_binary: true
publish_image: ${{ github.event_name != 'pull_request' }}

create_issue_on_failure:
needs: [build, run_tests, nightly_release, build_and_publish_docker_image]
if: ${{ always() && contains(needs.*.result, 'failure') && github.event_name != 'pull_request' }}
needs: [build, run_tests, nightly_release]
if: ${{ always() && contains(needs.*.result, 'failure') }}
runs-on: ubuntu-20.04
permissions:
contents: write
33 .github/workflows/update_docker_ci.yml (vendored)

@@ -18,19 +18,30 @@ jobs:
name: Build and push docker image
runs-on: [self-hosted, heavy]
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/build_docker_image
env:
DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
DOCKERHUB_PW: ${{ secrets.DOCKERHUB_PW }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Login to DockerHub
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@v3
with:
image_name: rippleci/clio_ci
push_image: ${{ github.event_name != 'pull_request' }}
directory: docker/ci
username: ${{ secrets.DOCKERHUB_USER }}
password: ${{ secrets.DOCKERHUB_PW }}

- uses: actions/checkout@v4
- uses: docker/setup-qemu-action@v3
- uses: docker/setup-buildx-action@v3
- uses: docker/metadata-action@v5
id: meta
with:
images: rippleci/clio_ci
tags: |
type=raw,value=latest
type=raw,value=gcc_12_clang_16
type=raw,value=${{ github.sha }}
type=raw,value=${{ env.GITHUB_SHA }}

- name: Build and push
uses: docker/build-push-action@v5
with:
context: ${{ github.workspace }}/docker/ci
platforms: linux/amd64,linux/arm64
description: CI image for XRPLF/clio.
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.meta.outputs.tags }}
2 .github/workflows/upload_coverage_report.yml (vendored)

@@ -23,7 +23,7 @@ jobs:

- name: Upload coverage report
if: ${{ hashFiles('build/coverage_report.xml') != '' }}
uses: wandalen/wretry.action@v3.7.3
uses: wandalen/wretry.action@v1.4.10
with:
action: codecov/codecov-action@v4
with: |
2 .gitignore (vendored)

@@ -8,4 +8,4 @@
.DS_Store
CMakeUserPresets.json
config.json
src/util/build/Build.cpp
src/main/impl/Build.cpp
@@ -21,7 +21,7 @@ git config --local core.hooksPath .githooks
```

## Git hooks dependencies
The pre-commit hook requires `clang-format >= 19.0.0` and `cmake-format` to be installed on your machine.
The pre-commit hook requires `clang-format >= 18.0.0` and `cmake-format` to be installed on your machine.
`clang-format` can be installed using `brew` on macOS and default package manager on Linux.
`cmake-format` can be installed using `pip`.
The hook will also attempt to automatically use `doxygen` to verify that everything public in the codebase is covered by doc comments. If `doxygen` is not installed, the hook will raise a warning suggesting to install `doxygen` for future commits.
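To illustrate the kind of doc comment the `doxygen` check looks for, here is a minimal, hypothetical example (the function is invented for this note and is not part of Clio):

```cpp
#include <cstdint>

/**
 * @brief Scale a base fee by a load multiplier.
 *
 * @param baseFee Base fee in drops.
 * @param multiplier Load-based multiplier.
 * @return The scaled fee in drops.
 */
inline std::uint64_t
computeScaledFee(std::uint64_t baseFee, std::uint64_t multiplier)
{
    return baseFee * multiplier;
}
```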
@@ -72,9 +72,6 @@ git push --force
Clio uses `ccache` to speed up compilation. If you want to use it, please make sure it is installed on your machine.
CMake will automatically detect it and use it if it is available.

## Opening a pull request
When a pull request is open CI will perform checks on the new code.
Title of the pull request and squashed commit should follow [conventional commits specification](https://www.conventionalcommits.org/en/v1.0.0/).

## Fixing issues found during code review
While your code is in review, it's possible that some changes will be requested by reviewer(s).
@@ -105,7 +102,7 @@ The button for that is near the bottom of the PR's page on GitHub.
This is a non-exhaustive list of recommended style guidelines. These are not always strictly enforced and serve as a way to keep the codebase coherent.

## Formatting
Code must conform to `clang-format` version 19, unless the result would be unreasonably difficult to read or maintain.
Code must conform to `clang-format` version 18, unless the result would be unreasonably difficult to read or maintain.
In most cases the pre-commit hook will take care of formatting and will fix any issues automatically.
To manually format your code, use `clang-format -i <your changed files>` for C++ files and `cmake-format -i <your changed files>` for CMake files.
@@ -28,6 +28,7 @@ Below are some useful docs to learn more about Clio.
**For Developers**:

- [How to build Clio](./docs/build-clio.md)
- [Metrics and static analysis](./docs/metrics-and-static-analysis.md)
- [Coverage report](./docs/coverage-report.md)

**For Operators**:
@@ -12,5 +12,5 @@ target_sources(
include(deps/gbench)

target_include_directories(clio_benchmark PRIVATE .)
target_link_libraries(clio_benchmark PUBLIC clio_etl benchmark::benchmark_main)
target_link_libraries(clio_benchmark PUBLIC clio benchmark::benchmark_main)
set_target_properties(clio_benchmark PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})
@@ -31,6 +31,7 @@
#include <cstdint>
#include <latch>
#include <optional>
#include <stdexcept>
#include <thread>
#include <vector>

@@ -188,10 +189,10 @@ public:
static auto
generateData()
{
constexpr auto kTOTAL = 10'000;
constexpr auto TOTAL = 10'000;
std::vector<uint64_t> data;
data.reserve(kTOTAL);
for (auto i = 0; i < kTOTAL; ++i)
data.reserve(TOTAL);
for (auto i = 0; i < TOTAL; ++i)
data.push_back(util::Random::uniform(1, 100'000'000));

return data;
@@ -208,7 +209,7 @@ benchmarkThreads(benchmark::State& state)
}

template <typename CtxType>
static void
void
benchmarkExecutionContextBatched(benchmark::State& state)
{
auto data = generateData();
@@ -219,7 +220,7 @@ benchmarkExecutionContextBatched(benchmark::State& state)
}

template <typename CtxType>
static void
void
benchmarkAnyExecutionContextBatched(benchmark::State& state)
{
auto data = generateData();
@@ -17,26 +17,25 @@
*/
//==============================================================================

#include "util/build/Build.hpp"
#include "main/Build.hpp"

#include <string>

namespace util::build {

static constexpr char versionString[] = "@CLIO_VERSION@"; // NOLINT(readability-identifier-naming)
namespace Build {
static constexpr char versionString[] = "@CLIO_VERSION@";

std::string const&
getClioVersionString()
{
static std::string const value = versionString; // NOLINT(readability-identifier-naming)
static std::string const value = versionString;
return value;
}

std::string const&
getClioFullVersionString()
{
static std::string const value = "clio-" + getClioVersionString(); // NOLINT(readability-identifier-naming)
static std::string const value = "clio-" + getClioVersionString();
return value;
}

} // namespace util::build
} // namespace Build
@@ -8,7 +8,7 @@ if (lint)
endif ()
message(STATUS "Using clang-tidy from CLIO_CLANG_TIDY_BIN")
else ()
find_program(_CLANG_TIDY_BIN NAMES "clang-tidy-19" "clang-tidy" REQUIRED)
find_program(_CLANG_TIDY_BIN NAMES "clang-tidy-18" "clang-tidy" REQUIRED)
endif ()

if (NOT _CLANG_TIDY_BIN)
@@ -45,4 +45,4 @@ endif ()

message(STATUS "Build version: ${CLIO_VERSION}")

configure_file(${CMAKE_CURRENT_LIST_DIR}/Build.cpp.in ${CMAKE_CURRENT_LIST_DIR}/../src/util/build/Build.cpp)
configure_file(${CMAKE_CURRENT_LIST_DIR}/Build.cpp.in ${CMAKE_CURRENT_LIST_DIR}/../src/main/impl/Build.cpp)
@@ -28,8 +28,7 @@ class Clio(ConanFile):
'protobuf/3.21.9',
'grpc/1.50.1',
'openssl/1.1.1u',
'xrpl/2.4.0-b3',
'zlib/1.3.1',
'xrpl/2.2.0',
'libbacktrace/cci.20210118'
]
@@ -1,16 +0,0 @@
# CI image for XRPLF/clio

This image contains an environment to build [Clio](https://github.com/XRPLF/clio), check code and documentation.
It is used in [Clio Github Actions](https://github.com/XRPLF/clio/actions) but can also be used to compile Clio locally.

The image is based on Ubuntu 20.04 and contains:
- clang 16.0.6
- gcc 12.3
- doxygen 1.12
- gh 2.40
- ccache 4.10.2
- conan 1.62
- and some other useful tools

Conan is set up to build Clio without any additional steps. There are two preset conan profiles: `clang` and `gcc` to use corresponding compiler. By default conan is setup to use `gcc`.
Sanitizer builds for `ASAN`, `TSAN` and `UBSAN` are enabled via conan profiles for each of the supported compilers. These can be selected using the following pattern (all lowercase): `[compiler].[sanitizer]` (e.g. `--profile gcc.tsan`).
@@ -1,9 +0,0 @@
include(clang)

[options]
boost:extra_b2_flags="cxxflags=\"-fsanitize=address\" linkflags=\"-fsanitize=address\""
boost:without_stacktrace=True
[env]
CFLAGS="-fsanitize=address"
CXXFLAGS="-fsanitize=address"
LDFLAGS="-fsanitize=address"
@@ -1,9 +0,0 @@
include(clang)

[options]
boost:extra_b2_flags="cxxflags=\"-fsanitize=thread\" linkflags=\"-fsanitize=thread\""
boost:without_stacktrace=True
[env]
CFLAGS="-fsanitize=thread"
CXXFLAGS="-fsanitize=thread"
LDFLAGS="-fsanitize=thread"
@@ -1,9 +0,0 @@
include(clang)

[options]
boost:extra_b2_flags="cxxflags=\"-fsanitize=undefined\" linkflags=\"-fsanitize=undefined\""
boost:without_stacktrace=True
[env]
CFLAGS="-fsanitize=undefined"
CXXFLAGS="-fsanitize=undefined"
LDFLAGS="-fsanitize=undefined"
@@ -1,9 +0,0 @@
include(gcc)

[options]
boost:extra_b2_flags="cxxflags=\"-fsanitize=address\" linkflags=\"-fsanitize=address\""
boost:without_stacktrace=True
[env]
CFLAGS="-fsanitize=address"
CXXFLAGS="-fsanitize=address"
LDFLAGS="-fsanitize=address"
@@ -1,9 +0,0 @@
include(gcc)

[options]
boost:extra_b2_flags="cxxflags=\"-fsanitize=thread\" linkflags=\"-fsanitize=thread\""
boost:without_stacktrace=True
[env]
CFLAGS="-fsanitize=thread"
CXXFLAGS="-fsanitize=thread"
LDFLAGS="-fsanitize=thread"
@@ -1,9 +0,0 @@
include(gcc)

[options]
boost:extra_b2_flags="cxxflags=\"-fsanitize=undefined\" linkflags=\"-fsanitize=undefined\""
boost:without_stacktrace=True
[env]
CFLAGS="-fsanitize=undefined"
CXXFLAGS="-fsanitize=undefined"
LDFLAGS="-fsanitize=undefined"
@@ -6,10 +6,10 @@ SHELL ["/bin/bash", "-c"]
USER root
WORKDIR /root

ENV CCACHE_VERSION=4.10.2 \
LLVM_TOOLS_VERSION=19 \
ENV CCACHE_VERSION=4.8.3 \
LLVM_TOOLS_VERSION=18 \
GH_VERSION=2.40.0 \
DOXYGEN_VERSION=1.12.0
DOXYGEN_VERSION=1.10.0

# Add repositories
RUN apt-get -qq update \
@@ -98,10 +98,3 @@ RUN conan profile new clang --detect \
&& conan profile update "conf.tools.build:compiler_executables={\"c\": \"/usr/bin/clang-16\", \"cpp\": \"/usr/bin/clang++-16\"}" clang

RUN echo "include(gcc)" >> .conan/profiles/default

COPY conan/gcc.asan /root/.conan/profiles
COPY conan/gcc.tsan /root/.conan/profiles
COPY conan/gcc.ubsan /root/.conan/profiles
COPY conan/clang.asan /root/.conan/profiles
COPY conan/clang.tsan /root/.conan/profiles
COPY conan/clang.ubsan /root/.conan/profiles
@@ -1,23 +0,0 @@
# Clio official docker image

[Clio](https://github.com/XRPLF/clio) is an XRP Ledger API server optimized for RPC calls over WebSocket or JSON-RPC.
It stores validated historical ledger and transaction data in a space efficient format.

This image contains `clio_server` binary allowing users to run Clio easily.

## Clio configuration file

Please note that while Clio requires a configuration file, this image doesn't include any default config.
Your configuration file should be mounted under the path `/opt/clio/etc/config.json`.
Clio repository provides an [example](https://github.com/XRPLF/clio/blob/develop/docs/examples/config/example-config.json) of the configuration file.

Config file recommendations:
- Set `log_to_console` to `false` if you want to avoid logs being written to `stdout`.
- Set `log_directory` to `/opt/clio/log` to store logs in a volume.

## Usage

The following command can be used to run Clio in docker (assuming server's port is `51233` in your config):
```bash
docker run -d -v <path to your config.json>:/opt/clio/etc/config.json -v <path to store logs>:/opt/clio/log -p 51233:51233 rippleci/clio
```
@@ -1,16 +0,0 @@
FROM ubuntu:22.04

COPY ./clio_server /opt/clio/bin/clio_server

RUN ln -s /opt/clio/bin/clio_server /usr/local/bin/clio_server && \
mkdir -p /opt/clio/etc/ && \
mkdir -p /opt/clio/log/ && \
groupadd -g 10001 clio && \
useradd -u 10000 -g 10001 -s /bin/bash clio && \
chown clio:clio /opt/clio/log && \
apt update && \
apt install -y libatomic1

USER clio
ENTRYPOINT ["/opt/clio/bin/clio_server"]
CMD ["--conf", "/opt/clio/etc/config.json"]
@@ -1,3 +1,4 @@
version: '3.7'
services:
clio_develop:
image: rippleci/clio_ci:latest
@@ -141,60 +141,3 @@ If you wish to develop against a `rippled` instance running in standalone mode t

1. Advance the `rippled` ledger to at least ledger 256.
2. Wait 10 minutes before first starting Clio against this standalone node.

## Building with a Custom `libxrpl`

Sometimes, during development, you need to build against a custom version of `libxrpl`. (For example, you may be developing compatibility for a proposed amendment that is not yet merged to the main `rippled` codebase.) To build Clio with compatibility for a custom fork or branch of `rippled`, follow these steps:

1. First, pull/clone the appropriate `rippled` fork and switch to the branch you want to build. For example, the following example uses an in-development build with [XLS-33d Multi-Purpose Tokens](https://github.com/XRPLF/XRPL-Standards/tree/master/XLS-0033d-multi-purpose-tokens):

```sh
git clone https://github.com/shawnxie999/rippled/
cd rippled
git switch mpt-1.1
```

2. Export a custom package to your local Conan store using a user/channel:

```sh
conan export . my/feature
```

3. Patch your local Clio build to use the right package.

Edit `conanfile.py` (from the Clio repository root). Replace the `xrpl` requirement with the custom package version from the previous step. This must also include the current version number from your `rippled` branch. For example:

```py
# ... (excerpt from conanfile.py)
requires = [
'boost/1.82.0',
'cassandra-cpp-driver/2.17.0',
'fmt/10.1.1',
'protobuf/3.21.9',
'grpc/1.50.1',
'openssl/1.1.1u',
'xrpl/2.3.0-b1@my/feature', # Update this line
'libbacktrace/cci.20210118'
]
```

4. Build Clio as you would have before.

See [Building Clio](#building-clio) for details.

## Using `clang-tidy` for static analysis

The minimum [clang-tidy](https://clang.llvm.org/extra/clang-tidy/) version required is 19.0.

Clang-tidy can be run by Cmake when building the project. To achieve this, you just need to provide the option `-o lint=True` for the `conan install` command:

```sh
conan install .. --output-folder . --build missing --settings build_type=Release -o tests=True -o lint=True
```

By default Cmake will try to find `clang-tidy` automatically in your system.
To force Cmake to use your desired binary, set the `CLIO_CLANG_TIDY_BIN` environment variable to the path of the `clang-tidy` binary. For example:

```sh
export CLIO_CLANG_TIDY_BIN=/opt/homebrew/opt/llvm@19/bin/clang-tidy
```
43 docs/examples/config/cloud-example-config.json (Normal file)

@@ -0,0 +1,43 @@
/*
* This is an example configuration file. Please do not use without modifying to suit your needs.
*/
{
"database": {
"type": "cassandra",
"cassandra": {
// This option can be used to setup a secure connect bundle connection
"secure_connect_bundle": "[path/to/zip. ignore if using contact_points]",
// The following options are used only if using contact_points
"contact_points": "[ip. ignore if using secure_connect_bundle]",
"port": "[port. ignore if using_secure_connect_bundle]",
// Authentication settings
"username": "[username, if any]",
"password": "[password, if any]",
// Other common settings
"keyspace": "clio",
"max_write_requests_outstanding": 25000,
"max_read_requests_outstanding": 30000,
"threads": 8
}
},
"etl_sources": [
{
"ip": "[rippled ip]",
"ws_port": "6006",
"grpc_port": "50051"
}
],
"dos_guard": {
"whitelist": [
"127.0.0.1"
]
},
"server": {
"ip": "0.0.0.0",
"port": 8080
},
"log_level": "debug",
"log_file": "./clio.log",
"extractor_threads": 8,
"read_only": false
}
@@ -31,7 +31,7 @@
"etl_sources": [
{
"ip": "127.0.0.1",
"ws_port": "6005",
"ws_port": "6006",
"grpc_port": "50051"
}
],
@@ -39,9 +39,6 @@
"cache_timeout": 0.250, // in seconds, could be 0, which means no cache
"request_timeout": 10.0 // time for Clio to wait for rippled to reply on a forwarded request (default is 10 seconds)
},
"rpc": {
"cache_timeout": 0.5 // in seconds, could be 0, which means no cache for rpc
},
"dos_guard": {
// Comma-separated list of IPs to exclude from rate limiting
"whitelist": [
@@ -70,15 +67,7 @@
"admin_password": "xrp",
// If local_admin is true, Clio will consider requests come from 127.0.0.1 as admin requests
// It's true by default unless admin_password is set,'local_admin' : true and 'admin_password' can not be set at the same time
"local_admin": false,
"processing_policy": "parallel", // Could be "sequent" or "parallel".
// For sequent policy request from one client connection will be processed one by one and the next one will not be read before
// the previous one is processed. For parallel policy Clio will take all requests and process them in parallel and
// send a reply for each request whenever it is ready.
"parallel_requests_limit": 10, // Optional parameter, used only if "processing_strategy" is "parallel". It limits the number of requests for one client connection processed in parallel. Infinite if not specified.
// Max number of responses to queue up before sent successfully. If a client's waiting queue is too long, the server will close the connection.
"ws_max_sending_queue_size": 1500,
"__ng_web_server": false // Use ng web server. This is a temporary setting which will be deleted after switching to ng web server
"local_admin": false
},
// Time in seconds for graceful shutdown. Defaults to 10 seconds. Not fully implemented yet.
"graceful_period": 10.0,
30 docs/metrics-and-static-analysis.md (Normal file)

@@ -0,0 +1,30 @@
# Metrics and static analysis

## Prometheus metrics collection

Clio natively supports [Prometheus](https://prometheus.io/) metrics collection. It accepts Prometheus requests on the port configured in the `server` section of the config.

Prometheus metrics are enabled by default, and replies to `/metrics` are compressed. To disable compression, and have human readable metrics, add `"prometheus": { "enabled": true, "compress_reply": false }` to Clio's config.

To completely disable Prometheus metrics add `"prometheus": { "enabled": false }` to Clio's config.

It is important to know that Clio responds to Prometheus request only if they are admin requests. If you are using the admin password feature, the same password should be provided in the Authorization header of Prometheus requests.

You can find an example docker-compose file, with Prometheus and Grafana configs, in [examples/infrastructure](../docs/examples/infrastructure/).

## Using `clang-tidy` for static analysis

The minimum [clang-tidy](https://clang.llvm.org/extra/clang-tidy/) version required is 17.0.

Clang-tidy can be run by Cmake when building the project. To achieve this, you just need to provide the option `-o lint=True` for the `conan install` command:

```sh
conan install .. --output-folder . --build missing --settings build_type=Release -o tests=True -o lint=True
```

By default Cmake will try to find `clang-tidy` automatically in your system.
To force Cmake to use your desired binary, set the `CLIO_CLANG_TIDY_BIN` environment variable to the path of the `clang-tidy` binary. For example:

```sh
export CLIO_CLANG_TIDY_BIN=/opt/homebrew/opt/llvm@17/bin/clang-tidy
```
@@ -80,15 +80,3 @@ Clio will fallback to hardcoded defaults when these values are not specified in

> [!TIP]
> See the [example-config.json](../docs/examples/config/example-config.json) for more details.

## Prometheus metrics collection

Clio natively supports [Prometheus](https://prometheus.io/) metrics collection. It accepts Prometheus requests on the port configured in the `server` section of the config.

Prometheus metrics are enabled by default, and replies to `/metrics` are compressed. To disable compression, and have human readable metrics, add `"prometheus": { "enabled": true, "compress_reply": false }` to Clio's config.

To completely disable Prometheus metrics add `"prometheus": { "enabled": false }` to Clio's config.

It is important to know that Clio responds to Prometheus request only if they are admin requests. If you are using the admin password feature, the same password should be provided in the Authorization header of Prometheus requests.

You can find an example docker-compose file, with Prometheus and Grafana configs, in [examples/infrastructure](../docs/examples/infrastructure/).
@@ -1,10 +1,7 @@
add_subdirectory(util)
add_subdirectory(data)
add_subdirectory(etl)
add_subdirectory(etlng)
add_subdirectory(feed)
add_subdirectory(rpc)
add_subdirectory(web)
add_subdirectory(migration)
add_subdirectory(app)
add_subdirectory(main)
@@ -1,4 +0,0 @@
add_library(clio_app)
target_sources(clio_app PRIVATE CliArgs.cpp ClioApplication.cpp Stopper.cpp WebHandlers.cpp)

target_link_libraries(clio_app PUBLIC clio_etl clio_etlng clio_feed clio_web clio_rpc clio_migration)
@@ -1,86 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2024, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include "app/CliArgs.hpp"

#include "migration/MigrationApplication.hpp"
#include "util/build/Build.hpp"

#include <boost/program_options/options_description.hpp>
#include <boost/program_options/parsers.hpp>
#include <boost/program_options/positional_options.hpp>
#include <boost/program_options/value_semantic.hpp>
#include <boost/program_options/variables_map.hpp>

#include <cstdlib>
#include <iostream>
#include <string>
#include <utility>

namespace app {

CliArgs::Action
CliArgs::parse(int argc, char const* argv[])
{
namespace po = boost::program_options;
// clang-format off
po::options_description description("Options");
description.add_options()
("help,h", "print help message and exit")
("version,v", "print version and exit")
("conf,c", po::value<std::string>()->default_value(kDEFAULT_CONFIG_PATH), "configuration file")
("ng-web-server,w", "Use ng-web-server")
("migrate", po::value<std::string>(), "start migration helper")
("verify", "Checks the validity of config values")
;
// clang-format on
po::positional_options_description positional;
positional.add("conf", 1);

po::variables_map parsed;
po::store(po::command_line_parser(argc, argv).options(description).positional(positional).run(), parsed);
po::notify(parsed);

if (parsed.count("help") != 0u) {
std::cout << "Clio server " << util::build::getClioFullVersionString() << "\n\n" << description;
return Action{Action::Exit{EXIT_SUCCESS}};
}

if (parsed.count("version") != 0u) {
std::cout << util::build::getClioFullVersionString() << '\n';
return Action{Action::Exit{EXIT_SUCCESS}};
}

auto configPath = parsed["conf"].as<std::string>();

if (parsed.count("migrate") != 0u) {
auto const opt = parsed["migrate"].as<std::string>();
if (opt == "status")
return Action{Action::Migrate{.configPath = std::move(configPath), .subCmd = MigrateSubCmd::status()}};
return Action{Action::Migrate{.configPath = std::move(configPath), .subCmd = MigrateSubCmd::migration(opt)}};
}

if (parsed.count("verify") != 0u)
return Action{Action::VerifyConfig{.configPath = std::move(configPath)}};

return Action{Action::Run{.configPath = std::move(configPath), .useNgWebServer = parsed.count("ng-web-server") != 0}
};
}

} // namespace app
@@ -1,108 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2024, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include "migration/MigrationApplication.hpp"
#include "util/OverloadSet.hpp"

#include <string>
#include <variant>

namespace app {

/**
* @brief Parsed command line arguments representation.
*/
class CliArgs {
public:
/**
* @brief Default configuration path.
*/
static constexpr char kDEFAULT_CONFIG_PATH[] = "/etc/opt/clio/config.json";

/**
* @brief An action parsed from the command line.
*/
class Action {
public:
/** @brief Run action. */
struct Run {
std::string configPath; ///< Configuration file path.
bool useNgWebServer; ///< Whether to use a ng web server
};

/** @brief Exit action. */
struct Exit {
int exitCode; ///< Exit code.
};

/** @brief Migration action. */
struct Migrate {
std::string configPath;
MigrateSubCmd subCmd;
};

/** @brief Verify Config action. */
struct VerifyConfig {
std::string configPath;
};

/**
* @brief Construct an action from a Run.
*
* @param action Run action.
*/
template <typename ActionType>
requires std::is_same_v<ActionType, Run> or std::is_same_v<ActionType, Exit> or
std::is_same_v<ActionType, Migrate> or std::is_same_v<ActionType, VerifyConfig>
explicit Action(ActionType&& action) : action_(std::forward<ActionType>(action))
{
}

/**
* @brief Apply a function to the action.
*
* @tparam Processors Action processors types. Must be callable with the action type and return int.
* @param processors Action processors.
* @return Exit code.
*/
template <typename... Processors>
int
apply(Processors&&... processors) const
{
return std::visit(util::OverloadSet{std::forward<Processors>(processors)...}, action_);
}

private:
std::variant<Run, Exit, Migrate, VerifyConfig> action_;
};

/**
* @brief Parse command line arguments.
*
* @param argc Number of arguments.
* @param argv Array of arguments.
* @return Parsed command line arguments.
*/
static Action
parse(int argc, char const* argv[]);
};

} // namespace app
@@ -1,200 +0,0 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2024, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include "app/ClioApplication.hpp"
|
||||
|
||||
#include "app/Stopper.hpp"
|
||||
#include "app/WebHandlers.hpp"
|
||||
#include "data/AmendmentCenter.hpp"
|
||||
#include "data/BackendFactory.hpp"
|
||||
#include "etl/ETLService.hpp"
|
||||
#include "etl/LoadBalancer.hpp"
|
||||
#include "etl/NetworkValidatedLedgers.hpp"
|
||||
#include "feed/SubscriptionManager.hpp"
|
||||
#include "migration/MigrationInspectorFactory.hpp"
|
||||
#include "rpc/Counters.hpp"
|
||||
#include "rpc/RPCEngine.hpp"
|
||||
#include "rpc/WorkQueue.hpp"
|
||||
#include "rpc/common/impl/HandlerProvider.hpp"
|
||||
#include "util/build/Build.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
#include "util/newconfig/ConfigDefinition.hpp"
|
||||
#include "util/prometheus/Prometheus.hpp"
|
||||
#include "web/AdminVerificationStrategy.hpp"
|
||||
#include "web/RPCServerHandler.hpp"
|
||||
#include "web/Server.hpp"
|
||||
#include "web/dosguard/DOSGuard.hpp"
|
||||
#include "web/dosguard/IntervalSweepHandler.hpp"
|
||||
#include "web/dosguard/WhitelistHandler.hpp"
|
||||
#include "web/ng/RPCServerHandler.hpp"
|
||||
#include "web/ng/Server.hpp"
|
||||
|
||||
#include <boost/asio/io_context.hpp>
|
||||
|
||||
#include <cstdint>
|
||||
#include <cstdlib>
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <thread>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
namespace app {
|
||||
|
||||
namespace {
|
||||
|
||||
/**
|
||||
* @brief Start context threads
|
||||
*
|
||||
* @param ioc Context
|
||||
* @param numThreads Number of worker threads to start
|
||||
*/
|
||||
void
|
||||
start(boost::asio::io_context& ioc, std::uint32_t numThreads)
|
||||
{
|
||||
std::vector<std::thread> v;
|
||||
v.reserve(numThreads - 1);
|
||||
for (auto i = numThreads - 1; i > 0; --i)
|
||||
v.emplace_back([&ioc] { ioc.run(); });
|
||||
|
||||
ioc.run();
|
||||
for (auto& t : v)
|
||||
t.join();
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
ClioApplication::ClioApplication(util::config::ClioConfigDefinition const& config)
|
||||
: config_(config), signalsHandler_{config_}
|
||||
{
|
||||
LOG(util::LogService::info()) << "Clio version: " << util::build::getClioFullVersionString();
|
||||
PrometheusService::init(config);
|
||||
signalsHandler_.subscribeToStop([this]() { appStopper_.stop(); });
|
||||
}
|
||||
|
||||
int
|
||||
ClioApplication::run(bool const useNgWebServer)
|
||||
{
|
||||
auto const threads = config_.get<uint16_t>("io_threads");
|
||||
LOG(util::LogService::info()) << "Number of io threads = " << threads;
|
||||
|
||||
// IO context to handle all incoming requests, as well as other things.
|
||||
// This is not the only io context in the application.
|
||||
boost::asio::io_context ioc{threads};
|
||||
|
||||
// Rate limiter, to prevent abuse
|
||||
auto whitelistHandler = web::dosguard::WhitelistHandler{config_};
|
||||
auto dosGuard = web::dosguard::DOSGuard{config_, whitelistHandler};
|
||||
auto sweepHandler = web::dosguard::IntervalSweepHandler{config_, ioc, dosGuard};
|
||||
|
||||
// Interface to the database
|
||||
auto backend = data::makeBackend(config_);
|
||||
|
||||
{
|
||||
auto const migrationInspector = migration::makeMigrationInspector(config_, backend);
|
||||
// Check if any migration is blocking Clio server starting.
|
||||
if (migrationInspector->isBlockingClio() and backend->hardFetchLedgerRangeNoThrow()) {
|
||||
LOG(util::LogService::error())
|
||||
<< "Existing Migration is blocking Clio, Please complete the database migration first.";
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
}
|
||||
|
||||
// Manages clients subscribed to streams
|
||||
auto subscriptions = feed::SubscriptionManager::makeSubscriptionManager(config_, backend);
|
||||
|
||||
// Tracks which ledgers have been validated by the network
|
||||
auto ledgers = etl::NetworkValidatedLedgers::makeValidatedLedgers();
|
||||
|
||||
// Handles the connection to one or more rippled nodes.
|
||||
// ETL uses the balancer to extract data.
|
||||
// The server uses the balancer to forward RPCs to a rippled node.
|
||||
// The balancer itself publishes to streams (transactions_proposed and accounts_proposed)
|
||||
auto balancer = etl::LoadBalancer::makeLoadBalancer(config_, ioc, backend, subscriptions, ledgers);
|
||||
|
||||
// ETL is responsible for writing and publishing to streams. In read-only mode, ETL only publishes
|
||||
auto etl = etl::ETLService::makeETLService(config_, ioc, backend, subscriptions, balancer, ledgers);
|
||||
|
||||
auto workQueue = rpc::WorkQueue::makeWorkQueue(config_);
|
||||
auto counters = rpc::Counters::makeCounters(workQueue);
|
||||
auto const amendmentCenter = std::make_shared<data::AmendmentCenter const>(backend);
|
||||
auto const handlerProvider = std::make_shared<rpc::impl::ProductionHandlerProvider const>(
|
||||
config_, backend, subscriptions, balancer, etl, amendmentCenter, counters
|
||||
);
|
||||
|
||||
using RPCEngineType = rpc::RPCEngine<etl::LoadBalancer, rpc::Counters>;
|
||||
auto const rpcEngine =
|
||||
RPCEngineType::makeRPCEngine(config_, backend, balancer, dosGuard, workQueue, counters, handlerProvider);
|
||||
|
||||
if (useNgWebServer or config_.get<bool>("server.__ng_web_server")) {
|
||||
web::ng::RPCServerHandler<RPCEngineType, etl::ETLService> handler{config_, backend, rpcEngine, etl};
|
||||
|
||||
auto expectedAdminVerifier = web::makeAdminVerificationStrategy(config_);
|
||||
if (not expectedAdminVerifier.has_value()) {
|
||||
LOG(util::LogService::error()) << "Error creating admin verifier: " << expectedAdminVerifier.error();
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
auto const adminVerifier = std::move(expectedAdminVerifier).value();
|
||||
|
||||
auto httpServer = web::ng::makeServer(config_, OnConnectCheck{dosGuard}, DisconnectHook{dosGuard}, ioc);
|
||||
|
||||
if (not httpServer.has_value()) {
|
||||
LOG(util::LogService::error()) << "Error creating web server: " << httpServer.error();
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
|
||||
httpServer->onGet("/metrics", MetricsHandler{adminVerifier});
|
||||
httpServer->onGet("/health", HealthCheckHandler{});
|
||||
auto requestHandler = RequestHandler{adminVerifier, handler, dosGuard};
|
||||
httpServer->onPost("/", requestHandler);
|
||||
httpServer->onWs(std::move(requestHandler));
|
||||
|
||||
auto const maybeError = httpServer->run();
|
||||
if (maybeError.has_value()) {
|
||||
LOG(util::LogService::error()) << "Error starting web server: " << *maybeError;
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
|
||||
appStopper_.setOnStop(
|
||||
Stopper::makeOnStopCallback(httpServer.value(), *balancer, *etl, *subscriptions, *backend, ioc)
|
||||
);
|
||||
|
||||
// Blocks until stopped.
|
||||
// When stopped, shared_ptrs fall out of scope
|
||||
// Calls destructors on all resources, and destructs in order
|
||||
start(ioc, threads);
|
||||
|
||||
return EXIT_SUCCESS;
|
||||
}
|
||||
|
||||
// Init the web server
|
||||
auto handler =
|
||||
std::make_shared<web::RPCServerHandler<RPCEngineType, etl::ETLService>>(config_, backend, rpcEngine, etl);
|
||||
|
||||
auto const httpServer = web::makeHttpServer(config_, ioc, dosGuard, handler);
|
||||
|
||||
// Blocks until stopped.
|
||||
// When stopped, shared_ptrs fall out of scope
|
||||
// Calls destructors on all resources, and destructs in order
|
||||
start(ioc, threads);
|
||||
|
||||
return EXIT_SUCCESS;
|
||||
}
|
||||
|
||||
} // namespace app
|
||||
@@ -1,55 +0,0 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2024, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include "app/Stopper.hpp"
#include "util/SignalsHandler.hpp"
#include "util/newconfig/ConfigDefinition.hpp"

namespace app {

/**
 * @brief The main application class
 */
class ClioApplication {
    util::config::ClioConfigDefinition const& config_;
    util::SignalsHandler signalsHandler_;
    Stopper appStopper_;

public:
    /**
     * @brief Construct a new ClioApplication object
     *
     * @param config The configuration of the application
     */
    ClioApplication(util::config::ClioConfigDefinition const& config);

    /**
     * @brief Run the application
     *
     * @param useNgWebServer Whether to use the new web server
     *
     * @return exit code
     */
    int
    run(bool useNgWebServer);
};

} // namespace app
@@ -1,52 +0,0 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2025, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include "app/Stopper.hpp"

#include <boost/asio/spawn.hpp>

#include <functional>
#include <thread>
#include <utility>

namespace app {

Stopper::~Stopper()
{
    if (worker_.joinable())
        worker_.join();
}

void
Stopper::setOnStop(std::function<void(boost::asio::yield_context)> cb)
{
    boost::asio::spawn(ctx_, std::move(cb));
}

void
Stopper::stop()
{
    // Do nothing if worker_ is already running
    if (worker_.joinable())
        return;

    worker_ = std::thread{[this]() { ctx_.run(); }};
}

} // namespace app
@@ -1,118 +0,0 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2024, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "etl/ETLService.hpp"
|
||||
#include "etl/LoadBalancer.hpp"
|
||||
#include "feed/SubscriptionManagerInterface.hpp"
|
||||
#include "util/CoroutineGroup.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
#include "web/ng/Server.hpp"
|
||||
|
||||
#include <boost/asio/executor_work_guard.hpp>
|
||||
#include <boost/asio/io_context.hpp>
|
||||
#include <boost/asio/spawn.hpp>
|
||||
|
||||
#include <functional>
|
||||
#include <thread>
|
||||
|
||||
namespace app {
|
||||
|
||||
/**
|
||||
* @brief Application stopper class. On stop it will create a new thread to run all the shutdown tasks.
|
||||
*/
|
||||
class Stopper {
|
||||
boost::asio::io_context ctx_;
|
||||
std::thread worker_;
|
||||
|
||||
public:
|
||||
/**
|
||||
* @brief Destroy the Stopper object
|
||||
*/
|
||||
~Stopper();
|
||||
|
||||
/**
|
||||
* @brief Set the callback to be called when the application is stopped.
|
||||
*
|
||||
* @param cb The callback to be called on application stop.
|
||||
*/
|
||||
void
|
||||
setOnStop(std::function<void(boost::asio::yield_context)> cb);
|
||||
|
||||
/**
|
||||
* @brief Stop the application and run the shutdown tasks.
|
||||
*/
|
||||
void
|
||||
stop();
|
||||
|
||||
/**
|
||||
* @brief Create a callback to be called on application stop.
|
||||
*
|
||||
* @param server The server to stop.
|
||||
* @param balancer The load balancer to stop.
|
||||
* @param etl The ETL service to stop.
|
||||
* @param subscriptions The subscription manager to stop.
|
||||
* @param backend The backend to stop.
|
||||
* @param ioc The io_context to stop.
|
||||
* @return The callback to be called on application stop.
|
||||
*/
|
||||
template <
|
||||
web::ng::SomeServer ServerType,
|
||||
etl::SomeLoadBalancer LoadBalancerType,
|
||||
etl::SomeETLService ETLServiceType>
|
||||
static std::function<void(boost::asio::yield_context)>
|
||||
makeOnStopCallback(
|
||||
ServerType& server,
|
||||
LoadBalancerType& balancer,
|
||||
ETLServiceType& etl,
|
||||
feed::SubscriptionManagerInterface& subscriptions,
|
||||
data::BackendInterface& backend,
|
||||
boost::asio::io_context& ioc
|
||||
)
|
||||
{
|
||||
return [&](boost::asio::yield_context yield) {
|
||||
util::CoroutineGroup coroutineGroup{yield};
|
||||
coroutineGroup.spawn(yield, [&server](auto innerYield) {
|
||||
server.stop(innerYield);
|
||||
LOG(util::LogService::info()) << "Server stopped";
|
||||
});
|
||||
coroutineGroup.spawn(yield, [&balancer](auto innerYield) {
|
||||
balancer.stop(innerYield);
|
||||
LOG(util::LogService::info()) << "LoadBalancer stopped";
|
||||
});
|
||||
coroutineGroup.asyncWait(yield);
|
||||
|
||||
etl.stop();
|
||||
LOG(util::LogService::info()) << "ETL stopped";
|
||||
|
||||
subscriptions.stop();
|
||||
LOG(util::LogService::info()) << "SubscriptionManager stopped";
|
||||
|
||||
backend.waitForWritesToFinish();
|
||||
LOG(util::LogService::info()) << "Backend writes finished";
|
||||
|
||||
ioc.stop();
|
||||
LOG(util::LogService::info()) << "io_context stopped";
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace app
|
||||
@@ -1,58 +0,0 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2025, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include "util/newconfig/ConfigDefinition.hpp"
#include "util/newconfig/ConfigFileJson.hpp"

#include <cstdlib>
#include <iostream>
#include <string_view>

namespace app {

/**
 * @brief Verifies user's config values are correct
 *
 * @param configPath The path to config
 * @return true if config values are all correct, false otherwise
 */
inline bool
parseConfig(std::string_view configPath)
{
    using namespace util::config;

    auto const json = ConfigFileJson::makeConfigFileJson(configPath);
    if (!json.has_value()) {
        std::cerr << "Error parsing json from config: " << configPath << "\n" << json.error().error << std::endl;
        return false;
    }
    auto const errors = gClioConfig.parse(json.value());
    if (errors.has_value()) {
        for (auto const& err : errors.value()) {
            std::cerr << "Issues found in provided config '" << configPath << "':\n";
            std::cerr << err.error << std::endl;
        }
        return false;
    }
    return true;
}

} // namespace app
@@ -1,111 +0,0 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2024, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include "app/WebHandlers.hpp"

#include "util/Assert.hpp"
#include "util/prometheus/Http.hpp"
#include "web/AdminVerificationStrategy.hpp"
#include "web/SubscriptionContextInterface.hpp"
#include "web/dosguard/DOSGuardInterface.hpp"
#include "web/ng/Connection.hpp"
#include "web/ng/Request.hpp"
#include "web/ng/Response.hpp"

#include <boost/asio/spawn.hpp>
#include <boost/beast/http/status.hpp>

#include <memory>
#include <optional>
#include <utility>

namespace app {

OnConnectCheck::OnConnectCheck(web::dosguard::DOSGuardInterface& dosguard) : dosguard_{dosguard}
{
}

std::expected<void, web::ng::Response>
OnConnectCheck::operator()(web::ng::Connection const& connection)
{
    dosguard_.get().increment(connection.ip());
    if (not dosguard_.get().isOk(connection.ip())) {
        return std::unexpected{
            web::ng::Response{boost::beast::http::status::too_many_requests, "Too many requests", connection}
        };
    }

    return {};
}

DisconnectHook::DisconnectHook(web::dosguard::DOSGuardInterface& dosguard) : dosguard_{dosguard}
{
}

void
DisconnectHook::operator()(web::ng::Connection const& connection)
{
    dosguard_.get().decrement(connection.ip());
}

MetricsHandler::MetricsHandler(std::shared_ptr<web::AdminVerificationStrategy> adminVerifier)
    : adminVerifier_{std::move(adminVerifier)}
{
}

web::ng::Response
MetricsHandler::operator()(
    web::ng::Request const& request,
    web::ng::ConnectionMetadata& connectionMetadata,
    web::SubscriptionContextPtr,
    boost::asio::yield_context
)
{
    auto const maybeHttpRequest = request.asHttpRequest();
    ASSERT(maybeHttpRequest.has_value(), "Got a non-HTTP request in Get");
    auto const& httpRequest = maybeHttpRequest->get();

    // FIXME(#1702): Using web server thread to handle prometheus request. Better to post on work queue.
    auto maybeResponse = util::prometheus::handlePrometheusRequest(
        httpRequest, adminVerifier_->isAdmin(httpRequest, connectionMetadata.ip())
    );
    ASSERT(maybeResponse.has_value(), "Got unexpected request for Prometheus");
    return web::ng::Response{std::move(maybeResponse).value(), request};
}

web::ng::Response
HealthCheckHandler::operator()(
    web::ng::Request const& request,
    web::ng::ConnectionMetadata&,
    web::SubscriptionContextPtr,
    boost::asio::yield_context
)
{
    static auto constexpr kHEALTH_CHECK_HTML = R"html(
    <!DOCTYPE html>
    <html>
    <head><title>Test page for Clio</title></head>
    <body><h1>Clio Test</h1><p>This page shows Clio http(s) connectivity is working.</p></body>
    </html>
    )html";

    return web::ng::Response{boost::beast::http::status::ok, kHEALTH_CHECK_HTML, request};
}

} // namespace app
@@ -1,234 +0,0 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2024, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "rpc/Errors.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
#include "web/AdminVerificationStrategy.hpp"
|
||||
#include "web/SubscriptionContextInterface.hpp"
|
||||
#include "web/dosguard/DOSGuardInterface.hpp"
|
||||
#include "web/ng/Connection.hpp"
|
||||
#include "web/ng/Request.hpp"
|
||||
#include "web/ng/Response.hpp"
|
||||
|
||||
#include <boost/asio/spawn.hpp>
|
||||
#include <boost/beast/http/status.hpp>
|
||||
#include <boost/json/array.hpp>
|
||||
#include <boost/json/parse.hpp>
|
||||
|
||||
#include <exception>
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
|
||||
namespace app {
|
||||
|
||||
/**
|
||||
* @brief A function object that checks if the connection is allowed to proceed.
|
||||
*/
|
||||
class OnConnectCheck {
|
||||
std::reference_wrapper<web::dosguard::DOSGuardInterface> dosguard_;
|
||||
|
||||
public:
|
||||
/**
|
||||
* @brief Construct a new OnConnectCheck object
|
||||
*
|
||||
* @param dosguard The DOSGuardInterface to use for checking the connection.
|
||||
*/
|
||||
OnConnectCheck(web::dosguard::DOSGuardInterface& dosguard);
|
||||
|
||||
/**
|
||||
* @brief Check if the connection is allowed to proceed.
|
||||
*
|
||||
* @param connection The connection to check.
|
||||
* @return A response if the connection is not allowed to proceed or void otherwise.
|
||||
*/
|
||||
std::expected<void, web::ng::Response>
|
||||
operator()(web::ng::Connection const& connection);
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief A function object to be called when a connection is disconnected.
|
||||
*/
|
||||
class DisconnectHook {
|
||||
std::reference_wrapper<web::dosguard::DOSGuardInterface> dosguard_;
|
||||
|
||||
public:
|
||||
/**
|
||||
* @brief Construct a new DisconnectHook object
|
||||
*
|
||||
* @param dosguard The DOSGuardInterface to use for disconnecting the connection.
|
||||
*/
|
||||
DisconnectHook(web::dosguard::DOSGuardInterface& dosguard);
|
||||
|
||||
/**
|
||||
* @brief The call of the function object.
|
||||
*
|
||||
* @param connection The connection which has disconnected.
|
||||
*/
|
||||
void
|
||||
operator()(web::ng::Connection const& connection);
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief A function object that handles the metrics endpoint.
|
||||
*/
|
||||
class MetricsHandler {
|
||||
std::shared_ptr<web::AdminVerificationStrategy> adminVerifier_;
|
||||
|
||||
public:
|
||||
/**
|
||||
* @brief Construct a new MetricsHandler object
|
||||
*
|
||||
* @param adminVerifier The AdminVerificationStrategy to use for verifying the connection for admin access.
|
||||
*/
|
||||
MetricsHandler(std::shared_ptr<web::AdminVerificationStrategy> adminVerifier);
|
||||
|
||||
/**
|
||||
* @brief The call of the function object.
|
||||
*
|
||||
* @param request The request to handle.
|
||||
* @param connectionMetadata The connection metadata.
|
||||
* @return The response to the request.
|
||||
*/
|
||||
web::ng::Response
|
||||
operator()(
|
||||
web::ng::Request const& request,
|
||||
web::ng::ConnectionMetadata& connectionMetadata,
|
||||
web::SubscriptionContextPtr,
|
||||
boost::asio::yield_context
|
||||
);
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief A function object that handles the health check endpoint.
|
||||
*/
|
||||
class HealthCheckHandler {
|
||||
public:
|
||||
/**
|
||||
* @brief The call of the function object.
|
||||
*
|
||||
* @param request The request to handle.
|
||||
* @return The response to the request
|
||||
*/
|
||||
web::ng::Response
|
||||
operator()(
|
||||
web::ng::Request const& request,
|
||||
web::ng::ConnectionMetadata&,
|
||||
web::SubscriptionContextPtr,
|
||||
boost::asio::yield_context
|
||||
);
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief A function object that handles the websocket endpoint.
|
||||
*
|
||||
* @tparam RpcHandlerType The type of the RPC handler.
|
||||
*/
|
||||
template <typename RpcHandlerType>
|
||||
class RequestHandler {
|
||||
util::Logger webServerLog_{"WebServer"};
|
||||
std::shared_ptr<web::AdminVerificationStrategy> adminVerifier_;
|
||||
std::reference_wrapper<RpcHandlerType> rpcHandler_;
|
||||
std::reference_wrapper<web::dosguard::DOSGuardInterface> dosguard_;
|
||||
|
||||
public:
|
||||
/**
|
||||
* @brief Construct a new RequestHandler object
|
||||
*
|
||||
* @param adminVerifier The AdminVerificationStrategy to use for verifying the connection for admin access.
|
||||
* @param rpcHandler The RPC handler to use for handling the request.
|
||||
* @param dosguard The DOSGuardInterface to use for checking the connection.
|
||||
*/
|
||||
RequestHandler(
|
||||
std::shared_ptr<web::AdminVerificationStrategy> adminVerifier,
|
||||
RpcHandlerType& rpcHandler,
|
||||
web::dosguard::DOSGuardInterface& dosguard
|
||||
)
|
||||
: adminVerifier_(std::move(adminVerifier)), rpcHandler_(rpcHandler), dosguard_(dosguard)
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief The call of the function object.
|
||||
*
|
||||
* @param request The request to handle.
|
||||
* @param connectionMetadata The connection metadata.
|
||||
* @param subscriptionContext The subscription context.
|
||||
* @param yield The yield context.
|
||||
* @return The response to the request.
|
||||
*/
|
||||
web::ng::Response
|
||||
operator()(
|
||||
web::ng::Request const& request,
|
||||
web::ng::ConnectionMetadata& connectionMetadata,
|
||||
web::SubscriptionContextPtr subscriptionContext,
|
||||
boost::asio::yield_context yield
|
||||
)
|
||||
{
|
||||
if (not dosguard_.get().request(connectionMetadata.ip())) {
|
||||
auto error = rpc::makeError(rpc::RippledError::rpcSLOW_DOWN);
|
||||
|
||||
if (not request.isHttp()) {
|
||||
try {
|
||||
auto requestJson = boost::json::parse(request.message());
|
||||
if (requestJson.is_object() && requestJson.as_object().contains("id"))
|
||||
error["id"] = requestJson.as_object().at("id");
|
||||
error["request"] = request.message();
|
||||
} catch (std::exception const&) {
|
||||
error["request"] = request.message();
|
||||
}
|
||||
}
|
||||
return web::ng::Response{boost::beast::http::status::service_unavailable, error, request};
|
||||
}
|
||||
LOG(webServerLog_.info()) << connectionMetadata.tag()
|
||||
<< "Received request from ip = " << connectionMetadata.ip()
|
||||
<< " - posting to WorkQueue";
|
||||
|
||||
connectionMetadata.setIsAdmin([this, &request, &connectionMetadata]() {
|
||||
return adminVerifier_->isAdmin(request.httpHeaders(), connectionMetadata.ip());
|
||||
});
|
||||
|
||||
try {
|
||||
auto response = rpcHandler_(request, connectionMetadata, std::move(subscriptionContext), yield);
|
||||
|
||||
if (not dosguard_.get().add(connectionMetadata.ip(), response.message().size())) {
|
||||
auto jsonResponse = boost::json::parse(response.message()).as_object();
|
||||
jsonResponse["warning"] = "load";
|
||||
if (jsonResponse.contains("warnings") && jsonResponse["warnings"].is_array()) {
|
||||
jsonResponse["warnings"].as_array().push_back(rpc::makeWarning(rpc::WarnRpcRateLimit));
|
||||
} else {
|
||||
jsonResponse["warnings"] = boost::json::array{rpc::makeWarning(rpc::WarnRpcRateLimit)};
|
||||
}
|
||||
response.setMessage(jsonResponse);
|
||||
}
|
||||
|
||||
return response;
|
||||
} catch (std::exception const&) {
|
||||
return web::ng::Response{
|
||||
boost::beast::http::status::internal_server_error,
|
||||
rpc::makeError(rpc::RippledError::rpcINTERNAL),
|
||||
request
|
||||
};
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace app
|
||||
@@ -1,203 +0,0 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2024, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include "data/AmendmentCenter.hpp"
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/Types.hpp"
|
||||
#include "util/Assert.hpp"
|
||||
|
||||
#include <boost/asio/spawn.hpp>
|
||||
#include <xrpl/basics/Slice.h>
|
||||
#include <xrpl/basics/base_uint.h>
|
||||
#include <xrpl/protocol/Feature.h>
|
||||
#include <xrpl/protocol/Indexes.h>
|
||||
#include <xrpl/protocol/SField.h>
|
||||
#include <xrpl/protocol/STLedgerEntry.h>
|
||||
#include <xrpl/protocol/Serializer.h>
|
||||
#include <xrpl/protocol/digest.h>
|
||||
|
||||
#include <algorithm>
|
||||
#include <cstdint>
|
||||
#include <iterator>
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <ranges>
|
||||
#include <stdexcept>
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
#include <unordered_set>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
namespace {
|
||||
|
||||
std::unordered_set<std::string>&
|
||||
supportedAmendments()
|
||||
{
|
||||
static std::unordered_set<std::string> kAMENDMENTS = {};
|
||||
return kAMENDMENTS;
|
||||
}
|
||||
|
||||
bool
|
||||
lookupAmendment(auto const& allAmendments, std::vector<ripple::uint256> const& ledgerAmendments, std::string_view name)
|
||||
{
|
||||
namespace rg = std::ranges;
|
||||
if (auto const am = rg::find(allAmendments, name, &data::Amendment::name); am != rg::end(allAmendments))
|
||||
return rg::find(ledgerAmendments, am->feature) != rg::end(ledgerAmendments);
|
||||
return false;
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
namespace data {
|
||||
namespace impl {
|
||||
|
||||
WritingAmendmentKey::WritingAmendmentKey(std::string amendmentName) : AmendmentKey{std::move(amendmentName)}
|
||||
{
|
||||
ASSERT(not supportedAmendments().contains(name), "Attempt to register the same amendment twice");
|
||||
supportedAmendments().insert(name);
|
||||
}
|
||||
|
||||
} // namespace impl
|
||||
|
||||
AmendmentKey::operator std::string const&() const
|
||||
{
|
||||
return name;
|
||||
}
|
||||
|
||||
AmendmentKey::operator std::string_view() const
|
||||
{
|
||||
return name;
|
||||
}
|
||||
|
||||
AmendmentKey::operator ripple::uint256() const
|
||||
{
|
||||
return Amendment::getAmendmentId(name);
|
||||
}
|
||||
|
||||
AmendmentCenter::AmendmentCenter(std::shared_ptr<data::BackendInterface> const& backend) : backend_{backend}
|
||||
{
|
||||
namespace rg = std::ranges;
|
||||
namespace vs = std::views;
|
||||
|
||||
rg::copy(
|
||||
ripple::allAmendments() | vs::transform([&](auto const& p) {
|
||||
auto const& [name, support] = p;
|
||||
return Amendment{
|
||||
.name = name,
|
||||
.feature = Amendment::getAmendmentId(name),
|
||||
.isSupportedByXRPL = support != ripple::AmendmentSupport::Unsupported,
|
||||
.isSupportedByClio = rg::find(supportedAmendments(), name) != rg::end(supportedAmendments()),
|
||||
.isRetired = support == ripple::AmendmentSupport::Retired
|
||||
};
|
||||
}),
|
||||
std::back_inserter(all_)
|
||||
);
|
||||
|
||||
for (auto const& am : all_ | vs::filter([](auto const& am) { return am.isSupportedByClio; }))
|
||||
supported_.insert_or_assign(am.name, am);
|
||||
}
|
||||
|
||||
bool
|
||||
AmendmentCenter::isSupported(AmendmentKey const& key) const
|
||||
{
|
||||
return supported_.contains(key);
|
||||
}
|
||||
|
||||
std::map<std::string, Amendment> const&
|
||||
AmendmentCenter::getSupported() const
|
||||
{
|
||||
return supported_;
|
||||
}
|
||||
|
||||
std::vector<Amendment> const&
|
||||
AmendmentCenter::getAll() const
|
||||
{
|
||||
return all_;
|
||||
}
|
||||
|
||||
bool
|
||||
AmendmentCenter::isEnabled(AmendmentKey const& key, uint32_t seq) const
|
||||
{
|
||||
return data::synchronous([this, &key, seq](auto yield) { return isEnabled(yield, key, seq); });
|
||||
}
|
||||
|
||||
bool
|
||||
AmendmentCenter::isEnabled(boost::asio::yield_context yield, AmendmentKey const& key, uint32_t seq) const
|
||||
{
|
||||
if (auto const listAmendments = fetchAmendmentsList(yield, seq); listAmendments)
|
||||
return lookupAmendment(all_, *listAmendments, key);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
std::vector<bool>
|
||||
AmendmentCenter::isEnabled(boost::asio::yield_context yield, std::vector<AmendmentKey> const& keys, uint32_t seq) const
|
||||
{
|
||||
namespace rg = std::ranges;
|
||||
|
||||
if (auto const listAmendments = fetchAmendmentsList(yield, seq); listAmendments) {
|
||||
std::vector<bool> out;
|
||||
rg::transform(keys, std::back_inserter(out), [this, &listAmendments](auto const& key) {
|
||||
return lookupAmendment(all_, *listAmendments, key);
|
||||
});
|
||||
|
||||
return out;
|
||||
}
|
||||
|
||||
return std::vector<bool>(keys.size(), false);
|
||||
}
|
||||
|
||||
Amendment const&
|
||||
AmendmentCenter::getAmendment(AmendmentKey const& key) const
|
||||
{
|
||||
ASSERT(supported_.contains(key), "The amendment '{}' must be present in supported amendments list", key.name);
|
||||
return supported_.at(key);
|
||||
}
|
||||
|
||||
Amendment const&
|
||||
AmendmentCenter::operator[](AmendmentKey const& key) const
|
||||
{
|
||||
return getAmendment(key);
|
||||
}
|
||||
|
||||
ripple::uint256
|
||||
Amendment::getAmendmentId(std::string_view name)
|
||||
{
|
||||
return ripple::sha512Half(ripple::Slice(name.data(), name.size()));
|
||||
}
|
||||
|
||||
std::optional<std::vector<ripple::uint256>>
|
||||
AmendmentCenter::fetchAmendmentsList(boost::asio::yield_context yield, uint32_t seq) const
|
||||
{
|
||||
// the amendments should always be present on the ledger
|
||||
auto const amendments = backend_->fetchLedgerObject(ripple::keylet::amendments().key, seq, yield);
|
||||
if (not amendments.has_value())
|
||||
throw std::runtime_error("Amendments ledger object must be present in the database");
|
||||
|
||||
ripple::SLE const amendmentsSLE{
|
||||
ripple::SerialIter{amendments->data(), amendments->size()}, ripple::keylet::amendments().key
|
||||
};
|
||||
|
||||
return amendmentsSLE[~ripple::sfAmendments];
|
||||
}
|
||||
|
||||
} // namespace data
|
||||
@@ -1,263 +0,0 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2024, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/AmendmentCenterInterface.hpp"
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/Types.hpp"
|
||||
|
||||
#include <boost/asio/spawn.hpp>
|
||||
#include <boost/preprocessor.hpp>
|
||||
#include <boost/preprocessor/seq/for_each.hpp>
|
||||
#include <boost/preprocessor/stringize.hpp>
|
||||
#include <boost/preprocessor/variadic/to_seq.hpp>
|
||||
#include <xrpl/basics/Slice.h>
|
||||
#include <xrpl/basics/base_uint.h>
|
||||
#include <xrpl/protocol/Feature.h>
|
||||
#include <xrpl/protocol/Indexes.h>
|
||||
#include <xrpl/protocol/SField.h>
|
||||
#include <xrpl/protocol/STLedgerEntry.h>
|
||||
#include <xrpl/protocol/Serializer.h>
|
||||
#include <xrpl/protocol/digest.h>
|
||||
|
||||
#include <cstdint>
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#define REGISTER(name) \
|
||||
inline static impl::WritingAmendmentKey const name = \
|
||||
impl::WritingAmendmentKey(std::string(BOOST_PP_STRINGIZE(name)))
|
||||
|
||||
namespace data {
|
||||
namespace impl {
|
||||
|
||||
struct WritingAmendmentKey : AmendmentKey {
|
||||
explicit WritingAmendmentKey(std::string amendmentName);
|
||||
};
|
||||
|
||||
} // namespace impl
|
||||
|
||||
/**
|
||||
* @brief List of supported amendments
|
||||
*/
|
||||
struct Amendments {
|
||||
// NOTE: if Clio wants to report it supports an Amendment it should be listed here.
|
||||
// Whether an amendment is obsolete and/or supported by libxrpl is extracted directly from libxrpl.
|
||||
// If an amendment is in the list below it just means Clio did whatever changes needed to support it.
|
||||
// Most of the time it's going to be no changes at all.
|
||||
|
||||
/** @cond */
|
||||
// NOLINTBEGIN(readability-identifier-naming)
|
||||
REGISTER(OwnerPaysFee);
|
||||
REGISTER(Flow);
|
||||
REGISTER(FlowCross);
|
||||
REGISTER(fix1513);
|
||||
REGISTER(DepositAuth);
|
||||
REGISTER(Checks);
|
||||
REGISTER(fix1571);
|
||||
REGISTER(fix1543);
|
||||
REGISTER(fix1623);
|
||||
REGISTER(DepositPreauth);
|
||||
REGISTER(fix1515);
|
||||
REGISTER(fix1578);
|
||||
REGISTER(MultiSignReserve);
|
||||
REGISTER(fixTakerDryOfferRemoval);
|
||||
REGISTER(fixMasterKeyAsRegularKey);
|
||||
REGISTER(fixCheckThreading);
|
||||
REGISTER(fixPayChanRecipientOwnerDir);
|
||||
REGISTER(DeletableAccounts);
|
||||
REGISTER(fixQualityUpperBound);
|
||||
REGISTER(RequireFullyCanonicalSig);
|
||||
REGISTER(fix1781);
|
||||
REGISTER(HardenedValidations);
|
||||
REGISTER(fixAmendmentMajorityCalc);
|
||||
REGISTER(NegativeUNL);
|
||||
REGISTER(TicketBatch);
|
||||
REGISTER(FlowSortStrands);
|
||||
REGISTER(fixSTAmountCanonicalize);
|
||||
REGISTER(fixRmSmallIncreasedQOffers);
|
||||
REGISTER(CheckCashMakesTrustLine);
|
||||
REGISTER(ExpandedSignerList);
|
||||
REGISTER(NonFungibleTokensV1_1);
|
||||
REGISTER(fixTrustLinesToSelf);
|
||||
REGISTER(fixRemoveNFTokenAutoTrustLine);
|
||||
REGISTER(ImmediateOfferKilled);
|
||||
REGISTER(DisallowIncoming);
|
||||
REGISTER(XRPFees);
|
||||
REGISTER(fixUniversalNumber);
|
||||
REGISTER(fixNonFungibleTokensV1_2);
|
||||
REGISTER(fixNFTokenRemint);
|
||||
REGISTER(fixReducedOffersV1);
|
||||
REGISTER(Clawback);
|
||||
REGISTER(AMM);
|
||||
REGISTER(XChainBridge);
|
||||
REGISTER(fixDisallowIncomingV1);
|
||||
REGISTER(DID);
|
||||
REGISTER(fixFillOrKill);
|
||||
REGISTER(fixNFTokenReserve);
|
||||
REGISTER(fixInnerObjTemplate);
|
||||
REGISTER(fixAMMOverflowOffer);
|
||||
REGISTER(PriceOracle);
|
||||
REGISTER(fixEmptyDID);
|
||||
REGISTER(fixXChainRewardRounding);
|
||||
REGISTER(fixPreviousTxnID);
|
||||
REGISTER(fixAMMv1_1);
|
||||
REGISTER(NFTokenMintOffer);
|
||||
REGISTER(fixReducedOffersV2);
|
||||
REGISTER(fixEnforceNFTokenTrustline);
|
||||
REGISTER(fixInnerObjTemplate2);
|
||||
REGISTER(fixNFTokenPageLinks);
|
||||
REGISTER(InvariantsV1_1);
|
||||
REGISTER(MPTokensV1);
|
||||
REGISTER(fixAMMv1_2);
|
||||
REGISTER(AMMClawback);
|
||||
REGISTER(Credentials);
|
||||
REGISTER(DynamicNFT);
|
||||
REGISTER(PermissionedDomains);
|
||||
|
||||
// Obsolete but supported by libxrpl
|
||||
REGISTER(CryptoConditionsSuite);
|
||||
REGISTER(NonFungibleTokensV1);
|
||||
REGISTER(fixNFTokenDirV1);
|
||||
REGISTER(fixNFTokenNegOffer);
|
||||
|
||||
// Retired amendments
|
||||
REGISTER(MultiSign);
|
||||
REGISTER(TrustSetAuth);
|
||||
REGISTER(FeeEscalation);
|
||||
REGISTER(PayChan);
|
||||
REGISTER(fix1368);
|
||||
REGISTER(CryptoConditions);
|
||||
REGISTER(Escrow);
|
||||
REGISTER(TickSize);
|
||||
REGISTER(fix1373);
|
||||
REGISTER(EnforceInvariants);
|
||||
REGISTER(SortedDirectories);
|
||||
REGISTER(fix1201);
|
||||
REGISTER(fix1512);
|
||||
REGISTER(fix1523);
|
||||
REGISTER(fix1528);
|
||||
// NOLINTEND(readability-identifier-naming)
|
||||
/** @endcond */
|
||||
};
|
||||
|
||||
#undef REGISTER
|
||||
|
||||
/**
|
||||
* @brief Knowledge center for amendments within XRPL
|
||||
*/
|
||||
class AmendmentCenter : public AmendmentCenterInterface {
|
||||
std::shared_ptr<data::BackendInterface> backend_;
|
||||
|
||||
std::map<std::string, Amendment> supported_;
|
||||
std::vector<Amendment> all_;
|
||||
|
||||
public:
|
||||
/**
|
||||
* @brief Construct a new AmendmentCenter instance
|
||||
*
|
||||
* @param backend The backend
|
||||
*/
|
||||
explicit AmendmentCenter(std::shared_ptr<data::BackendInterface> const& backend);
|
||||
|
||||
/**
|
||||
* @brief Check whether an amendment is supported by Clio
|
||||
*
|
||||
* @param key The key of the amendment to check
|
||||
* @return true if supported; false otherwise
|
||||
*/
|
||||
[[nodiscard]] bool
|
||||
isSupported(AmendmentKey const& key) const final;
|
||||
|
||||
/**
|
||||
* @brief Get all supported amendments as a map
|
||||
*
|
||||
* @return The amendments supported by Clio
|
||||
*/
|
||||
[[nodiscard]] std::map<std::string, Amendment> const&
|
||||
getSupported() const final;
|
||||
|
||||
/**
|
||||
* @brief Get all known amendments
|
||||
*
|
||||
* @return All known amendments as a vector
|
||||
*/
|
||||
[[nodiscard]] std::vector<Amendment> const&
|
||||
getAll() const final;
|
||||
|
||||
/**
|
||||
* @brief Check whether an amendment was/is enabled for a given sequence
|
||||
*
|
||||
* @param key The key of the amendment to check
|
||||
* @param seq The sequence to check for
|
||||
* @return true if enabled; false otherwise
|
||||
*/
|
||||
[[nodiscard]] bool
|
||||
isEnabled(AmendmentKey const& key, uint32_t seq) const final;
|
||||
|
||||
/**
|
||||
* @brief Check whether an amendment was/is enabled for a given sequence
|
||||
*
|
||||
* @param yield The coroutine context to use
|
||||
* @param key The key of the amendment to check
|
||||
* @param seq The sequence to check for
|
||||
* @return true if enabled; false otherwise
|
||||
*/
|
||||
[[nodiscard]] bool
|
||||
isEnabled(boost::asio::yield_context yield, AmendmentKey const& key, uint32_t seq) const final;
|
||||
|
||||
/**
|
||||
* @brief Check whether an amendment was/is enabled for a given sequence
|
||||
*
|
||||
* @param yield The coroutine context to use
|
||||
* @param keys The keys of the amendments to check
|
||||
* @param seq The sequence to check for
|
||||
* @return A vector of bools representing enabled state for each of the given keys
|
||||
*/
|
||||
[[nodiscard]] std::vector<bool>
|
||||
isEnabled(boost::asio::yield_context yield, std::vector<AmendmentKey> const& keys, uint32_t seq) const final;
|
||||
|
||||
/**
|
||||
* @brief Get an amendment
|
||||
*
|
||||
* @param key The key of the amendment to get
|
||||
* @return The amendment as a const ref; asserts if the amendment is unknown
|
||||
*/
|
||||
[[nodiscard]] Amendment const&
|
||||
getAmendment(AmendmentKey const& key) const final;
|
||||
|
||||
/**
|
||||
* @brief Get an amendment by its key
|
||||
|
||||
* @param key The amendment key from @see Amendments
|
||||
* @return The amendment as a const ref; asserts if the amendment is unknown
|
||||
*/
|
||||
[[nodiscard]] Amendment const&
|
||||
operator[](AmendmentKey const& key) const final;
|
||||
|
||||
private:
|
||||
[[nodiscard]] std::optional<std::vector<ripple::uint256>>
|
||||
fetchAmendmentsList(boost::asio::yield_context yield, uint32_t seq) const;
|
||||
};
|
||||
|
||||
} // namespace data
|
||||
@@ -1,116 +0,0 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2024, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/Types.hpp"
|
||||
|
||||
#include <boost/asio/spawn.hpp>
|
||||
|
||||
#include <cstdint>
|
||||
#include <map>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
namespace data {
|
||||
|
||||
/**
|
||||
* @brief The interface of an amendment center
|
||||
*/
|
||||
class AmendmentCenterInterface {
|
||||
public:
|
||||
virtual ~AmendmentCenterInterface() = default;
|
||||
|
||||
/**
|
||||
* @brief Check whether an amendment is supported by Clio
|
||||
*
|
||||
* @param key The key of the amendment to check
|
||||
* @return true if supported; false otherwise
|
||||
*/
|
||||
[[nodiscard]] virtual bool
|
||||
isSupported(AmendmentKey const& key) const = 0;
|
||||
|
||||
/**
|
||||
* @brief Get all supported amendments as a map
|
||||
*
|
||||
* @return The amendments supported by Clio
|
||||
*/
|
||||
[[nodiscard]] virtual std::map<std::string, Amendment> const&
|
||||
getSupported() const = 0;
|
||||
|
||||
/**
|
||||
* @brief Get all known amendments
|
||||
*
|
||||
* @return All known amendments as a vector
|
||||
*/
|
||||
[[nodiscard]] virtual std::vector<Amendment> const&
|
||||
getAll() const = 0;
|
||||
|
||||
/**
|
||||
* @brief Check whether an amendment was/is enabled for a given sequence
|
||||
*
|
||||
* @param key The key of the amendment to check
|
||||
* @param seq The sequence to check for
|
||||
* @return true if enabled; false otherwise
|
||||
*/
|
||||
[[nodiscard]] virtual bool
|
||||
isEnabled(AmendmentKey const& key, uint32_t seq) const = 0;
|
||||
|
||||
/**
|
||||
* @brief Check whether an amendment was/is enabled for a given sequence
|
||||
*
|
||||
* @param yield The coroutine context to use
|
||||
* @param key The key of the amendment to check
|
||||
* @param seq The sequence to check for
|
||||
* @return true if enabled; false otherwise
|
||||
*/
|
||||
[[nodiscard]] virtual bool
|
||||
isEnabled(boost::asio::yield_context yield, AmendmentKey const& key, uint32_t seq) const = 0;
|
||||
|
||||
/**
|
||||
* @brief Check whether an amendment was/is enabled for a given sequence
|
||||
*
|
||||
* @param yield The coroutine context to use
|
||||
* @param keys The keys of the amendments to check
|
||||
* @param seq The sequence to check for
|
||||
* @return A vector of bools representing enabled state for each of the given keys
|
||||
*/
|
||||
[[nodiscard]] virtual std::vector<bool>
|
||||
isEnabled(boost::asio::yield_context yield, std::vector<AmendmentKey> const& keys, uint32_t seq) const = 0;
|
||||
|
||||
/**
|
||||
* @brief Get an amendment
|
||||
*
|
||||
* @param key The key of the amendment to get
|
||||
* @return The amendment as a const ref; asserts if the amendment is unknown
|
||||
*/
|
||||
[[nodiscard]] virtual Amendment const&
|
||||
getAmendment(AmendmentKey const& key) const = 0;
|
||||
|
||||
/**
|
||||
* @brief Get an amendment by its key
|
||||
*
|
||||
* @param key The amendment key from @see Amendments
|
||||
* @return The amendment as a const ref; asserts if the amendment is unknown
|
||||
*/
|
||||
[[nodiscard]] virtual Amendment const&
|
||||
operator[](AmendmentKey const& key) const = 0;
|
||||
};
|
||||
|
||||
} // namespace data
|
||||
@@ -36,7 +36,7 @@ namespace data {

namespace {

std::vector<std::int64_t> const kHISTOGRAM_BUCKETS{1, 2, 5, 10, 20, 50, 100, 200, 500, 700, 1000};
std::vector<std::int64_t> const histogramBuckets{1, 2, 5, 10, 20, 50, 100, 200, 500, 700, 1000};

std::int64_t
durationInMillisecondsSince(std::chrono::steady_clock::time_point const startTime)
@@ -69,13 +69,13 @@ BackendCounters::BackendCounters()
    , readDurationHistogram_(PrometheusService::histogramInt(
          "backend_duration_milliseconds_histogram",
          Labels({Label{"operation", "read"}}),
          kHISTOGRAM_BUCKETS,
          histogramBuckets,
          "The duration of backend read operations including retries"
      ))
    , writeDurationHistogram_(PrometheusService::histogramInt(
          "backend_duration_milliseconds_histogram",
          Labels({Label{"operation", "write"}}),
          kHISTOGRAM_BUCKETS,
          histogramBuckets,
          "The duration of backend write operations including retries"
      ))
{

@@ -22,8 +22,8 @@
#include "data/BackendInterface.hpp"
#include "data/CassandraBackend.hpp"
#include "data/cassandra/SettingsProvider.hpp"
#include "util/config/Config.hpp"
#include "util/log/Logger.hpp"
#include "util/newconfig/ConfigDefinition.hpp"

#include <boost/algorithm/string.hpp>
#include <boost/algorithm/string/predicate.hpp>
@@ -41,18 +41,19 @@ namespace data {
 * @return A shared_ptr<BackendInterface> with the selected implementation
 */
inline std::shared_ptr<BackendInterface>
makeBackend(util::config::ClioConfigDefinition const& config)
make_Backend(util::Config const& config)
{
    static util::Logger const log{"Backend"};  // NOLINT(readability-identifier-naming)
    static util::Logger const log{"Backend"};
    LOG(log.info()) << "Constructing BackendInterface";

    auto const readOnly = config.get<bool>("read_only");
    auto const readOnly = config.valueOr("read_only", false);

    auto const type = config.get<std::string>("database.type");
    auto const type = config.value<std::string>("database.type");
    std::shared_ptr<BackendInterface> backend = nullptr;

    if (boost::iequals(type, "cassandra")) {
        auto const cfg = config.getObject("database." + type);
    // TODO: retire `cassandra-new` by next release after 2.0
    if (boost::iequals(type, "cassandra") or boost::iequals(type, "cassandra-new")) {
        auto cfg = config.section("database." + type);
        backend = std::make_shared<data::cassandra::CassandraBackend>(data::cassandra::SettingsProvider{cfg}, readOnly);
    }


@@ -24,13 +24,13 @@
#include "util/log/Logger.hpp"

#include <boost/asio/spawn.hpp>
#include <xrpl/basics/base_uint.h>
#include <xrpl/basics/strHex.h>
#include <xrpl/protocol/Fees.h>
#include <xrpl/protocol/Indexes.h>
#include <xrpl/protocol/SField.h>
#include <xrpl/protocol/STLedgerEntry.h>
#include <xrpl/protocol/Serializer.h>
#include <ripple/basics/base_uint.h>
#include <ripple/basics/strHex.h>
#include <ripple/protocol/Fees.h>
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/SField.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <ripple/protocol/Serializer.h>

#include <chrono>
#include <cstddef>
@@ -93,6 +93,7 @@ BackendInterface::fetchLedgerObject(
        return obj;
    }

    LOG(gLog.trace()) << "Cache miss - " << ripple::strHex(key);
    auto dbObj = doFetchLedgerObject(key, sequence, yield);
    if (!dbObj) {
        LOG(gLog.trace()) << "Missed cache and missed in db";
@@ -102,19 +103,6 @@ BackendInterface::fetchLedgerObject(
    return dbObj;
}

std::optional<std::uint32_t>
BackendInterface::fetchLedgerObjectSeq(
    ripple::uint256 const& key,
    std::uint32_t const sequence,
    boost::asio::yield_context yield
) const
{
    auto seq = doFetchLedgerObjectSeq(key, sequence, yield);
    if (!seq)
        LOG(gLog.trace()) << "Missed in db";
    return seq;
}

std::vector<Blob>
BackendInterface::fetchLedgerObjects(
    std::vector<ripple::uint256> const& keys,
@@ -176,9 +164,9 @@ BackendInterface::fetchSuccessorObject(
    if (succ) {
        auto obj = fetchLedgerObject(*succ, ledgerSequence, yield);
        if (!obj)
            return {{.key = *succ, .blob = {}}};
            return {{*succ, {}}};

        return {{.key = *succ, .blob = *obj}};
        return {{*succ, *obj}};
    }
    return {};
}
@@ -267,7 +255,7 @@ std::optional<LedgerRange>
BackendInterface::fetchLedgerRange() const
{
    std::shared_lock const lck(rngMtx_);
    return range_;
    return range;
}

void
@@ -276,16 +264,16 @@ BackendInterface::updateRange(uint32_t newMax)
    std::scoped_lock const lck(rngMtx_);

    ASSERT(
        !range_ || newMax >= range_->maxSequence,
        !range || newMax >= range->maxSequence,
        "Range shouldn't exist yet or newMax should be greater. newMax = {}, range->maxSequence = {}",
        newMax,
        range_->maxSequence
        range->maxSequence
    );

    if (!range_) {
        range_ = {.minSequence = newMax, .maxSequence = newMax};
    if (!range) {
        range = {newMax, newMax};
    } else {
        range_->maxSequence = newMax;
        range->maxSequence = newMax;
    }
}

@@ -296,10 +284,10 @@ BackendInterface::setRange(uint32_t min, uint32_t max, bool force)

    if (!force) {
        ASSERT(min <= max, "Range min must be less than or equal to max");
        ASSERT(not range_.has_value(), "Range was already set");
        ASSERT(not range.has_value(), "Range was already set");
    }

    range_ = {.minSequence = min, .maxSequence = max};
    range = {min, max};
}

LedgerPage
@@ -320,10 +308,10 @@ BackendInterface::fetchLedgerPage(
    ripple::uint256 const& curCursor = [&]() {
        if (!keys.empty())
            return keys.back();
        return (cursor ? *cursor : kFIRST_KEY);
        return (cursor ? *cursor : firstKey);
    }();

    std::uint32_t const seq = outOfOrder ? range_->maxSequence : ledgerSequence;
    std::uint32_t const seq = outOfOrder ? range->maxSequence : ledgerSequence;
    auto succ = fetchSuccessorKey(curCursor, seq, yield);
|
||||
|
||||
if (!succ) {
|
||||
|
||||
@@ -31,10 +31,10 @@
|
||||
#include <boost/json.hpp>
|
||||
#include <boost/json/object.hpp>
|
||||
#include <boost/utility/result_of.hpp>
|
||||
#include <xrpl/basics/base_uint.h>
|
||||
#include <xrpl/protocol/AccountID.h>
|
||||
#include <xrpl/protocol/Fees.h>
|
||||
#include <xrpl/protocol/LedgerHeader.h>
|
||||
#include <ripple/basics/base_uint.h>
|
||||
#include <ripple/protocol/AccountID.h>
|
||||
#include <ripple/protocol/Fees.h>
|
||||
#include <ripple/protocol/LedgerHeader.h>
|
||||
|
||||
#include <chrono>
|
||||
#include <cstddef>
|
||||
@@ -65,7 +65,7 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
static constexpr std::size_t kDEFAULT_WAIT_BETWEEN_RETRY = 500;
|
||||
static constexpr std::size_t DEFAULT_WAIT_BETWEEN_RETRY = 500;
|
||||
/**
|
||||
* @brief A helper function that catches DatabaseTimeout exceptions and retries indefinitely.
|
||||
*
|
||||
@@ -76,9 +76,9 @@ static constexpr std::size_t kDEFAULT_WAIT_BETWEEN_RETRY = 500;
|
||||
*/
|
||||
template <typename FnType>
|
||||
auto
|
||||
retryOnTimeout(FnType func, size_t waitMs = kDEFAULT_WAIT_BETWEEN_RETRY)
|
||||
retryOnTimeout(FnType func, size_t waitMs = DEFAULT_WAIT_BETWEEN_RETRY)
|
||||
{
|
||||
static util::Logger const log{"Backend"}; // NOLINT(readability-identifier-naming)
|
||||
static util::Logger const log{"Backend"};
|
||||
|
||||
while (true) {
|
||||
try {
|
||||
@@ -138,7 +138,7 @@ synchronousAndRetryOnTimeout(FnType&& func)
|
||||
class BackendInterface {
|
||||
protected:
|
||||
mutable std::shared_mutex rngMtx_;
|
||||
std::optional<LedgerRange> range_;
|
||||
std::optional<LedgerRange> range;
|
||||
LedgerCache cache_;
|
||||
std::optional<etl::CorruptionDetector<LedgerCache>> corruptionDetector_;
|
||||
|
||||
@@ -364,25 +364,6 @@ public:
|
||||
boost::asio::yield_context yield
|
||||
) const = 0;
|
||||
|
||||
/**
|
||||
* @brief Fetches all holders' balances for an MPTIssuanceID
|
||||
*
|
||||
* @param mptID The MPTIssuanceID you wish to query.
|
||||
* @param limit Paging limit.
|
||||
* @param cursorIn Optional cursor to allow us to pick up from where we last left off.
|
||||
* @param ledgerSequence The ledger sequence to fetch for
|
||||
* @param yield Currently executing coroutine.
|
||||
* @return std::vector<Blob> of MPToken balances and an optional marker
|
||||
*/
|
||||
virtual MPTHoldersAndCursor
|
||||
fetchMPTHolders(
|
||||
ripple::uint192 const& mptID,
|
||||
std::uint32_t const limit,
|
||||
std::optional<ripple::AccountID> const& cursorIn,
|
||||
std::uint32_t const ledgerSequence,
|
||||
boost::asio::yield_context yield
|
||||
) const = 0;
|
||||
|
||||
/**
|
||||
* @brief Fetches a specific ledger object.
|
||||
*
|
||||
@@ -397,19 +378,6 @@ public:
|
||||
std::optional<Blob>
|
||||
fetchLedgerObject(ripple::uint256 const& key, std::uint32_t sequence, boost::asio::yield_context yield) const;
|
||||
|
||||
/**
|
||||
* @brief Fetches a specific ledger object sequence.
|
||||
*
|
||||
* Currently the real fetch happens in doFetchLedgerObjectSeq
|
||||
*
|
||||
* @param key The key of the object
|
||||
* @param sequence The ledger sequence to fetch for
|
||||
* @param yield The coroutine context
|
||||
* @return The sequence in uint32_t on success; nullopt otherwise
|
||||
*/
|
||||
std::optional<std::uint32_t>
|
||||
fetchLedgerObjectSeq(ripple::uint256 const& key, std::uint32_t sequence, boost::asio::yield_context yield) const;
|
||||
|
||||
/**
|
||||
* @brief Fetches all ledger objects by their keys.
|
||||
*
|
||||
@@ -439,18 +407,6 @@ public:
|
||||
virtual std::optional<Blob>
|
||||
doFetchLedgerObject(ripple::uint256 const& key, std::uint32_t sequence, boost::asio::yield_context yield) const = 0;
|
||||
|
||||
/**
|
||||
* @brief The database-specific implementation for fetching a ledger object sequence.
|
||||
*
|
||||
* @param key The key to fetch for
|
||||
* @param sequence The ledger sequence to fetch for
|
||||
* @param yield The coroutine context
|
||||
* @return The sequence in uint32_t on success; nullopt otherwise
|
||||
*/
|
||||
virtual std::optional<std::uint32_t>
|
||||
doFetchLedgerObjectSeq(ripple::uint256 const& key, std::uint32_t sequence, boost::asio::yield_context yield)
|
||||
const = 0;
|
||||
|
||||
/**
|
||||
* @brief The database-specific implementation for fetching ledger objects.
|
||||
*
|
||||
@@ -548,16 +504,6 @@ public:
|
||||
boost::asio::yield_context yield
|
||||
) const;
|
||||
|
||||
/**
|
||||
* @brief Fetches the status of migrator by name.
|
||||
*
|
||||
* @param migratorName The name of the migrator
|
||||
* @param yield The coroutine context
|
||||
* @return The status of the migrator if found; nullopt otherwise
|
||||
*/
|
||||
virtual std::optional<std::string>
|
||||
fetchMigratorStatus(std::string const& migratorName, boost::asio::yield_context yield) const = 0;
|
||||
|
||||
/**
|
||||
* @brief Synchronously fetches the ledger range from DB.
|
||||
*
|
||||
@@ -646,14 +592,6 @@ public:
|
||||
virtual void
|
||||
writeNFTTransactions(std::vector<NFTTransactionsData> const& data) = 0;
|
||||
|
||||
/**
|
||||
* @brief Write accounts that started holding onto an MPT.
|
||||
*
|
||||
* @param data A vector of MPT ID and account pairs
|
||||
*/
|
||||
virtual void
|
||||
writeMPTHolders(std::vector<MPTHolderData> const& data) = 0;
|
||||
|
||||
/**
|
||||
* @brief Write a new successor.
|
||||
*
|
||||
@@ -683,21 +621,6 @@ public:
|
||||
bool
|
||||
finishWrites(std::uint32_t ledgerSequence);
|
||||
|
||||
/**
|
||||
* @brief Wait for all pending writes to finish.
|
||||
*/
|
||||
virtual void
|
||||
waitForWritesToFinish() = 0;
|
||||
|
||||
/**
|
||||
* @brief Mark the migration status of a migrator as Migrated in the database
|
||||
*
|
||||
* @param migratorName The name of the migrator
|
||||
* @param status The status to set
|
||||
*/
|
||||
virtual void
|
||||
writeMigratorStatus(std::string const& migratorName, std::string const& status) = 0;
|
||||
|
||||
/**
|
||||
* @return true if database is overwhelmed; false otherwise
|
||||
*/
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
add_library(clio_data)
|
||||
target_sources(
|
||||
clio_data
|
||||
PRIVATE AmendmentCenter.cpp
|
||||
BackendCounters.cpp
|
||||
PRIVATE BackendCounters.cpp
|
||||
BackendInterface.cpp
|
||||
LedgerCache.cpp
|
||||
cassandra/impl/Future.cpp
|
||||
|
||||
@@ -36,13 +36,13 @@
|
||||
#include <boost/asio/spawn.hpp>
|
||||
#include <boost/json/object.hpp>
|
||||
#include <cassandra.h>
|
||||
#include <xrpl/basics/Blob.h>
|
||||
#include <xrpl/basics/base_uint.h>
|
||||
#include <xrpl/basics/strHex.h>
|
||||
#include <xrpl/protocol/AccountID.h>
|
||||
#include <xrpl/protocol/Indexes.h>
|
||||
#include <xrpl/protocol/LedgerHeader.h>
|
||||
#include <xrpl/protocol/nft.h>
|
||||
#include <ripple/basics/Blob.h>
|
||||
#include <ripple/basics/base_uint.h>
|
||||
#include <ripple/basics/strHex.h>
|
||||
#include <ripple/protocol/AccountID.h>
|
||||
#include <ripple/protocol/Indexes.h>
|
||||
#include <ripple/protocol/LedgerHeader.h>
|
||||
#include <ripple/protocol/nft.h>
|
||||
|
||||
#include <atomic>
|
||||
#include <chrono>
|
||||
@@ -73,15 +73,13 @@ class BasicCassandraBackend : public BackendInterface {
|
||||
|
||||
SettingsProviderType settingsProvider_;
|
||||
Schema<SettingsProviderType> schema_;
|
||||
|
||||
std::atomic_uint32_t ledgerSequence_ = 0u;
|
||||
|
||||
protected:
|
||||
Handle handle_;
|
||||
|
||||
// have to be mutable because BackendInterface constness :(
|
||||
mutable ExecutionStrategyType executor_;
|
||||
|
||||
std::atomic_uint32_t ledgerSequence_ = 0u;
|
||||
|
||||
public:
|
||||
/**
|
||||
* @brief Create a new cassandra/scylla backend instance.
|
||||
@@ -96,7 +94,7 @@ public:
|
||||
, executor_{settingsProvider_.getSettings(), handle_}
|
||||
{
|
||||
if (auto const res = handle_.connect(); not res)
|
||||
throw std::runtime_error("Could not connect to database: " + res.error());
|
||||
throw std::runtime_error("Could not connect to Cassandra: " + res.error());
|
||||
|
||||
if (not readOnly) {
|
||||
if (auto const res = handle_.execute(schema_.createKeyspace); not res) {
|
||||
@@ -131,7 +129,7 @@ public:
|
||||
{
|
||||
auto rng = fetchLedgerRange();
|
||||
if (!rng)
|
||||
return {.txns = {}, .cursor = {}};
|
||||
return {{}, {}};
|
||||
|
||||
Statement const statement = [this, forward, &account]() {
|
||||
if (forward)
|
||||
@@ -188,18 +186,13 @@ public:
|
||||
return {txns, {}};
|
||||
}
|
||||
|
||||
void
|
||||
waitForWritesToFinish() override
|
||||
{
|
||||
executor_.sync();
|
||||
}
|
||||
|
||||
bool
|
||||
doFinishWrites() override
|
||||
{
|
||||
waitForWritesToFinish();
|
||||
// wait for other threads to finish their writes
|
||||
executor_.sync();
|
||||
|
||||
if (!range_) {
|
||||
if (!range) {
|
||||
executor_.writeSync(schema_->updateLedgerRange, ledgerSequence_, false, ledgerSequence_);
|
||||
}
|
||||
|
||||
@@ -343,8 +336,8 @@ public:
|
||||
|
||||
auto const& result = res.value();
|
||||
if (not result.hasRows()) {
|
||||
LOG(log_.warn()) << "Could not fetch all transaction hashes - no rows; ledger = "
|
||||
<< std::to_string(ledgerSequence);
|
||||
LOG(log_.error()) << "Could not fetch all transaction hashes - no rows; ledger = "
|
||||
<< std::to_string(ledgerSequence);
|
||||
return {};
|
||||
}
|
||||
|
||||
@@ -353,7 +346,7 @@ public:
|
||||
hashes.push_back(std::move(hash));
|
||||
|
||||
auto end = std::chrono::system_clock::now();
|
||||
LOG(log_.debug()) << "Fetched " << hashes.size() << " transaction hashes from database in "
|
||||
LOG(log_.debug()) << "Fetched " << hashes.size() << " transaction hashes from Cassandra in "
|
||||
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
|
||||
<< " milliseconds";
|
||||
|
||||
@@ -407,7 +400,7 @@ public:
|
||||
{
|
||||
auto rng = fetchLedgerRange();
|
||||
if (!rng)
|
||||
return {.txns = {}, .cursor = {}};
|
||||
return {{}, {}};
|
||||
|
||||
Statement const statement = [this, forward, &tokenID]() {
|
||||
if (forward)
|
||||
@@ -555,45 +548,6 @@ public:
|
||||
return ret;
|
||||
}
|
||||
|
||||
MPTHoldersAndCursor
|
||||
fetchMPTHolders(
|
||||
ripple::uint192 const& mptID,
|
||||
std::uint32_t const limit,
|
||||
std::optional<ripple::AccountID> const& cursorIn,
|
||||
std::uint32_t const ledgerSequence,
|
||||
boost::asio::yield_context yield
|
||||
) const override
|
||||
{
|
||||
auto const holderEntries = executor_.read(
|
||||
yield, schema_->selectMPTHolders, mptID, cursorIn.value_or(ripple::AccountID(0)), Limit{limit}
|
||||
);
|
||||
|
||||
auto const& holderResults = holderEntries.value();
|
||||
if (not holderResults.hasRows()) {
|
||||
LOG(log_.debug()) << "No rows returned";
|
||||
return {};
|
||||
}
|
||||
|
||||
std::vector<ripple::uint256> mptKeys;
|
||||
std::optional<ripple::AccountID> cursor;
|
||||
for (auto const [holder] : extract<ripple::AccountID>(holderResults)) {
|
||||
mptKeys.push_back(ripple::keylet::mptoken(mptID, holder).key);
|
||||
cursor = holder;
|
||||
}
|
||||
|
||||
auto mptObjects = doFetchLedgerObjects(mptKeys, ledgerSequence, yield);
|
||||
|
||||
auto it = std::remove_if(mptObjects.begin(), mptObjects.end(), [](Blob const& mpt) { return mpt.empty(); });
|
||||
|
||||
mptObjects.erase(it, mptObjects.end());
|
||||
|
||||
ASSERT(mptKeys.size() <= limit, "Number of keys can't exceed the limit");
|
||||
if (mptKeys.size() == limit)
|
||||
return {mptObjects, cursor};
|
||||
|
||||
return {mptObjects, {}};
|
||||
}
|
||||
|
||||
std::optional<Blob>
|
||||
doFetchLedgerObject(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context yield)
|
||||
const override
|
||||
@@ -613,24 +567,6 @@ public:
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
std::optional<std::uint32_t>
|
||||
doFetchLedgerObjectSeq(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context yield)
|
||||
const override
|
||||
{
|
||||
LOG(log_.debug()) << "Fetching ledger object for seq " << sequence << ", key = " << ripple::to_string(key);
|
||||
if (auto const res = executor_.read(yield, schema_->selectObject, key, sequence); res) {
|
||||
if (auto const result = res->template get<Blob, std::uint32_t>(); result) {
|
||||
auto [_, seq] = result.value();
|
||||
return seq;
|
||||
}
|
||||
LOG(log_.debug()) << "Could not fetch ledger object sequence - no rows";
|
||||
} else {
|
||||
LOG(log_.error()) << "Could not fetch ledger object sequence: " << res.error();
|
||||
}
|
||||
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
std::optional<TransactionAndMetadata>
|
||||
fetchTransaction(ripple::uint256 const& hash, boost::asio::yield_context yield) const override
|
||||
{
|
||||
@@ -654,7 +590,7 @@ public:
|
||||
{
|
||||
if (auto const res = executor_.read(yield, schema_->selectSuccessor, key, ledgerSequence); res) {
|
||||
if (auto const result = res->template get<ripple::uint256>(); result) {
|
||||
if (*result == kLAST_KEY)
|
||||
if (*result == lastKey)
|
||||
return std::nullopt;
|
||||
return result;
|
||||
}
|
||||
@@ -704,7 +640,7 @@ public:
|
||||
});
|
||||
|
||||
ASSERT(numHashes == results.size(), "Number of hashes and results must match");
|
||||
LOG(log_.debug()) << "Fetched " << numHashes << " transactions from database in " << timeDiff
|
||||
LOG(log_.debug()) << "Fetched " << numHashes << " transactions from Cassandra in " << timeDiff
|
||||
<< " milliseconds";
|
||||
return results;
|
||||
}
|
||||
@@ -824,7 +760,7 @@ public:
|
||||
if (keys.empty())
|
||||
return {};
|
||||
|
||||
LOG(log_.debug()) << "Fetched " << keys.size() << " diff hashes from database in " << timeDiff
|
||||
LOG(log_.debug()) << "Fetched " << keys.size() << " diff hashes from Cassandra in " << timeDiff
|
||||
<< " milliseconds";
|
||||
|
||||
auto const objs = fetchLedgerObjects(keys, ledgerSequence, yield);
|
||||
@@ -842,32 +778,12 @@ public:
|
||||
return results;
|
||||
}
|
||||
|
||||
std::optional<std::string>
|
||||
fetchMigratorStatus(std::string const& migratorName, boost::asio::yield_context yield) const override
|
||||
{
|
||||
auto const res = executor_.read(yield, schema_->selectMigratorStatus, Text(migratorName));
|
||||
if (not res) {
|
||||
LOG(log_.error()) << "Could not fetch migrator status: " << res.error();
|
||||
return {};
|
||||
}
|
||||
|
||||
auto const& results = res.value();
|
||||
if (not results) {
|
||||
return {};
|
||||
}
|
||||
|
||||
for (auto [statusString] : extract<std::string>(results))
|
||||
return statusString;
|
||||
|
||||
return {};
|
||||
}
|
||||
|
||||
void
|
||||
doWriteLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob) override
|
||||
{
|
||||
LOG(log_.trace()) << " Writing ledger object " << key.size() << ":" << seq << " [" << blob.size() << " bytes]";
|
||||
|
||||
if (range_)
|
||||
if (range)
|
||||
executor_.write(schema_->insertDiff, seq, key);
|
||||
|
||||
executor_.write(schema_->insertObject, std::move(key), seq, std::move(blob));
|
||||
@@ -932,7 +848,7 @@ public:
|
||||
std::string&& metadata
|
||||
) override
|
||||
{
|
||||
LOG(log_.trace()) << "Writing txn to database";
|
||||
LOG(log_.trace()) << "Writing txn to cassandra";
|
||||
|
||||
executor_.write(schema_->insertLedgerTransaction, seq, hash);
|
||||
executor_.write(
|
||||
@@ -947,45 +863,27 @@ public:
|
||||
statements.reserve(data.size() * 3);
|
||||
|
||||
for (NFTsData const& record : data) {
|
||||
if (!record.onlyUriChanged) {
|
||||
statements.push_back(
|
||||
schema_->insertNFT.bind(record.tokenID, record.ledgerSequence, record.owner, record.isBurned)
|
||||
);
|
||||
statements.push_back(
|
||||
schema_->insertNFT.bind(record.tokenID, record.ledgerSequence, record.owner, record.isBurned)
|
||||
);
|
||||
|
||||
// If `uri` is set (and it can be set to an empty uri), we know this
|
||||
// is a net-new NFT. That is, this NFT has not been seen before by
|
||||
// us _OR_ it is in the extreme edge case of a re-minted NFT ID with
|
||||
// the same NFT ID as an already-burned token. In this case, we need
|
||||
// to record the URI and link to the issuer_nf_tokens table.
|
||||
if (record.uri) {
|
||||
statements.push_back(schema_->insertIssuerNFT.bind(
|
||||
ripple::nft::getIssuer(record.tokenID),
|
||||
static_cast<uint32_t>(ripple::nft::getTaxon(record.tokenID)),
|
||||
record.tokenID
|
||||
));
|
||||
statements.push_back(
|
||||
schema_->insertNFTURI.bind(record.tokenID, record.ledgerSequence, record.uri.value())
|
||||
);
|
||||
}
|
||||
} else {
|
||||
// only the URI changed, so we update only the URI table
|
||||
// If `uri` is set (and it can be set to an empty uri), we know this
|
||||
// is a net-new NFT. That is, this NFT has not been seen before by
|
||||
// us _OR_ it is in the extreme edge case of a re-minted NFT ID with
|
||||
// the same NFT ID as an already-burned token. In this case, we need
|
||||
// to record the URI and link to the issuer_nf_tokens table.
|
||||
if (record.uri) {
|
||||
statements.push_back(schema_->insertIssuerNFT.bind(
|
||||
ripple::nft::getIssuer(record.tokenID),
|
||||
static_cast<uint32_t>(ripple::nft::getTaxon(record.tokenID)),
|
||||
record.tokenID
|
||||
));
|
||||
statements.push_back(
|
||||
schema_->insertNFTURI.bind(record.tokenID, record.ledgerSequence, record.uri.value())
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
executor_.writeEach(std::move(statements));
|
||||
}
|
||||
|
||||
void
|
||||
writeMPTHolders(std::vector<MPTHolderData> const& data) override
|
||||
{
|
||||
std::vector<Statement> statements;
|
||||
statements.reserve(data.size());
|
||||
for (auto [mptId, holder] : data)
|
||||
statements.push_back(schema_->insertMPTHolder.bind(std::move(mptId), std::move(holder)));
|
||||
|
||||
executor_.write(std::move(statements));
|
||||
}
|
||||
|
||||
@@ -996,14 +894,6 @@ public:
|
||||
// probably was used in PG to start a transaction or something similar
|
||||
}
|
||||
|
||||
void
|
||||
writeMigratorStatus(std::string const& migratorName, std::string const& status) override
|
||||
{
|
||||
executor_.writeSync(
|
||||
schema_->insertMigratorStatus, data::cassandra::Text{migratorName}, data::cassandra::Text(status)
|
||||
);
|
||||
}
|
||||
|
||||
bool
|
||||
isTooBusy() const override
|
||||
{
|
||||
|
||||
@@ -23,16 +23,16 @@
|
||||
#include "util/Assert.hpp"
|
||||
|
||||
#include <boost/container/flat_set.hpp>
|
||||
#include <xrpl/basics/Blob.h>
|
||||
#include <xrpl/basics/Log.h>
|
||||
#include <xrpl/basics/StringUtilities.h>
|
||||
#include <xrpl/basics/base_uint.h>
|
||||
#include <xrpl/protocol/AccountID.h>
|
||||
#include <xrpl/protocol/SField.h>
|
||||
#include <xrpl/protocol/STAccount.h>
|
||||
#include <xrpl/protocol/STLedgerEntry.h>
|
||||
#include <xrpl/protocol/Serializer.h>
|
||||
#include <xrpl/protocol/TxMeta.h>
|
||||
#include <ripple/basics/Blob.h>
|
||||
#include <ripple/basics/Log.h>
|
||||
#include <ripple/basics/StringUtilities.h>
|
||||
#include <ripple/basics/base_uint.h>
|
||||
#include <ripple/protocol/AccountID.h>
|
||||
#include <ripple/protocol/SField.h>
|
||||
#include <ripple/protocol/STAccount.h>
|
||||
#include <ripple/protocol/STLedgerEntry.h>
|
||||
#include <ripple/protocol/Serializer.h>
|
||||
#include <ripple/protocol/TxMeta.h>
|
||||
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
@@ -107,7 +107,6 @@ struct NFTsData {
|
||||
ripple::AccountID owner;
|
||||
std::optional<ripple::Blob> uri;
|
||||
bool isBurned = false;
|
||||
bool onlyUriChanged = false; // Whether only the URI was changed
|
||||
|
||||
/**
|
||||
* @brief Construct a new NFTsData object
|
||||
@@ -171,31 +170,6 @@ struct NFTsData {
|
||||
: tokenID(tokenID), ledgerSequence(ledgerSequence), owner(owner), uri(uri)
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Construct a new NFTsData object with only the URI changed
|
||||
*
|
||||
* @param tokenID The token ID
|
||||
* @param meta The transaction metadata
|
||||
* @param uri The new URI
|
||||
*
|
||||
*/
|
||||
NFTsData(ripple::uint256 const& tokenID, ripple::TxMeta const& meta, ripple::Blob const& uri)
|
||||
: tokenID(tokenID)
|
||||
, ledgerSequence(meta.getLgrSeq())
|
||||
, transactionIndex(meta.getIndex())
|
||||
, uri(uri)
|
||||
, onlyUriChanged(true)
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Represents an MPT and holder pair
|
||||
*/
|
||||
struct MPTHolderData {
|
||||
ripple::uint192 mptID;
|
||||
ripple::AccountID holder;
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -208,11 +182,11 @@ template <typename T>
|
||||
inline bool
|
||||
isOffer(T const& object)
|
||||
{
|
||||
static constexpr short kOFFER_OFFSET = 0x006f;
|
||||
static constexpr short kSHIFT = 8;
|
||||
static constexpr short OFFER_OFFSET = 0x006f;
|
||||
static constexpr short SHIFT = 8;
|
||||
|
||||
short offerBytes = (object[1] << kSHIFT) | object[2];
|
||||
return offerBytes == kOFFER_OFFSET;
|
||||
short offer_bytes = (object[1] << SHIFT) | object[2];
|
||||
return offer_bytes == OFFER_OFFSET;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -241,9 +215,9 @@ template <typename T>
|
||||
inline bool
|
||||
isDirNode(T const& object)
|
||||
{
|
||||
static constexpr short kDIR_NODE_SPACE_KEY = 0x0064;
|
||||
static constexpr short DIR_NODE_SPACE_KEY = 0x0064;
|
||||
short const spaceKey = (object.data()[1] << 8) | object.data()[2];
|
||||
return spaceKey == kDIR_NODE_SPACE_KEY;
|
||||
return spaceKey == DIR_NODE_SPACE_KEY;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -291,12 +265,12 @@ template <typename T>
|
||||
inline ripple::uint256
|
||||
getBookBase(T const& key)
|
||||
{
|
||||
static constexpr size_t kEY_SIZE = 24;
|
||||
static constexpr size_t KEY_SIZE = 24;
|
||||
|
||||
ASSERT(key.size() == ripple::uint256::size(), "Invalid key size {}", key.size());
|
||||
|
||||
ripple::uint256 ret;
|
||||
for (size_t i = 0; i < kEY_SIZE; ++i)
|
||||
for (size_t i = 0; i < KEY_SIZE; ++i)
|
||||
ret.data()[i] = key.data()[i];
|
||||
|
||||
return ret;
|
||||
@@ -315,4 +289,4 @@ uint256ToString(ripple::uint256 const& input)
|
||||
}
|
||||
|
||||
/** @brief The ripple epoch start timestamp. Midnight on 1st January 2000. */
|
||||
static constexpr std::uint32_t kRIPPLE_EPOCH_START = 946684800;
|
||||
static constexpr std::uint32_t rippleEpochStart = 946684800;
|
||||
|
||||
@@ -22,7 +22,7 @@
|
||||
#include "data/Types.hpp"
|
||||
#include "util/Assert.hpp"
|
||||
|
||||
#include <xrpl/basics/base_uint.h>
|
||||
#include <ripple/basics/base_uint.h>
|
||||
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
@@ -75,7 +75,7 @@ LedgerCache::update(std::vector<LedgerObject> const& objs, uint32_t seq, bool is
|
||||
|
||||
auto& e = map_[obj.key];
|
||||
if (seq > e.seq) {
|
||||
e = {.seq = seq, .blob = obj.blob};
|
||||
e = {seq, obj.blob};
|
||||
}
|
||||
} else {
|
||||
map_.erase(obj.key);
|
||||
@@ -101,7 +101,7 @@ LedgerCache::getSuccessor(ripple::uint256 const& key, uint32_t seq) const
|
||||
if (e == map_.end())
|
||||
return {};
|
||||
++successorHitCounter_.get();
|
||||
return {{.key = e->first, .blob = e->second.blob}};
|
||||
return {{e->first, e->second.blob}};
|
||||
}
|
||||
|
||||
std::optional<LedgerObject>
|
||||
@@ -117,7 +117,7 @@ LedgerCache::getPredecessor(ripple::uint256 const& key, uint32_t seq) const
|
||||
if (e == map_.begin())
|
||||
return {};
|
||||
--e;
|
||||
return {{.key = e->first, .blob = e->second.blob}};
|
||||
return {{e->first, e->second.blob}};
|
||||
}
|
||||
|
||||
std::optional<Blob>
|
||||
|
||||
@@ -24,8 +24,8 @@
|
||||
#include "util/prometheus/Label.hpp"
|
||||
#include "util/prometheus/Prometheus.hpp"
|
||||
|
||||
#include <xrpl/basics/base_uint.h>
|
||||
#include <xrpl/basics/hardened_hash.h>
|
||||
#include <ripple/basics/base_uint.h>
|
||||
#include <ripple/basics/hardened_hash.h>
|
||||
|
||||
#include <atomic>
|
||||
#include <condition_variable>
|
||||
|
||||
@@ -262,15 +262,3 @@ CREATE TABLE clio.nf_token_transactions (
```

The `nf_token_transactions` table serves as the NFT counterpart to `account_tx`, inspired by the same motivations and fulfilling a similar role within this context. It drives the `nft_history` API.
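For illustration only, an `nft_history` style read over this table might look roughly like the CQL below. This is a sketch: the column names (`token_id`, `seq_idx`, `hash`) and the paging shape are assumptions based on the table's role described above, not the exact statement Clio prepares.

```
-- Hypothetical paginated history lookup for one NFT, newest entries first.
-- The ? placeholders would be bound by the backend at query time.
SELECT hash, seq_idx
FROM clio.nf_token_transactions
WHERE token_id = ?
  AND seq_idx < ?
LIMIT ?;
```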

### migrator_status

```
CREATE TABLE clio.migrator_status (
    migrator_name TEXT, # The name of the migrator
    status TEXT, # The status of the migrator
    PRIMARY KEY (migrator_name)
)
```

The `migrator_status` table stores the status of the migrator in this database. If a migrator's status is `migrated`, it means this database has finished data migration for this migrator.
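As a sketch of how this table is exercised, the `insertMigratorStatus` and `selectMigratorStatus` prepared statements that appear later in this diff reduce to roughly the following CQL (the real table name is qualified through `qualifiedTableName` and may carry a prefix; `clio` is the keyspace used in the examples above):

```
-- Record that a migrator has completed (both values bound as TEXT)
INSERT INTO clio.migrator_status (migrator_name, status) VALUES (?, ?);

-- Read back the recorded status for a single migrator
SELECT status FROM clio.migrator_status WHERE migrator_name = ?;
```
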
@@ -19,13 +19,11 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <xrpl/basics/base_uint.h>
|
||||
#include <xrpl/protocol/AccountID.h>
|
||||
#include <ripple/basics/base_uint.h>
|
||||
#include <ripple/protocol/AccountID.h>
|
||||
|
||||
#include <concepts>
|
||||
#include <cstdint>
|
||||
#include <optional>
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
#include <tuple>
|
||||
#include <utility>
|
||||
@@ -233,14 +231,6 @@ struct NFTsAndCursor {
|
||||
std::optional<ripple::uint256> cursor;
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Represents an array of MPTokens
|
||||
*/
|
||||
struct MPTHoldersAndCursor {
|
||||
std::vector<Blob> mptokens;
|
||||
std::optional<ripple::AccountID> cursor;
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Stores a range of sequences as a min and max pair.
|
||||
*/
|
||||
@@ -249,71 +239,8 @@ struct LedgerRange {
|
||||
std::uint32_t maxSequence = 0;
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Represents an amendment in the XRPL
|
||||
*/
|
||||
struct Amendment {
|
||||
std::string name;
|
||||
ripple::uint256 feature;
|
||||
bool isSupportedByXRPL = false;
|
||||
bool isSupportedByClio = false;
|
||||
bool isRetired = false;
|
||||
|
||||
/**
|
||||
* @brief Get the amendment Id from its name
|
||||
*
|
||||
* @param name The name of the amendment
|
||||
* @return The amendment Id as uint256
|
||||
*/
|
||||
static ripple::uint256
|
||||
getAmendmentId(std::string_view const name);
|
||||
|
||||
/**
|
||||
* @brief Equality comparison operator
|
||||
* @param other The object to compare to
|
||||
* @return Whether the objects are equal
|
||||
*/
|
||||
bool
|
||||
operator==(Amendment const& other) const
|
||||
{
|
||||
return name == other.name;
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief A helper for amendment name to feature conversions
|
||||
*/
|
||||
struct AmendmentKey {
|
||||
std::string name;
|
||||
|
||||
/**
|
||||
* @brief Construct a new AmendmentKey
|
||||
* @param val Anything convertible to a string
|
||||
*/
|
||||
AmendmentKey(std::convertible_to<std::string> auto&& val) : name{std::forward<decltype(val)>(val)}
|
||||
{
|
||||
}
|
||||
|
||||
/** @brief Conversion to string */
|
||||
operator std::string const&() const;
|
||||
|
||||
/** @brief Conversion to string_view */
|
||||
operator std::string_view() const;
|
||||
|
||||
/** @brief Conversion to uint256 */
|
||||
operator ripple::uint256() const;
|
||||
|
||||
/**
|
||||
* @brief Comparison operators
|
||||
* @param other The object to compare to
|
||||
* @return Whether the objects are equal, greater or less
|
||||
*/
|
||||
auto
|
||||
operator<=>(AmendmentKey const& other) const = default;
|
||||
};
|
||||
|
||||
constexpr ripple::uint256 kFIRST_KEY{"0000000000000000000000000000000000000000000000000000000000000000"};
|
||||
constexpr ripple::uint256 kLAST_KEY{"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"};
|
||||
constexpr ripple::uint256 kHI192{"0000000000000000000000000000000000000000000000001111111111111111"};
|
||||
constexpr ripple::uint256 firstKey{"0000000000000000000000000000000000000000000000000000000000000000"};
|
||||
constexpr ripple::uint256 lastKey{"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"};
|
||||
constexpr ripple::uint256 hi192{"0000000000000000000000000000000000000000000000001111111111111111"};
|
||||
|
||||
} // namespace data
|
||||
|
||||
@@ -60,7 +60,7 @@ Handle::connect() const
|
||||
Handle::FutureType
|
||||
Handle::asyncConnect(std::string_view keyspace) const
|
||||
{
|
||||
return cass_session_connect_keyspace_n(session_, cluster_, keyspace.data(), keyspace.size());
|
||||
return cass_session_connect_keyspace(session_, cluster_, keyspace.data());
|
||||
}
|
||||
|
||||
Handle::MaybeErrorType
|
||||
@@ -155,7 +155,7 @@ Handle::asyncExecute(std::vector<StatementType> const& statements, std::function
|
||||
Handle::PreparedStatementType
|
||||
Handle::prepare(std::string_view query) const
|
||||
{
|
||||
Handle::FutureType const future = cass_session_prepare_n(session_, query.data(), query.size());
|
||||
Handle::FutureType const future = cass_session_prepare(session_, query.data());
|
||||
auto const rc = future.await();
|
||||
if (rc)
|
||||
return cass_future_get_prepared(future);
|
||||
|
||||
@@ -74,7 +74,7 @@ public:
|
||||
'class': 'SimpleStrategy',
|
||||
'replication_factor': '{}'
|
||||
}}
|
||||
AND durable_writes = True
|
||||
AND durable_writes = true
|
||||
)",
|
||||
settingsProvider_.get().getKeyspace(),
|
||||
settingsProvider_.get().getReplicationFactor()
|
||||
@@ -257,31 +257,6 @@ public:
|
||||
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions")
|
||||
));
|
||||
|
||||
statements.emplace_back(fmt::format(
|
||||
R"(
|
||||
CREATE TABLE IF NOT EXISTS {}
|
||||
(
|
||||
mpt_id blob,
|
||||
holder blob,
|
||||
PRIMARY KEY (mpt_id, holder)
|
||||
)
|
||||
WITH CLUSTERING ORDER BY (holder ASC)
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "mp_token_holders")
|
||||
));
|
||||
|
||||
statements.emplace_back(fmt::format(
|
||||
R"(
|
||||
CREATE TABLE IF NOT EXISTS {}
|
||||
(
|
||||
migrator_name TEXT,
|
||||
status TEXT,
|
||||
PRIMARY KEY (migrator_name)
|
||||
)
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "migrator_status")
|
||||
));
|
||||
|
||||
return statements;
|
||||
}();
|
||||
|
||||
@@ -418,17 +393,6 @@ public:
|
||||
));
|
||||
}();
|
||||
|
||||
PreparedStatement insertMPTHolder = [this]() {
|
||||
return handle_.get().prepare(fmt::format(
|
||||
R"(
|
||||
INSERT INTO {}
|
||||
(mpt_id, holder)
|
||||
VALUES (?, ?)
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "mp_token_holders")
|
||||
));
|
||||
}();
|
||||
|
||||
PreparedStatement insertLedgerHeader = [this]() {
|
||||
return handle_.get().prepare(fmt::format(
|
||||
R"(
|
||||
@@ -472,23 +436,12 @@ public:
|
||||
R"(
|
||||
UPDATE {}
|
||||
SET sequence = ?
|
||||
WHERE is_latest = False
|
||||
WHERE is_latest = false
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "ledger_range")
|
||||
));
|
||||
}();
|
||||
|
||||
PreparedStatement insertMigratorStatus = [this]() {
|
||||
return handle_.get().prepare(fmt::format(
|
||||
R"(
|
||||
INSERT INTO {}
|
||||
(migrator_name, status)
|
||||
VALUES (?, ?)
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "migrator_status")
|
||||
));
|
||||
}();
|
||||
|
||||
//
|
||||
// Select queries
|
||||
//
|
||||
@@ -734,20 +687,6 @@ public:
|
||||
));
|
||||
}();
|
||||
|
||||
PreparedStatement selectMPTHolders = [this]() {
|
||||
return handle_.get().prepare(fmt::format(
|
||||
R"(
|
||||
SELECT holder
|
||||
FROM {}
|
||||
WHERE mpt_id = ?
|
||||
AND holder > ?
|
||||
ORDER BY holder ASC
|
||||
LIMIT ?
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "mp_token_holders")
|
||||
));
|
||||
}();
|
||||
|
||||
PreparedStatement selectLedgerByHash = [this]() {
|
||||
return handle_.get().prepare(fmt::format(
|
||||
R"(
|
||||
@@ -776,7 +715,7 @@ public:
|
||||
R"(
|
||||
SELECT sequence
|
||||
FROM {}
|
||||
WHERE is_latest = True
|
||||
WHERE is_latest = true
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "ledger_range")
|
||||
));
|
||||
@@ -787,22 +726,10 @@ public:
|
||||
R"(
|
||||
SELECT sequence
|
||||
FROM {}
|
||||
WHERE is_latest in (True, False)
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "ledger_range")
|
||||
));
|
||||
}();
|
||||
|
||||
PreparedStatement selectMigratorStatus = [this]() {
|
||||
return handle_.get().prepare(fmt::format(
|
||||
R"(
|
||||
SELECT status
|
||||
FROM {}
|
||||
WHERE migrator_name = ?
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "migrator_status")
|
||||
));
|
||||
}();
|
||||
};
|
||||
|
||||
/**
|
||||
|
||||
@@ -22,7 +22,10 @@
|
||||
#include "data/cassandra/Types.hpp"
|
||||
#include "data/cassandra/impl/Cluster.hpp"
|
||||
#include "util/Constants.hpp"
|
||||
#include "util/newconfig/ObjectView.hpp"
|
||||
#include "util/config/Config.hpp"
|
||||
|
||||
#include <boost/json/conversion.hpp>
|
||||
#include <boost/json/value.hpp>
|
||||
|
||||
#include <cerrno>
|
||||
#include <chrono>
|
||||
@@ -33,17 +36,43 @@
|
||||
#include <ios>
|
||||
#include <iterator>
|
||||
#include <optional>
|
||||
#include <stdexcept>
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
#include <system_error>
|
||||
|
||||
namespace data::cassandra {
|
||||
|
||||
SettingsProvider::SettingsProvider(util::config::ObjectView const& cfg)
|
||||
namespace impl {
|
||||
inline Settings::ContactPoints
|
||||
tag_invoke(boost::json::value_to_tag<Settings::ContactPoints>, boost::json::value const& value)
|
||||
{
|
||||
if (not value.is_object()) {
|
||||
throw std::runtime_error("Feed entire Cassandra section to parse Settings::ContactPoints instead");
|
||||
}
|
||||
|
||||
util::Config const obj{value};
|
||||
Settings::ContactPoints out;
|
||||
|
||||
out.contactPoints = obj.valueOrThrow<std::string>("contact_points", "`contact_points` must be a string");
|
||||
out.port = obj.maybeValue<uint16_t>("port");
|
||||
|
||||
return out;
|
||||
}
|
||||
|
||||
inline Settings::SecureConnectionBundle
|
||||
tag_invoke(boost::json::value_to_tag<Settings::SecureConnectionBundle>, boost::json::value const& value)
|
||||
{
|
||||
if (not value.is_string())
|
||||
throw std::runtime_error("`secure_connect_bundle` must be a string");
|
||||
return Settings::SecureConnectionBundle{value.as_string().data()};
|
||||
}
|
||||
} // namespace impl
|
||||
|
||||
SettingsProvider::SettingsProvider(util::Config const& cfg)
|
||||
: config_{cfg}
|
||||
, keyspace_{cfg.get<std::string>("keyspace")}
|
||||
, keyspace_{cfg.valueOr<std::string>("keyspace", "clio")}
|
||||
, tablePrefix_{cfg.maybeValue<std::string>("table_prefix")}
|
||||
, replicationFactor_{cfg.get<uint16_t>("replication_factor")}
|
||||
, replicationFactor_{cfg.valueOr<uint16_t>("replication_factor", 3)}
|
||||
, settings_{parseSettings()}
|
||||
{
|
||||
}
|
||||
@@ -57,8 +86,8 @@ SettingsProvider::getSettings() const
|
||||
std::optional<std::string>
|
||||
SettingsProvider::parseOptionalCertificate() const
|
||||
{
|
||||
if (auto const certPath = config_.getValueView("certfile"); certPath.hasValue()) {
|
||||
auto const path = std::filesystem::path(certPath.asString());
|
||||
if (auto const certPath = config_.maybeValue<std::string>("certfile"); certPath) {
|
||||
auto const path = std::filesystem::path(*certPath);
|
||||
std::ifstream fileStream(path.string(), std::ios::in);
|
||||
if (!fileStream) {
|
||||
throw std::system_error(errno, std::generic_category(), "Opening certificate " + path.string());
|
||||
@@ -79,34 +108,30 @@ Settings
|
||||
SettingsProvider::parseSettings() const
|
||||
{
|
||||
auto settings = Settings::defaultSettings();
|
||||
|
||||
// all config values used in settings are under the "database.cassandra" prefix
|
||||
if (config_.getValueView("secure_connect_bundle").hasValue()) {
|
||||
auto const bundle = Settings::SecureConnectionBundle{(config_.get<std::string>("secure_connect_bundle"))};
|
||||
settings.connectionInfo = bundle;
|
||||
if (auto const bundle = config_.maybeValue<Settings::SecureConnectionBundle>("secure_connect_bundle"); bundle) {
|
||||
settings.connectionInfo = *bundle;
|
||||
} else {
|
||||
Settings::ContactPoints out;
|
||||
out.contactPoints = config_.get<std::string>("contact_points");
|
||||
out.port = config_.maybeValue<uint32_t>("port");
|
||||
settings.connectionInfo = out;
|
||||
settings.connectionInfo =
|
||||
config_.valueOrThrow<Settings::ContactPoints>("Missing contact_points in Cassandra config");
|
||||
}
|
||||
|
||||
settings.threads = config_.get<uint32_t>("threads");
|
||||
settings.maxWriteRequestsOutstanding = config_.get<uint32_t>("max_write_requests_outstanding");
|
||||
settings.maxReadRequestsOutstanding = config_.get<uint32_t>("max_read_requests_outstanding");
|
||||
settings.coreConnectionsPerHost = config_.get<uint32_t>("core_connections_per_host");
|
||||
settings.threads = config_.valueOr<uint32_t>("threads", settings.threads);
|
||||
settings.maxWriteRequestsOutstanding =
|
||||
config_.valueOr<uint32_t>("max_write_requests_outstanding", settings.maxWriteRequestsOutstanding);
|
||||
settings.maxReadRequestsOutstanding =
|
||||
config_.valueOr<uint32_t>("max_read_requests_outstanding", settings.maxReadRequestsOutstanding);
|
||||
settings.coreConnectionsPerHost =
|
||||
config_.valueOr<uint32_t>("core_connections_per_host", settings.coreConnectionsPerHost);
|
||||
settings.queueSizeIO = config_.maybeValue<uint32_t>("queue_size_io");
|
||||
settings.writeBatchSize = config_.get<std::size_t>("write_batch_size");
|
||||
settings.writeBatchSize = config_.valueOr<std::size_t>("write_batch_size", settings.writeBatchSize);
|
||||
|
||||
if (config_.getValueView("connect_timeout").hasValue()) {
|
||||
auto const connectTimeoutSecond = config_.get<uint32_t>("connect_timeout");
|
||||
settings.connectionTimeout = std::chrono::milliseconds{connectTimeoutSecond * util::kMILLISECONDS_PER_SECOND};
|
||||
}
|
||||
auto const connectTimeoutSecond = config_.maybeValue<uint32_t>("connect_timeout");
|
||||
if (connectTimeoutSecond)
|
||||
settings.connectionTimeout = std::chrono::milliseconds{*connectTimeoutSecond * util::MILLISECONDS_PER_SECOND};
|
||||
|
||||
if (config_.getValueView("request_timeout").hasValue()) {
|
||||
auto const requestTimeoutSecond = config_.get<uint32_t>("request_timeout");
|
||||
settings.requestTimeout = std::chrono::milliseconds{requestTimeoutSecond * util::kMILLISECONDS_PER_SECOND};
|
||||
}
|
||||
auto const requestTimeoutSecond = config_.maybeValue<uint32_t>("request_timeout");
|
||||
if (requestTimeoutSecond)
|
||||
settings.requestTimeout = std::chrono::milliseconds{*requestTimeoutSecond * util::MILLISECONDS_PER_SECOND};
|
||||
|
||||
settings.certificate = parseOptionalCertificate();
|
||||
settings.username = config_.maybeValue<std::string>("username");
|
||||
|
||||
@@ -19,9 +19,10 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/cassandra/Handle.hpp"
|
||||
#include "data/cassandra/Types.hpp"
|
||||
#include "data/cassandra/impl/Cluster.hpp"
|
||||
#include "util/newconfig/ObjectView.hpp"
|
||||
#include "util/config/Config.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
|
||||
#include <cstdint>
|
||||
#include <optional>
|
||||
@@ -33,7 +34,7 @@ namespace data::cassandra {
|
||||
* @brief Provides settings for @ref BasicCassandraBackend.
|
||||
*/
|
||||
class SettingsProvider {
|
||||
util::config::ObjectView config_;
|
||||
util::Config config_;
|
||||
|
||||
std::string keyspace_;
|
||||
std::optional<std::string> tablePrefix_;
|
||||
@@ -46,7 +47,7 @@ public:
|
||||
*
|
||||
* @param cfg The config of Clio to use
|
||||
*/
|
||||
explicit SettingsProvider(util::config::ObjectView const& cfg);
|
||||
explicit SettingsProvider(util::Config const& cfg);
|
||||
|
||||
/**
|
||||
* @return The cluster settings
|
||||
|
||||
@@ -21,8 +21,6 @@
|
||||
|
||||
#include <cstdint>
|
||||
#include <expected>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
|
||||
namespace data::cassandra {
|
||||
|
||||
@@ -57,26 +55,6 @@ struct Limit {
|
||||
int32_t limit;
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief A strong type wrapper for string
|
||||
*
|
||||
* This is unfortunately needed right now to support TEXT properly
|
||||
* because clio uses string to represent BLOB
|
||||
* If we want to bind TEXT with string, we need to use this type
|
||||
*/
|
||||
struct Text {
|
||||
std::string text;
|
||||
|
||||
/**
|
||||
* @brief Construct a new Text object from string type
|
||||
*
|
||||
* @param text The text to wrap
|
||||
*/
|
||||
explicit Text(std::string text) : text{std::move(text)}
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
class Handle;
|
||||
class CassandraError;
|
||||
|
||||
|
||||
@@ -23,7 +23,6 @@
|
||||
#include "data/cassandra/Handle.hpp"
|
||||
#include "data/cassandra/Types.hpp"
|
||||
#include "data/cassandra/impl/RetryPolicy.hpp"
|
||||
#include "util/Mutex.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
|
||||
#include <boost/asio.hpp>
|
||||
@@ -65,8 +64,8 @@ class AsyncExecutor : public std::enable_shared_from_this<AsyncExecutor<Statemen
|
||||
RetryCallbackType onRetry_;
|
||||
|
||||
// does not exist during initial construction, hence optional
|
||||
using OptionalFuture = std::optional<FutureWithCallbackType>;
|
||||
util::Mutex<OptionalFuture> future_;
|
||||
std::optional<FutureWithCallbackType> future_;
|
||||
std::mutex mtx_;
|
||||
|
||||
public:
|
||||
/**
|
||||
@@ -128,8 +127,8 @@ private:
|
||||
self = nullptr; // explicitly decrement refcount
|
||||
};
|
||||
|
||||
auto future = future_.template lock<std::scoped_lock>();
|
||||
future->emplace(handle.asyncExecute(data_, std::move(handler)));
|
||||
std::scoped_lock const lck{mtx_};
|
||||
future_.emplace(handle.asyncExecute(data_, std::move(handler)));
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@@ -31,14 +31,14 @@
|
||||
#include <vector>
|
||||
|
||||
namespace {
|
||||
constexpr auto kBATCH_DELETER = [](CassBatch* ptr) { cass_batch_free(ptr); };
|
||||
constexpr auto batchDeleter = [](CassBatch* ptr) { cass_batch_free(ptr); };
|
||||
} // namespace
|
||||
|
||||
namespace data::cassandra::impl {
|
||||
|
||||
// TODO: Use an appropriate value instead of CASS_BATCH_TYPE_LOGGED for different use cases
|
||||
Batch::Batch(std::vector<Statement> const& statements)
|
||||
: ManagedObject{cass_batch_new(CASS_BATCH_TYPE_LOGGED), kBATCH_DELETER}
|
||||
: ManagedObject{cass_batch_new(CASS_BATCH_TYPE_LOGGED), batchDeleter}
|
||||
{
|
||||
cass_batch_set_is_idempotent(*this, cass_true);
|
||||
|
||||
|
||||
@@ -21,7 +21,6 @@
|
||||
|
||||
#include "data/cassandra/impl/ManagedObject.hpp"
|
||||
#include "data/cassandra/impl/SslContext.hpp"
|
||||
#include "util/OverloadSet.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
|
||||
#include <cassandra.h>
|
||||
@@ -32,14 +31,21 @@
|
||||
#include <variant>
|
||||
|
||||
namespace {
|
||||
constexpr auto clusterDeleter = [](CassCluster* ptr) { cass_cluster_free(ptr); };
|
||||
|
||||
constexpr auto kCLUSTER_DELETER = [](CassCluster* ptr) { cass_cluster_free(ptr); };
|
||||
template <typename... Ts>
|
||||
struct overloadSet : Ts... {
|
||||
using Ts::operator()...;
|
||||
};
|
||||
|
||||
// explicit deduction guide (not needed as of C++20, but clang be clang)
|
||||
template <typename... Ts>
|
||||
overloadSet(Ts...) -> overloadSet<Ts...>;
|
||||
}; // namespace
|
||||
|
||||
namespace data::cassandra::impl {
|
||||
|
||||
Cluster::Cluster(Settings const& settings) : ManagedObject{cass_cluster_new(), kCLUSTER_DELETER}
|
||||
Cluster::Cluster(Settings const& settings) : ManagedObject{cass_cluster_new(), clusterDeleter}
|
||||
{
|
||||
using std::to_string;
|
||||
|
||||
@@ -84,7 +90,7 @@ void
|
||||
Cluster::setupConnection(Settings const& settings)
|
||||
{
|
||||
std::visit(
|
||||
util::OverloadSet{
|
||||
overloadSet{
|
||||
[this](Settings::ContactPoints const& points) { setupContactPoints(points); },
|
||||
[this](Settings::SecureConnectionBundle const& bundle) { setupSecureBundle(bundle); }
|
||||
},
|
||||
|
||||
@@ -25,8 +25,6 @@
|
||||
#include <cassandra.h>
|
||||
|
||||
#include <chrono>
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
#include <optional>
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
@@ -41,10 +39,10 @@ namespace data::cassandra::impl {
|
||||
* @brief Bundles all cassandra settings in one place.
|
||||
*/
|
||||
struct Settings {
|
||||
static constexpr std::size_t kDEFAULT_CONNECTION_TIMEOUT = 10000;
|
||||
static constexpr uint32_t kDEFAULT_MAX_WRITE_REQUESTS_OUTSTANDING = 10'000;
|
||||
static constexpr uint32_t kDEFAULT_MAX_READ_REQUESTS_OUTSTANDING = 100'000;
|
||||
static constexpr std::size_t kDEFAULT_BATCH_SIZE = 20;
|
||||
static constexpr std::size_t DEFAULT_CONNECTION_TIMEOUT = 10000;
|
||||
static constexpr uint32_t DEFAULT_MAX_WRITE_REQUESTS_OUTSTANDING = 10'000;
|
||||
static constexpr uint32_t DEFAULT_MAX_READ_REQUESTS_OUTSTANDING = 100'000;
|
||||
static constexpr std::size_t DEFAULT_BATCH_SIZE = 20;
|
||||
|
||||
/**
|
||||
* @brief Represents the configuration of contact points for cassandra.
|
||||
@@ -65,7 +63,7 @@ struct Settings {
|
||||
bool enableLog = false;
|
||||
|
||||
/** @brief Connect timeout specified in milliseconds */
|
||||
std::chrono::milliseconds connectionTimeout = std::chrono::milliseconds{kDEFAULT_CONNECTION_TIMEOUT};
|
||||
std::chrono::milliseconds connectionTimeout = std::chrono::milliseconds{DEFAULT_CONNECTION_TIMEOUT};
|
||||
|
||||
/** @brief Request timeout specified in milliseconds */
|
||||
std::chrono::milliseconds requestTimeout = std::chrono::milliseconds{0}; // no timeout at all
|
||||
@@ -77,16 +75,16 @@ struct Settings {
|
||||
uint32_t threads = std::thread::hardware_concurrency();
|
||||
|
||||
/** @brief The maximum number of outstanding write requests at any given moment */
|
||||
uint32_t maxWriteRequestsOutstanding = kDEFAULT_MAX_WRITE_REQUESTS_OUTSTANDING;
|
||||
uint32_t maxWriteRequestsOutstanding = DEFAULT_MAX_WRITE_REQUESTS_OUTSTANDING;
|
||||
|
||||
/** @brief The maximum number of outstanding read requests at any given moment */
|
||||
uint32_t maxReadRequestsOutstanding = kDEFAULT_MAX_READ_REQUESTS_OUTSTANDING;
|
||||
uint32_t maxReadRequestsOutstanding = DEFAULT_MAX_READ_REQUESTS_OUTSTANDING;
|
||||
|
||||
/** @brief The number of connection per host to always have active */
|
||||
uint32_t coreConnectionsPerHost = 1u;
|
||||
|
||||
/** @brief Size of batches when writing */
|
||||
std::size_t writeBatchSize = kDEFAULT_BATCH_SIZE;
|
||||
std::size_t writeBatchSize = DEFAULT_BATCH_SIZE;
|
||||
|
||||
/** @brief Size of the IO queue */
|
||||
std::optional<uint32_t> queueSizeIO = std::nullopt; // NOLINT(readability-redundant-member-init)
|
||||
|
||||
@@ -22,7 +22,7 @@
|
||||
#include "data/cassandra/impl/ManagedObject.hpp"
|
||||
|
||||
#include <cassandra.h>
|
||||
#include <xrpl/basics/base_uint.h>
|
||||
#include <ripple/basics/base_uint.h>
|
||||
|
||||
#include <cstdint>
|
||||
#include <stdexcept>
|
||||
@@ -33,7 +33,7 @@
|
||||
namespace data::cassandra::impl {
|
||||
|
||||
class Collection : public ManagedObject<CassCollection> {
|
||||
static constexpr auto kDELETER = [](CassCollection* ptr) { cass_collection_free(ptr); };
|
||||
static constexpr auto deleter = [](CassCollection* ptr) { cass_collection_free(ptr); };
|
||||
|
||||
static void
|
||||
throwErrorIfNeeded(CassError const rc, std::string_view const label)
|
||||
@@ -49,7 +49,7 @@ public:
|
||||
|
||||
template <typename Type>
|
||||
explicit Collection(std::vector<Type> const& value)
|
||||
: ManagedObject{cass_collection_new(CASS_COLLECTION_TYPE_LIST, value.size()), kDELETER}
|
||||
: ManagedObject{cass_collection_new(CASS_COLLECTION_TYPE_LIST, value.size()), deleter}
|
||||
{
|
||||
bind(value);
|
||||
}
|
||||
|
||||
@@ -35,7 +35,6 @@
|
||||
#include <boost/asio/spawn.hpp>
|
||||
#include <boost/json/object.hpp>
|
||||
|
||||
#include <algorithm>
|
||||
#include <atomic>
|
||||
#include <chrono>
|
||||
#include <condition_variable>
|
||||
@@ -193,24 +192,10 @@ public:
|
||||
template <typename... Args>
|
||||
void
|
||||
write(PreparedStatementType const& preparedStatement, Args&&... args)
|
||||
{
|
||||
auto statement = preparedStatement.bind(std::forward<Args>(args)...);
|
||||
write(std::move(statement));
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Non-blocking query execution used for writing data.
|
||||
*
|
||||
* Retries forever with retry policy specified by @ref AsyncExecutor
|
||||
*
|
||||
* @param statement Statement to execute
|
||||
* @throw DatabaseTimeout on timeout
|
||||
*/
|
||||
void
|
||||
write(StatementType&& statement)
|
||||
{
|
||||
auto const startTime = std::chrono::steady_clock::now();
|
||||
|
||||
auto statement = preparedStatement.bind(std::forward<Args>(args)...);
|
||||
incrementOutstandingRequestCount();
|
||||
|
||||
counters_->registerWriteStarted();
|
||||
@@ -266,21 +251,6 @@ public:
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Non-blocking query execution used for writing data. In contrast with write, this method does not execute
|
||||
* the statements in a batch.
|
||||
*
|
||||
* Retries forever with retry policy specified by @ref AsyncExecutor.
|
||||
*
|
||||
* @param statements Vector of statements to execute
|
||||
* @throw DatabaseTimeout on timeout
|
||||
*/
|
||||
void
|
||||
writeEach(std::vector<StatementType>&& statements)
|
||||
{
|
||||
std::ranges::for_each(std::move(statements), [this](auto& statement) { this->write(std::move(statement)); });
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Coroutine-based query execution used for reading data.
|
||||
*
|
||||
|
||||
@@ -32,12 +32,12 @@
|
||||
#include <utility>
|
||||
|
||||
namespace {
|
||||
constexpr auto kFUTURE_DELETER = [](CassFuture* ptr) { cass_future_free(ptr); };
|
||||
constexpr auto futureDeleter = [](CassFuture* ptr) { cass_future_free(ptr); };
|
||||
} // namespace
|
||||
|
||||
namespace data::cassandra::impl {
|
||||
|
||||
/* implicit */ Future::Future(CassFuture* ptr) : ManagedObject{ptr, kFUTURE_DELETER}
|
||||
/* implicit */ Future::Future(CassFuture* ptr) : ManagedObject{ptr, futureDeleter}
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
@@ -30,8 +30,8 @@ protected:
|
||||
std::unique_ptr<Managed, void (*)(Managed*)> ptr_;
|
||||
|
||||
public:
|
||||
template <typename DeleterCallable>
|
||||
ManagedObject(Managed* rawPtr, DeleterCallable deleter) : ptr_{rawPtr, deleter}
|
||||
template <typename deleterCallable>
|
||||
ManagedObject(Managed* rawPtr, deleterCallable deleter) : ptr_{rawPtr, deleter}
|
||||
{
|
||||
if (rawPtr == nullptr)
|
||||
throw std::runtime_error("Could not create DB object - got nullptr");
|
||||
|
||||
@@ -26,13 +26,13 @@
|
||||
#include <cstddef>
|
||||
|
||||
namespace {
|
||||
constexpr auto kRESULT_DELETER = [](CassResult const* ptr) { cass_result_free(ptr); };
|
||||
constexpr auto kRESULT_ITERATOR_DELETER = [](CassIterator* ptr) { cass_iterator_free(ptr); };
|
||||
constexpr auto resultDeleter = [](CassResult const* ptr) { cass_result_free(ptr); };
|
||||
constexpr auto resultIteratorDeleter = [](CassIterator* ptr) { cass_iterator_free(ptr); };
|
||||
} // namespace
|
||||
|
||||
namespace data::cassandra::impl {
|
||||
|
||||
/* implicit */ Result::Result(CassResult const* ptr) : ManagedObject{ptr, kRESULT_DELETER}
|
||||
/* implicit */ Result::Result(CassResult const* ptr) : ManagedObject{ptr, resultDeleter}
|
||||
{
|
||||
}
|
||||
|
||||
@@ -49,7 +49,7 @@ Result::hasRows() const
|
||||
}
|
||||
|
||||
/* implicit */ ResultIterator::ResultIterator(CassIterator* ptr)
|
||||
: ManagedObject{ptr, kRESULT_ITERATOR_DELETER}, hasMore_{cass_iterator_next(ptr) != 0u}
|
||||
: ManagedObject{ptr, resultIteratorDeleter}, hasMore_{cass_iterator_next(ptr) != 0u}
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
@@ -21,11 +21,10 @@
|
||||
|
||||
#include "data/cassandra/impl/ManagedObject.hpp"
|
||||
#include "data/cassandra/impl/Tuple.hpp"
|
||||
#include "util/UnsupportedType.hpp"
|
||||
|
||||
#include <cassandra.h>
|
||||
#include <xrpl/basics/base_uint.h>
|
||||
#include <xrpl/protocol/AccountID.h>
|
||||
#include <ripple/basics/base_uint.h>
|
||||
#include <ripple/protocol/AccountID.h>
|
||||
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
@@ -42,6 +41,9 @@
|
||||
|
||||
namespace data::cassandra::impl {
|
||||
|
||||
template <typename>
|
||||
static constexpr bool unsupported_v = false;
|
||||
|
||||
template <typename Type>
|
||||
inline Type
|
||||
extractColumn(CassRow const* row, std::size_t idx)
|
||||
@@ -101,7 +103,7 @@ extractColumn(CassRow const* row, std::size_t idx)
|
||||
output = static_cast<DecayedType>(out);
|
||||
} else {
|
||||
// type not supported for extraction
|
||||
static_assert(util::Unsupported<DecayedType>);
|
||||
static_assert(unsupported_v<DecayedType>);
|
||||
}
|
||||
|
||||
return output;
|
||||
|
||||
@@ -26,10 +26,10 @@
|
||||
namespace data::cassandra::impl {
|
||||
|
||||
class Session : public ManagedObject<CassSession> {
|
||||
static constexpr auto kDELETER = [](CassSession* ptr) { cass_session_free(ptr); };
|
||||
static constexpr auto deleter = [](CassSession* ptr) { cass_session_free(ptr); };
|
||||
|
||||
public:
|
||||
Session() : ManagedObject{cass_session_new(), kDELETER}
|
||||
Session() : ManagedObject{cass_session_new(), deleter}
|
||||
{
|
||||
}
|
||||
};
|
||||
|
@@ -27,12 +27,12 @@
#include <string>

namespace {
constexpr auto kCONTEXT_DELETER = [](CassSsl* ptr) { cass_ssl_free(ptr); };
constexpr auto contextDeleter = [](CassSsl* ptr) { cass_ssl_free(ptr); };
} // namespace

namespace data::cassandra::impl {

SslContext::SslContext(std::string const& certificate) : ManagedObject{cass_ssl_new(), kCONTEXT_DELETER}
SslContext::SslContext(std::string const& certificate) : ManagedObject{cass_ssl_new(), contextDeleter}
{
cass_ssl_set_verify_flags(*this, CASS_SSL_VERIFY_NONE);
if (auto const rc = cass_ssl_add_trusted_cert(*this, certificate.c_str()); rc != CASS_OK) {

@@ -23,13 +23,12 @@
#include "data/cassandra/impl/Collection.hpp"
#include "data/cassandra/impl/ManagedObject.hpp"
#include "data/cassandra/impl/Tuple.hpp"
#include "util/UnsupportedType.hpp"

#include <cassandra.h>
#include <fmt/core.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/protocol/AccountID.h>
#include <xrpl/protocol/STAccount.h>
#include <ripple/basics/base_uint.h>
#include <ripple/protocol/AccountID.h>
#include <ripple/protocol/STAccount.h>

#include <cstddef>
#include <cstdint>
@@ -43,7 +42,10 @@
namespace data::cassandra::impl {

class Statement : public ManagedObject<CassStatement> {
static constexpr auto kDELETER = [](CassStatement* ptr) { cass_statement_free(ptr); };
static constexpr auto deleter = [](CassStatement* ptr) { cass_statement_free(ptr); };

template <typename>
static constexpr bool unsupported_v = false;

public:
/**
@@ -54,14 +56,14 @@ public:
*/
template <typename... Args>
explicit Statement(std::string_view query, Args&&... args)
: ManagedObject{cass_statement_new_n(query.data(), query.size(), sizeof...(args)), kDELETER}
: ManagedObject{cass_statement_new(query.data(), sizeof...(args)), deleter}
{
cass_statement_set_consistency(*this, CASS_CONSISTENCY_QUORUM);
cass_statement_set_is_idempotent(*this, cass_true);
bind<Args...>(std::forward<Args>(args)...);
}

/* implicit */ Statement(CassStatement* ptr) : ManagedObject{ptr, kDELETER}
/* implicit */ Statement(CassStatement* ptr) : ManagedObject{ptr, deleter}
{
cass_statement_set_consistency(*this, CASS_CONSISTENCY_QUORUM);
cass_statement_set_is_idempotent(*this, cass_true);
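One behavioural detail in the constructor pair above: the variant passing query.data() and query.size() hands the driver an explicit length, while the other passes only query.data(), which is safe only if the std::string_view happens to be NUL-terminated. A small illustration of the difference, using stand-in functions rather than the real cassandra.h API:

#include <cstddef>
#include <cstdio>
#include <string>
#include <string_view>

// Stand-ins for the two construction styles; the real calls are
// cass_statement_new (expects a NUL-terminated query) and cass_statement_new_n
// (takes an explicit length). These fakes only print what they would see.
void
fakeStatementNew(char const* query, std::size_t parameterCount)
{
    std::printf("NUL-terminated read: '%s' (%zu params)\n", query, parameterCount);
}

void
fakeStatementNewN(char const* query, std::size_t queryLength, std::size_t parameterCount)
{
    std::printf("length-bounded read: '%.*s' (%zu params)\n", static_cast<int>(queryLength), query, parameterCount);
}

int main()
{
    std::string const full = "SELECT a FROM t; -- trailing junk";
    // A view over the first 16 characters; its data() is NOT NUL-terminated there.
    std::string_view const query = std::string_view{full}.substr(0, 16);

    fakeStatementNewN(query.data(), query.size(), 0);  // sees exactly the view
    fakeStatementNew(query.data(), 0);                 // keeps reading until full's terminator
}
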
@@ -106,9 +108,9 @@ public:
using UintByteTupleType = std::tuple<uint32_t, ripple::uint256>;
using ByteVectorType = std::vector<ripple::uint256>;

if constexpr (std::is_same_v<DecayedType, ripple::uint256> || std::is_same_v<DecayedType, ripple::uint192>) {
if constexpr (std::is_same_v<DecayedType, ripple::uint256>) {
auto const rc = bindBytes(value.data(), value.size());
throwErrorIfNeeded(rc, "Bind ripple::base_uint");
throwErrorIfNeeded(rc, "Bind ripple::uint256");
} else if constexpr (std::is_same_v<DecayedType, ripple::AccountID>) {
auto const rc = bindBytes(value.data(), value.size());
throwErrorIfNeeded(rc, "Bind ripple::AccountID");
@@ -119,9 +121,6 @@ public:
// reinterpret_cast is needed here :'(
auto const rc = bindBytes(reinterpret_cast<unsigned char const*>(value.data()), value.size());
throwErrorIfNeeded(rc, "Bind string (as bytes)");
} else if constexpr (std::is_convertible_v<DecayedType, Text>) {
auto const rc = cass_statement_bind_string_n(*this, idx, value.text.c_str(), value.text.size());
throwErrorIfNeeded(rc, "Bind string (as TEXT)");
} else if constexpr (std::is_same_v<DecayedType, UintTupleType> ||
std::is_same_v<DecayedType, UintByteTupleType>) {
auto const rc = cass_statement_bind_tuple(*this, idx, Tuple{std::forward<Type>(value)});
@@ -142,7 +141,7 @@ public:
throwErrorIfNeeded(rc, "Bind int64");
} else {
// type not supported for binding
static_assert(util::Unsupported<DecayedType>);
static_assert(unsupported_v<DecayedType>);
}
}
};
@@ -153,10 +152,10 @@ public:
* This is used to produce Statement objects that can be executed.
*/
class PreparedStatement : public ManagedObject<CassPrepared const> {
static constexpr auto kDELETER = [](CassPrepared const* ptr) { cass_prepared_free(ptr); };
static constexpr auto deleter = [](CassPrepared const* ptr) { cass_prepared_free(ptr); };

public:
/* implicit */ PreparedStatement(CassPrepared const* ptr) : ManagedObject{ptr, kDELETER}
/* implicit */ PreparedStatement(CassPrepared const* ptr) : ManagedObject{ptr, deleter}
{
}

@@ -24,17 +24,17 @@
#include <cassandra.h>

namespace {
constexpr auto kTUPLE_DELETER = [](CassTuple* ptr) { cass_tuple_free(ptr); };
constexpr auto kTUPLE_ITERATOR_DELETER = [](CassIterator* ptr) { cass_iterator_free(ptr); };
constexpr auto tupleDeleter = [](CassTuple* ptr) { cass_tuple_free(ptr); };
constexpr auto tupleIteratorDeleter = [](CassIterator* ptr) { cass_iterator_free(ptr); };
} // namespace

namespace data::cassandra::impl {

/* implicit */ Tuple::Tuple(CassTuple* ptr) : ManagedObject{ptr, kTUPLE_DELETER}
/* implicit */ Tuple::Tuple(CassTuple* ptr) : ManagedObject{ptr, tupleDeleter}
{
}

/* implicit */ TupleIterator::TupleIterator(CassIterator* ptr) : ManagedObject{ptr, kTUPLE_ITERATOR_DELETER}
/* implicit */ TupleIterator::TupleIterator(CassIterator* ptr) : ManagedObject{ptr, tupleIteratorDeleter}
{
}

@@ -20,10 +20,9 @@
#pragma once

#include "data/cassandra/impl/ManagedObject.hpp"
#include "util/UnsupportedType.hpp"

#include <cassandra.h>
#include <xrpl/basics/base_uint.h>
#include <ripple/basics/base_uint.h>

#include <cstddef>
#include <cstdint>
@@ -37,14 +36,17 @@
namespace data::cassandra::impl {

class Tuple : public ManagedObject<CassTuple> {
static constexpr auto kDELETER = [](CassTuple* ptr) { cass_tuple_free(ptr); };
static constexpr auto deleter = [](CassTuple* ptr) { cass_tuple_free(ptr); };

template <typename>
static constexpr bool unsupported_v = false;

public:
/* implicit */ Tuple(CassTuple* ptr);

template <typename... Types>
explicit Tuple(std::tuple<Types...>&& value)
: ManagedObject{cass_tuple_new(std::tuple_size<std::tuple<Types...>>{}), kDELETER}
: ManagedObject{cass_tuple_new(std::tuple_size<std::tuple<Types...>>{}), deleter}
{
std::apply(std::bind_front(&Tuple::bind<Types...>, this), std::move(value));
}
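The Tuple constructor above expands a std::tuple into a single bind call through std::apply plus std::bind_front. A self-contained sketch of that combination, with an illustrative Binder type standing in for the real class:

#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <string>
#include <tuple>
#include <utility>

struct Binder {
    // Receives every tuple element as one parameter pack, in order.
    template <typename... Types>
    void
    bind(Types&&... values)
    {
        std::size_t idx = 0;
        ((std::cout << "slot " << idx++ << ": " << values << '\n'), ...);
    }
};

int main()
{
    Binder binder;
    auto value = std::make_tuple(std::uint32_t{7}, std::string{"abc"});

    // bind_front fixes the object, apply unpacks the tuple into the call;
    // equivalent in spirit to binder.bind(7u, std::string{"abc"}).
    std::apply(std::bind_front(&Binder::bind<std::uint32_t, std::string>, &binder), std::move(value));
}
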
@@ -89,12 +91,15 @@ public:
throwErrorIfNeeded(rc, "Bind ripple::uint256");
} else {
// type not supported for binding
static_assert(util::Unsupported<DecayedType>);
static_assert(unsupported_v<DecayedType>);
}
}
};

class TupleIterator : public ManagedObject<CassIterator> {
template <typename>
static constexpr bool unsupported_v = false;

public:
/* implicit */ TupleIterator(CassIterator* ptr);

@@ -136,7 +141,7 @@ private:
output = static_cast<DecayedType>(out);
} else {
// type not supported for extraction
static_assert(util::Unsupported<DecayedType>);
static_assert(unsupported_v<DecayedType>);
}

return output;

@@ -10,8 +10,7 @@ target_sources(
NetworkValidatedLedgers.cpp
NFTHelpers.cpp
Source.cpp
MPTHelpers.cpp
impl/AmendmentBlockHandler.cpp
impl/ForwardingCache.cpp
impl/ForwardingSource.cpp
impl/GrpcSource.cpp
impl/SubscriptionSource.cpp

@@ -64,12 +64,8 @@ public:
* @param backend The backend to use
* @param cache The cache to load into
*/
CacheLoader(
util::config::ClioConfigDefinition const& config,
std::shared_ptr<BackendInterface> const& backend,
CacheType& cache
)
: backend_{backend}, cache_{cache}, settings_{makeCacheLoaderSettings(config)}, ctx_{settings_.numThreads}
CacheLoader(util::Config const& config, std::shared_ptr<BackendInterface> const& backend, CacheType& cache)
: backend_{backend}, cache_{cache}, settings_{make_CacheLoaderSettings(config)}, ctx_{settings_.numThreads}
{
}

@@ -130,8 +126,7 @@ public:
void
stop() noexcept
{
if (loader_ != nullptr)
loader_->stop();
loader_->stop();
}

/**
@@ -140,8 +135,7 @@ public:
void
wait() noexcept
{
if (loader_ != nullptr)
loader_->wait();
loader_->wait();
}
};

@@ -19,12 +19,11 @@

#include "etl/CacheLoaderSettings.hpp"

#include "util/newconfig/ConfigDefinition.hpp"
#include "util/config/Config.hpp"

#include <boost/algorithm/string/predicate.hpp>

#include <cstddef>
#include <cstdint>
#include <string>

namespace etl {
@@ -48,29 +47,31 @@ CacheLoaderSettings::isDisabled() const
}

[[nodiscard]] CacheLoaderSettings
makeCacheLoaderSettings(util::config::ClioConfigDefinition const& config)
make_CacheLoaderSettings(util::Config const& config)
{
CacheLoaderSettings settings;
settings.numThreads = config.get<uint16_t>("io_threads");
auto const cache = config.getObject("cache");
// Given diff number to generate cursors
settings.numCacheDiffs = cache.get<std::size_t>("num_diffs");
// Given cursors number fetching from diff
settings.numCacheCursorsFromDiff = cache.get<std::size_t>("num_cursors_from_diff");
// Given cursors number fetching from account
settings.numCacheCursorsFromAccount = cache.get<std::size_t>("num_cursors_from_account");
settings.numThreads = config.valueOr("io_threads", settings.numThreads);
if (config.contains("cache")) {
auto const cache = config.section("cache");
// Given diff number to generate cursors
settings.numCacheDiffs = cache.valueOr<size_t>("num_diffs", settings.numCacheDiffs);
// Given cursors number fetching from diff
settings.numCacheCursorsFromDiff = cache.valueOr<size_t>("num_cursors_from_diff", 0);
// Given cursors number fetching from account
settings.numCacheCursorsFromAccount = cache.valueOr<size_t>("num_cursors_from_account", 0);

settings.numCacheMarkers = cache.get<std::size_t>("num_markers");
settings.cachePageFetchSize = cache.get<std::size_t>("page_fetch_size");

auto const entry = cache.get<std::string>("load");
if (boost::iequals(entry, "sync"))
settings.loadStyle = CacheLoaderSettings::LoadStyle::SYNC;
if (boost::iequals(entry, "async"))
settings.loadStyle = CacheLoaderSettings::LoadStyle::ASYNC;
if (boost::iequals(entry, "none") or boost::iequals(entry, "no"))
settings.loadStyle = CacheLoaderSettings::LoadStyle::NONE;
settings.numCacheMarkers = cache.valueOr<size_t>("num_markers", settings.numCacheMarkers);
settings.cachePageFetchSize = cache.valueOr<size_t>("page_fetch_size", settings.cachePageFetchSize);

if (auto entry = cache.maybeValue<std::string>("load"); entry) {
if (boost::iequals(*entry, "sync"))
settings.loadStyle = CacheLoaderSettings::LoadStyle::SYNC;
if (boost::iequals(*entry, "async"))
settings.loadStyle = CacheLoaderSettings::LoadStyle::ASYNC;
if (boost::iequals(*entry, "none") or boost::iequals(*entry, "no"))
settings.loadStyle = CacheLoaderSettings::LoadStyle::NONE;
}
}
return settings;
}

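The two function bodies above differ mainly in where the defaults live: one reads every key through get<T> (with any defaults presumably supplied centrally by the config definition), the other spells the fallback at each call site via valueOr and guards the whole block on the "cache" section being present. A toy stand-in illustrating only the valueOr call-site shape (ToySection is hypothetical; the real util::Config / ClioConfigDefinition interfaces are exactly the ones shown in the hunk):

#include <cstddef>
#include <iostream>
#include <map>
#include <string>

// Hypothetical stand-in for a config section; only the valueOr shape matters here.
struct ToySection {
    std::map<std::string, std::size_t> values;

    std::size_t
    valueOr(std::string const& key, std::size_t fallback) const
    {
        auto const it = values.find(key);
        return it != values.end() ? it->second : fallback;
    }
};

int main()
{
    ToySection const cache{{{"num_diffs", 32}}};

    std::cout << cache.valueOr("num_diffs", 0) << '\n';    // 32: key present, fallback ignored
    std::cout << cache.valueOr("num_markers", 48) << '\n'; // 48: key absent, fallback used
}
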
@@ -19,7 +19,7 @@

#pragma once

#include "util/newconfig/ConfigDefinition.hpp"
#include "util/config/Config.hpp"

#include <cstddef>

@@ -64,6 +64,6 @@ struct CacheLoaderSettings {
* @returns The CacheLoaderSettings object
*/
[[nodiscard]] CacheLoaderSettings
makeCacheLoaderSettings(util::config::ClioConfigDefinition const& config);
make_CacheLoaderSettings(util::Config const& config);

} // namespace etl

@@ -21,7 +21,7 @@

#include "util/Assert.hpp"

#include <xrpl/basics/base_uint.h>
#include <ripple/basics/base_uint.h>

#include <cstddef>
#include <vector>

@@ -20,7 +20,7 @@
/** @file */
#pragma once

#include <xrpl/basics/base_uint.h>
#include <ripple/basics/base_uint.h>

#include <condition_variable>
#include <cstddef>

@@ -26,12 +26,12 @@
#include "feed/SubscriptionManagerInterface.hpp"
#include "util/Assert.hpp"
#include "util/Constants.hpp"
#include "util/config/Config.hpp"
#include "util/log/Logger.hpp"
#include "util/newconfig/ConfigDefinition.hpp"

#include <boost/asio/io_context.hpp>
#include <xrpl/beast/core/CurrentThreadName.h>
#include <xrpl/protocol/LedgerHeader.h>
#include <ripple/beast/core/CurrentThreadName.h>
#include <ripple/protocol/LedgerHeader.h>

#include <chrono>
#include <cstddef>
@@ -88,9 +88,9 @@ ETLService::runETLPipeline(uint32_t startSequence, uint32_t numExtractors)

auto const end = std::chrono::system_clock::now();
auto const lastPublishedSeq = ledgerPublisher_.getLastPublishedSequence();
static constexpr auto kNANOSECONDS_PER_SECOND = 1'000'000'000.0;
static constexpr auto NANOSECONDS_PER_SECOND = 1'000'000'000.0;
LOG(log_.debug()) << "Extracted and wrote " << lastPublishedSeq.value_or(startSequence) - startSequence << " in "
<< ((end - begin).count()) / kNANOSECONDS_PER_SECOND;
<< ((end - begin).count()) / NANOSECONDS_PER_SECOND;

state_.isWriting = false;

@@ -134,7 +134,7 @@ ETLService::monitor()
}
} catch (std::runtime_error const& e) {
LOG(log_.fatal()) << "Failed to load initial ledger: " << e.what();
amendmentBlockHandler_.notifyAmendmentBlocked();
amendmentBlockHandler_.onAmendmentBlock();
return;
}

@@ -168,7 +168,7 @@ ETLService::publishNextSequence(uint32_t nextSequence)
if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); rng && rng->maxSequence >= nextSequence) {
ledgerPublisher_.publish(nextSequence, {});
++nextSequence;
} else if (networkValidatedLedgers_->waitUntilValidatedByNetwork(nextSequence, util::kMILLISECONDS_PER_SECOND)) {
} else if (networkValidatedLedgers_->waitUntilValidatedByNetwork(nextSequence, util::MILLISECONDS_PER_SECOND)) {
LOG(log_.info()) << "Ledger with sequence = " << nextSequence << " has been validated by the network. "
<< "Attempting to find in database and publish";

@@ -178,8 +178,8 @@ ETLService::publishNextSequence(uint32_t nextSequence)
// database after the specified number of attempts. publishLedger()
// waits one second between each attempt to read the ledger from the
// database
constexpr size_t kTIMEOUT_SECONDS = 10;
bool const success = ledgerPublisher_.publish(nextSequence, kTIMEOUT_SECONDS);
constexpr size_t timeoutSeconds = 10;
bool const success = ledgerPublisher_.publish(nextSequence, timeoutSeconds);

if (!success) {
LOG(log_.warn()) << "Failed to publish ledger with sequence = " << nextSequence << " . Beginning ETL";
@@ -233,7 +233,7 @@ ETLService::monitorReadOnly()
// if we can't, wait until it's validated by the network, or 1 second passes, whichever occurs
// first. Even if we don't hear from rippled, if ledgers are being written to the db, we publish
// them.
networkValidatedLedgers_->waitUntilValidatedByNetwork(latestSequence, util::kMILLISECONDS_PER_SECOND);
networkValidatedLedgers_->waitUntilValidatedByNetwork(latestSequence, util::MILLISECONDS_PER_SECOND);
}
}
}
@@ -262,7 +262,7 @@ ETLService::doWork()
}

ETLService::ETLService(
util::config::ClioConfigDefinition const& config,
util::Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions,
@@ -280,9 +280,9 @@ ETLService::ETLService(
{
startSequence_ = config.maybeValue<uint32_t>("start_sequence");
finishSequence_ = config.maybeValue<uint32_t>("finish_sequence");
state_.isReadOnly = config.get<bool>("read_only");
extractorThreads_ = config.get<uint32_t>("extractor_threads");
txnThreshold_ = config.get<std::size_t>("txn_threshold");
state_.isReadOnly = config.valueOr("read_only", static_cast<bool>(state_.isReadOnly));
extractorThreads_ = config.valueOr<uint32_t>("extractor_threads", extractorThreads_);
txnThreshold_ = config.valueOr<size_t>("txn_threshold", txnThreshold_);

// This should probably be done in the backend factory but we don't have state available until here
backend_->setCorruptionDetector(CorruptionDetector<data::LedgerCache>{state_, backend->cache()});

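Both call sites above rely on waitUntilValidatedByNetwork(sequence, timeout) to block until the network has validated the given sequence or the timeout (in milliseconds) expires, returning whether the sequence is validated by then. A plausible sketch of such a wait, assuming a condition-variable based implementation (the real NetworkValidatedLedgers class may differ in details):

#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <mutex>
#include <optional>

// Sketch only: tracks the highest sequence reported as validated and lets
// callers block until a given sequence is reached or a timeout expires.
class ValidatedLedgersSketch {
    std::optional<std::uint32_t> max_;
    std::mutex mtx_;
    std::condition_variable cv_;

public:
    void
    push(std::uint32_t sequence)
    {
        {
            std::lock_guard const lock{mtx_};
            if (!max_ || sequence > *max_)
                max_ = sequence;
        }
        cv_.notify_all();
    }

    // Returns true if `sequence` is validated by the time we stop waiting.
    bool
    waitUntilValidatedByNetwork(std::uint32_t sequence, std::optional<std::uint32_t> maybeTimeoutMs = std::nullopt)
    {
        std::unique_lock lock{mtx_};
        auto const pred = [&] { return max_ && *max_ >= sequence; };
        if (maybeTimeoutMs)
            return cv_.wait_for(lock, std::chrono::milliseconds{*maybeTimeoutMs}, pred);
        cv_.wait(lock, pred);
        return true;
    }
};

int main()
{
    ValidatedLedgersSketch ledgers;
    ledgers.push(100);
    return ledgers.waitUntilValidatedByNetwork(99, 1000) ? 0 : 1;  // already satisfied, returns immediately
}
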
@@ -22,11 +22,11 @@
#include "data/BackendInterface.hpp"
#include "data/LedgerCache.hpp"
#include "etl/CacheLoader.hpp"
#include "etl/ETLHelpers.hpp"
#include "etl/ETLState.hpp"
#include "etl/LoadBalancer.hpp"
#include "etl/NetworkValidatedLedgersInterface.hpp"
#include "etl/SystemState.hpp"
#include "etl/impl/AmendmentBlockHandler.hpp"
#include "etl/impl/AmendmentBlock.hpp"
#include "etl/impl/ExtractionDataPipe.hpp"
#include "etl/impl/Extractor.hpp"
#include "etl/impl/LedgerFetcher.hpp"
@@ -37,12 +37,12 @@
#include "util/log/Logger.hpp"

#include <boost/asio/io_context.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/json/object.hpp>
#include <grpcpp/grpcpp.h>
#include <org/xrpl/rpc/v1/get_ledger.pb.h>
#include <xrpl/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>
#include <ripple/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>

#include <concepts>
#include <cstddef>
#include <cstdint>
#include <memory>
@@ -53,22 +53,15 @@
struct AccountTransactionsData;
struct NFTTransactionsData;
struct NFTsData;
namespace feed {
class SubscriptionManager;
} // namespace feed

/**
* @brief This namespace contains everything to do with the ETL and ETL sources.
*/
namespace etl {

/**
* @brief A tag class to help identify ETLService in templated code.
*/
struct ETLServiceTag {
virtual ~ETLServiceTag() = default;
};

template <typename T>
concept SomeETLService = std::derived_from<T, ETLServiceTag>;

/**
* @brief This class is responsible for continuously extracting data from a p2p node, and writing that data to the
* databases.
@@ -82,7 +75,7 @@ concept SomeETLService = std::derived_from<T, ETLServiceTag>;
* the others will fall back to monitoring/publishing. In this sense, this class dynamically transitions from monitoring
* to writing and from writing to monitoring, based on the activity of other processes running on different machines.
*/
class ETLService : public ETLServiceTag {
class ETLService {
// TODO: make these template parameters in ETLService
using LoadBalancerType = LoadBalancer;
using DataPipeType = etl::impl::ExtractionDataPipe<org::xrpl::rpc::v1::GetLedgerResponse>;
@@ -92,7 +85,7 @@ class ETLService : public ETLServiceTag {
using ExtractorType = etl::impl::Extractor<DataPipeType, LedgerFetcherType>;
using LedgerLoaderType = etl::impl::LedgerLoader<LoadBalancerType, LedgerFetcherType>;
using LedgerPublisherType = etl::impl::LedgerPublisher<CacheType>;
using AmendmentBlockHandlerType = etl::impl::AmendmentBlockHandler;
using AmendmentBlockHandlerType = etl::impl::AmendmentBlockHandler<>;
using TransformerType =
etl::impl::Transformer<DataPipeType, LedgerLoaderType, LedgerPublisherType, AmendmentBlockHandlerType>;

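The ETLServiceTag / SomeETLService lines touched above follow a simple pattern: a polymorphic tag base plus a std::derived_from concept lets templated code constrain "any ETL service implementation" without naming the concrete class. A compact sketch of the same idea with hypothetical names:

#include <concepts>
#include <iostream>

// Tag base class: exists only so templates can recognise conforming types.
struct ServiceTag {
    virtual ~ServiceTag() = default;
};

template <typename T>
concept SomeService = std::derived_from<T, ServiceTag>;

struct MyService : ServiceTag {
    int
    run() const
    {
        return 42;
    }
};

// Only types deriving from ServiceTag are accepted here.
template <SomeService Service>
int
drive(Service const& service)
{
    return service.run();
}

int main()
{
    std::cout << drive(MyService{}) << '\n';
    // drive(123) would fail to compile: int does not satisfy SomeService.
}
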
@@ -130,7 +123,7 @@ public:
* @param ledgers The network validated ledgers datastructure
*/
ETLService(
util::config::ClioConfigDefinition const& config,
util::Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions,
@@ -152,8 +145,8 @@ public:
* @return A shared pointer to a new instance of ETLService
*/
static std::shared_ptr<ETLService>
makeETLService(
util::config::ClioConfigDefinition const& config,
make_ETLService(
util::Config const& config,
boost::asio::io_context& ioc,
std::shared_ptr<BackendInterface> backend,
std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions,
@@ -170,20 +163,10 @@ public:
/**
* @brief Stops components and joins worker thread.
*/
~ETLService() override
~ETLService()
{
if (not state_.isStopping)
stop();
}

/**
* @brief Stop the ETL service.
* @note This method blocks until the ETL service has stopped.
*/
void
stop()
{
LOG(log_.info()) << "Stop called";
LOG(log_.info()) << "onStop called";
LOG(log_.debug()) << "Stopping Reporting ETL";

state_.isStopping = true;
cacheLoader_.stop();

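The destructor/stop pair above follows a common shutdown idiom: stop() flips an isStopping flag and tears components down, and the destructor calls it only if nobody already did, so both explicit shutdown and plain scope exit are safe. A minimal sketch with a worker thread standing in for the ETL components:

#include <atomic>
#include <chrono>
#include <iostream>
#include <thread>

// Sketch of the shutdown idiom: explicit stop() or destruction, whichever
// comes first, brings the worker down exactly once.
class WorkerSketch {
    std::atomic_bool stopping_{false};
    std::thread worker_;

public:
    WorkerSketch()
        : worker_{[this] {
              while (!stopping_)
                  std::this_thread::sleep_for(std::chrono::milliseconds{10});
          }}
    {
    }

    void
    stop()
    {
        std::cout << "Stop called\n";
        stopping_ = true;
        if (worker_.joinable())
            worker_.join();
    }

    ~WorkerSketch()
    {
        if (not stopping_)
            stop();
    }
};

int main()
{
    WorkerSketch service;  // stop() runs from the destructor at end of scope
}
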
@@ -24,7 +24,7 @@
#include <boost/json/conversion.hpp>
#include <boost/json/value.hpp>
#include <boost/json/value_to.hpp>
#include <xrpl/protocol/jss.h>
#include <ripple/protocol/jss.h>

#include <cstdint>
#include <optional>

@@ -23,7 +23,6 @@

#include <boost/json.hpp>
#include <boost/json/conversion.hpp>
#include <boost/json/object.hpp>
#include <boost/json/value.hpp>
#include <boost/json/value_to.hpp>

@@ -47,11 +46,8 @@ struct ETLState {
static std::optional<ETLState>
fetchETLStateFromSource(Forward& source) noexcept
{
auto const serverInfoRippled = data::synchronous([&source](auto yield) -> std::optional<boost::json::object> {
if (auto result = source.forwardToRippled({{"command", "server_info"}}, std::nullopt, {}, yield)) {
return std::move(result).value();
}
return std::nullopt;
auto const serverInfoRippled = data::synchronous([&source](auto yield) {
return source.forwardToRippled({{"command", "server_info"}}, std::nullopt, {}, yield);
});

if (serverInfoRippled)

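A side note on the lambda pair above: the version that unwraps the forwarded result needs the explicit -> std::optional<boost::json::object> return type because its two return statements (the unwrapped object and std::nullopt) would otherwise deduce conflicting types, while the version that simply forwards the optional can rely on deduction. The same rule in a minimal standalone form:

#include <optional>
#include <string>
#include <utility>

std::optional<std::string>
fetch(bool ok)
{
    return ok ? std::optional<std::string>{"value"} : std::nullopt;
}

int main()
{
    // Deduction is fine here: the single return already has type std::optional<std::string>.
    auto forwarding = [](bool ok) { return fetch(ok); };

    // Mixed returns (std::string and std::nullopt_t) deduce conflicting types,
    // so the return type must be spelled out explicitly.
    auto unwrapping = [](bool ok) -> std::optional<std::string> {
        if (auto result = fetch(ok))
            return std::move(result).value();
        return std::nullopt;
    };

    return (forwarding(true).has_value() && unwrapping(true).has_value()) ? 0 : 1;
}
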
Some files were not shown because too many files have changed in this diff.