Mirror of https://github.com/XRPLF/clio.git, synced 2025-11-04 20:05:51 +00:00

Compare commits: 2.6.0-b1...experiment (100 commits)
Commits (SHA1):

7eaf0005e4 497721ee7c 26530108e3 fc88abdaeb 3f2ada3439
e996f2b7ab 26112d17f8 e4abec4b98 503e23055b 97480ce626
bd966e636e 91b248e3b2 140ac78e15 f1bf423f69 dcf369e4ec
56f4dc591c c40cd8154f 989a0c8468 1adbed7913 490ec41083
384e79cd32 8bc36c2c0b 9edc26a2a3 08bb619964 26ef25f864
4e9558f76b a62084a4f0 b8c298b734 cf4d5d649a eb2778ccad
790402bcfb 7c68770787 d9faf7a833 90ac03cae7 3a667f558c
0a2930d861 e86178b523 10e15b524f 402ab29a73 3df28f42ec
0e8896ad06 ffd18049eb 7413e02a05 0403248a8f 84db880ce7
e6b2f9cde7 2512a9c8e7 5e7f6bb5bd ae15bbd7b5 f88ce31363
33c0737933 b26fcae690 60baaf921f f41e06061f c170c56a84
e03f5e46c0 30da8d8f63 c9c392679d 47f5ae5f12 6c34458d6c
8f6bec2e25 ec40cc93ff 3681ef4e41 e2fbf56277 2d48de372b
0d9a83fd4d c780ef8a0b d833d36896 7a2090bc00 b5892dd139
a172d0b7ea 47c2af0421 c3e04426d3 e9ab081ab7 caedb51f00
e6abdda0a7 d598396445 bbd2884e3b 46c96654ee 57ac234657
4232359dce 8b1cab46e7 e05505aa4f 73bc85864b 373430924b
8ad111655c 0a8470758d 1ec906addc afc0a358d9 af284dda37
7558348d14 0d262e74bc 312e7be2b4 de9b79adf0 6c68360234
7e42507b9a 36bfcc7543 4a5278a915 333b73e882 9420c506ca
@@ -24,6 +24,7 @@ inputs:
   dockerhub_repo:
     description: DockerHub repository name
     required: false
+    default: ""
   dockerhub_description:
     description: Short description of the image
     required: false
.github/actions/cmake/action.yml (vendored, new file, 73 lines)

@@ -0,0 +1,73 @@
+name: Run CMake
+description: Run CMake to generate build files
+
+inputs:
+  build_dir:
+    description: Build directory
+    required: false
+    default: "build"
+  conan_profile:
+    description: Conan profile name
+    required: true
+  build_type:
+    description: Build type for third-party libraries and clio. Could be 'Release', 'Debug'
+    required: true
+    default: "Release"
+  integration_tests:
+    description: Whether to generate target integration tests
+    required: true
+    default: "true"
+  benchmark:
+    description: Whether to generate targets for benchmarks
+    required: true
+    default: "true"
+  code_coverage:
+    description: Whether to enable code coverage
+    required: true
+    default: "false"
+  static:
+    description: Whether Clio is to be statically linked
+    required: true
+    default: "false"
+  time_trace:
+    description: Whether to enable compiler trace reports
+    required: true
+    default: "false"
+  package:
+    description: Whether to generate Debian package
+    required: true
+    default: "false"
+
+runs:
+  using: composite
+  steps:
+    - name: Run cmake
+      shell: bash
+      env:
+        BUILD_TYPE: "${{ inputs.build_type }}"
+        SANITIZER_OPTION: |-
+          ${{ endsWith(inputs.conan_profile, '.asan') && '-Dsan=address' ||
+              endsWith(inputs.conan_profile, '.tsan') && '-Dsan=thread' ||
+              endsWith(inputs.conan_profile, '.ubsan') && '-Dsan=undefined' ||
+              '' }}
+        INTEGRATION_TESTS: "${{ inputs.integration_tests == 'true' && 'ON' || 'OFF' }}"
+        BENCHMARK: "${{ inputs.benchmark == 'true' && 'ON' || 'OFF' }}"
+        COVERAGE: "${{ inputs.code_coverage == 'true' && 'ON' || 'OFF' }}"
+        STATIC: "${{ inputs.static == 'true' && 'ON' || 'OFF' }}"
+        TIME_TRACE: "${{ inputs.time_trace == 'true' && 'ON' || 'OFF' }}"
+        PACKAGE: "${{ inputs.package == 'true' && 'ON' || 'OFF' }}"
+      run: |
+        cmake \
+          -B ${{inputs.build_dir}} \
+          -S . \
+          -G Ninja \
+          -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
+          -DCMAKE_BUILD_TYPE="${BUILD_TYPE}" \
+          "${SANITIZER_OPTION}" \
+          -Dtests=ON \
+          -Dintegration_tests="${INTEGRATION_TESTS}" \
+          -Dbenchmark="${BENCHMARK}" \
+          -Dcoverage="${COVERAGE}" \
+          -Dstatic="${STATIC}" \
+          -Dtime_trace="${TIME_TRACE}" \
+          -Dpackage="${PACKAGE}"
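The SANITIZER_OPTION expression above chains GitHub Actions' `&&`/`||` operators to turn a profile-name suffix into a cmake flag. A rough stand-alone bash sketch of the same mapping (illustrative only; the action itself relies on the expression syntax):

    # Hypothetical bash equivalent of the SANITIZER_OPTION mapping in the action above.
    conan_profile="gcc.asan"    # example input
    case "$conan_profile" in
        *.asan)  sanitizer_option="-Dsan=address" ;;
        *.tsan)  sanitizer_option="-Dsan=thread" ;;
        *.ubsan) sanitizer_option="-Dsan=undefined" ;;
        *)       sanitizer_option="" ;;
    esac
    echo "$sanitizer_option"    # prints: -Dsan=address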
.github/actions/conan/action.yml (vendored, new file, 38 lines)

@@ -0,0 +1,38 @@
+name: Run Conan
+description: Run conan to install dependencies
+
+inputs:
+  build_dir:
+    description: Build directory
+    required: false
+    default: "build"
+  conan_profile:
+    description: Conan profile name
+    required: true
+  force_conan_source_build:
+    description: Whether conan should build all dependencies from source
+    required: true
+    default: "false"
+  build_type:
+    description: Build type for third-party libraries and clio. Could be 'Release', 'Debug'
+    required: true
+    default: "Release"
+
+runs:
+  using: composite
+  steps:
+    - name: Create build directory
+      shell: bash
+      run: mkdir -p "${{ inputs.build_dir }}"
+
+    - name: Run conan
+      shell: bash
+      env:
+        CONAN_BUILD_OPTION: "${{ inputs.force_conan_source_build == 'true' && '*' || 'missing' }}"
+      run: |
+        conan \
+          install . \
+          -of build \
+          -b "$CONAN_BUILD_OPTION" \
+          -s "build_type=${{ inputs.build_type }}" \
+          --profile:all "${{ inputs.conan_profile }}"
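Depending on force_conan_source_build, the step above resolves to one of two invocations; a sketch with an assumed gcc profile and Release build type:

    # force_conan_source_build == 'false' (the default): reuse binaries, build only what is missing
    conan install . -of build -b missing -s build_type=Release --profile:all gcc

    # force_conan_source_build == 'true': rebuild every dependency from source
    conan install . -of build -b '*' -s build_type=Release --profile:all gcc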
.github/actions/generate/action.yml (vendored, deleted, 93 lines)

@@ -1,93 +0,0 @@
-name: Run conan and cmake
-description: Run conan and cmake
-
-inputs:
-  conan_profile:
-    description: Conan profile name
-    required: true
-  force_conan_source_build:
-    description: Whether conan should build all dependencies from source
-    required: true
-    default: "false"
-  build_type:
-    description: Build type for third-party libraries and clio. Could be 'Release', 'Debug'
-    required: true
-    default: "Release"
-  build_integration_tests:
-    description: Whether to build integration tests
-    required: true
-    default: "true"
-  build_benchmark:
-    description: Whether to build benchmark tests
-    required: true
-    default: "true"
-  code_coverage:
-    description: Whether conan's coverage option should be on or not
-    required: true
-    default: "false"
-  static:
-    description: Whether Clio is to be statically linked
-    required: true
-    default: "false"
-  time_trace:
-    description: Whether to enable compiler trace reports
-    required: true
-    default: "false"
-  use_mold:
-    description: Whether to use mold linker
-    required: true
-    default: "false"
-
-runs:
-  using: composite
-  steps:
-    - name: Create build directory
-      shell: bash
-      run: mkdir -p build
-
-    - name: Run conan
-      shell: bash
-      env:
-        CONAN_BUILD_OPTION: "${{ inputs.force_conan_source_build == 'true' && '*' || 'missing' }}"
-        CODE_COVERAGE: "${{ inputs.code_coverage == 'true' && 'True' || 'False' }}"
-        STATIC_OPTION: "${{ inputs.static == 'true' && 'True' || 'False' }}"
-        INTEGRATION_TESTS_OPTION: "${{ inputs.build_integration_tests == 'true' && 'True' || 'False' }}"
-        BENCHMARK_OPTION: "${{ inputs.build_benchmark == 'true' && 'True' || 'False' }}"
-        TIME_TRACE: "${{ inputs.time_trace == 'true' && 'True' || 'False' }}"
-        USE_MOLD: "${{ inputs.use_mold == 'true' && 'True' || 'False' }}"
-      run: |
-        cd build
-        conan \
-          install .. \
-          -of . \
-          -b "$CONAN_BUILD_OPTION" \
-          -s "build_type=${{ inputs.build_type }}" \
-          -o "&:static=${STATIC_OPTION}" \
-          -o "&:tests=True" \
-          -o "&:integration_tests=${INTEGRATION_TESTS_OPTION}" \
-          -o "&:benchmark=${BENCHMARK_OPTION}" \
-          -o "&:lint=False" \
-          -o "&:coverage=${CODE_COVERAGE}" \
-          -o "&:time_trace=${TIME_TRACE}" \
-          -o "&:use_mold=${USE_MOLD}" \
-          --profile:all "${{ inputs.conan_profile }}"
-
-    - name: Run cmake
-      shell: bash
-      env:
-        BUILD_TYPE: "${{ inputs.build_type }}"
-        SANITIZER_OPTION: |-
-          ${{ endsWith(inputs.conan_profile, '.asan') && '-Dsan=address' ||
-              endsWith(inputs.conan_profile, '.tsan') && '-Dsan=thread' ||
-              endsWith(inputs.conan_profile, '.ubsan') && '-Dsan=undefined' ||
-              '' }}
-        USE_MOLD_OPTION: "${{ inputs.use_mold == 'true' && '-DUSE_MOLD=ON' || '' }}"
-      run: |
-        cd build
-        cmake \
-          -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
-          -DCMAKE_BUILD_TYPE="${BUILD_TYPE}" \
-          "${USE_MOLD_OPTION}" \
-          "${SANITIZER_OPTION}" \
-          .. \
-          -G Ninja
.github/actions/prepare_runner/action.yml (vendored, deleted, 59 lines)

@@ -1,59 +0,0 @@
-name: Prepare runner
-description: Install packages, set environment variables, create directories
-
-inputs:
-  disable_ccache:
-    description: Whether ccache should be disabled
-    required: true
-
-runs:
-  using: composite
-  steps:
-    - name: Install packages on mac
-      if: ${{ runner.os == 'macOS' }}
-      shell: bash
-      run: |
-        brew install --quiet \
-          bison \
-          ca-certificates \
-          ccache \
-          clang-build-analyzer \
-          cmake \
-          conan \
-          gh \
-          jq \
-          llvm@14 \
-          ninja \
-          pkg-config
-        echo "/opt/homebrew/opt/conan@2/bin" >> $GITHUB_PATH
-
-    - name: Fix git permissions on Linux
-      if: ${{ runner.os == 'Linux' }}
-      shell: bash
-      run: git config --global --add safe.directory "$PWD"
-
-    - name: Set env variables for macOS
-      if: ${{ runner.os == 'macOS' }}
-      shell: bash
-      run: |
-        echo "CCACHE_DIR=${{ github.workspace }}/.ccache" >> $GITHUB_ENV
-        echo "CONAN_HOME=${{ github.workspace }}/.conan2" >> $GITHUB_ENV
-
-    - name: Set env variables for Linux
-      if: ${{ runner.os == 'Linux' }}
-      shell: bash
-      run: |
-        echo "CCACHE_DIR=/root/.ccache" >> $GITHUB_ENV
-        echo "CONAN_HOME=/root/.conan2" >> $GITHUB_ENV
-
-    - name: Set CCACHE_DISABLE=1
-      if: ${{ inputs.disable_ccache == 'true' }}
-      shell: bash
-      run: |
-        echo "CCACHE_DISABLE=1" >> $GITHUB_ENV
-
-    - name: Create directories
-      shell: bash
-      run: |
-        mkdir -p "$CCACHE_DIR"
-        mkdir -p "$CONAN_HOME"
.github/dependabot.yml (vendored, 30 changes)

@@ -39,6 +39,19 @@ updates:
       prefix: "ci: [DEPENDABOT] "
     target-branch: develop

+  - package-ecosystem: github-actions
+    directory: .github/actions/cmake/
+    schedule:
+      interval: weekly
+      day: monday
+      time: "04:00"
+      timezone: Etc/GMT
+    reviewers:
+      - XRPLF/clio-dev-team
+    commit-message:
+      prefix: "ci: [DEPENDABOT] "
+    target-branch: develop
+
   - package-ecosystem: github-actions
     directory: .github/actions/code_coverage/
     schedule:
@@ -53,7 +66,7 @@ updates:
     target-branch: develop

   - package-ecosystem: github-actions
-    directory: .github/actions/create_issue/
+    directory: .github/actions/conan/
     schedule:
       interval: weekly
       day: monday
@@ -66,7 +79,7 @@ updates:
     target-branch: develop

   - package-ecosystem: github-actions
-    directory: .github/actions/generate/
+    directory: .github/actions/create_issue/
     schedule:
       interval: weekly
       day: monday
@@ -104,19 +117,6 @@ updates:
       prefix: "ci: [DEPENDABOT] "
     target-branch: develop

-  - package-ecosystem: github-actions
-    directory: .github/actions/prepare_runner/
-    schedule:
-      interval: weekly
-      day: monday
-      time: "04:00"
-      timezone: Etc/GMT
-    reviewers:
-      - XRPLF/clio-dev-team
-    commit-message:
-      prefix: "ci: [DEPENDABOT] "
-    target-branch: develop
-
   - package-ecosystem: github-actions
     directory: .github/actions/restore_cache/
     schedule:
.github/scripts/conan/apple-clang-ci.profile (vendored, deleted, 8 lines)

@@ -1,8 +0,0 @@
-[settings]
-arch={{detect_api.detect_arch()}}
-build_type=Release
-compiler=apple-clang
-compiler.cppstd=20
-compiler.libcxx=libc++
-compiler.version=16
-os=Macos
.github/scripts/conan/generate_matrix.py (vendored, 2 changes)

@@ -3,7 +3,7 @@ import itertools
 import json

 LINUX_OS = ["heavy", "heavy-arm64"]
-LINUX_CONTAINERS = ['{ "image": "ghcr.io/xrplf/clio-ci:a446d85297b3006e6d2c4dc7640368f096afecf5" }']
+LINUX_CONTAINERS = ['{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }']
 LINUX_COMPILERS = ["gcc", "clang"]

 MACOS_OS = ["macos15"]
.github/scripts/conan/init.sh (vendored, 7 changes)

@@ -8,10 +8,11 @@ REPO_DIR="$(cd "$CURRENT_DIR/../../../" && pwd)"
 CONAN_DIR="${CONAN_HOME:-$HOME/.conan2}"
 PROFILES_DIR="$CONAN_DIR/profiles"

+# When developers' compilers are updated, these profiles might be different
 if [[ -z "$CI" ]]; then
-    APPLE_CLANG_PROFILE="$CURRENT_DIR/apple-clang-local.profile"
+    APPLE_CLANG_PROFILE="$CURRENT_DIR/apple-clang-17.profile"
 else
-    APPLE_CLANG_PROFILE="$CURRENT_DIR/apple-clang-ci.profile"
+    APPLE_CLANG_PROFILE="$CURRENT_DIR/apple-clang-17.profile"
 fi

 GCC_PROFILE="$REPO_DIR/docker/ci/conan/gcc.profile"
@@ -21,7 +22,7 @@ SANITIZER_TEMPLATE_FILE="$REPO_DIR/docker/ci/conan/sanitizer_template.profile"

 rm -rf "$CONAN_DIR"

-conan remote add --index 0 ripple https://conan.ripplex.io
+conan remote add --index 0 xrplf https://conan.ripplex.io

 cp "$REPO_DIR/docker/ci/conan/global.conf" "$CONAN_DIR/global.conf"
.github/workflows/build.yml (vendored, 23 changes)

@@ -45,7 +45,7 @@ jobs:
         build_type: [Release, Debug]
         container:
           [
-            '{ "image": "ghcr.io/xrplf/clio-ci:a446d85297b3006e6d2c4dc7640368f096afecf5" }',
+            '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }',
           ]
         static: [true]

@@ -73,7 +73,7 @@ jobs:
     uses: ./.github/workflows/build_impl.yml
     with:
       runs_on: heavy
-      container: '{ "image": "ghcr.io/xrplf/clio-ci:a446d85297b3006e6d2c4dc7640368f096afecf5" }'
+      container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
       conan_profile: gcc
       build_type: Debug
       disable_cache: false
@@ -85,12 +85,29 @@ jobs:
     secrets:
       CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

+  package:
+    name: Build packages
+
+    uses: ./.github/workflows/build_impl.yml
+    with:
+      runs_on: heavy
+      container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
+      conan_profile: gcc
+      build_type: Release
+      disable_cache: false
+      code_coverage: false
+      static: true
+      upload_clio_server: false
+      package: true
+      targets: package
+      analyze_build_time: false
+
   check_config:
     name: Check Config Description
     needs: build-and-test
     runs-on: heavy
     container:
-      image: ghcr.io/xrplf/clio-ci:a446d85297b3006e6d2c4dc7640368f096afecf5
+      image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d

     steps:
       - uses: actions/checkout@v4
.github/workflows/build_and_test.yml (vendored, 7 changes)

@@ -63,6 +63,12 @@ on:
         type: string
         default: ""

+      package:
+        description: Whether to generate Debian package
+        required: false
+        type: boolean
+        default: false
+
 jobs:
   build:
     uses: ./.github/workflows/build_impl.yml
@@ -78,6 +84,7 @@ jobs:
       targets: ${{ inputs.targets }}
       analyze_build_time: false
       expected_version: ${{ inputs.expected_version }}
+      package: ${{ inputs.package }}

   test:
     needs: build
.github/workflows/build_clio_docker_image.yml (vendored, 11 changes)

@@ -83,6 +83,11 @@ jobs:
         shell: bash
         run: strip ./docker/clio/clio_server

+      - name: Set GHCR_REPO
+        id: set-ghcr-repo
+        run: |
+          echo "GHCR_REPO=$(echo ghcr.io/${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> ${GITHUB_OUTPUT}
+
       - name: Build Docker image
         uses: ./.github/actions/build_docker_image
         env:
@@ -91,11 +96,11 @@ jobs:
           DOCKERHUB_PW: ${{ secrets.DOCKERHUB_PW }}
         with:
           images: |
-            ghcr.io/xrplf/clio
-            rippleci/clio
+            ghcr.io/${{ steps.set-ghcr-repo.outputs.GHCR_REPO }}/clio
+            ${{ github.repository_owner == 'XRPLF' && 'rippleci/clio' || '' }}
           push_image: ${{ inputs.publish_image }}
           directory: docker/clio
           tags: ${{ inputs.tags }}
           platforms: linux/amd64
-          dockerhub_repo: rippleci/clio
+          dockerhub_repo: ${{ github.repository_owner == 'XRPLF' && 'rippleci/clio' || '' }}
           dockerhub_description: Clio is an XRP Ledger API server.
.github/workflows/build_impl.yml (vendored, 34 changes)

@@ -59,6 +59,11 @@ on:
         type: string
         default: ""

+      package:
+        description: Whether to generate Debian package
+        required: false
+        type: boolean
+
     secrets:
       CODECOV_TOKEN:
         required: false
@@ -70,9 +75,9 @@ jobs:
     container: ${{ inputs.container != '' && fromJson(inputs.container) || null }}

     steps:
-      - name: Clean workdir
+      - name: Cleanup workspace
         if: ${{ runner.os == 'macOS' }}
-        uses: kuznetsss/workspace-cleanup@80b9863b45562c148927c3d53621ef354e5ae7ce # v1.0
+        uses: XRPLF/actions/.github/actions/cleanup-workspace@ea9970b7c211b18f4c8bcdb28c29f5711752029f

       - uses: actions/checkout@v4
         with:
@@ -83,7 +88,7 @@ jobs:
           ref: ${{ github.ref }}

       - name: Prepare runner
-        uses: ./.github/actions/prepare_runner
+        uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
         with:
           disable_ccache: ${{ inputs.disable_cache }}

@@ -102,15 +107,21 @@ jobs:
           build_type: ${{ inputs.build_type }}
           code_coverage: ${{ inputs.code_coverage }}

-      - name: Run conan and cmake
-        uses: ./.github/actions/generate
+      - name: Run conan
+        uses: ./.github/actions/conan
+        with:
+          conan_profile: ${{ inputs.conan_profile }}
+          build_type: ${{ inputs.build_type }}
+
+      - name: Run CMake
+        uses: ./.github/actions/cmake
         with:
           conan_profile: ${{ inputs.conan_profile }}
           build_type: ${{ inputs.build_type }}
           code_coverage: ${{ inputs.code_coverage }}
           static: ${{ inputs.static }}
           time_trace: ${{ inputs.analyze_build_time }}
-          use_mold: ${{ runner.os != 'macOS' }}
+          package: ${{ inputs.package }}

       - name: Build Clio
         uses: ./.github/actions/build_clio
@@ -158,19 +169,26 @@ jobs:
           path: build/clio_server

       - name: Upload clio_tests
-        if: ${{ !inputs.code_coverage && !inputs.analyze_build_time }}
+        if: ${{ !inputs.code_coverage && !inputs.analyze_build_time && !inputs.package }}
         uses: actions/upload-artifact@v4
         with:
           name: clio_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
           path: build/clio_tests

       - name: Upload clio_integration_tests
-        if: ${{ !inputs.code_coverage && !inputs.analyze_build_time }}
+        if: ${{ !inputs.code_coverage && !inputs.analyze_build_time && !inputs.package }}
         uses: actions/upload-artifact@v4
         with:
           name: clio_integration_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
           path: build/clio_integration_tests

+      - name: Upload Clio Linux package
+        if: inputs.package
+        uses: actions/upload-artifact@v4
+        with:
+          name: clio_deb_package_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
+          path: build/*.deb
+
       - name: Save cache
         if: ${{ !inputs.disable_cache && github.ref == 'refs/heads/develop' }}
         uses: ./.github/actions/save_cache
.github/workflows/check_libxrpl.yml (vendored, 24 changes)

@@ -17,7 +17,7 @@ jobs:
     name: Build Clio / `libXRPL ${{ github.event.client_payload.version }}`
     runs-on: heavy
     container:
-      image: ghcr.io/xrplf/clio-ci:a446d85297b3006e6d2c4dc7640368f096afecf5
+      image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d

     steps:
       - uses: actions/checkout@v4
@@ -25,23 +25,28 @@ jobs:
           fetch-depth: 0

       - name: Prepare runner
-        uses: ./.github/actions/prepare_runner
+        uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
         with:
           disable_ccache: true

       - name: Update libXRPL version requirement
         shell: bash
         run: |
-          sed -i.bak -E "s|'xrpl/[a-zA-Z0-9\\.\\-]+'|'xrpl/${{ github.event.client_payload.version }}'|g" conanfile.py
+          sed -i.bak -E "s|'xrpl/[a-zA-Z0-9\\.\\-]+'|'xrpl/${{ github.event.client_payload.conan_ref }}'|g" conanfile.py
           rm -f conanfile.py.bak

       - name: Update conan lockfile
         shell: bash
         run: |
-          conan lock create . -o '&:tests=True' -o '&:benchmark=True' --profile:all ${{ env.CONAN_PROFILE }}
+          conan lock create . --profile:all ${{ env.CONAN_PROFILE }}

-      - name: Run conan and cmake
-        uses: ./.github/actions/generate
+      - name: Run conan
+        uses: ./.github/actions/conan
+        with:
+          conan_profile: ${{ env.CONAN_PROFILE }}
+
+      - name: Run CMake
+        uses: ./.github/actions/cmake
         with:
           conan_profile: ${{ env.CONAN_PROFILE }}

@@ -62,7 +67,7 @@ jobs:
     needs: build
     runs-on: heavy
     container:
-      image: ghcr.io/xrplf/clio-ci:a446d85297b3006e6d2c4dc7640368f096afecf5
+      image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d

     steps:
       - uses: actions/download-artifact@v5
@@ -95,6 +100,7 @@ jobs:
           labels: "compatibility,bug"
           title: "Proposed libXRPL check failed"
           body: >
-            Clio build or tests failed against `libXRPL ${{ github.event.client_payload.version }}`.
+            Clio build or tests failed against `libXRPL ${{ github.event.client_payload.conan_ref }}`.

-            Workflow: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}/
+            PR: ${{ github.event.client_payload.pr_url }}
+            Workflow run: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}/
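The updated sed swaps whatever xrpl reference is pinned in conanfile.py for the conan_ref carried by the triggering event. A quick sketch of the substitution with a made-up ref (simplified character class, no escaping needed inside brackets):

    echo "requires = 'xrpl/2.6.0'" | sed -E "s|'xrpl/[a-zA-Z0-9.-]+'|'xrpl/2.7.0-b1'|g"
    # prints: requires = 'xrpl/2.7.0-b1'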
.github/workflows/clang-tidy.yml (vendored, 13 changes)

@@ -24,7 +24,7 @@ jobs:
   clang_tidy:
     runs-on: heavy
     container:
-      image: ghcr.io/xrplf/clio-ci:a446d85297b3006e6d2c4dc7640368f096afecf5
+      image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d

     permissions:
       contents: write
@@ -37,7 +37,7 @@ jobs:
           fetch-depth: 0

       - name: Prepare runner
-        uses: ./.github/actions/prepare_runner
+        uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
         with:
           disable_ccache: true

@@ -48,8 +48,13 @@ jobs:
           conan_profile: ${{ env.CONAN_PROFILE }}
           ccache_dir: ${{ env.CCACHE_DIR }}

-      - name: Run conan and cmake
-        uses: ./.github/actions/generate
+      - name: Run conan
+        uses: ./.github/actions/conan
+        with:
+          conan_profile: ${{ env.CONAN_PROFILE }}
+
+      - name: Run CMake
+        uses: ./.github/actions/cmake
         with:
           conan_profile: ${{ env.CONAN_PROFILE }}
.github/workflows/docs.yml (vendored, 6 changes)

@@ -14,7 +14,7 @@ jobs:
   build:
     runs-on: ubuntu-latest
     container:
-      image: ghcr.io/xrplf/clio-ci:a446d85297b3006e6d2c4dc7640368f096afecf5
+      image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d

     steps:
       - name: Checkout
@@ -23,7 +23,7 @@ jobs:
           lfs: true

       - name: Prepare runner
-        uses: ./.github/actions/prepare_runner
+        uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
         with:
           disable_ccache: true

@@ -42,7 +42,7 @@ jobs:
         uses: actions/configure-pages@v5

       - name: Upload artifact
-        uses: actions/upload-pages-artifact@v3
+        uses: actions/upload-pages-artifact@v4
         with:
           path: build_docs/html
           name: docs-develop
.github/workflows/nightly.yml (vendored, 8 changes)

@@ -39,17 +39,17 @@ jobs:
           conan_profile: gcc
           build_type: Release
           static: true
-          container: '{ "image": "ghcr.io/xrplf/clio-ci:a446d85297b3006e6d2c4dc7640368f096afecf5" }'
+          container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
         - os: heavy
           conan_profile: gcc
           build_type: Debug
           static: true
-          container: '{ "image": "ghcr.io/xrplf/clio-ci:a446d85297b3006e6d2c4dc7640368f096afecf5" }'
+          container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
         - os: heavy
           conan_profile: gcc.ubsan
           build_type: Release
           static: false
-          container: '{ "image": "ghcr.io/xrplf/clio-ci:a446d85297b3006e6d2c4dc7640368f096afecf5" }'
+          container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'

     uses: ./.github/workflows/build_and_test.yml
     with:
@@ -72,7 +72,7 @@ jobs:
       include:
         - os: heavy
           conan_profile: clang
-          container: '{ "image": "ghcr.io/xrplf/clio-ci:a446d85297b3006e6d2c4dc7640368f096afecf5" }'
+          container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
           static: true
         - os: macos15
           conan_profile: apple-clang
.github/workflows/pre-commit-autoupdate.yml (vendored, 52 changes)

@@ -4,47 +4,19 @@ on:
   # every first day of the month
   schedule:
     - cron: "0 0 1 * *"
-  # on demand
+  pull_request:
+    branches: [release/*, develop]
+    paths:
+      - ".pre-commit-config.yaml"
   workflow_dispatch:

 jobs:
   auto-update:
-    runs-on: ubuntu-latest
-
-    permissions:
-      contents: write
-      pull-requests: write
-
-    steps:
-      - uses: actions/checkout@v4
-
-      - uses: actions/setup-python@v5
-        with:
-          python-version: 3.x
-
-      - run: pip install pre-commit
-      - run: pre-commit autoupdate --freeze
-      - run: pre-commit run --all-files || true
-
-      - uses: crazy-max/ghaction-import-gpg@e89d40939c28e39f97cf32126055eeae86ba74ec # v6.3.0
-        if: github.event_name != 'pull_request'
-        with:
-          gpg_private_key: ${{ secrets.ACTIONS_GPG_PRIVATE_KEY }}
-          passphrase: ${{ secrets.ACTIONS_GPG_PASSPHRASE }}
-          git_user_signingkey: true
-          git_commit_gpgsign: true
-
-      - uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
-        if: always()
-        env:
-          GH_REPO: ${{ github.repository }}
-          GH_TOKEN: ${{ github.token }}
-        with:
-          commit-message: "style: Update pre-commit hooks"
-          committer: Clio CI <skuznetsov@ripple.com>
-          branch: update/pre-commit-hooks
-          branch-suffix: timestamp
-          delete-branch: true
-          title: "style: Update pre-commit hooks"
-          body: Update versions of pre-commit hooks to latest version.
-          reviewers: "godexsoft,kuznetsss,PeterChen13579,mathbunnyru"
+    uses: XRPLF/actions/.github/workflows/pre-commit-autoupdate.yml@afbcbdafbe0ce5439492fb87eda6441371086386
+    with:
+      sign_commit: true
+      committer: "Clio CI <skuznetsov@ripple.com>"
+      reviewers: "godexsoft,kuznetsss,PeterChen13579,mathbunnyru"
+    secrets:
+      GPG_PRIVATE_KEY: ${{ secrets.ACTIONS_GPG_PRIVATE_KEY }}
+      GPG_PASSPHRASE: ${{ secrets.ACTIONS_GPG_PASSPHRASE }}
.github/workflows/pre-commit.yml (vendored, 21 changes)

@@ -8,20 +8,7 @@ on:

 jobs:
   run-hooks:
-    runs-on: heavy
-    container:
-      image: ghcr.io/xrplf/clio-ci:a446d85297b3006e6d2c4dc7640368f096afecf5
-
-    steps:
-      - name: Checkout Repo ⚡️
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-
-      - name: Prepare runner
-        uses: ./.github/actions/prepare_runner
-        with:
-          disable_ccache: true
-
-      - name: Run pre-commit ✅
-        run: pre-commit run --all-files
+    uses: XRPLF/actions/.github/workflows/pre-commit.yml@afbcbdafbe0ce5439492fb87eda6441371086386
+    with:
+      runs_on: heavy
+      container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
.github/workflows/release.yml (vendored, 2 changes)

@@ -29,7 +29,7 @@ jobs:
           conan_profile: gcc
           build_type: Release
           static: true
-          container: '{ "image": "ghcr.io/xrplf/clio-ci:a446d85297b3006e6d2c4dc7640368f096afecf5" }'
+          container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'

     uses: ./.github/workflows/build_and_test.yml
     with:
.github/workflows/release_impl.yml (vendored, 6 changes)

@@ -42,7 +42,7 @@ jobs:
   release:
     runs-on: heavy
     container:
-      image: ghcr.io/xrplf/clio-ci:a446d85297b3006e6d2c4dc7640368f096afecf5
+      image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
     env:
       GH_REPO: ${{ github.repository }}
       GH_TOKEN: ${{ github.token }}
@@ -56,7 +56,7 @@ jobs:
           fetch-depth: 0

       - name: Prepare runner
-        uses: ./.github/actions/prepare_runner
+        uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
         with:
           disable_ccache: true

@@ -76,7 +76,7 @@ jobs:
         shell: bash
         if: ${{ inputs.generate_changelog }}
         run: |
-          LAST_TAG="$(gh release view --json tagName -q .tagName)"
+          LAST_TAG="$(gh release view --json tagName -q .tagName --repo XRPLF/clio)"
           LAST_TAG_COMMIT="$(git rev-parse $LAST_TAG)"
           BASE_COMMIT="$(git merge-base HEAD $LAST_TAG_COMMIT)"
           git-cliff "${BASE_COMMIT}..HEAD" --ignore-tags "nightly|-b|-rc"
.github/workflows/sanitizers.yml (vendored, 2 changes)

@@ -44,7 +44,7 @@ jobs:
     uses: ./.github/workflows/build_and_test.yml
     with:
       runs_on: heavy
-      container: '{ "image": "ghcr.io/xrplf/clio-ci:a446d85297b3006e6d2c4dc7640368f096afecf5" }'
+      container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
       disable_cache: true
       conan_profile: ${{ matrix.compiler }}${{ matrix.sanitizer_ext }}
       build_type: ${{ matrix.build_type }}
.github/workflows/test_impl.yml (vendored, 8 changes)

@@ -46,9 +46,9 @@ jobs:
       SANITIZER_IGNORE_ERRORS: ${{ endsWith(inputs.conan_profile, '.asan') || endsWith(inputs.conan_profile, '.tsan') }}

     steps:
-      - name: Clean workdir
+      - name: Cleanup workspace
         if: ${{ runner.os == 'macOS' }}
-        uses: kuznetsss/workspace-cleanup@80b9863b45562c148927c3d53621ef354e5ae7ce # v1.0
+        uses: XRPLF/actions/.github/actions/cleanup-workspace@ea9970b7c211b18f4c8bcdb28c29f5711752029f

       - uses: actions/checkout@v4
         with:
@@ -120,9 +120,9 @@ jobs:
           --health-retries 5

     steps:
-      - name: Clean workdir
+      - name: Cleanup workspace
         if: ${{ runner.os == 'macOS' }}
-        uses: kuznetsss/workspace-cleanup@80b9863b45562c148927c3d53621ef354e5ae7ce # v1.0
+        uses: XRPLF/actions/.github/actions/cleanup-workspace@ea9970b7c211b18f4c8bcdb28c29f5711752029f

       - name: Spin up scylladb
         if: ${{ runner.os == 'macOS' }}
.github/workflows/update_docker_ci.yml (vendored, 58 changes)

@@ -30,8 +30,8 @@ concurrency:

 env:
   CLANG_MAJOR_VERSION: 19
-  GCC_MAJOR_VERSION: 14
-  GCC_VERSION: 14.3.0
+  GCC_MAJOR_VERSION: 15
+  GCC_VERSION: 15.2.0

 jobs:
   repo:
@@ -56,7 +56,7 @@ jobs:

       - name: Get changed files
         id: changed-files
-        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
         with:
           files: "docker/compilers/gcc/**"

@@ -69,7 +69,7 @@ jobs:
         with:
           images: |
             ${{ needs.repo.outputs.GHCR_REPO }}/clio-gcc
-            rippleci/clio_gcc
+            ${{ github.repository_owner == 'XRPLF' && 'rippleci/clio_gcc' || '' }}
           push_image: ${{ github.event_name != 'pull_request' }}
           directory: docker/compilers/gcc
           tags: |
@@ -81,7 +81,7 @@ jobs:
           build_args: |
             GCC_MAJOR_VERSION=${{ env.GCC_MAJOR_VERSION }}
             GCC_VERSION=${{ env.GCC_VERSION }}
-          dockerhub_repo: rippleci/clio_gcc
+          dockerhub_repo: ${{ github.repository_owner == 'XRPLF' && 'rippleci/clio_gcc' || '' }}
           dockerhub_description: GCC compiler for XRPLF/clio.

   gcc-arm64:
@@ -94,7 +94,7 @@ jobs:

       - name: Get changed files
         id: changed-files
-        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
         with:
           files: "docker/compilers/gcc/**"

@@ -107,7 +107,7 @@ jobs:
         with:
           images: |
             ${{ needs.repo.outputs.GHCR_REPO }}/clio-gcc
-            rippleci/clio_gcc
+            ${{ github.repository_owner == 'XRPLF' && 'rippleci/clio_gcc' || '' }}
           push_image: ${{ github.event_name != 'pull_request' }}
           directory: docker/compilers/gcc
           tags: |
@@ -119,7 +119,7 @@ jobs:
           build_args: |
             GCC_MAJOR_VERSION=${{ env.GCC_MAJOR_VERSION }}
             GCC_VERSION=${{ env.GCC_VERSION }}
-          dockerhub_repo: rippleci/clio_gcc
+          dockerhub_repo: ${{ github.repository_owner == 'XRPLF' && 'rippleci/clio_gcc' || '' }}
           dockerhub_description: GCC compiler for XRPLF/clio.

   gcc-merge:
@@ -132,7 +132,7 @@ jobs:

       - name: Get changed files
         id: changed-files
-        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
         with:
           files: "docker/compilers/gcc/**"

@@ -148,7 +148,7 @@ jobs:
           password: ${{ secrets.GITHUB_TOKEN }}

       - name: Login to DockerHub
-        if: github.event_name != 'pull_request'
+        if: github.repository_owner == 'XRPLF' && github.event_name != 'pull_request'
         uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
         with:
           username: ${{ secrets.DOCKERHUB_USER }}
@@ -157,15 +157,21 @@ jobs:
       - name: Create and push multi-arch manifest
         if: github.event_name != 'pull_request' && steps.changed-files.outputs.any_changed == 'true'
         run: |
-          for image in ${{ needs.repo.outputs.GHCR_REPO }}/clio-gcc rippleci/clio_gcc; do
-            docker buildx imagetools create \
-              -t $image:latest \
-              -t $image:${{ env.GCC_MAJOR_VERSION }} \
-              -t $image:${{ env.GCC_VERSION }} \
-              -t $image:${{ github.sha }} \
-              $image:arm64-latest \
-              $image:amd64-latest
-          done
+          push_image() {
+            image=$1
+
+            docker buildx imagetools create \
+              -t $image:latest \
+              -t $image:${{ env.GCC_MAJOR_VERSION }} \
+              -t $image:${{ env.GCC_VERSION }} \
+              -t $image:${{ github.sha }} \
+              $image:arm64-latest \
+              $image:amd64-latest
+          }
+          push_image ${{ needs.repo.outputs.GHCR_REPO }}/clio-gcc
+          if [[ ${{ github.repository_owner }} == 'XRPLF' ]]; then
+            push_image rippleci/clio_clang
+          fi

   clang:
     name: Build and push Clang docker image
@@ -177,7 +183,7 @@ jobs:

       - name: Get changed files
         id: changed-files
-        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
         with:
           files: "docker/compilers/clang/**"

@@ -190,7 +196,7 @@ jobs:
         with:
           images: |
             ${{ needs.repo.outputs.GHCR_REPO }}/clio-clang
-            rippleci/clio_clang
+            ${{ github.repository_owner == 'XRPLF' && 'rippleci/clio_clang' || '' }}
           push_image: ${{ github.event_name != 'pull_request' }}
           directory: docker/compilers/clang
           tags: |
@@ -200,7 +206,7 @@ jobs:
           platforms: linux/amd64,linux/arm64
           build_args: |
             CLANG_MAJOR_VERSION=${{ env.CLANG_MAJOR_VERSION }}
-          dockerhub_repo: rippleci/clio_clang
+          dockerhub_repo: ${{ github.repository_owner == 'XRPLF' && 'rippleci/clio_clang' || '' }}
           dockerhub_description: Clang compiler for XRPLF/clio.

   tools-amd64:
@@ -213,7 +219,7 @@ jobs:

       - name: Get changed files
         id: changed-files
-        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
         with:
           files: "docker/tools/**"

@@ -244,7 +250,7 @@ jobs:

       - name: Get changed files
         id: changed-files
-        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
         with:
           files: "docker/tools/**"

@@ -275,7 +281,7 @@ jobs:

       - name: Get changed files
         id: changed-files
-        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
         with:
           files: "docker/tools/**"

@@ -315,7 +321,7 @@ jobs:
         with:
           images: |
             ${{ needs.repo.outputs.GHCR_REPO }}/clio-ci
-            rippleci/clio_ci
+            ${{ github.repository_owner == 'XRPLF' && 'rippleci/clio_ci' || '' }}
           push_image: ${{ github.event_name != 'pull_request' }}
           directory: docker/ci
           tags: |
@@ -328,5 +334,5 @@ jobs:
             CLANG_MAJOR_VERSION=${{ env.CLANG_MAJOR_VERSION }}
             GCC_MAJOR_VERSION=${{ env.GCC_MAJOR_VERSION }}
             GCC_VERSION=${{ env.GCC_VERSION }}
-          dockerhub_repo: rippleci/clio_ci
+          dockerhub_repo: ${{ github.repository_owner == 'XRPLF' && 'rippleci/clio_ci' || '' }}
           dockerhub_description: CI image for XRPLF/clio.
.github/workflows/upload_conan_deps.yml (vendored, 23 changes)

@@ -20,10 +20,8 @@ on:
     paths:
       - .github/workflows/upload_conan_deps.yml

-      - .github/actions/generate/action.yml
-      - .github/actions/prepare_runner/action.yml
+      - .github/actions/conan/action.yml
       - ".github/scripts/conan/**"
-      - "!.github/scripts/conan/apple-clang-local.profile"

       - conanfile.py
       - conan.lock
@@ -32,10 +30,8 @@ on:
     paths:
      - .github/workflows/upload_conan_deps.yml

-      - .github/actions/generate/action.yml
-      - .github/actions/prepare_runner/action.yml
+      - .github/actions/conan/action.yml
       - ".github/scripts/conan/**"
-      - "!.github/scripts/conan/apple-clang-local.profile"

       - conanfile.py
       - conan.lock
@@ -64,6 +60,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
+      max-parallel: 10

     runs-on: ${{ matrix.os }}
     container: ${{ matrix.container != '' && fromJson(matrix.container) || null }}
@@ -75,7 +72,7 @@ jobs:
       - uses: actions/checkout@v4

       - name: Prepare runner
-        uses: ./.github/actions/prepare_runner
+        uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
         with:
           disable_ccache: true

@@ -87,8 +84,8 @@ jobs:
       - name: Show conan profile
         run: conan profile show --profile:all ${{ env.CONAN_PROFILE }}

-      - name: Run conan and cmake
-        uses: ./.github/actions/generate
+      - name: Run conan
+        uses: ./.github/actions/conan
         with:
           conan_profile: ${{ env.CONAN_PROFILE }}
           # We check that everything builds fine from source on scheduled runs
@@ -97,9 +94,9 @@ jobs:
           build_type: ${{ matrix.build_type }}

       - name: Login to Conan
-        if: github.event_name != 'pull_request'
-        run: conan remote login -p ${{ secrets.CONAN_PASSWORD }} ripple ${{ secrets.CONAN_USERNAME }}
+        if: github.repository_owner == 'XRPLF' && github.event_name != 'pull_request'
+        run: conan remote login -p ${{ secrets.CONAN_PASSWORD }} xrplf ${{ secrets.CONAN_USERNAME }}

       - name: Upload Conan packages
-        if: github.event_name != 'pull_request' && github.event_name != 'schedule'
-        run: conan upload "*" -r=ripple --confirm ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }}
+        if: github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' && github.event_name != 'schedule'
+        run: conan upload "*" -r=xrplf --confirm ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }}
.github/workflows/upload_coverage_report.yml (vendored, 2 changes)

@@ -25,7 +25,7 @@ jobs:

       - name: Upload coverage report
         if: ${{ hashFiles('build/coverage_report.xml') != '' }}
-        uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3
+        uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
         with:
           files: build/coverage_report.xml
           fail_ci_if_error: true
@@ -16,7 +16,7 @@ exclude: ^(docs/doxygen-awesome-theme/|conan\.lock$)
 repos:
   # `pre-commit sample-config` default hooks
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: cef0300fd0fc4d2a87a85fa2093c6b283ea36f4b # frozen: v5.0.0
+    rev: 3e8a8703264a2f4a69428a0aa4dcb512790b2c8c # frozen: v6.0.0
     hooks:
       - id: check-added-large-files
       - id: check-executables-have-shebangs
@@ -55,12 +55,6 @@ repos:
           --ignore-words=pre-commit-hooks/codespell_ignore.txt,
         ]

-  - repo: https://github.com/trufflesecurity/trufflehog
-    rev: a05cf0859455b5b16317ee22d809887a4043cdf0 # frozen: v3.90.2
-    hooks:
-      - id: trufflehog
-        entry: trufflehog git file://. --since-commit HEAD --max-depth=1 --no-verification --fail
-
   # Running some C++ hooks before clang-format
   # to ensure that the style is consistent.
   - repo: local
@@ -86,7 +80,7 @@ repos:
         language: script

   - repo: https://github.com/pre-commit/mirrors-clang-format
-    rev: 182152eb8c5ce1cf5299b956b04392c86bd8a126 # frozen: v20.1.8
+    rev: 86fdcc9bd34d6afbbd29358b97436c8ffe3aa3b2 # frozen: v21.1.0
     hooks:
       - id: clang-format
         args: [--style=file]
@@ -11,12 +11,11 @@ option(integration_tests "Build integration tests" FALSE)
 option(benchmark "Build benchmarks" FALSE)
 option(docs "Generate doxygen docs" FALSE)
 option(coverage "Build test coverage report" FALSE)
-option(packaging "Create distribution packages" FALSE)
+option(package "Create distribution packages" FALSE)
 option(lint "Run clang-tidy checks during compilation" FALSE)
 option(static "Statically linked Clio" FALSE)
 option(snapshot "Build snapshot tool" FALSE)
 option(time_trace "Build using -ftime-trace to create compiler trace reports" FALSE)
-option(use_mold "Use mold linker" FALSE)

 # ========================================================================== #
 set(san "" CACHE STRING "Add sanitizer instrumentation")
@@ -40,11 +39,6 @@ if (verbose)
   set(CMAKE_VERBOSE_MAKEFILE TRUE)
 endif ()

-if (packaging)
-  add_definitions(-DPKG=1)
-  target_compile_definitions(clio_options INTERFACE PKG=1)
-endif ()
-
 # Clio tweaks and checks
 include(CheckCompiler)
 include(Settings)
@@ -58,6 +52,7 @@ include(deps/Threads)
 include(deps/libfmt)
 include(deps/cassandra)
 include(deps/libbacktrace)
+include(deps/spdlog)

 add_subdirectory(src)
 add_subdirectory(tests)
@@ -93,8 +88,8 @@ if (docs)
 endif ()

 include(install/install)
-if (packaging)
-  include(cmake/packaging.cmake) # This file exists only in build runner
+if (package)
+  include(ClioPackage)
 endif ()

 if (snapshot)
@@ -14,5 +14,5 @@ target_sources(
 include(deps/gbench)

 target_include_directories(clio_benchmark PRIVATE .)
-target_link_libraries(clio_benchmark PUBLIC clio_util benchmark::benchmark_main)
+target_link_libraries(clio_benchmark PUBLIC clio_util benchmark::benchmark_main spdlog::spdlog)
 set_target_properties(clio_benchmark PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})
@@ -22,26 +22,36 @@
 #include "util/prometheus/Prometheus.hpp"

 #include <benchmark/benchmark.h>
-#include <boost/log/core/core.hpp>
-#include <boost/log/utility/setup/common_attributes.hpp>
 #include <fmt/format.h>
+#include <spdlog/async.h>
+#include <spdlog/async_logger.h>
+#include <spdlog/spdlog.h>

 #include <barrier>
 #include <chrono>
 #include <cstddef>
 #include <filesystem>
+#include <memory>
 #include <string>
 #include <thread>
+#include <utility>
 #include <vector>

 using namespace util;

 struct BenchmarkLoggingInitializer {
-    static constexpr auto kLOG_FORMAT = "%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% %Message%";
+    static constexpr auto kLOG_FORMAT = "%Y-%m-%d %H:%M:%S.%f %^%3!l:%n%$ - %v";

-    static void
-    initFileLogging(LogService::FileLoggingParams const& params)
+    [[nodiscard]] static std::shared_ptr<spdlog::sinks::sink>
+    createFileSink(LogService::FileLoggingParams const& params)
     {
-        LogService::initFileLogging(params, kLOG_FORMAT);
+        return LogService::createFileSink(params, kLOG_FORMAT);
     }
+
+    static Logger
+    getLogger(std::shared_ptr<spdlog::logger> logger)
+    {
+        return Logger(std::move(logger));
+    }
 };

@@ -53,7 +63,7 @@ uniqueLogDir()
     auto const epochTime = std::chrono::high_resolution_clock::now().time_since_epoch();
     auto const tmpDir = std::filesystem::temp_directory_path();
     std::string const dirName =
-        "logs_" + std::to_string(std::chrono::duration_cast<std::chrono::microseconds>(epochTime).count());
+        fmt::format("logs_{}", std::chrono::duration_cast<std::chrono::microseconds>(epochTime).count());
     return tmpDir / "clio_benchmark" / dirName;
 }

@@ -62,8 +72,6 @@ uniqueLogDir()
 static void
 benchmarkConcurrentFileLogging(benchmark::State& state)
 {
-    boost::log::add_common_attributes();
-
     auto const numThreads = static_cast<size_t>(state.range(0));
     auto const messagesPerThread = static_cast<size_t>(state.range(1));

@@ -74,12 +82,14 @@ benchmarkConcurrentFileLogging(benchmark::State& state)
         state.PauseTiming();

         std::filesystem::create_directories(logDir);
+        static constexpr size_t kQUEUE_SIZE = 8192;
+        static constexpr size_t kTHREAD_COUNT = 1;
+        spdlog::init_thread_pool(kQUEUE_SIZE, kTHREAD_COUNT);

-        BenchmarkLoggingInitializer::initFileLogging({
+        auto fileSink = BenchmarkLoggingInitializer::createFileSink({
             .logDir = logDir,
             .rotationSizeMB = 5,
             .dirMaxSizeMB = 125,
             .rotationHours = 24,
             .dirMaxFiles = 25,
         });

         std::vector<std::thread> threads;
@@ -92,10 +102,15 @@ benchmarkConcurrentFileLogging(benchmark::State& state)
         });

         for (size_t threadNum = 0; threadNum < numThreads; ++threadNum) {
-            threads.emplace_back([threadNum, messagesPerThread, &barrier]() {
-                barrier.arrive_and_wait();
+            threads.emplace_back([threadNum, messagesPerThread, fileSink, &barrier]() {
+                std::string const channel = fmt::format("Thread_{}", threadNum);
+                auto logger = std::make_shared<spdlog::async_logger>(
+                    channel, fileSink, spdlog::thread_pool(), spdlog::async_overflow_policy::block
+                );
+                spdlog::register_logger(logger);
+                Logger const threadLogger = BenchmarkLoggingInitializer::getLogger(std::move(logger));

-                Logger const threadLogger("Thread_" + std::to_string(threadNum));
+                barrier.arrive_and_wait();

                 for (size_t messageNum = 0; messageNum < messagesPerThread; ++messageNum) {
                     LOG(threadLogger.info()) << "Test log message #" << messageNum;
@@ -106,10 +121,9 @@ benchmarkConcurrentFileLogging(benchmark::State& state)
         for (auto& thread : threads) {
             thread.join();
         }
-        boost::log::core::get()->flush();
+        spdlog::shutdown();

         auto const end = std::chrono::high_resolution_clock::now();

         state.SetIterationTime(std::chrono::duration_cast<std::chrono::duration<double>>(end - start).count());

         std::filesystem::remove_all(logDir);
@@ -129,7 +143,7 @@ BENCHMARK(benchmarkConcurrentFileLogging)
         // Number of threads
         {1, 2, 4, 8},
         // Messages per thread
-        {10'000, 100'000, 500'000},
+        {10'000, 100'000, 500'000, 1'000'000, 10'000'000},
     })
     ->UseManualTime()
     ->Unit(benchmark::kMillisecond);
cmake/ClioPackage.cmake (new file, 8 lines)

@@ -0,0 +1,8 @@
+include("${CMAKE_CURRENT_LIST_DIR}/ClioVersion.cmake")
+
+set(CPACK_PACKAGING_INSTALL_PREFIX "/opt/clio")
+set(CPACK_PACKAGE_VERSION "${CLIO_VERSION}")
+set(CPACK_STRIP_FILES TRUE)
+
+include(pkg/deb)
+include(CPack)
@@ -6,15 +6,17 @@ execute_process(
   WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
   OUTPUT_VARIABLE TAG
   RESULT_VARIABLE RC
-  OUTPUT_STRIP_TRAILING_WHITESPACE
+  ERROR_VARIABLE ERR
+  OUTPUT_STRIP_TRAILING_WHITESPACE ERROR_STRIP_TRAILING_WHITESPACE
 )

 if (RC EQUAL 0)
   # if we are on a tag, use the tag name
   message(STATUS "Found tag '${TAG}' in git. Will use it as Clio version")
   set(CLIO_VERSION "${TAG}")
   set(DOC_CLIO_VERSION "${TAG}")
 else ()
   # if not, use YYYYMMDDHMS-<branch>-<git-rev>
+  message(STATUS "Error finding tag in git: ${ERR}")
   message(STATUS "Will use 'YYYYMMDDHMS-<branch>-<git-rev>' as Clio version")

   set(GIT_COMMAND show -s --date=format:%Y%m%d%H%M%S --format=%cd)
   execute_process(
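When no exact tag is found, the version falls back to YYYYMMDDHMS-<branch>-<git-rev>. A bash sketch assembling the same string by hand (the branch and rev commands are assumptions; the hunk only shows the date command):

    date_part="$(git show -s --date=format:%Y%m%d%H%M%S --format=%cd)"
    branch="$(git rev-parse --abbrev-ref HEAD)"    # assumed helper, not shown in the diff
    rev="$(git rev-parse --short HEAD)"            # assumed helper, not shown in the diff
    echo "${date_part}-${branch}-${rev}"           # e.g. 20251104200551-develop-9420c506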
@@ -1,8 +1,11 @@
-if (use_mold)
-  if (CMAKE_SYSTEM_NAME STREQUAL "Linux")
-    message(STATUS "Using Mold linker")
-    set(CMAKE_LINKER_TYPE MOLD)
-  else ()
-    message(FATAL_ERROR "Mold linker is only supported on Linux.")
-  endif ()
+if (DEFINED CMAKE_LINKER_TYPE)
+  message(STATUS "Custom linker is already set: ${CMAKE_LINKER_TYPE}")
+  return()
+endif ()
+
+find_program(MOLD_PATH mold)
+
+if (MOLD_PATH AND CMAKE_SYSTEM_NAME STREQUAL "Linux")
+  message(STATUS "Using Mold linker: ${MOLD_PATH}")
+  set(CMAKE_LINKER_TYPE MOLD)
 endif ()
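With the hard failure gone, a build silently falls back to the default linker when mold is absent. One way to check what actually linked the binary (a sketch; mold normally records itself in the ELF .comment section):

    command -v mold                                   # is mold on PATH at all?
    readelf -p .comment build/clio_server | grep -i mold || echo "linked without mold"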
cmake/deps/spdlog.cmake (new file, 5 lines)

@@ -0,0 +1,5 @@
+find_package(spdlog REQUIRED)
+
+if (NOT TARGET spdlog::spdlog)
+  message(FATAL_ERROR "spdlog::spdlog target not found")
+endif ()
@@ -1,17 +0,0 @@
-[Unit]
-Description=Clio XRPL API server
-Documentation=https://github.com/XRPLF/clio.git
-
-After=network-online.target
-Wants=network-online.target
-
-[Service]
-Type=simple
-ExecStart=@CLIO_INSTALL_DIR@/bin/clio_server @CLIO_INSTALL_DIR@/etc/config.json
-Restart=on-failure
-User=clio
-Group=clio
-LimitNOFILE=65536
-
-[Install]
-WantedBy=multi-user.target
@@ -1,13 +1,13 @@
 set(CLIO_INSTALL_DIR "/opt/clio")
-set(CMAKE_INSTALL_PREFIX ${CLIO_INSTALL_DIR})
+set(CMAKE_INSTALL_PREFIX "${CLIO_INSTALL_DIR}" CACHE PATH "Install prefix" FORCE)
+set(CPACK_PACKAGING_INSTALL_PREFIX "${CMAKE_INSTALL_PREFIX}")

-install(TARGETS clio_server DESTINATION bin)
+include(GNUInstallDirs)
+
+install(TARGETS clio_server DESTINATION "${CMAKE_INSTALL_BINDIR}")

 file(READ docs/examples/config/example-config.json config)
 string(REGEX REPLACE "./clio_log" "/var/log/clio/" config "${config}")
 file(WRITE ${CMAKE_BINARY_DIR}/install-config.json "${config}")
 install(FILES ${CMAKE_BINARY_DIR}/install-config.json DESTINATION etc RENAME config.json)

 configure_file("${CMAKE_SOURCE_DIR}/cmake/install/clio.service.in" "${CMAKE_BINARY_DIR}/clio.service")

 install(FILES "${CMAKE_BINARY_DIR}/clio.service" DESTINATION /lib/systemd/system)
12
cmake/pkg/deb.cmake
Normal file
12
cmake/pkg/deb.cmake
Normal file
@@ -0,0 +1,12 @@
set(CPACK_GENERATOR "DEB")
set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://github.com/XRPLF/clio")
set(CPACK_DEBIAN_PACKAGE_MAINTAINER "Ripple Labs Inc. <support@ripple.com>")

set(CPACK_DEBIAN_FILE_NAME DEB-DEFAULT)

set(CPACK_DEBIAN_PACKAGE_SHLIBDEPS ON)

set(CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA ${CMAKE_SOURCE_DIR}/cmake/pkg/postinst)

# We must replace "-" with "~" otherwise dpkg will sort "X.Y.Z-b1" as greater than "X.Y.Z"
string(REPLACE "-" "~" git "${CPACK_PACKAGE_VERSION}")
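The tilde trick in the comment above can be verified with dpkg's own version comparator (standard `dpkg --compare-versions` usage, shown here only as a sanity check; the version strings are illustrative):

```sh
# "~" sorts before anything, so a pre-release correctly precedes the release...
dpkg --compare-versions "2.6.0~b1" lt "2.6.0" && echo "tilde: beta < release"
# ...while "-" would make the pre-release sort after it
dpkg --compare-versions "2.6.0-b1" gt "2.6.0" && echo "dash: beta > release"
```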
cmake/pkg/postinst (Executable file, 46 lines)
@@ -0,0 +1,46 @@
#!/bin/sh

set -e

USER_NAME=clio
GROUP_NAME="${USER_NAME}"
CLIO_EXECUTABLE="clio_server"
CLIO_PREFIX="/opt/clio"
CLIO_BIN="$CLIO_PREFIX/bin/${CLIO_EXECUTABLE}"
CLIO_CONFIG="$CLIO_PREFIX/etc/config.json"

case "$1" in
configure)
    if ! id -u "$USER_NAME" >/dev/null 2>&1; then
        # Users who should not have a home directory should have their home directory set to /nonexistent
        # https://www.debian.org/doc/debian-policy/ch-opersys.html#non-existent-home-directories
        useradd \
            --system \
            --home-dir /nonexistent \
            --no-create-home \
            --shell /usr/sbin/nologin \
            --comment "system user for ${CLIO_EXECUTABLE}" \
            --user-group \
            ${USER_NAME}
    fi

    install -d -o "$USER_NAME" -g "$GROUP_NAME" /var/log/clio

    if [ -f "$CLIO_CONFIG" ]; then
        chown "$USER_NAME:$GROUP_NAME" "$CLIO_CONFIG"
    fi

    chown -R "$USER_NAME:$GROUP_NAME" "$CLIO_PREFIX"

    ln -sf "$CLIO_BIN" "/usr/bin/${CLIO_EXECUTABLE}"

    ;;
abort-upgrade|abort-remove|abort-deconfigure)
    ;;
*)
    echo "postinst called with unknown argument \`$1'" >&2
    exit 1
    ;;
esac

exit 0
conan.lock (58 lines)
@@ -1,56 +1,60 @@
{
    "version": "0.5",
    "requires": [
        "zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1754412218.488",
        "xxhash/0.8.2#7856c968c985b2981b707ee8f2413b2b%1754325010.01",
        "xrpl/2.5.0@clio/boost-odr#f68e48da1490c0a583052e4f068ada55%1754325014.392",
        "sqlite3/3.47.0#7a0904fd061f5f8a2366c294f9387830%1754325009.708",
        "soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1754412158.144",
        "re2/20230301#dfd6e2bf050eb90ddd8729cfb4c844a4%1754412148.014",
        "zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1756234269.497",
        "xxhash/0.8.3#681d36a0a6111fc56e5e45ea182c19cc%1756234289.683",
        "xrpl/2.6.0#57b93b5a6c99dc8511fccb3bb5390352%1756820296.642",
        "sqlite3/3.49.1#8631739a4c9b93bd3d6b753bac548a63%1756234266.869",
        "spdlog/1.15.3#3ca0e9e6b83af4d0151e26541d140c86%1754401846.61",
        "soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1756234262.318",
        "re2/20230301#dfd6e2bf050eb90ddd8729cfb4c844a4%1756234257.976",
        "rapidjson/cci.20220822#1b9d8c2256876a154172dc5cfbe447c6%1754325007.656",
        "protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1754412120.055",
        "openssl/1.1.1v#216374e4fb5b2e0f5ab1fb6f27b5b434%1754325006.553",
        "nudb/2.0.8#63990d3e517038e04bf529eb8167f69f%1754325004.398",
        "protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1756234251.614",
        "openssl/1.1.1w#a8f0792d7c5121b954578a7149d23e03%1756223730.729",
        "nudb/2.0.9#c62cfd501e57055a7e0d8ee3d5e5427d%1756234237.107",
        "minizip/1.2.13#9e87d57804bd372d6d1e32b1871517a3%1754325004.374",
        "lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1754412069.24",
        "lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1756234228.999",
        "libuv/1.46.0#dc28c1f653fa197f00db5b577a6f6011%1754325003.592",
        "libiconv/1.17#1e65319e945f2d31941a9d28cc13c058%1754325002.385",
        "libbacktrace/cci.20210118#a7691bfccd8caaf66309df196790a5a1%1754410401.723",
        "libarchive/3.7.6#7e902bb4ac1d20504fb330c0dd040192%1754325001.673",
        "libiconv/1.17#1e65319e945f2d31941a9d28cc13c058%1756223727.64",
        "libbacktrace/cci.20210118#a7691bfccd8caaf66309df196790a5a1%1756230911.03",
        "libarchive/3.8.1#5cf685686322e906cb42706ab7e099a8%1756234256.696",
        "http_parser/2.9.4#98d91690d6fd021e9e624218a85d9d97%1754325001.385",
        "gtest/1.14.0#f8f0757a574a8dd747d16af62d6eb1b7%1754325000.842",
        "grpc/1.50.1#02291451d1e17200293a409410d1c4e1%1754412166.358",
        "grpc/1.50.1#02291451d1e17200293a409410d1c4e1%1756234248.958",
        "fmt/11.2.0#579bb2cdf4a7607621beea4eb4651e0f%1754324999.086",
        "date/3.0.3#cf28fe9c0aab99fe12da08aa42df65e1%1754324996.727",
        "doctest/2.4.11#a4211dfc329a16ba9f280f9574025659%1756234220.819",
        "date/3.0.4#f74bbba5a08fa388256688743136cb6f%1756234217.493",
        "cassandra-cpp-driver/2.17.0#e50919efac8418c26be6671fd702540a%1754324997.363",
        "c-ares/1.34.5#b78b91e7cfb1f11ce777a285bbf169c6%1754412042.679",
        "bzip2/1.0.8#00b4a4658791c1f06914e087f0e792f5%1754412214.559",
        "c-ares/1.34.5#b78b91e7cfb1f11ce777a285bbf169c6%1756234217.915",
        "bzip2/1.0.8#00b4a4658791c1f06914e087f0e792f5%1756234261.716",
        "boost/1.83.0#5d975011d65b51abb2d2f6eb8386b368%1754325043.336",
        "benchmark/1.9.4#ce4403f7a24d3e1f907cd9da4b678be4%1749892625.885",
        "abseil/20230802.1#f0f91485b111dc9837a68972cb19ca7b%1754412054.336"
        "benchmark/1.9.4#ce4403f7a24d3e1f907cd9da4b678be4%1754578869.672",
        "abseil/20230802.1#f0f91485b111dc9837a68972cb19ca7b%1756234220.907"
    ],
    "build_requires": [
        "zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1754412218.488",
        "protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1754412120.055",
        "protobuf/3.21.9#64ce20e1d9ea24f3d6c504015d5f6fa8%1754325048.831",
        "cmake/3.31.8#dde3bde00bb843687e55aea5afa0e220%1754412060.968",
        "b2/5.3.3#107c15377719889654eb9a162a673975%1754412065.321"
        "zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1756234269.497",
        "protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1756234251.614",
        "cmake/3.31.8#dde3bde00bb843687e55aea5afa0e220%1756234232.89",
        "b2/5.3.3#107c15377719889654eb9a162a673975%1756234226.28"
    ],
    "python_requires": [],
    "overrides": {
        "boost/1.83.0": [
            null,
            "boost/1.83.0#5d975011d65b51abb2d2f6eb8386b368"
            "boost/1.83.0"
        ],
        "protobuf/3.21.9": [
        "protobuf/3.21.12": [
            null,
            "protobuf/3.21.12"
        ],
        "boost/1.86.0": [
            "boost/1.83.0#5d975011d65b51abb2d2f6eb8386b368"
        ],
        "lz4/1.9.4": [
            "lz4/1.10.0"
        ],
        "sqlite3/3.44.2": [
            "sqlite3/3.47.0"
            "sqlite3/3.49.1"
        ]
    },
    "config_requires": []
conanfile.py (43 lines)
@@ -9,20 +9,7 @@ class ClioConan(ConanFile):
    url = 'https://github.com/xrplf/clio'
    description = 'Clio RPC server'
    settings = 'os', 'compiler', 'build_type', 'arch'
    options = {
        'static': [True, False],  # static linkage
        'verbose': [True, False],
        'tests': [True, False],  # build unit tests; create `clio_tests` binary
        'integration_tests': [True, False],  # build integration tests; create `clio_integration_tests` binary
        'benchmark': [True, False],  # build benchmarks; create `clio_benchmarks` binary
        'docs': [True, False],  # doxygen API docs; create custom target 'docs'
        'packaging': [True, False],  # create distribution packages
        'coverage': [True, False],  # build for test coverage report; create custom target `clio_tests-ccov`
        'lint': [True, False],  # run clang-tidy checks during compilation
        'snapshot': [True, False],  # build export/import snapshot tool
        'time_trace': [True, False],  # build using -ftime-trace to create compiler trace reports
        'use_mold': [True, False],  # use mold linker for faster linking
    }
    options = {}

    requires = [
        'boost/1.83.0',
@@ -30,26 +17,14 @@ class ClioConan(ConanFile):
        'fmt/11.2.0',
        'protobuf/3.21.12',
        'grpc/1.50.1',
        'openssl/1.1.1v',
        'xrpl/2.5.0@clio/boost-odr',
        'openssl/1.1.1w',
        'xrpl/2.6.0',
        'zlib/1.3.1',
        'libbacktrace/cci.20210118'
        'libbacktrace/cci.20210118',
        'spdlog/1.15.3',
    ]

    default_options = {
        'static': False,
        'verbose': False,
        'tests': False,
        'integration_tests': False,
        'benchmark': False,
        'packaging': False,
        'coverage': False,
        'lint': False,
        'docs': False,
        'snapshot': False,
        'time_trace': False,
        'use_mold': False,

        'xrpl/*:tests': False,
        'xrpl/*:rocksdb': False,
        'cassandra-cpp-driver/*:shared': False,
@@ -70,10 +45,8 @@ class ClioConan(ConanFile):
    )

    def requirements(self):
        if self.options.tests or self.options.integration_tests:
            self.requires('gtest/1.14.0')
        if self.options.benchmark:
            self.requires('benchmark/1.9.4')
        self.requires('gtest/1.14.0')
        self.requires('benchmark/1.9.4')

    def configure(self):
        if self.settings.compiler == 'apple-clang':
@@ -89,8 +62,6 @@ class ClioConan(ConanFile):

    def generate(self):
        tc = CMakeToolchain(self)
        for option_name, option_value in self.options.items():
            tc.variables[option_name] = option_value
        tc.generate()

    def build(self):
@@ -20,43 +20,60 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"]
USER root
WORKDIR /root

ARG LLVM_TOOLS_VERSION=20

# Add repositories
# Install common tools and dependencies
RUN apt-get update \
    && apt-get install -y --no-install-recommends --no-install-suggests \
        curl \
        dpkg-dev \
        file \
        git \
        git-lfs \
        gnupg \
        graphviz \
        jq \
        # libgmp, libmpfr and libncurses are gdb dependencies
        libgmp-dev \
        libmpfr-dev \
        libncurses-dev \
        make \
        ninja-build \
        wget \
        zip \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# Install Python tools
ARG PYTHON_VERSION=3.13

RUN add-apt-repository ppa:deadsnakes/ppa \
    && apt-get update \
    && apt-get install -y --no-install-recommends --no-install-suggests \
        python${PYTHON_VERSION} \
        python${PYTHON_VERSION}-venv \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* \
    && echo "deb http://apt.llvm.org/focal/ llvm-toolchain-focal-${LLVM_TOOLS_VERSION} main" >> /etc/apt/sources.list \
    && curl -sS https://bootstrap.pypa.io/get-pip.py | python${PYTHON_VERSION}

# Create a virtual environment for python tools
RUN python${PYTHON_VERSION} -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

RUN pip install -q --no-cache-dir \
    cmake \
    conan==2.20.1 \
    gcovr \
    pre-commit

# Install LLVM tools
ARG LLVM_TOOLS_VERSION=20

RUN echo "deb http://apt.llvm.org/focal/ llvm-toolchain-focal-${LLVM_TOOLS_VERSION} main" >> /etc/apt/sources.list \
    && wget --progress=dot:giga -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add -

# Install packages
RUN apt-get update \
    && apt-get install -y --no-install-recommends --no-install-suggests \
        clang-tidy-${LLVM_TOOLS_VERSION} \
        clang-tools-${LLVM_TOOLS_VERSION} \
        git \
        git-lfs \
        graphviz \
        jq \
        make \
        ninja-build \
        python3 \
        python3-pip \
        zip \
    && pip3 install -q --upgrade --no-cache-dir pip \
    && pip3 install -q --no-cache-dir \
        # TODO: Remove this once we switch to newer Ubuntu base image
        # lxml 6.0.0 is not compatible with our image
        'lxml<6.0.0' \
        \
        cmake \
        conan==2.17.0 \
        gcovr \
        pre-commit \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
@@ -92,12 +109,13 @@ COPY --from=clio-tools \
    /usr/local/bin/ClangBuildAnalyzer \
    /usr/local/bin/git-cliff \
    /usr/local/bin/gh \
    /usr/local/bin/gdb \
    /usr/local/bin/

WORKDIR /root

# Setup conan
RUN conan remote add --index 0 ripple https://conan.ripplex.io
RUN conan remote add --index 0 xrplf https://conan.ripplex.io

WORKDIR /root/.conan2
COPY conan/global.conf ./global.conf
@@ -8,12 +8,14 @@ The image is based on Ubuntu 20.04 and contains:
- ccache 4.11.3
- Clang 19
- ClangBuildAnalyzer 1.6.0
- Conan 2.17.0
- Conan 2.20.1
- Doxygen 1.12
- GCC 14.3.0
- GCC 15.2.0
- GDB 16.3
- gh 2.74
- git-cliff 2.9.1
- mold 2.40.1
- Python 3.13
- and some other useful tools

Conan is set up to build Clio without any additional steps.
@@ -4,8 +4,8 @@ build_type=Release
compiler=gcc
compiler.cppstd=20
compiler.libcxx=libstdc++11
compiler.version=14
compiler.version=15
os=Linux

[conf]
tools.build:compiler_executables={"c": "/usr/bin/gcc-14", "cpp": "/usr/bin/g++-14"}
tools.build:compiler_executables={"c": "/usr/bin/gcc-15", "cpp": "/usr/bin/g++-15"}
@@ -13,8 +13,8 @@ Clio repository provides an [example](https://github.com/XRPLF/clio/blob/develop

Config file recommendations:

- Set `log_to_console` to `false` if you want to avoid logs being written to `stdout`.
- Set `log_directory` to `/opt/clio/log` to store logs in a volume.
- Set `log.enable_console` to `false` if you want to avoid logs being written to `stdout`.
- Set `log.directory` to `/opt/clio/log` to store logs in a volume.

## Usage
@@ -18,7 +18,7 @@ RUN apt-get update \

ARG CLANG_MAJOR_VERSION=invalid
# Bump this version to force rebuild of the image
ARG BUILD_VERSION=0
ARG BUILD_VERSION=1

RUN wget --progress=dot:giga https://apt.llvm.org/llvm.sh \
    && chmod +x llvm.sh \
@@ -8,7 +8,7 @@ ARG UBUNTU_VERSION

ARG GCC_MAJOR_VERSION

ARG BUILD_VERSION=0
ARG BUILD_VERSION=1

ARG DEBIAN_FRONTEND=noninteractive
ARG TARGETARCH
@@ -1,4 +1,4 @@
Package: gcc-14-ubuntu-UBUNTUVERSION
Package: gcc-15-ubuntu-UBUNTUVERSION
Version: VERSION
Architecture: TARGETARCH
Maintainer: Alex Kremer <akremer@ripple.com>
@@ -1,6 +1,6 @@
services:
  clio_develop:
    image: ghcr.io/xrplf/clio-ci:a446d85297b3006e6d2c4dc7640368f096afecf5
    image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
    volumes:
      - clio_develop_conan_data:/root/.conan2/p
      - clio_develop_ccache:/root/.ccache
@@ -8,12 +8,10 @@ ARG TARGETARCH

SHELL ["/bin/bash", "-o", "pipefail", "-c"]

ARG BUILD_VERSION=1
ARG BUILD_VERSION=2

RUN apt-get update \
    && apt-get install -y --no-install-recommends --no-install-suggests \
        bison \
        flex \
        ninja-build \
        python3 \
        python3-pip \
@@ -46,6 +44,13 @@ RUN wget --progress=dot:giga "https://github.com/ccache/ccache/releases/download
    && ninja install \
    && rm -rf /tmp/* /var/tmp/*

RUN apt-get update \
    && apt-get install -y --no-install-recommends --no-install-suggests \
        bison \
        flex \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

ARG DOXYGEN_VERSION=1.12.0
RUN wget --progress=dot:giga "https://github.com/doxygen/doxygen/releases/download/Release_${DOXYGEN_VERSION//./_}/doxygen-${DOXYGEN_VERSION}.src.tar.gz" \
    && tar xf "doxygen-${DOXYGEN_VERSION}.src.tar.gz" \
@@ -78,4 +83,22 @@ RUN wget --progress=dot:giga "https://github.com/cli/cli/releases/download/v${GH
    && mv gh_${GH_VERSION}_linux_${TARGETARCH}/bin/gh /usr/local/bin/gh \
    && rm -rf /tmp/* /var/tmp/*

RUN apt-get update \
    && apt-get install -y --no-install-recommends --no-install-suggests \
        libgmp-dev \
        libmpfr-dev \
        libncurses-dev \
        make \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

ARG GDB_VERSION=16.3
RUN wget --progress=dot:giga "https://sourceware.org/pub/gdb/releases/gdb-${GDB_VERSION}.tar.gz" \
    && tar xf "gdb-${GDB_VERSION}.tar.gz" \
    && cd "gdb-${GDB_VERSION}" \
    && ./configure --prefix=/usr/local \
    && make -j "$(nproc)" \
    && make install-gdb \
    && rm -rf /tmp/* /var/tmp/*

WORKDIR /root
@@ -6,16 +6,22 @@
## Minimum Requirements

- [Python 3.7](https://www.python.org/downloads/)
- [Conan 2.17.0](https://conan.io/downloads.html)
- [Conan 2.20.1](https://conan.io/downloads.html)
- [CMake 3.20](https://cmake.org/download/)
- [**Optional**] [GCovr](https://gcc.gnu.org/onlinedocs/gcc/Gcov.html): needed for code coverage generation
- [**Optional**] [CCache](https://ccache.dev/): speeds up compilation if you are going to compile Clio often

We use our Docker image `ghcr.io/XRPLF/clio-ci` to build `Clio`, see [Building Clio with Docker](#building-clio-with-docker).
You can find information about exact compiler versions and tools in the [image's README](https://github.com/XRPLF/clio/blob/develop/docker/ci/README.md).

The following compiler versions are guaranteed to work.
Any compiler with a lower version may not be able to build Clio:

| Compiler    | Version |
| ----------- | ------- |
| GCC         | 12.3    |
| Clang       | 16      |
| Apple Clang | 15      |
| GCC         | 15.2    |
| Clang       | 19      |
| Apple Clang | 17      |

### Conan Configuration
@@ -84,7 +90,7 @@ core.upload:parallel={{os.cpu_count()}}
Make sure Artifactory is set up with Conan.

```sh
conan remote add --index 0 ripple https://conan.ripplex.io
conan remote add --index 0 xrplf https://conan.ripplex.io
```

Now you should be able to download the prebuilt dependencies (including the `xrpl` package) on supported platforms.
@@ -98,79 +104,100 @@ It is implicitly used when running `conan` commands, you don't need to specify i

You have to update this file every time you add a new dependency or change a revision or version of an existing dependency.

To do that, run the following command in the repository root:
> [!NOTE]
> Conan uses the local cache by default when creating a lockfile.
>
> To ensure that lockfile creation works the same way on all developer machines, you should clear the local cache before creating a new lockfile.

To create a new lockfile, run the following commands in the repository root:

```bash
conan lock create . -o '&:tests=True' -o '&:benchmark=True'
conan remove '*' --confirm
rm conan.lock
# This ensures that the xrplf remote is the first to be consulted
conan remote add --force --index 0 xrplf https://conan.ripplex.io
conan lock create .
```

> [!NOTE]
> If some dependencies are exclusive to some OS, you may need to run the last command for them adding `--profile:all <PROFILE>`.
## Building Clio

Navigate to Clio's root directory and run:
1. Navigate to Clio's root directory and run:

```sh
mkdir build && cd build
# You can also specify profile explicitly by adding `--profile:all <PROFILE_NAME>`
conan install .. --output-folder . --build missing --settings build_type=Release -o '&:tests=True'
# You can also add -GNinja to use Ninja build system instead of Make
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release ..
cmake --build . --parallel 8 # or without the number if you feel extra adventurous
```
```sh
mkdir build && cd build
```

> [!TIP]
> You can omit the `-o '&:tests=True'` if you don't want to build `clio_tests`.
2. Install dependencies through conan.

If successful, `conan install` will find the required packages and `cmake` will do the rest. You should see `clio_server` and `clio_tests` in the `build` directory (the current directory).
```sh
conan install .. --output-folder . --build missing --settings build_type=Release
```

> [!TIP]
> To generate a Code Coverage report, include `-o '&:coverage=True'` in the `conan install` command above, along with `-o '&:tests=True'` to enable tests.
> After running the `cmake` commands, execute `make clio_tests-ccov`.
> The coverage report will be found at `clio_tests-llvm-cov/index.html`.
> You can add `--profile:all <PROFILE_NAME>` to choose a specific conan profile.

<!-- markdownlint-disable-line MD028 -->
3. Configure and generate build files with CMake.

```sh
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release ..
```

> You can add `-GNinja` to use the Ninja build system (instead of Make).

4. Now, you can build all targets or specific ones:

```sh
# builds all targets
cmake --build . --parallel 8
# builds only clio_server target
cmake --build . --parallel 8 --target clio_server
```

You should see `clio_server` and `clio_tests` in the current directory.

> [!NOTE]
> If you've built Clio before and the build is now failing, it's likely due to updated dependencies. Try deleting the build folder and then rerunning the Conan and CMake commands mentioned above.
### CMake options

There are several CMake options you can use to customize the build:

| CMake Option          | Default | CMake Target                                             | Description                           |
| --------------------- | ------- | -------------------------------------------------------- | ------------------------------------- |
| `-Dcoverage`          | OFF     | `clio_tests-ccov`                                        | Enables code coverage generation      |
| `-Dtests`             | OFF     | `clio_tests`                                             | Enables unit tests                    |
| `-Dintegration_tests` | OFF     | `clio_integration_tests`                                 | Enables integration tests             |
| `-Dbenchmark`         | OFF     | `clio_benchmark`                                         | Enables benchmark executable          |
| `-Ddocs`              | OFF     | `docs`                                                   | Enables API documentation generation  |
| `-Dlint`              | OFF     | See [#clang-tidy](#using-clang-tidy-for-static-analysis) | Enables `clang-tidy` static analysis  |
| `-Dsan`               | N/A     | N/A                                                      | Enables Sanitizer (asan, tsan, ubsan) |
| `-Dpackage`           | OFF     | N/A                                                      | Creates a debian package              |
### Generating API docs for Clio

The API documentation for Clio is generated by [Doxygen](https://www.doxygen.nl/index.html). If you want to generate the API documentation when building Clio, make sure to install Doxygen 1.12.0 on your system.

To generate the API docs:
To generate the API docs, please use CMake option `-Ddocs=ON` as described above and build the `docs` target.

1. First, include `-o '&:docs=True'` in the conan install command. For example:
To view the generated files, go to `build/docs/html`.
Open the `index.html` file in your browser to see the documentation pages.

```sh
mkdir build && cd build
conan install .. --output-folder . --build missing --settings build_type=Release -o '&:tests=True' -o '&:docs=True'
```

2. Once that has completed successfully, run the `cmake` command and add the `--target docs` option:

```sh
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release ..
cmake --build . --parallel 8 --target docs
```

3. Go to `build/docs/html` to view the generated files.

Open the `index.html` file in your browser to see the documentation pages.



## Building Clio with Docker

It is also possible to build Clio using [Docker](https://www.docker.com/) if you don't want to install all the dependencies on your machine.

```sh
docker run -it ghcr.io/xrplf/clio-ci:a446d85297b3006e6d2c4dc7640368f096afecf5
docker run -it ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
git clone https://github.com/XRPLF/clio
mkdir build && cd build
conan install .. --output-folder . --build missing --settings build_type=Release -o '&:tests=True'
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release ..
cmake --build . --parallel 8 # or without the number if you feel extra adventurous
cd clio
```

Follow the same steps in the [Building Clio](#building-clio) section. You can use `--profile:all gcc` or `--profile:all clang` with the `conan install` command to choose the desired compiler.
## Developing against `rippled` in standalone mode

If you wish to develop against a `rippled` instance running in standalone mode, there are a few quirks of both Clio and `rippled` that you need to keep in mind. You must:
@@ -223,10 +250,10 @@ Sometimes, during development, you need to build against a custom version of `li
## Using `clang-tidy` for static analysis

Clang-tidy can be run by CMake when building the project.
To achieve this, you just need to provide the option `-o '&:lint=True'` for the `conan install` command:
To achieve this, you just need to provide the option `-Dlint=ON` when generating CMake files:

```sh
conan install .. --output-folder . --build missing --settings build_type=Release -o '&:tests=True' -o '&:lint=True' --profile:all clang
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release -Dlint=ON ..
```

By default, CMake will try to find `clang-tidy` automatically on your system.
@@ -3,7 +3,9 @@

This document provides a list of all available Clio configuration properties in detail.

> [!NOTE]
> Dot notation in configuration key names represents nested fields. For example, **database.scylladb** refers to the _scylladb_ field inside the _database_ object. If a key name includes "[]", it indicates that the nested field is an array (e.g., etl_sources.[]).
> Dot notation in configuration key names represents nested fields.
> For example, **database.scylladb** refers to the _scylladb_ field inside the _database_ object.
> If a key name includes "[]", it indicates that the nested field is an array (e.g., etl_sources.[]).

## Configuration Details
@@ -87,6 +89,14 @@
- **Constraints**: The minimum value is `1`. The maximum value is `4294967295`.
- **Description**: Represents the number of threads that will be used for database operations.

### database.cassandra.provider

- **Required**: True
- **Type**: string
- **Default value**: `cassandra`
- **Constraints**: The value must be one of the following: `cassandra`, `aws_keyspace`.
- **Description**: The specific database backend provider we are using.
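Following the dot-notation convention explained earlier, this option maps onto nested JSON keys roughly like so (a minimal sketch; sibling keys that a real config needs are omitted, and the nesting is inferred from the key name, not taken from the example config):

```json
"database": {
    "cassandra": {
        "provider": "aws_keyspace"
    }
}
```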

### database.cassandra.core_connections_per_host

- **Required**: True
@@ -327,6 +337,22 @@
- **Constraints**: The minimum value is `1`. The maximum value is `4294967295`.
- **Description**: Maximum queue size for sending subscription data to clients. This queue buffers data when a client is slow to receive it, ensuring delivery once the client is ready.

### server.proxy.ips.[]

- **Required**: True
- **Type**: string
- **Default value**: None
- **Constraints**: None
- **Description**: List of proxy IP addresses. When Clio receives a request from a proxy, it uses the `Forwarded` header value (if any) as the client IP. When this option is used together with `server.proxy.tokens`, Clio identifies a proxy by IP or by token.

### server.proxy.tokens.[]

- **Required**: True
- **Type**: string
- **Default value**: None
- **Constraints**: None
- **Description**: List of tokens identifying a request as coming from a proxy. The token should be provided in the `X-Proxy-Token` header, e.g. `X-Proxy-Token: <very_secret_token>`. When Clio receives a request from a proxy, it uses the `Forwarded` header value (if any) to get the client IP. When this option is used together with `server.proxy.ips`, Clio identifies a proxy by IP or by token.
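Putting the two options together, the relevant slice of the config would look roughly like this (values are hypothetical placeholders; the empty-array form of `proxy` appears in the example config later in this diff):

```json
"server": {
    "proxy": {
        "ips": ["10.0.0.5"],
        "tokens": ["very_secret_token"]
    }
}
```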
### prometheus.enabled

- **Required**: True
@@ -415,7 +441,7 @@ This document provides a list of all available Clio configuration properties in
- **Constraints**: The value must be one of the following: `sync`, `async`, `none`.
- **Description**: The strategy used for Cache loading.
### log_channels.[].channel
### log.channels.[].channel

- **Required**: False
- **Type**: string
@@ -423,7 +449,7 @@ This document provides a list of all available Clio configuration properties in
- **Constraints**: The value must be one of the following: `General`, `WebServer`, `Backend`, `RPC`, `ETL`, `Subscriptions`, `Performance`, `Migration`.
- **Description**: The name of the log channel.

### log_channels.[].log_level
### log.channels.[].level

- **Required**: False
- **Type**: string
@@ -431,7 +457,7 @@ This document provides a list of all available Clio configuration properties in
- **Constraints**: The value must be one of the following: `trace`, `debug`, `info`, `warning`, `error`, `fatal`.
- **Description**: The log level for the specific log channel.

### log_level
### log.level

- **Required**: True
- **Type**: string
@@ -439,15 +465,39 @@ This document provides a list of all available Clio configuration properties in
- **Constraints**: The value must be one of the following: `trace`, `debug`, `info`, `warning`, `error`, `fatal`.
- **Description**: The general logging level of Clio. This level is applied to all log channels that do not have an explicitly defined logging level.

### log_format
### log.format

- **Required**: True
- **Type**: string
- **Default value**: `%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% %Message%`
- **Default value**: `%Y-%m-%d %H:%M:%S.%f %^%3!l:%n%$ - %v`
- **Constraints**: None
- **Description**: The format string for log messages. The format is described here: <https://www.boost.org/doc/libs/1_83_0/libs/log/doc/html/log/tutorial/formatters.html>.
- **Description**: The format string for log messages using spdlog format patterns.

### log_to_console
Each of the variables expands like so:

- `%Y-%m-%d %H:%M:%S.%f`: The full date and time of the log entry with microsecond precision
- `%^`: Start color range
- `%3!l`: The severity (aka log level) the entry was sent at stripped to 3 characters
- `%n`: The logger name (channel) that this log entry was sent to
- `%$`: End color range
- `%v`: The actual log message

Some additional variables that might be useful:

- `%@`: A partial path to the C++ file and the line number in the said file (`src/file/path:linenumber`)
- `%t`: The ID of the thread the log entry is written from

Documentation can be found at: <https://github.com/gabime/spdlog/wiki/Custom-formatting>.
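For illustration, a message logged through the default pattern above renders roughly as follows (the timestamp, channel, and message are invented; the color markers `%^`/`%$` produce no visible characters in a file):

```
2025-01-15 12:34:56.789123 inf:General - example log message
```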
### log.is_async

- **Required**: True
- **Type**: boolean
- **Default value**: `True`
- **Constraints**: None
- **Description**: Whether spdlog is asynchronous or not.

### log.enable_console

- **Required**: True
- **Type**: boolean
@@ -455,7 +505,7 @@ This document provides a list of all available Clio configuration properties in
- **Constraints**: None
- **Description**: Enables or disables logging to the console.

### log_directory
### log.directory

- **Required**: False
- **Type**: string
@@ -463,7 +513,7 @@ This document provides a list of all available Clio configuration properties in
- **Constraints**: None
- **Description**: The directory path for the log files.
### log_rotation_size
### log.rotation_size

- **Required**: True
- **Type**: int
@@ -471,23 +521,15 @@ This document provides a list of all available Clio configuration properties in
- **Constraints**: The minimum value is `1`. The maximum value is `4294967295`.
- **Description**: The log rotation size in megabytes. When the log file reaches this particular size, a new log file starts.

### log_directory_max_size
### log.directory_max_files

- **Required**: True
- **Type**: int
- **Default value**: `51200`
- **Default value**: `25`
- **Constraints**: The minimum value is `1`. The maximum value is `4294967295`.
- **Description**: The maximum size of the log directory in megabytes.
- **Description**: The maximum number of log files in the directory.

### log_rotation_hour_interval

- **Required**: True
- **Type**: int
- **Default value**: `12`
- **Constraints**: The minimum value is `1`. The maximum value is `4294967295`.
- **Description**: Represents the interval (in hours) for log rotation. If the current log file reaches this value in logging, a new log file starts.

### log_tag_style
### log.tag_style

- **Required**: True
- **Type**: string
@@ -88,13 +88,15 @@ Exactly equal password gains admin rights for the request or a websocket connect
Clio can cache requests to ETL sources to reduce the load on the ETL source.
Only the following commands are cached: `server_info`, `server_state`, `server_definitions`, `fee`, `ledger_closed`.
By default, the forwarding cache is off.
To enable the caching for a source, `forwarding_cache_timeout` value should be added to the configuration file, e.g.:
To enable the caching for a source, a `forwarding.cache_timeout` value should be added to the configuration file, e.g.:

```json
"forwarding_cache_timeout": 0.250,
"forwarding": {
    "cache_timeout": 0.250,
}
```

`forwarding_cache_timeout` defines for how long (in seconds) a cache entry will be valid after being placed into the cache.
`forwarding.cache_timeout` defines for how long (in seconds) a cache entry will be valid after being placed into the cache.
A zero value turns off the cache feature.

## Graceful shutdown (not fully implemented yet)
@@ -76,38 +76,60 @@
        "parallel_requests_limit": 10, // Optional parameter, used only if "processing_strategy" is "parallel". It limits the number of requests for one client connection processed in parallel. Infinite if not specified.
        // Max number of responses to queue up before sent successfully. If a client's waiting queue is too long, the server will close the connection.
        "ws_max_sending_queue_size": 1500,
        "__ng_web_server": false // Use ng web server. This is a temporary setting which will be deleted after switching to ng web server
        "__ng_web_server": false, // Use ng web server. This is a temporary setting which will be deleted after switching to ng web server
        "proxy": {
            "ips": [],
            "tokens": []
        }
    },
    // Time in seconds for graceful shutdown. Defaults to 10 seconds. Not fully implemented yet.
    "graceful_period": 10.0,
    // Overrides log level on a per logging channel.
    // Defaults to global "log_level" for each unspecified channel.
    "log_channels": [
        {
            "channel": "Backend",
            "log_level": "fatal"
        },
        {
            "channel": "WebServer",
            "log_level": "info"
        },
        {
            "channel": "Subscriptions",
            "log_level": "info"
        },
        {
            "channel": "RPC",
            "log_level": "error"
        },
        {
            "channel": "ETL",
            "log_level": "debug"
        },
        {
            "channel": "Performance",
            "log_level": "trace"
        }
    ],
    "log": {
        // Overrides log level on a per logging channel.
        // Defaults to global "log.level" for each unspecified channel.
        "channels": [
            {
                "channel": "Backend",
                "level": "fatal"
            },
            {
                "channel": "WebServer",
                "level": "info"
            },
            {
                "channel": "Subscriptions",
                "level": "info"
            },
            {
                "channel": "RPC",
                "level": "error"
            },
            {
                "channel": "ETL",
                "level": "debug"
            },
            {
                "channel": "Performance",
                "level": "trace"
            }
        ],
        // The general logging level of Clio. This level is applied to all log channels that do not have an explicitly defined logging level.
        "level": "info",
        // Log format using spdlog format patterns (this is the default format)
        "format": "%Y-%m-%d %H:%M:%S.%f %^%3!l:%n%$ - %v",
        // Whether spdlog is asynchronous or not.
        "is_async": true,
        // Enables or disables logging to the console.
        "enable_console": true,
        // Clio logs to file in the specified directory only if "log.directory" is set
        // "directory": "./clio_log",
        // The log rotation size in megabytes. When the log file reaches this particular size, a new log file starts.
        "rotation_size": 2048,
        // The maximum number of log files in the directory.
        "directory_max_files": 25,
        // Log tags style to use
        "tag_style": "uint"
    },
    "cache": {
        // Configure this to use either "num_diffs", "num_cursors_from_diff", or "num_cursors_from_account". By default, Clio uses "num_diffs".
        "num_diffs": 32, // Generate the cursors from the latest ledger diff, then use the cursors to partition the ledger to load concurrently. The cursors number is affected by the busyness of the network.
@@ -121,16 +143,6 @@
        "enabled": true,
        "compress_reply": true
    },
    "log_level": "info",
    // Log format (this is the default format)
    "log_format": "%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% %Message%",
    "log_to_console": true,
    // Clio logs to file in the specified directory only if "log_directory" is set
    // "log_directory": "./clio_log",
    "log_rotation_size": 2048,
    "log_directory_max_size": 51200,
    "log_rotation_hour_interval": 12,
    "log_tag_style": "uint",
    "extractor_threads": 8,
    "read_only": false,
    // "start_sequence": [integer] the ledger index to start from,
@@ -1,76 +0,0 @@
# Logging

Clio provides several logging options, which all are configurable via the config file. These are detailed in the following sections.

## `log_level`

The minimum level of severity at which the log message will be outputted by default. Severity options are `trace`, `debug`, `info`, `warning`, `error`, `fatal`. Defaults to `info`.

## `log_format`

The format of log lines produced by Clio. Defaults to `"%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% %Message%"`.

Each of the variables expands like so:

- `TimeStamp`: The full date and time of the log entry
- `SourceLocation`: A partial path to the c++ file and the line number in said file (`source/file/path:linenumber`)
- `ThreadID`: The ID of the thread the log entry is written from
- `Channel`: The channel that this log entry was sent to
- `Severity`: The severity (aka log level) the entry was sent at
- `Message`: The actual log message

## `log_channels`

An array of JSON objects, each overriding properties for a logging `channel`.

> [!IMPORTANT]
> At the time of writing, only `log_level` can be overridden using this mechanism.

Each object is of this format:

```json
{
    "channel": "Backend",
    "log_level": "fatal"
}
```

If no override is present for a given channel, that channel will log at the severity specified by the global `log_level`.

The log channels that can be overridden are: `Backend`, `WebServer`, `Subscriptions`, `RPC`, `ETL` and `Performance`.

> [!NOTE]
> See [example-config.json](../docs/examples/config/example-config.json) for more details.

## `log_to_console`

Enable or disable log output to console. Options are `true`/`false`. This option defaults to `true`.

## `log_directory`

Path to the directory where log files are stored. If such directory doesn't exist, Clio will create it.

If the option is not specified, the logs are not written to a file.

## `log_rotation_size`

The max size of the log file in **megabytes** before it will rotate into a smaller file. Defaults to 2GB.

## `log_directory_max_size`

The max size of the log directory in **megabytes** before old log files will be deleted to free up space. Defaults to 50GB.

## `log_rotation_hour_interval`

The time interval in **hours** after the last log rotation to automatically rotate the current log file. Defaults to 12 hours.

> [!NOTE]
> Log rotation based on time occurs in conjunction with size-based log rotation. For example, if a size-based log rotation occurs, the timer for the time-based rotation will reset.

## `log_tag_style`

Tag implementation to use. Must be one of:

- `uint`: Lock free and threadsafe but outputs just a simple unsigned integer
- `uuid`: Threadsafe and outputs a UUID tag
- `none`: Doesn't use tagging at all
@@ -5,11 +5,14 @@ import re
from pathlib import Path


PATTERN = r'R"JSON\((.*?)\)JSON"'


def use_uppercase(cpp_content: str) -> str:
    return cpp_content.replace('R"json(', 'R"JSON(').replace(')json"', ')JSON"')


def fix_json_style(cpp_content: str) -> str:
    cpp_content = cpp_content.replace('R"json(', 'R"JSON(').replace(')json"', ')JSON"')

    pattern = r'R"JSON\((.*?)\)JSON"'

    def replace_json(match):
        raw_json = match.group(1)

@@ -29,12 +32,51 @@ def fix_json_style(cpp_content: str) -> str:
        raw_json = raw_json.replace(f'":{digit}', f'": {digit}')
        return f'R"JSON({raw_json})JSON"'

    return re.sub(pattern, replace_json, cpp_content, flags=re.DOTALL)
    return re.sub(PATTERN, replace_json, cpp_content, flags=re.DOTALL)


def fix_colon_spacing(cpp_content: str) -> str:
    def replace_json(match):
        raw_json = match.group(1)
        raw_json = re.sub(r'":\n\s*(\[|\{)', r'": \1', raw_json)
        return f'R"JSON({raw_json})JSON"'
    return re.sub(PATTERN, replace_json, cpp_content, flags=re.DOTALL)


def fix_indentation(cpp_content: str) -> str:
    lines = cpp_content.splitlines()

    def find_indentation(line: str) -> int:
        return len(line) - len(line.lstrip())

    for (line_num, (line, next_line)) in enumerate(zip(lines[:-1], lines[1:])):
        if "JSON(" in line and ")JSON" not in line:
            indent = find_indentation(line)
            next_indent = find_indentation(next_line)

            by_how_much = next_indent - (indent + 4)
            if by_how_much != 0:
                print(
                    f"Indentation error at line: {line_num + 2}: expected {indent + 4} spaces, found {next_indent} spaces"
                )

                for i in range(line_num + 1, len(lines)):
                    if ")JSON" in lines[i]:
                        lines[i] = " " * indent + lines[i].lstrip()
                        break
                    lines[i] = lines[i][by_how_much:] if by_how_much > 0 else " " * (-by_how_much) + lines[i]

    return "\n".join(lines) + "\n"


def process_file(file_path: Path, dry_run: bool) -> bool:
    content = file_path.read_text(encoding="utf-8")
    new_content = fix_json_style(content)

    new_content = content
    new_content = use_uppercase(new_content)
    new_content = fix_json_style(new_content)
    new_content = fix_colon_spacing(new_content)
    new_content = fix_indentation(new_content)

    if new_content != content:
        print(f"Processing file: {file_path}")
@@ -23,6 +23,7 @@
#include "util/build/Build.hpp"
#include "util/config/ConfigDescription.hpp"

#include <boost/program_options/errors.hpp>
#include <boost/program_options/options_description.hpp>
#include <boost/program_options/parsers.hpp>
#include <boost/program_options/positional_options.hpp>
@@ -56,12 +57,22 @@ CliArgs::parse(int argc, char const* argv[])
    po::positional_options_description positional;
    positional.add("conf", 1);

    auto const printHelp = [&description]() {
        std::cout << "Clio server " << util::build::getClioFullVersionString() << "\n\n" << description;
    };

    po::variables_map parsed;
    po::store(po::command_line_parser(argc, argv).options(description).positional(positional).run(), parsed);
    po::notify(parsed);
    try {
        po::store(po::command_line_parser(argc, argv).options(description).positional(positional).run(), parsed);
        po::notify(parsed);
    } catch (po::error const& e) {
        std::cerr << "Error: " << e.what() << std::endl << std::endl;
        printHelp();
        return Action{Action::Exit{EXIT_FAILURE}};
    }

    if (parsed.contains("help")) {
        std::cout << "Clio server " << util::build::getClioFullVersionString() << "\n\n" << description;
        printHelp();
        return Action{Action::Exit{EXIT_SUCCESS}};
    }
@@ -178,7 +178,9 @@ ClioApplication::run(bool const useNgWebServer)
    }
    auto const adminVerifier = std::move(expectedAdminVerifier).value();

    auto httpServer = web::ng::makeServer(config_, OnConnectCheck{dosGuard}, DisconnectHook{dosGuard}, ioc);
    auto httpServer = web::ng::makeServer(
        config_, OnConnectCheck{dosGuard}, IpChangeHook{dosGuard}, DisconnectHook{dosGuard}, ioc
    );

    if (not httpServer.has_value()) {
        LOG(util::LogService::error()) << "Error creating web server: " << httpServer.error();
@@ -108,6 +108,8 @@ public:

            ioc.stop();
            LOG(util::LogService::info()) << "io_context stopped";

            util::LogService::shutdown();
        };
    }
};
@@ -33,6 +33,7 @@

#include <memory>
#include <optional>
#include <string>
#include <utility>

namespace app {
@@ -54,6 +55,17 @@ OnConnectCheck::operator()(web::ng::Connection const& connection)
    return {};
}

IpChangeHook::IpChangeHook(web::dosguard::DOSGuardInterface& dosguard) : dosguard_(dosguard)
{
}

void
IpChangeHook::operator()(std::string const& oldIp, std::string const& newIp)
{
    dosguard_.get().decrement(oldIp);
    dosguard_.get().increment(newIp);
}

DisconnectHook::DisconnectHook(web::dosguard::DOSGuardInterface& dosguard) : dosguard_{dosguard}
{
}
@@ -36,6 +36,7 @@
#include <exception>
#include <functional>
#include <memory>
#include <string>
#include <utility>

namespace app {
@@ -64,6 +65,31 @@ public:
    operator()(web::ng::Connection const& connection);
};

/**
 * @brief A function object that is called when the IP of a connection changes (usually if proxy detected).
 * This is used to update the DOS guard.
 */
class IpChangeHook {
    std::reference_wrapper<web::dosguard::DOSGuardInterface> dosguard_;

public:
    /**
     * @brief Construct a new IpChangeHook object.
     *
     * @param dosguard The DOS guard to use.
     */
    IpChangeHook(web::dosguard::DOSGuardInterface& dosguard);

    /**
     * @brief The call of the function object.
     *
     * @param oldIp The old IP of the connection.
     * @param newIp The new IP of the connection.
     */
    void
    operator()(std::string const& oldIp, std::string const& newIp);
};

/**
 * @brief A function object to be called when a connection is disconnected.
 */
@@ -21,11 +21,16 @@

#include "cluster/ClioNode.hpp"
#include "data/BackendInterface.hpp"
#include "util/Assert.hpp"
#include "util/Spawn.hpp"
#include "util/log/Logger.hpp"

#include <boost/asio/bind_cancellation_slot.hpp>
#include <boost/asio/cancellation_type.hpp>
#include <boost/asio/error.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/use_future.hpp>
#include <boost/json/parse.hpp>
#include <boost/json/serialize.hpp>
#include <boost/json/value.hpp>
@@ -36,11 +41,16 @@

#include <chrono>
#include <ctime>
#include <latch>
#include <memory>
#include <string>
#include <utility>
#include <vector>

namespace {
constexpr auto kTOTAL_WORKERS = 2uz; // 1 reading and 1 writing worker (coroutines)
} // namespace

namespace cluster {
ClusterCommunicationService::ClusterCommunicationService(
@@ -51,6 +61,7 @@ ClusterCommunicationService::ClusterCommunicationService(
    : backend_(std::move(backend))
    , readInterval_(readInterval)
    , writeInterval_(writeInterval)
    , finishedCountdown_(kTOTAL_WORKERS)
    , selfData_{ClioNode{
          .uuid = std::make_shared<boost::uuids::uuid>(boost::uuids::random_generator{}()),
          .updateTime = std::chrono::system_clock::time_point{}
@@ -63,22 +74,42 @@
void
ClusterCommunicationService::run()
{
    ASSERT(not running_ and not stopped_, "Can only be ran once");
    running_ = true;

    util::spawn(strand_, [this](boost::asio::yield_context yield) {
        boost::asio::steady_timer timer(yield.get_executor());
        while (true) {
        boost::system::error_code ec;

        while (running_) {
            timer.expires_after(readInterval_);
            timer.async_wait(yield);
            auto token = cancelSignal_.slot();
            timer.async_wait(boost::asio::bind_cancellation_slot(token, yield[ec]));

            if (ec == boost::asio::error::operation_aborted or not running_)
                break;

            doRead(yield);
        }

        finishedCountdown_.count_down(1);
    });

    util::spawn(strand_, [this](boost::asio::yield_context yield) {
        boost::asio::steady_timer timer(yield.get_executor());
        while (true) {
        boost::system::error_code ec;

        while (running_) {
            doWrite();
            timer.expires_after(writeInterval_);
            timer.async_wait(yield);
            auto token = cancelSignal_.slot();
            timer.async_wait(boost::asio::bind_cancellation_slot(token, yield[ec]));

            if (ec == boost::asio::error::operation_aborted or not running_)
                break;
        }

        finishedCountdown_.count_down(1);
    });
}
|
||||
@@ -93,9 +124,19 @@ ClusterCommunicationService::stop()
|
||||
if (stopped_)
|
||||
return;
|
||||
|
||||
ctx_.stop();
|
||||
ctx_.join();
|
||||
stopped_ = true;
|
||||
|
||||
// for ASAN to see through concurrency correctly we need to exit all coroutines before joining the ctx
|
||||
running_ = false;
|
||||
|
||||
// cancelSignal_ is not thread safe so we execute emit on the same strand
|
||||
boost::asio::spawn(
|
||||
strand_, [this](auto&&) { cancelSignal_.emit(boost::asio::cancellation_type::all); }, boost::asio::use_future
|
||||
)
|
||||
.wait();
|
||||
finishedCountdown_.wait();
|
||||
|
||||
ctx_.join();
|
||||
}
|
||||
|
||||
std::shared_ptr<boost::uuids::uuid>
|
||||
|
||||
@@ -27,12 +27,15 @@
#include "util/prometheus/Gauge.hpp"
#include "util/prometheus/Prometheus.hpp"

#include <boost/asio/cancellation_signal.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/strand.hpp>
#include <boost/asio/thread_pool.hpp>
#include <boost/uuid/uuid.hpp>

#include <atomic>
#include <chrono>
#include <latch>
#include <memory>
#include <string>
#include <vector>
@@ -65,11 +68,14 @@ class ClusterCommunicationService : public ClusterCommunicationServiceInterface
    std::chrono::steady_clock::duration readInterval_;
    std::chrono::steady_clock::duration writeInterval_;

    boost::asio::cancellation_signal cancelSignal_;
    std::latch finishedCountdown_;
    std::atomic_bool running_ = false;
    bool stopped_ = false;

    ClioNode selfData_;
    std::vector<ClioNode> otherNodesData_;

    bool stopped_ = false;

public:
    static constexpr std::chrono::milliseconds kDEFAULT_READ_INTERVAL{2100};
    static constexpr std::chrono::milliseconds kDEFAULT_WRITE_INTERVAL{1200};
@@ -68,7 +68,6 @@ struct Amendments {

    /** @cond */
    // NOLINTBEGIN(readability-identifier-naming)
    REGISTER(OwnerPaysFee);
    REGISTER(Flow);
    REGISTER(FlowCross);
    REGISTER(fix1513);
@@ -145,6 +144,9 @@ struct Amendments {
    REGISTER(TokenEscrow);
    REGISTER(fixAMMv1_3);
    REGISTER(fixEnforceNFTokenTrustlineV2);
    REGISTER(fixAMMClawbackRounding);
    REGISTER(fixMPTDeliveredAmount);
    REGISTER(fixPriceOracleOrder);

    // Obsolete but supported by libxrpl
    REGISTER(CryptoConditionsSuite);
@@ -153,6 +155,7 @@ struct Amendments {
    REGISTER(fixNFTokenNegOffer);

    // Retired amendments
    REGISTER(OwnerPaysFee); // Removed in xrpl 2.6.0 (https://github.com/XRPLF/rippled/pull/5435)
    REGISTER(MultiSign);
    REGISTER(TrustSetAuth);
    REGISTER(FeeEscalation);
@@ -43,11 +43,6 @@
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
// local to compilation unit loggers
|
||||
namespace {
|
||||
util::Logger gLog{"Backend"};
|
||||
} // namespace
|
||||
|
||||
/**
|
||||
* @brief This namespace implements the data access layer and related components.
|
||||
*
|
||||
@@ -58,10 +53,10 @@ namespace data {
bool
BackendInterface::finishWrites(std::uint32_t const ledgerSequence)
{
    LOG(gLog.debug()) << "Want finish writes for " << ledgerSequence;
    LOG(log_.debug()) << "Want finish writes for " << ledgerSequence;
    auto commitRes = doFinishWrites();
    if (commitRes) {
        LOG(gLog.debug()) << "Successfully committed. Updating range now to " << ledgerSequence;
        LOG(log_.debug()) << "Successfully committed. Updating range now to " << ledgerSequence;
        updateRange(ledgerSequence);
    }
    return commitRes;
@@ -89,15 +84,15 @@ BackendInterface::fetchLedgerObject(
{
    auto obj = cache_.get().get(key, sequence);
    if (obj) {
        LOG(gLog.trace()) << "Cache hit - " << ripple::strHex(key);
        LOG(log_.trace()) << "Cache hit - " << ripple::strHex(key);
        return obj;
    }

    auto dbObj = doFetchLedgerObject(key, sequence, yield);
    if (!dbObj) {
        LOG(gLog.trace()) << "Missed cache and missed in db";
        LOG(log_.trace()) << "Missed cache and missed in db";
    } else {
        LOG(gLog.trace()) << "Missed cache but found in db";
        LOG(log_.trace()) << "Missed cache but found in db";
    }
    return dbObj;
}
@@ -111,7 +106,7 @@ BackendInterface::fetchLedgerObjectSeq(
{
    auto seq = doFetchLedgerObjectSeq(key, sequence, yield);
    if (!seq)
        LOG(gLog.trace()) << "Missed in db";
        LOG(log_.trace()) << "Missed in db";
    return seq;
}

@@ -133,7 +128,7 @@ BackendInterface::fetchLedgerObjects(
            misses.push_back(keys[i]);
        }
    }
    LOG(gLog.trace()) << "Cache hits = " << keys.size() - misses.size() << " - cache misses = " << misses.size();
    LOG(log_.trace()) << "Cache hits = " << keys.size() - misses.size() << " - cache misses = " << misses.size();

    if (!misses.empty()) {
        auto objs = doFetchLedgerObjects(misses, sequence, yield);
@@ -158,9 +153,9 @@ BackendInterface::fetchSuccessorKey(
{
    auto succ = cache_.get().getSuccessor(key, ledgerSequence);
    if (succ) {
        LOG(gLog.trace()) << "Cache hit - " << ripple::strHex(key);
        LOG(log_.trace()) << "Cache hit - " << ripple::strHex(key);
    } else {
        LOG(gLog.trace()) << "Cache miss - " << ripple::strHex(key);
        LOG(log_.trace()) << "Cache miss - " << ripple::strHex(key);
    }
    return succ ? succ->key : doFetchSuccessorKey(key, ledgerSequence, yield);
}
@@ -210,7 +205,7 @@ BackendInterface::fetchBookOffers(
            numSucc++;
            succMillis += getMillis(mid2 - mid1);
            if (!offerDir || offerDir->key >= bookEnd) {
                LOG(gLog.trace()) << "offerDir.has_value() " << offerDir.has_value() << " breaking";
                LOG(log_.trace()) << "offerDir.has_value() " << offerDir.has_value() << " breaking";
                break;
            }
            uTipIndex = offerDir->key;
@@ -223,7 +218,7 @@ BackendInterface::fetchBookOffers(
            keys.insert(keys.end(), indexes.begin(), indexes.end());
            auto next = sle.getFieldU64(ripple::sfIndexNext);
            if (next == 0u) {
                LOG(gLog.trace()) << "Next is empty. breaking";
                LOG(log_.trace()) << "Next is empty. breaking";
                break;
            }
            auto nextKey = ripple::keylet::page(uTipIndex, next);
@@ -238,13 +233,13 @@ BackendInterface::fetchBookOffers(
    auto mid = std::chrono::system_clock::now();
    auto objs = fetchLedgerObjects(keys, ledgerSequence, yield);
    for (size_t i = 0; i < keys.size() && i < limit; ++i) {
        LOG(gLog.trace()) << "Key = " << ripple::strHex(keys[i]) << " blob = " << ripple::strHex(objs[i])
        LOG(log_.trace()) << "Key = " << ripple::strHex(keys[i]) << " blob = " << ripple::strHex(objs[i])
                          << " ledgerSequence = " << ledgerSequence;
        ASSERT(!objs[i].empty(), "Ledger object can't be empty");
        page.offers.push_back({keys[i], objs[i]});
    }
    auto end = std::chrono::system_clock::now();
    LOG(gLog.debug()) << "Fetching " << std::to_string(keys.size()) << " offers took "
    LOG(log_.debug()) << "Fetching " << std::to_string(keys.size()) << " offers took "
                      << std::to_string(getMillis(mid - begin)) << " milliseconds. Fetching next dir took "
                      << std::to_string(succMillis) << " milliseconds. Fetched next dir " << std::to_string(numSucc)
                      << " times"
@@ -275,14 +270,17 @@ BackendInterface::updateRange(uint32_t newMax)
{
    std::scoped_lock const lck(rngMtx_);

    ASSERT(
        !range_ || newMax >= range_->maxSequence,
        "Range shouldn't exist yet or newMax should be greater. newMax = {}, range->maxSequence = {}",
        newMax,
        range_->maxSequence
    );
    if (range_.has_value() && newMax < range_->maxSequence) {
        ASSERT(
            false,
            "Range shouldn't exist yet or newMax should be at least range->maxSequence. newMax = {}, "
            "range->maxSequence = {}",
            newMax,
            range_->maxSequence
        );
    }

    if (!range_) {
    if (!range_.has_value()) {
        range_ = {.minSequence = newMax, .maxSequence = newMax};
    } else {
        range_->maxSequence = newMax;
@@ -338,13 +336,13 @@ BackendInterface::fetchLedgerPage(
        if (!objects[i].empty()) {
            page.objects.push_back({keys[i], std::move(objects[i])});
        } else if (!outOfOrder) {
            LOG(gLog.error()) << "Deleted or non-existent object in successor table. key = " << ripple::strHex(keys[i])
            LOG(log_.error()) << "Deleted or non-existent object in successor table. key = " << ripple::strHex(keys[i])
                              << " - seq = " << ledgerSequence;
            std::stringstream msg;
            for (size_t j = 0; j < objects.size(); ++j) {
                msg << " - " << ripple::strHex(keys[j]);
            }
            LOG(gLog.error()) << msg.str();
            LOG(log_.error()) << msg.str();

            if (corruptionDetector_.has_value())
                corruptionDetector_->onCorruptionDetected();
@@ -365,7 +363,7 @@ BackendInterface::fetchFees(std::uint32_t const seq, boost::asio::yield_context
    auto bytes = fetchLedgerObject(key, seq, yield);

    if (!bytes) {
        LOG(gLog.error()) << "Could not find fees";
        LOG(log_.error()) << "Could not find fees";
        return {};
    }

@@ -138,6 +138,7 @@ synchronousAndRetryOnTimeout(FnType&& func)
 */
class BackendInterface {
protected:
    util::Logger log_{"Backend"};
    mutable std::shared_mutex rngMtx_;
    std::optional<LedgerRange> range_;
    std::reference_wrapper<LedgerCacheInterface> cache_;

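The hunks above retire the file-local gLog in favor of a protected log_ member on BackendInterface, so every derived backend logs on the same "Backend" channel without a per-file logger. A minimal sketch of the pattern (illustrative; only the util::Logger usage visible in this diff is assumed):

    #include "util/log/Logger.hpp"

    class Backend {
    protected:
        // One logger per instance, all tagged with the same channel name.
        util::Logger log_{"Backend"};

    public:
        void doWork()
        {
            LOG(log_.debug()) << "work started";  // same call shape as above
        }
    };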
@@ -225,8 +225,11 @@ public:
    {
        waitForWritesToFinish();

        if (!range_) {
            executor_.writeSync(schema_->updateLedgerRange, ledgerSequence_, false, ledgerSequence_);
        // !range_.has_value() means the table 'ledger_range' is not populated; this would be the first write to
        // the table. In this case, insert both the min_sequence and max_sequence rows into the table.
        if (!range_.has_value()) {
            executor_.writeSync(schema_->insertLedgerRange, false, ledgerSequence_);
            executor_.writeSync(schema_->insertLedgerRange, true, ledgerSequence_);
        }

        if (not executeSyncUpdate(schema_->updateLedgerRange.bind(ledgerSequence_, true, ledgerSequence_ - 1))) {
@@ -513,80 +516,14 @@ public:
        boost::asio::yield_context yield
    ) const override
    {
        NFTsAndCursor ret;

        Statement const idQueryStatement = [&taxon, &issuer, &cursorIn, &limit, this]() {
            if (taxon.has_value()) {
                auto r = schema_->selectNFTIDsByIssuerTaxon.bind(issuer);
                r.bindAt(1, *taxon);
                r.bindAt(2, cursorIn.value_or(ripple::uint256(0)));
                r.bindAt(3, Limit{limit});
                return r;
            }

            auto r = schema_->selectNFTIDsByIssuer.bind(issuer);
            r.bindAt(
                1,
                std::make_tuple(
                    cursorIn.has_value() ? ripple::nft::toUInt32(ripple::nft::getTaxon(*cursorIn)) : 0,
                    cursorIn.value_or(ripple::uint256(0))
                )
            );
            r.bindAt(2, Limit{limit});
            return r;
        }();

        // Query for all the NFTs issued by the account, potentially filtered by the taxon
        auto const res = executor_.read(yield, idQueryStatement);

        auto const& idQueryResults = res.value();
        if (not idQueryResults.hasRows()) {
            LOG(log_.debug()) << "No rows returned";
            return {};
        }

        std::vector<ripple::uint256> nftIDs;
        for (auto const [nftID] : extract<ripple::uint256>(idQueryResults))
            nftIDs.push_back(nftID);

        if (nftIDs.empty())
            return ret;

        if (nftIDs.size() == limit)
            ret.cursor = nftIDs.back();

        std::vector<Statement> selectNFTStatements;
        selectNFTStatements.reserve(nftIDs.size());

        std::transform(
            std::cbegin(nftIDs), std::cend(nftIDs), std::back_inserter(selectNFTStatements), [&](auto const& nftID) {
                return schema_->selectNFT.bind(nftID, ledgerSequence);
            }
        );

        auto const nftInfos = executor_.readEach(yield, selectNFTStatements);

        std::vector<Statement> selectNFTURIStatements;
        selectNFTURIStatements.reserve(nftIDs.size());

        std::transform(
            std::cbegin(nftIDs), std::cend(nftIDs), std::back_inserter(selectNFTURIStatements), [&](auto const& nftID) {
                return schema_->selectNFTURI.bind(nftID, ledgerSequence);
            }
        );

        auto const nftUris = executor_.readEach(yield, selectNFTURIStatements);

        for (auto i = 0u; i < nftIDs.size(); i++) {
            if (auto const maybeRow = nftInfos[i].template get<uint32_t, ripple::AccountID, bool>(); maybeRow) {
                auto [seq, owner, isBurned] = *maybeRow;
                NFT nft(nftIDs[i], seq, owner, isBurned);
                if (auto const maybeUri = nftUris[i].template get<ripple::Blob>(); maybeUri)
                    nft.uri = *maybeUri;
                ret.nfts.push_back(nft);
            }
        if (taxon.has_value()) {
            nftIDs = fetchNFTIDsByTaxon(issuer, *taxon, limit, cursorIn, yield);
        } else {
            nftIDs = fetchNFTIDsWithoutTaxon(issuer, limit, cursorIn, yield);
        }
        return ret;

        return populateNFTsAndCreateCursor(nftIDs, ledgerSequence, limit, yield);
    }

    MPTHoldersAndCursor
@@ -803,8 +740,9 @@ public:
        std::optional<ripple::AccountID> lastItem;

        while (liveAccounts.size() < number) {
            Statement const statement = lastItem ? schema_->selectAccountFromToken.bind(*lastItem, Limit{pageSize})
                                                 : schema_->selectAccountFromBeginning.bind(Limit{pageSize});
            Statement const statement = lastItem
                ? schema_->selectAccountFromTokenScylla->bind(*lastItem, Limit{pageSize})
                : schema_->selectAccountFromBeginningScylla->bind(Limit{pageSize});

            auto const res = executor_.read(yield, statement);
            if (res) {
@@ -1116,6 +1054,139 @@ private:

        return true;
    }

    std::vector<ripple::uint256>
    fetchNFTIDsByTaxon(
        ripple::AccountID const& issuer,
        std::uint32_t const taxon,
        std::uint32_t const limit,
        std::optional<ripple::uint256> const& cursorIn,
        boost::asio::yield_context yield
    ) const
    {
        std::vector<ripple::uint256> nftIDs;
        Statement statement = schema_->selectNFTIDsByIssuerTaxon.bind(issuer);
        statement.bindAt(1, taxon);
        statement.bindAt(2, cursorIn.value_or(ripple::uint256(0)));
        statement.bindAt(3, Limit{limit});

        auto const res = executor_.read(yield, statement);
        if (res && res.value().hasRows()) {
            for (auto const [nftID] : extract<ripple::uint256>(res.value()))
                nftIDs.push_back(nftID);
        }
        return nftIDs;
    }

    std::vector<ripple::uint256>
    fetchNFTIDsWithoutTaxon(
        ripple::AccountID const& issuer,
        std::uint32_t const limit,
        std::optional<ripple::uint256> const& cursorIn,
        boost::asio::yield_context yield
    ) const
    {
        std::vector<ripple::uint256> nftIDs;
        if (settingsProvider_.getSettings().provider == "aws_keyspace") {
            // --- Amazon Keyspaces Workflow ---
            auto const startTaxon = cursorIn.has_value() ? ripple::nft::toUInt32(ripple::nft::getTaxon(*cursorIn)) : 0;
            auto const startTokenID = cursorIn.value_or(ripple::uint256(0));

            Statement firstQuery = schema_->selectNFTIDsByIssuerTaxon.bind(issuer);
            firstQuery.bindAt(1, startTaxon);
            firstQuery.bindAt(2, startTokenID);
            firstQuery.bindAt(3, Limit{limit});

            auto const firstRes = executor_.read(yield, firstQuery);
            if (firstRes) {
                for (auto const [nftID] : extract<ripple::uint256>(firstRes.value()))
                    nftIDs.push_back(nftID);
            }

            if (nftIDs.size() < limit) {
                auto const remainingLimit = limit - nftIDs.size();
                Statement secondQuery = schema_->selectNFTsAfterTaxonKeyspaces->bind(issuer);
                secondQuery.bindAt(1, startTaxon);
                secondQuery.bindAt(2, Limit{remainingLimit});

                auto const secondRes = executor_.read(yield, secondQuery);
                if (secondRes) {
                    for (auto const [nftID] : extract<ripple::uint256>(secondRes.value()))
                        nftIDs.push_back(nftID);
                }
            }
        } else if (settingsProvider_.getSettings().provider == "scylladb") {
            auto r = schema_->selectNFTsByIssuerScylla->bind(issuer);
            r.bindAt(
                1,
                std::make_tuple(
                    cursorIn.has_value() ? ripple::nft::toUInt32(ripple::nft::getTaxon(*cursorIn)) : 0,
                    cursorIn.value_or(ripple::uint256(0))
                )
            );
            r.bindAt(2, Limit{limit});

            auto const res = executor_.read(yield, r);
            if (res && res.value().hasRows()) {
                for (auto const [nftID] : extract<ripple::uint256>(res.value()))
                    nftIDs.push_back(nftID);
            }
        }
        return nftIDs;
    }

    /**
     * @brief Takes a list of NFT IDs, fetches their full data, and assembles the final result with a cursor.
     */
    NFTsAndCursor
    populateNFTsAndCreateCursor(
        std::vector<ripple::uint256> const& nftIDs,
        std::uint32_t const ledgerSequence,
        std::uint32_t const limit,
        boost::asio::yield_context yield
    ) const
    {
        if (nftIDs.empty()) {
            LOG(log_.debug()) << "No rows returned";
            return {};
        }

        NFTsAndCursor ret;
        if (nftIDs.size() == limit)
            ret.cursor = nftIDs.back();

        // Prepare and execute queries to fetch NFT info and URIs in parallel.
        std::vector<Statement> selectNFTStatements;
        selectNFTStatements.reserve(nftIDs.size());
        std::transform(
            std::cbegin(nftIDs), std::cend(nftIDs), std::back_inserter(selectNFTStatements), [&](auto const& nftID) {
                return schema_->selectNFT.bind(nftID, ledgerSequence);
            }
        );

        std::vector<Statement> selectNFTURIStatements;
        selectNFTURIStatements.reserve(nftIDs.size());
        std::transform(
            std::cbegin(nftIDs), std::cend(nftIDs), std::back_inserter(selectNFTURIStatements), [&](auto const& nftID) {
                return schema_->selectNFTURI.bind(nftID, ledgerSequence);
            }
        );

        auto const nftInfos = executor_.readEach(yield, selectNFTStatements);
        auto const nftUris = executor_.readEach(yield, selectNFTURIStatements);

        // Combine the results into final NFT objects.
        for (auto i = 0u; i < nftIDs.size(); ++i) {
            if (auto const maybeRow = nftInfos[i].template get<uint32_t, ripple::AccountID, bool>(); maybeRow) {
                auto [seq, owner, isBurned] = *maybeRow;
                NFT nft(nftIDs[i], seq, owner, isBurned);
                if (auto const maybeUri = nftUris[i].template get<ripple::Blob>(); maybeUri)
                    nft.uri = *maybeUri;
                ret.nfts.push_back(nft);
            }
        }
        return ret;
    }
};

using CassandraBackend = BasicCassandraBackend<SettingsProvider, impl::DefaultExecutionStrategy<>>;

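The refactor above splits fetchNFTsByIssuer into provider-specific ID lookups plus a shared populate step, while keeping the same cursor contract: the cursor is set only when a full page came back. A hedged sketch of a caller driving that cursor (hypothetical caller code; the parameter order is illustrative, and only the NFTsAndCursor shape shown above is assumed):

    std::optional<ripple::uint256> cursor;  // empty on the first page
    do {
        auto const page = backend.fetchNFTsByIssuer(taxon, issuer, ledgerSequence, limit, cursor, yield);
        for (auto const& nft : page.nfts)
            process(nft);  // hypothetical consumer
        cursor = page.cursor;  // engaged only when nftIDs.size() == limit
    } while (cursor.has_value());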
@@ -28,6 +28,7 @@

#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <vector>
@@ -347,6 +348,86 @@ public:
    Statements(SettingsProviderType const& settingsProvider, Handle const& handle)
        : settingsProvider_{settingsProvider}, handle_{std::cref(handle)}
    {
        // Initialize ScyllaDB-supported queries
        if (settingsProvider_.get().getSettings().provider == "scylladb") {
            selectAccountFromBeginningScylla = [this]() {
                return handle_.get().prepare(
                    fmt::format(
                        R"(
                        SELECT account
                          FROM {}
                         WHERE token(account) > 0
                           PER PARTITION LIMIT 1
                         LIMIT ?
                        )",
                        qualifiedTableName(settingsProvider_.get(), "account_tx")
                    )
                );
            }();

            selectAccountFromTokenScylla = [this]() {
                return handle_.get().prepare(
                    fmt::format(
                        R"(
                        SELECT account
                          FROM {}
                         WHERE token(account) > token(?)
                           PER PARTITION LIMIT 1
                         LIMIT ?
                        )",
                        qualifiedTableName(settingsProvider_.get(), "account_tx")
                    )
                );
            }();

            selectNFTsByIssuerScylla = [this]() {
                return handle_.get().prepare(
                    fmt::format(
                        R"(
                        SELECT token_id
                          FROM {}
                         WHERE issuer = ?
                           AND (taxon, token_id) > ?
                         ORDER BY taxon ASC, token_id ASC
                         LIMIT ?
                        )",
                        qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")
                    )
                );
            }();

            updateLedgerRange = [this]() {
                return handle_.get().prepare(
                    fmt::format(
                        R"(
                        UPDATE {}
                           SET sequence = ?
                         WHERE is_latest = ?
                            IF sequence IN (?, null)
                        )",
                        qualifiedTableName(settingsProvider_.get(), "ledger_range")
                    )
                );
            }();

        // Initialize AWS Keyspaces-supported queries
        } else if (settingsProvider_.get().getSettings().provider == "aws_keyspace") {
            selectNFTsAfterTaxonKeyspaces = [this]() {
                return handle_.get().prepare(
                    fmt::format(
                        R"(
                        SELECT token_id
                          FROM {}
                         WHERE issuer = ?
                           AND taxon > ?
                         ORDER BY taxon ASC, token_id ASC
                         LIMIT ?
                        )",
                        qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")
                    )
                );
            }();
        }
    }

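The ScyllaDB-only statements above implement a full-table account scan: PER PARTITION LIMIT 1 returns at most one row per partition (one row per account), and token(account) > token(?) resumes the scan after the last account seen, since rows are ordered by partitioner token rather than by value. A rough sketch of the resulting read loop (illustrative; it mirrors the selectAccountFrom*Scylla usage that appears earlier in this diff):

    std::optional<ripple::AccountID> last;  // resume point for the token() scan
    while (accounts.size() < wanted) {
        auto const stmt = last ? selectAccountFromTokenScylla->bind(*last, Limit{pageSize})
                               : selectAccountFromBeginningScylla->bind(Limit{pageSize});
        auto const res = executor.read(yield, stmt);
        if (!res || !res.value().hasRows())
            break;  // scan exhausted
        for (auto const [account] : extract<ripple::AccountID>(res.value())) {
            accounts.push_back(account);
            last = account;  // the next page starts after this partition's token
        }
    }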
    //
@@ -526,6 +607,17 @@ public:
    // Update (and "delete") queries
    //

    PreparedStatement insertLedgerRange = [this]() {
        return handle_.get().prepare(
            fmt::format(
                R"(
                INSERT INTO {} (is_latest, sequence) VALUES (?, ?) IF NOT EXISTS
                )",
                qualifiedTableName(settingsProvider_.get(), "ledger_range")
            )
        );
    }();

    PreparedStatement updateLedgerRange = [this]() {
        return handle_.get().prepare(
            fmt::format(
@@ -533,7 +625,7 @@ public:
                UPDATE {}
                   SET sequence = ?
                 WHERE is_latest = ?
                    IF sequence IN (?, null)
                    IF sequence = ?
                )",
                qualifiedTableName(settingsProvider_.get(), "ledger_range")
            )
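Tightening IF sequence IN (?, null) to IF sequence = ? matters because the null arm is no longer needed once insertLedgerRange (IF NOT EXISTS) seeds the table: the UPDATE becomes a strict compare-and-set on the previous sequence. A sketch of the resulting call, matching the bind order used in finishWrites earlier in this diff (new sequence, is_latest, expected previous sequence):

    // Lightweight transaction: only advance max_sequence if the row still
    // holds the previous ledger sequence (compare-and-set).
    bool const applied = executeSyncUpdate(
        schema_->updateLedgerRange.bind(ledgerSequence_, /* is_latest */ true, ledgerSequence_ - 1)
    );
    if (!applied) {
        // Another writer advanced the range first; this commit loses the race.
    }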
@@ -654,6 +746,10 @@ public:
            )
        );
    }();

    /*
    Currently, these two SELECT statements are not used.
    If we ever use them, we will need to change the PER PARTITION LIMIT to support Keyspaces.

    PreparedStatement selectLedgerPageKeys = [this]() {
        return handle_.get().prepare(
            fmt::format(
@@ -687,6 +783,7 @@ public:
                )
            );
    }();
    */

    PreparedStatement getToken = [this]() {
        return handle_.get().prepare(
@@ -717,36 +814,6 @@ public:
            )
        );
    }();

    PreparedStatement selectAccountFromBeginning = [this]() {
        return handle_.get().prepare(
            fmt::format(
                R"(
                SELECT account
                  FROM {}
                 WHERE token(account) > 0
                   PER PARTITION LIMIT 1
                 LIMIT ?
                )",
                qualifiedTableName(settingsProvider_.get(), "account_tx")
            )
        );
    }();

    PreparedStatement selectAccountFromToken = [this]() {
        return handle_.get().prepare(
            fmt::format(
                R"(
                SELECT account
                  FROM {}
                 WHERE token(account) > token(?)
                   PER PARTITION LIMIT 1
                 LIMIT ?
                )",
                qualifiedTableName(settingsProvider_.get(), "account_tx")
            )
        );
    }();

    PreparedStatement selectAccountTxForward = [this]() {
        return handle_.get().prepare(
            fmt::format(
@@ -827,22 +894,6 @@ public:
            )
        );
    }();

    PreparedStatement selectNFTIDsByIssuer = [this]() {
        return handle_.get().prepare(
            fmt::format(
                R"(
                SELECT token_id
                  FROM {}
                 WHERE issuer = ?
                   AND (taxon, token_id) > ?
                 ORDER BY taxon ASC, token_id ASC
                 LIMIT ?
                )",
                qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")
            )
        );
    }();

    PreparedStatement selectNFTIDsByIssuerTaxon = [this]() {
        return handle_.get().prepare(
            fmt::format(
@@ -953,6 +1004,15 @@ public:
            )
        );
    }();

    // For ScyllaDB / Cassandra ONLY
    std::optional<PreparedStatement> selectAccountFromBeginningScylla;
    std::optional<PreparedStatement> selectAccountFromTokenScylla;
    std::optional<PreparedStatement> selectNFTsByIssuerScylla;

    // For AWS Keyspaces ONLY
    // NOTE: AWS Keyspaces is not able to load the cache with accounts
    std::optional<PreparedStatement> selectNFTsAfterTaxonKeyspaces;
};

/**

@@ -97,6 +97,7 @@ SettingsProvider::parseSettings() const
    settings.coreConnectionsPerHost = config_.get<uint32_t>("core_connections_per_host");
    settings.queueSizeIO = config_.maybeValue<uint32_t>("queue_size_io");
    settings.writeBatchSize = config_.get<std::size_t>("write_batch_size");
    settings.provider = config_.get<std::string>("provider");

    if (config_.getValueView("connect_timeout").hasValue()) {
        auto const connectTimeoutSecond = config_.get<uint32_t>("connect_timeout");

@@ -36,9 +36,18 @@ constexpr auto kBATCH_DELETER = [](CassBatch* ptr) { cass_batch_free(ptr); };

namespace data::cassandra::impl {

// TODO: Use an appropriate value instead of CASS_BATCH_TYPE_LOGGED for different use cases
/*
 * There are two main batch types for Cassandra statements:
 * LOGGED: Ensures all updates in the batch succeed together, or none do.
 *         Use this for critical, related changes (e.g., for the same user), but it is slower.
 *
 * UNLOGGED: For performance. Sends many separate updates in one network trip to be fast.
 *           Use this for bulk-loading unrelated data, but know there's NO all-or-nothing guarantee.
 *
 * More info here: https://docs.datastax.com/en/developer/cpp-driver-dse/1.10/features/basics/batches/index.html
 */
Batch::Batch(std::vector<Statement> const& statements)
    : ManagedObject{cass_batch_new(CASS_BATCH_TYPE_LOGGED), kBATCH_DELETER}
    : ManagedObject{cass_batch_new(CASS_BATCH_TYPE_UNLOGGED), kBATCH_DELETER}
{
    cass_batch_set_is_idempotent(*this, cass_true);

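For reference, a minimal standalone sketch of building and executing an unlogged batch with the DataStax C/C++ driver, as the constructor above now does (session setup and error handling trimmed; table and values are made up):

    #include <cassandra.h>

    void writeBatch(CassSession* session)
    {
        // UNLOGGED: one network round trip, but no all-or-nothing guarantee.
        CassBatch* batch = cass_batch_new(CASS_BATCH_TYPE_UNLOGGED);
        cass_batch_set_is_idempotent(batch, cass_true);  // makes retries safe

        CassStatement* stmt = cass_statement_new("INSERT INTO t (k, v) VALUES (?, ?)", 2);
        cass_statement_bind_int64(stmt, 0, 1);
        cass_statement_bind_int64(stmt, 1, 42);
        cass_batch_add_statement(batch, stmt);
        cass_statement_free(stmt);  // the batch holds its own reference

        CassFuture* future = cass_session_execute_batch(session, batch);
        cass_future_wait(future);  // cass_future_error_code(future) reports the outcome
        cass_future_free(future);
        cass_batch_free(batch);
    }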
@@ -60,6 +60,13 @@ Cluster::Cluster(Settings const& settings) : ManagedObject{cass_cluster_new(), k
    cass_cluster_set_connect_timeout(*this, settings.connectionTimeout.count());
    cass_cluster_set_request_timeout(*this, settings.requestTimeout.count());

    // TODO: AWS keyspace reads should be local_one to save cost
    if (settings.provider == "aws_keyspace") {
        if (auto const rc = cass_cluster_set_consistency(*this, CASS_CONSISTENCY_LOCAL_QUORUM); rc != CASS_OK) {
            throw std::runtime_error(fmt::format("Error setting cassandra consistency: {}", cass_error_desc(rc)));
        }
    }

    if (auto const rc = cass_cluster_set_core_connections_per_host(*this, settings.coreConnectionsPerHost);
        rc != CASS_OK) {
        throw std::runtime_error(fmt::format("Could not set core connections per host: {}", cass_error_desc(rc)));

@@ -45,6 +45,7 @@ struct Settings {
    static constexpr uint32_t kDEFAULT_MAX_WRITE_REQUESTS_OUTSTANDING = 10'000;
    static constexpr uint32_t kDEFAULT_MAX_READ_REQUESTS_OUTSTANDING = 100'000;
    static constexpr std::size_t kDEFAULT_BATCH_SIZE = 20;
    static constexpr std::string kDEFAULT_PROVIDER = "cassandra";

    /**
     * @brief Represents the configuration of contact points for cassandra.
@@ -83,11 +84,14 @@ struct Settings {
    uint32_t maxReadRequestsOutstanding = kDEFAULT_MAX_READ_REQUESTS_OUTSTANDING;

    /** @brief The number of connections per host to always have active */
    uint32_t coreConnectionsPerHost = 1u;
    uint32_t coreConnectionsPerHost = 3u;

    /** @brief Size of batches when writing */
    std::size_t writeBatchSize = kDEFAULT_BATCH_SIZE;

    /** @brief Provider indicating whether we are using ScyllaDB or AWS Keyspaces */
    std::string provider = kDEFAULT_PROVIDER;

    /** @brief Size of the IO queue */
    std::optional<uint32_t> queueSizeIO = std::nullopt;  // NOLINT(readability-redundant-member-init)

@@ -58,14 +58,14 @@ public:
    explicit Statement(std::string_view query, Args&&... args)
        : ManagedObject{cass_statement_new_n(query.data(), query.size(), sizeof...(args)), kDELETER}
    {
        cass_statement_set_consistency(*this, CASS_CONSISTENCY_QUORUM);
        cass_statement_set_consistency(*this, CASS_CONSISTENCY_LOCAL_QUORUM);
        cass_statement_set_is_idempotent(*this, cass_true);
        bind<Args...>(std::forward<Args>(args)...);
    }

    /* implicit */ Statement(CassStatement* ptr) : ManagedObject{ptr, kDELETER}
    {
        cass_statement_set_consistency(*this, CASS_CONSISTENCY_QUORUM);
        cass_statement_set_consistency(*this, CASS_CONSISTENCY_LOCAL_QUORUM);
        cass_statement_set_is_idempotent(*this, cass_true);
    }

@@ -305,6 +305,8 @@ ETLService::startLoading(uint32_t seq)
{
    ASSERT(not state_->isStrictReadonly, "This should only happen on writer nodes");
    taskMan_ = taskManagerProvider_->make(ctx_, *monitor_, seq, finishSequence_);

    // FIXME: this legacy name "extractor_threads" is no longer accurate (we have coroutines now)
    taskMan_->run(config_.get().get<std::size_t>("extractor_threads"));
}

@@ -26,6 +26,7 @@
#include "etlng/SchedulerInterface.hpp"
#include "etlng/impl/Monitor.hpp"
#include "etlng/impl/TaskQueue.hpp"
#include "util/Assert.hpp"
#include "util/Constants.hpp"
#include "util/LedgerUtils.hpp"
#include "util/Profiler.hpp"
@@ -70,11 +71,10 @@ TaskManager::~TaskManager()
void
TaskManager::run(std::size_t numExtractors)
{
    LOG(log_.debug()) << "Starting task manager with " << numExtractors << " extractors...\n";
    ASSERT(not running_, "TaskManager can only be started once");
    running_ = true;

    stop();
    extractors_.clear();
    loaders_.clear();
    LOG(log_.debug()) << "Starting task manager with " << numExtractors << " extractors...\n";

    extractors_.reserve(numExtractors);
    for ([[maybe_unused]] auto _ : std::views::iota(0uz, numExtractors))
@@ -157,7 +157,8 @@ TaskManager::spawnLoader(TaskQueue& queue)
            monitor_.get().notifySequenceLoaded(data->seq);
        } else {
            // TODO (https://github.com/XRPLF/clio/issues/1852) this is probably better done with a timeout (on
            // coroutine) so that the thread itself is not blocked
            // coroutine) so that the thread itself is not blocked. For now this implies that the context
            // (io_threads) needs at least 2 threads
            queue.awaitTask();
        }
    }
@@ -178,6 +179,8 @@ TaskManager::wait()
void
TaskManager::stop()
{
    ASSERT(running_, "TaskManager is not running");

    for (auto& extractor : extractors_)
        extractor.abort();
    for (auto& loader : loaders_)

@@ -56,6 +56,7 @@ class TaskManager : public TaskManagerInterface {
    std::vector<util::async::AnyOperation<void>> extractors_;
    std::vector<util::async::AnyOperation<void>> loaders_;

    std::atomic_bool running_ = false;
    util::Logger log_{"ETL"};

public:

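The run()/stop() rework above replaces the implicit stop-and-restart with a start-once guard on an atomic flag. A minimal sketch of the idiom (illustrative; the standard assert stands in for Clio's ASSERT macro):

    #include <atomic>
    #include <cassert>

    class Manager {
        std::atomic_bool running_ = false;

    public:
        void run()
        {
            assert(!running_ && "can only be started once");
            running_ = true;
            // ... spawn extractors and loaders ...
        }

        void stop()
        {
            assert(running_ && "not running");
            // ... abort extractors and loaders ...
        }
    };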
@@ -96,7 +96,7 @@ SubscriptionManager::forwardProposedTransaction(boost::json::object const& recei
boost::json::object
SubscriptionManager::subLedger(boost::asio::yield_context yield, SubscriberSharedPtr const& subscriber)
{
    return ledgerFeed_.sub(yield, backend_, subscriber);
    return ledgerFeed_.sub(yield, backend_, subscriber, networkID_);
}

void
@@ -113,7 +113,7 @@ SubscriptionManager::pubLedger(
    std::uint32_t const txnCount
)
{
    ledgerFeed_.pub(lgrInfo, fees, ledgerRange, txnCount);
    ledgerFeed_.pub(lgrInfo, fees, ledgerRange, txnCount, networkID_);
}

void

@@ -44,7 +44,8 @@ LedgerFeed::makeLedgerPubMessage(
    ripple::LedgerHeader const& lgrInfo,
    ripple::Fees const& fees,
    std::string const& ledgerRange,
    std::uint32_t const txnCount
    uint32_t const txnCount,
    uint32_t const networkID
)
{
    boost::json::object pubMsg;
@@ -57,6 +58,7 @@ LedgerFeed::makeLedgerPubMessage(
    pubMsg["reserve_inc"] = rpc::toBoostJson(fees.increment.jsonClipped());
    pubMsg["validated_ledgers"] = ledgerRange;
    pubMsg["txn_count"] = txnCount;
    pubMsg["network_id"] = networkID;
    return pubMsg;
}

@@ -64,7 +66,8 @@ boost::json::object
LedgerFeed::sub(
    boost::asio::yield_context yield,
    std::shared_ptr<data::BackendInterface const> const& backend,
    SubscriberSharedPtr const& subscriber
    SubscriberSharedPtr const& subscriber,
    uint32_t const networkID
)
{
    SingleFeedBase::sub(subscriber);
@@ -81,7 +84,7 @@ LedgerFeed::sub(

    auto const range = std::to_string(ledgerRange->minSequence) + "-" + std::to_string(ledgerRange->maxSequence);

    auto pubMsg = makeLedgerPubMessage(*lgrInfo, *fees, range, 0);
    auto pubMsg = makeLedgerPubMessage(*lgrInfo, *fees, range, 0, networkID);
    pubMsg.erase("txn_count");
    pubMsg.erase("type");

@@ -93,9 +96,10 @@ LedgerFeed::pub(
    ripple::LedgerHeader const& lgrInfo,
    ripple::Fees const& fees,
    std::string const& ledgerRange,
    std::uint32_t const txnCount
    uint32_t const txnCount,
    uint32_t const networkID
)
{
    SingleFeedBase::pub(boost::json::serialize(makeLedgerPubMessage(lgrInfo, fees, ledgerRange, txnCount)));
    SingleFeedBase::pub(boost::json::serialize(makeLedgerPubMessage(lgrInfo, fees, ledgerRange, txnCount, networkID)));
}
} // namespace feed::impl

@@ -41,7 +41,8 @@ namespace feed::impl {
 * @brief Feed that publishes the ledger info.
 * Example : {'type': 'ledgerClosed', 'ledger_index': 2647935, 'ledger_hash':
 * '5D022718CD782A82EE10D2147FD90B5F42F26A7E937C870B4FE3CF1086C916AE', 'ledger_time': 756395681, 'fee_base': 10,
 * 'reserve_base': 10000000, 'reserve_inc': 2000000, 'validated_ledgers': '2619127-2647935', 'txn_count': 0}
 * 'reserve_base': 10000000, 'reserve_inc': 2000000, 'validated_ledgers': '2619127-2647935', 'txn_count': 0,
 * 'network_id': 1}
 */
class LedgerFeed : public SingleFeedBase {
public:
@@ -57,13 +58,15 @@ public:
     * @brief Subscribe the ledger feed.
     * @param yield The coroutine yield.
     * @param backend The backend.
     * @param subscriber
     * @param subscriber The subscriber.
     * @param networkID The network ID.
     * @return The information of the latest ledger.
     */
    boost::json::object
    sub(boost::asio::yield_context yield,
        std::shared_ptr<data::BackendInterface const> const& backend,
        SubscriberSharedPtr const& subscriber);
        SubscriberSharedPtr const& subscriber,
        uint32_t networkID);

    /**
     * @brief Publishes the ledger feed.
@@ -71,12 +74,14 @@ public:
     * @param fees The fees.
     * @param ledgerRange The ledger range.
     * @param txnCount The transaction count.
     * @param networkID The network ID.
     */
    void
    pub(ripple::LedgerHeader const& lgrInfo,
        ripple::Fees const& fees,
        std::string const& ledgerRange,
        std::uint32_t txnCount);
        uint32_t txnCount,
        uint32_t networkID);

private:
    static boost::json::object
@@ -84,7 +89,8 @@ private:
        ripple::LedgerHeader const& lgrInfo,
        ripple::Fees const& fees,
        std::string const& ledgerRange,
        std::uint32_t txnCount
        uint32_t txnCount,
        uint32_t networkID
    );
};
} // namespace feed::impl

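To make the network_id addition concrete, here is a small, self-contained sketch that assembles and serializes the ledgerClosed message with Boost.JSON. The field set follows the example in the doc comment above; the values are made up:

    #include <boost/json.hpp>
    #include <iostream>

    int main()
    {
        boost::json::object pubMsg;
        pubMsg["type"] = "ledgerClosed";
        pubMsg["ledger_index"] = 2647935;
        pubMsg["ledger_hash"] = "5D022718CD782A82EE10D2147FD90B5F42F26A7E937C870B4FE3CF1086C916AE";
        pubMsg["ledger_time"] = 756395681;
        pubMsg["fee_base"] = 10;
        pubMsg["reserve_base"] = 10000000;
        pubMsg["reserve_inc"] = 2000000;
        pubMsg["validated_ledgers"] = "2619127-2647935";
        pubMsg["txn_count"] = 0;
        pubMsg["network_id"] = 1;  // the field this change adds

        std::cout << boost::json::serialize(pubMsg) << "\n";
    }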
@@ -33,10 +33,12 @@
#include <boost/json/serialize.hpp>
#include <xrpl/basics/chrono.h>
#include <xrpl/basics/strHex.h>
#include <xrpl/json/json_value.h>
#include <xrpl/protocol/AccountID.h>
#include <xrpl/protocol/Book.h>
#include <xrpl/protocol/LedgerFormats.h>
#include <xrpl/protocol/LedgerHeader.h>
#include <xrpl/protocol/NFTSyntheticSerializer.h>
#include <xrpl/protocol/SField.h>
#include <xrpl/protocol/STObject.h>
#include <xrpl/protocol/TER.h>
@@ -204,8 +206,18 @@ TransactionFeed::pub(
    pubObj[txKey] = rpc::toJson(*tx);
    pubObj[JS(meta)] = rpc::toJson(*meta);
    rpc::insertDeliveredAmount(pubObj[JS(meta)].as_object(), tx, meta, txMeta.date);
    rpc::insertDeliverMaxAlias(pubObj[txKey].as_object(), version);
    rpc::insertMPTIssuanceID(pubObj[JS(meta)].as_object(), tx, meta);

    auto& txnPubobj = pubObj[txKey].as_object();
    rpc::insertDeliverMaxAlias(txnPubobj, version);

    Json::Value nftJson;
    ripple::RPC::insertNFTSyntheticInJson(nftJson, tx, *meta);
    auto const nftBoostJson = rpc::toBoostJson(nftJson).as_object();
    if (nftBoostJson.contains(JS(meta)) && nftBoostJson.at(JS(meta)).is_object()) {
        auto& metaObjInPub = pubObj.at(JS(meta)).as_object();
        for (auto const& [k, v] : nftBoostJson.at(JS(meta)).as_object())
            metaObjInPub.insert_or_assign(k, v);
    }

    auto const& metaObj = pubObj[JS(meta)];
    ASSERT(metaObj.is_object(), "meta must be an obj in rippled and clio");

@@ -22,6 +22,7 @@
#include "app/VerifyConfig.hpp"
#include "migration/MigrationApplication.hpp"
#include "rpc/common/impl/HandlerProvider.hpp"
#include "util/ScopeGuard.hpp"
#include "util/TerminationHandler.hpp"
#include "util/config/ConfigDefinition.hpp"
#include "util/log/Logger.hpp"
@@ -33,18 +34,16 @@

using namespace util::config;

[[nodiscard]]
int
main(int argc, char const* argv[])
try {
    util::setTerminationHandler();

runApp(int argc, char const* argv[])
{
    auto const action = app::CliArgs::parse(argc, argv);
    return action.apply(
        [](app::CliArgs::Action::Exit const& exit) { return exit.exitCode; },
        [](app::CliArgs::Action::VerifyConfig const& verify) {
            if (app::parseConfig(verify.configPath)) {
                std::cout << "Config " << verify.configPath << " is correct"
                          << "\n";
                std::cout << "Config " << verify.configPath << " is correct" << "\n";
                return EXIT_SUCCESS;
            }
            return EXIT_FAILURE;
@@ -74,10 +73,22 @@ try {
            return migrator.run();
        }
    );
} catch (std::exception const& e) {
    LOG(util::LogService::fatal()) << "Exit on exception: " << e.what();
    return EXIT_FAILURE;
} catch (...) {
    LOG(util::LogService::fatal()) << "Exit on exception: unknown";
    return EXIT_FAILURE;
}

int
main(int argc, char const* argv[])
{
    util::setTerminationHandler();

    util::ScopeGuard const loggerShutdownGuard{[] { util::LogService::shutdown(); }};

    try {
        return runApp(argc, argv);
    } catch (std::exception const& e) {
        LOG(util::LogService::fatal()) << "Exit on exception: " << e.what();
        return EXIT_FAILURE;
    } catch (...) {
        LOG(util::LogService::fatal()) << "Exit on exception: unknown";
        return EXIT_FAILURE;
    }
}

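The restructured entry point funnels all work through runApp() and relies on a scope guard so the logger is shut down on every exit path, including exceptions. A generic sketch of the RAII idiom (illustrative; Clio's util::ScopeGuard is assumed to behave like this helper):

    #include <utility>

    template <typename F>
    class ScopeGuard {
        F onExit_;

    public:
        explicit ScopeGuard(F f) : onExit_{std::move(f)} {}
        ~ScopeGuard() { onExit_(); }  // runs on normal return and during unwinding
        ScopeGuard(ScopeGuard const&) = delete;
        ScopeGuard& operator=(ScopeGuard const&) = delete;
    };

    int main()
    {
        ScopeGuard const guard{[] { /* e.g. LogService::shutdown() */ }};
        // ... run the application; the guard fires on every exit path ...
    }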
@@ -101,11 +101,6 @@
#include <utility>
#include <vector>

// local to compilation unit loggers
namespace {
util::Logger gLog{"RPC"};
} // namespace

namespace rpc {

std::optional<AccountCursor>
@@ -208,6 +203,8 @@ accountFromStringStrict(std::string const& account)
std::pair<std::shared_ptr<ripple::STTx const>, std::shared_ptr<ripple::STObject const>>
deserializeTxPlusMeta(data::TransactionAndMetadata const& blobs)
{
    static util::Logger const log{"RPC"};  // NOLINT(readability-identifier-naming)

    try {
        std::pair<std::shared_ptr<ripple::STTx const>, std::shared_ptr<ripple::STObject const>> result;
        {
@@ -224,9 +221,9 @@ deserializeTxPlusMeta(data::TransactionAndMetadata const& blobs)
            std::stringstream meta;
            std::ranges::copy(blobs.transaction, std::ostream_iterator<unsigned char>(txn));
            std::ranges::copy(blobs.metadata, std::ostream_iterator<unsigned char>(meta));
            LOG(gLog.error()) << "Failed to deserialize transaction. txn = " << txn.str() << " - meta = " << meta.str()
                              << " txn length = " << std::to_string(blobs.transaction.size())
                              << " meta length = " << std::to_string(blobs.metadata.size());
            LOG(log.error()) << "Failed to deserialize transaction. txn = " << txn.str() << " - meta = " << meta.str()
                             << " txn length = " << std::to_string(blobs.transaction.size())
                             << " meta length = " << std::to_string(blobs.metadata.size());
            throw e;
        }
    }
@@ -262,11 +259,10 @@ toExpandedJson(
    auto metaJson = toJson(*meta);
    insertDeliveredAmount(metaJson, txn, meta, blobs.date);
    insertDeliverMaxAlias(txnJson, apiVersion);
    insertMPTIssuanceID(metaJson, txn, meta);

    if (nftEnabled == NFTokenjson::ENABLE) {
        Json::Value nftJson;
        ripple::insertNFTSyntheticInJson(nftJson, txn, *meta);
        ripple::RPC::insertNFTSyntheticInJson(nftJson, txn, *meta);
        // if there are no nft fields, the nftJson will be {"meta":null}
        auto const nftBoostJson = toBoostJson(nftJson).as_object();
        if (nftBoostJson.contains(JS(meta)) and nftBoostJson.at(JS(meta)).is_object()) {
@@ -321,67 +317,6 @@ insertDeliveredAmount(
    return false;
}

/**
 * @brief Get the delivered amount
 *
 * @param meta The metadata
 * @return The mpt_issuance_id or std::nullopt if not available
 */
static std::optional<ripple::uint192>
getMPTIssuanceID(std::shared_ptr<ripple::TxMeta const> const& meta)
{
    ripple::TxMeta const& transactionMeta = *meta;

    for (ripple::STObject const& node : transactionMeta.getNodes()) {
        if (node.getFieldU16(ripple::sfLedgerEntryType) != ripple::ltMPTOKEN_ISSUANCE ||
            node.getFName() != ripple::sfCreatedNode)
            continue;

        auto const& mptNode = node.peekAtField(ripple::sfNewFields).downcast<ripple::STObject>();
        return ripple::makeMptID(mptNode[ripple::sfSequence], mptNode[ripple::sfIssuer]);
    }

    return {};
}

/**
 * @brief Check if transaction has a new MPToken created
 *
 * @param txn The transaction
 * @param meta The metadata
 * @return true if the transaction can have a mpt_issuance_id
 */
static bool
canHaveMPTIssuanceID(std::shared_ptr<ripple::STTx const> const& txn, std::shared_ptr<ripple::TxMeta const> const& meta)
{
    if (txn->getTxnType() != ripple::ttMPTOKEN_ISSUANCE_CREATE)
        return false;

    if (meta->getResultTER() != ripple::tesSUCCESS)
        return false;

    return true;
}

bool
insertMPTIssuanceID(
    boost::json::object& metaJson,
    std::shared_ptr<ripple::STTx const> const& txn,
    std::shared_ptr<ripple::TxMeta const> const& meta
)
{
    if (!canHaveMPTIssuanceID(txn, meta))
        return false;

    if (auto const id = getMPTIssuanceID(meta)) {
        metaJson[JS(mpt_issuance_id)] = ripple::to_string(*id);
        return true;
    }

    assert(false);
    return false;
}

void
insertDeliverMaxAlias(boost::json::object& txJson, std::uint32_t const apiVersion)
{
@@ -806,7 +741,9 @@ traverseOwnedNodes(
    }
    auto end = std::chrono::system_clock::now();

    LOG(gLog.debug()) << fmt::format(
    static util::Logger const log{"RPC"};  // NOLINT(readability-identifier-naming)

    LOG(log.debug()) << fmt::format(
        "Time loading owned directories: {} milliseconds, entries size: {}",
        std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count(),
        keys.size()
@@ -814,7 +751,7 @@ traverseOwnedNodes(

    auto [objects, timeDiff] = util::timed([&]() { return backend.fetchLedgerObjects(keys, sequence, yield); });

    LOG(gLog.debug()) << "Time loading owned entries: " << timeDiff << " milliseconds";
    LOG(log.debug()) << "Time loading owned entries: " << timeDiff << " milliseconds";

    for (auto i = 0u; i < objects.size(); ++i) {
        ripple::SerialIter it{objects[i].data(), objects[i].size()};
@@ -1302,7 +1239,8 @@ postProcessOrderBook(

            jsonOffers.push_back(offerJson);
        } catch (std::exception const& e) {
            LOG(gLog.error()) << "caught exception: " << e.what();
            util::Logger const log{"RPC"};
            LOG(log.error()) << "caught exception: " << e.what();
        }
    }
    return jsonOffers;

@@ -199,21 +199,6 @@ insertDeliveredAmount(
    uint32_t date
);

/**
 * @brief Add "mpt_issuance_id" into MPTokenIssuanceCreate transaction json.
 *
 * @param metaJson The metadata json object to add "MPTokenIssuanceID"
 * @param txn The transaction object
 * @param meta The metadata object
 * @return true if the "mpt_issuance_id" is added to the metadata json object
 */
bool
insertMPTIssuanceID(
    boost::json::object& metaJson,
    std::shared_ptr<ripple::STTx const> const& txn,
    std::shared_ptr<ripple::TxMeta const> const& meta
);

/**
 * @brief Convert STBase object to JSON
 *
@@ -23,6 +23,7 @@
#include "rpc/RPCHelpers.hpp"
#include "rpc/common/Types.hpp"
#include "util/AccountUtils.hpp"
#include "util/LedgerUtils.hpp"
#include "util/TimeUtils.hpp"

#include <boost/json/object.hpp>
@@ -32,6 +33,7 @@
#include <xrpl/basics/StringUtilities.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/protocol/AccountID.h>
#include <xrpl/protocol/LedgerFormats.h>
#include <xrpl/protocol/Protocol.h>
#include <xrpl/protocol/UintTypes.h>

@@ -120,6 +122,18 @@ CustomValidator CustomValidators::ledgerIndexValidator =
        return MaybeError{};
    }};

CustomValidator CustomValidators::ledgerTypeValidator =
    CustomValidator{[](boost::json::value const& value, std::string_view key) -> MaybeError {
        if (!value.is_string())
            return Error{Status{RippledError::rpcINVALID_PARAMS, fmt::format("Invalid field '{}', not string.", key)}};

        auto const type = util::LedgerTypes::getLedgerEntryTypeFromStr(boost::json::value_to<std::string>(value));
        if (type == ripple::ltANY)
            return Error{Status{RippledError::rpcINVALID_PARAMS, fmt::format("Invalid field '{}'.", key)}};

        return MaybeError{};
    }};

CustomValidator CustomValidators::accountValidator =
    CustomValidator{[](boost::json::value const& value, std::string_view key) -> MaybeError {
        if (!value.is_string())
@@ -160,6 +174,19 @@ CustomValidator CustomValidators::accountMarkerValidator =
        return MaybeError{};
    }};

CustomValidator CustomValidators::accountTypeValidator =
    CustomValidator{[](boost::json::value const& value, std::string_view key) -> MaybeError {
        if (!value.is_string())
            return Error{Status{RippledError::rpcINVALID_PARAMS, fmt::format("Invalid field '{}', not string.", key)}};

        auto const type =
            util::LedgerTypes::getAccountOwnedLedgerTypeFromStr(boost::json::value_to<std::string>(value));
        if (type == ripple::ltANY)
            return Error{Status{RippledError::rpcINVALID_PARAMS, fmt::format("Invalid field '{}'.", key)}};

        return MaybeError{};
    }};

CustomValidator CustomValidators::currencyValidator =
    CustomValidator{[](boost::json::value const& value, std::string_view key) -> MaybeError {
        if (!value.is_string())

@@ -486,6 +486,14 @@ struct CustomValidators final {
     */
    static CustomValidator ledgerIndexValidator;

    /**
     * @brief Provides a validator for ledger type.
     *
     * A type accepts canonical names of ledger entry types (case insensitive) or short names.
     * Used by ledger_data.
     */
    static CustomValidator ledgerTypeValidator;

    /**
     * @brief Provides a commonly used validator for accounts.
     *
@@ -508,6 +516,14 @@ struct CustomValidators final {
     */
    static CustomValidator accountMarkerValidator;

    /**
     * @brief Provides a validator for account type.
     *
     * A type accepts canonical names of owned ledger entry types (case insensitive) or short names.
     * Used by account_objects.
     */
    static CustomValidator accountTypeValidator;

    /**
     * @brief Provides a commonly used validator for uint160(AccountID) hex string.
     *

@@ -43,6 +43,7 @@

#include <algorithm>
#include <iterator>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
@@ -88,6 +89,18 @@ AccountInfoHandler::process(AccountInfoHandler::Input const& input, Context cons

    auto const isDisallowIncomingEnabled = isEnabled(Amendments::DisallowIncoming);
    auto const isClawbackEnabled = isEnabled(Amendments::Clawback);
    auto const isTokenEscrowEnabled = isEnabled(Amendments::TokenEscrow);

    Output out{
        .ledgerIndex = lgrInfo.seq,
        .ledgerHash = ripple::strHex(lgrInfo.hash),
        .accountData = sle,
        .isDisallowIncomingEnabled = isDisallowIncomingEnabled,
        .isClawbackEnabled = isClawbackEnabled,
        .isTokenEscrowEnabled = isTokenEscrowEnabled,
        .apiVersion = ctx.apiVersion,
        .signerLists = std::nullopt
    };

    // Return SignerList(s) if that is requested.
    if (input.signerLists) {
@@ -98,7 +111,6 @@ AccountInfoHandler::process(AccountInfoHandler::Input const& input, Context cons
        // This code will need to be revisited if in the future we
        // support multiple SignerLists on one account.
        auto const signers = sharedPtrBackend_->fetchLedgerObject(signersKey.key, lgrInfo.seq, ctx.yield);
        std::vector<ripple::STLedgerEntry> signerList;

        if (signers) {
            ripple::STLedgerEntry const sleSigners{
@@ -108,23 +120,11 @@ AccountInfoHandler::process(AccountInfoHandler::Input const& input, Context cons
            if (!signersKey.check(sleSigners))
                return Error{Status{RippledError::rpcDB_DESERIALIZATION}};

            signerList.push_back(sleSigners);
            out.signerLists = std::vector<ripple::STLedgerEntry>{sleSigners};
        }

        return Output(
            lgrInfo.seq,
            ripple::strHex(lgrInfo.hash),
            sle,
            isDisallowIncomingEnabled,
            isClawbackEnabled,
            ctx.apiVersion,
            signerList
        );
    }

    return Output(
        lgrInfo.seq, ripple::strHex(lgrInfo.hash), sle, isDisallowIncomingEnabled, isClawbackEnabled, ctx.apiVersion
    );
    return out;
}

void

@@ -159,9 +159,11 @@ tag_invoke(boost::json::value_from_tag, boost::json::value& jv, AccountInfoHandl
        lsFlags.insert(lsFlags.end(), disallowIncomingFlags.begin(), disallowIncomingFlags.end());
    }

    if (output.isClawbackEnabled) {
    if (output.isClawbackEnabled)
        lsFlags.emplace_back("allowTrustLineClawback", ripple::lsfAllowTrustLineClawback);
    }

    if (output.isTokenEscrowEnabled)
        lsFlags.emplace_back("allowTrustLineLocking", ripple::lsfAllowTrustLineLocking);

    boost::json::object acctFlags;
    for (auto const& lsf : lsFlags)

@@ -61,40 +61,11 @@ public:
        ripple::STLedgerEntry accountData;
        bool isDisallowIncomingEnabled = false;
        bool isClawbackEnabled = false;
        bool isTokenEscrowEnabled = false;
        uint32_t apiVersion;
        std::optional<std::vector<ripple::STLedgerEntry>> signerLists;
        // validated should be sent via framework
        bool validated = true;

        /**
         * @brief Construct a new Output object
         *
         * @param ledgerId The ledger index
         * @param ledgerHash The ledger hash
         * @param sle The account data
         * @param isDisallowIncomingEnabled Whether disallow incoming is enabled
         * @param isClawbackEnabled Whether clawback is enabled
         * @param version The API version
         * @param signerLists The signer lists
         */
        Output(
            uint32_t ledgerId,
            std::string ledgerHash,
            ripple::STLedgerEntry sle,
            bool isDisallowIncomingEnabled,
            bool isClawbackEnabled,
            uint32_t version,
            std::optional<std::vector<ripple::STLedgerEntry>> signerLists = std::nullopt
        )
            : ledgerIndex(ledgerId)
            , ledgerHash(std::move(ledgerHash))
            , accountData(std::move(sle))
            , isDisallowIncomingEnabled(isDisallowIncomingEnabled)
            , isClawbackEnabled(isClawbackEnabled)
            , apiVersion(version)
            , signerLists(std::move(signerLists))
        {
        }
    };

/**

@@ -159,8 +159,10 @@ tag_invoke(boost::json::value_to_tag<AccountObjectsHandler::Input>, boost::json:
        }
    }

    if (jsonObject.contains(JS(type)))
        input.type = util::LedgerTypes::getLedgerEntryTypeFromStr(boost::json::value_to<std::string>(jv.at(JS(type))));
    if (jsonObject.contains(JS(type))) {
        input.type =
            util::LedgerTypes::getAccountOwnedLedgerTypeFromStr(boost::json::value_to<std::string>(jv.at(JS(type))));
    }

    if (jsonObject.contains(JS(limit)))
        input.limit = jv.at(JS(limit)).as_int64();

@@ -25,11 +25,9 @@
#include "rpc/common/Specs.hpp"
#include "rpc/common/Types.hpp"
#include "rpc/common/Validators.hpp"
#include "util/LedgerUtils.hpp"

#include <boost/json/conversion.hpp>
#include <boost/json/value.hpp>
#include <xrpl/protocol/LedgerFormats.h>
#include <xrpl/protocol/STLedgerEntry.h>
#include <xrpl/protocol/jss.h>

@@ -107,7 +105,6 @@ public:
    static RpcSpecConstRef
    spec([[maybe_unused]] uint32_t apiVersion)
    {
        auto const& accountOwnedTypes = util::LedgerTypes::getAccountOwnedLedgerTypeStrList();
        static auto const kRPC_SPEC = RpcSpec{
            {JS(account), validation::Required{}, validation::CustomValidators::accountValidator},
            {JS(ledger_hash), validation::CustomValidators::uint256HexStringValidator},
@@ -116,9 +113,7 @@ public:
             validation::Type<uint32_t>{},
             validation::Min(1u),
             modifiers::Clamp<int32_t>(kLIMIT_MIN, kLIMIT_MAX)},
            {JS(type),
             validation::Type<std::string>{},
             validation::OneOf<std::string>(accountOwnedTypes.cbegin(), accountOwnedTypes.cend())},
            {JS(type), validation::CustomValidators::accountTypeValidator},
            {JS(marker), validation::CustomValidators::accountMarkerValidator},
            {JS(deletion_blockers_only), validation::Type<bool>{}},
        };

@@ -124,7 +124,13 @@ GetAggregatePriceHandler::process(GetAggregatePriceHandler::Input const& input,

    auto const latestTime = timestampPricesBiMap.left.begin()->first;

    Output out(latestTime, ripple::to_string(lgrInfo.hash), lgrInfo.seq);
    Output out{
        .time = latestTime,
        .trimStats = std::nullopt,
        .ledgerHash = ripple::to_string(lgrInfo.hash),
        .ledgerIndex = lgrInfo.seq,
        .median = ""
    };

    if (input.timeThreshold) {
        auto const oldestTime = timestampPricesBiMap.left.rbegin()->first;

@@ -43,7 +43,6 @@
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>

namespace rpc {
@@ -59,8 +58,9 @@ public:
     * @brief A struct to hold the statistics
     */
    struct Stats {
        ripple::STAmount avg;
        ripple::Number sd;  // standard deviation
        ripple::STAmount avg{};  // NOLINT(readability-redundant-member-init)
        // standard deviation
        ripple::Number sd{};  // NOLINT(readability-redundant-member-init)
        uint32_t size{0};
    };

@@ -69,23 +69,12 @@ public:
     */
    struct Output {
        uint32_t time;
        Stats extireStats;
        Stats extireStats{};
        std::optional<Stats> trimStats;
        std::string ledgerHash;
        uint32_t ledgerIndex;
        std::string median;
        bool validated = true;

        /**
         * @brief Construct a new Output object
         * @param time The time of the latest oracle data
         * @param ledgerHash The hash of the ledger
         * @param ledgerIndex The index of the ledger
         */
        Output(uint32_t time, std::string ledgerHash, uint32_t ledgerIndex)
            : time(time), ledgerHash(std::move(ledgerHash)), ledgerIndex(ledgerIndex)
        {
        }
    };

/**

@@ -20,14 +20,12 @@
#pragma once

#include "data/BackendInterface.hpp"
#include "rpc/Errors.hpp"
#include "rpc/JS.hpp"
#include "rpc/common/Checkers.hpp"
#include "rpc/common/MetaProcessors.hpp"
#include "rpc/common/Specs.hpp"
#include "rpc/common/Types.hpp"
#include "rpc/common/Validators.hpp"
#include "util/LedgerUtils.hpp"
#include "util/log/Logger.hpp"

#include <boost/json/array.hpp>
@@ -36,7 +34,6 @@
#include <boost/json/value.hpp>
#include <xrpl/basics/base_uint.h>
#include <xrpl/protocol/ErrorCodes.h>
#include <xrpl/protocol/LedgerFormats.h>
#include <xrpl/protocol/jss.h>

#include <cstdint>
@@ -113,7 +110,6 @@ public:
    static RpcSpecConstRef
    spec([[maybe_unused]] uint32_t apiVersion)
    {
        auto const& ledgerTypeStrs = util::LedgerTypes::getLedgerEntryTypeStrList();
        static auto const kRPC_SPEC = RpcSpec{
            {JS(binary), validation::Type<bool>{}},
            {"out_of_order", validation::Type<bool>{}},
@@ -123,11 +119,7 @@ public:
            {JS(marker),
             validation::Type<uint32_t, std::string>{},
             meta::IfType<std::string>{validation::CustomValidators::uint256HexStringValidator}},
            {JS(type),
             meta::WithCustomError{
                 validation::Type<std::string>{}, Status{ripple::rpcINVALID_PARAMS, "Invalid field 'type', not string."}
             },
             validation::OneOf<std::string>(ledgerTypeStrs.cbegin(), ledgerTypeStrs.cend())},
            {JS(type), validation::CustomValidators::ledgerTypeValidator},
            {JS(ledger), check::Deprecated{}},
        };
        return kRPC_SPEC;

@@ -54,7 +54,7 @@ OnAssert::resetAction()
void
OnAssert::defaultAction(std::string_view message)
{
    if (LogService::enabled()) {
    if (LogServiceState::initialized()) {
        LOG(LogService::fatal()) << message;
    } else {
        std::cerr << message;

@@ -8,6 +8,7 @@ target_sources(
    Coroutine.cpp
    CoroutineGroup.cpp
    log/Logger.cpp
    log/PrettyPath.cpp
    prometheus/Http.cpp
    prometheus/Label.cpp
    prometheus/MetricBase.cpp
@@ -23,6 +24,7 @@ target_sources(
    requests/WsConnection.cpp
    requests/impl/SslContext.cpp
    ResponseExpirationCache.cpp
    Shasum.cpp
    SignalsHandler.cpp
    StopHelper.cpp
    StringHash.cpp
@@ -50,8 +52,6 @@ target_link_libraries(
  clio_util
  PUBLIC Boost::headers
         Boost::iostreams
         Boost::log
         Boost::log_setup
         fmt::fmt
         openssl::openssl
         xrpl::libxrpl
@@ -59,6 +59,7 @@ target_link_libraries(
         clio_options
         clio_rpc_center
         clio_build_version
  PRIVATE spdlog::spdlog
)

# FIXME: needed on gcc-12, clang-16 and AppleClang for now (known boost 1.82 issue for some compilers)

@@ -19,9 +19,13 @@

 #include "util/LedgerUtils.hpp"

+#include "util/JsonUtils.hpp"
+
 #include <xrpl/protocol/LedgerFormats.h>

 #include <algorithm>
+#include <functional>
+#include <optional>
 #include <string>
 #include <unordered_map>

@@ -30,16 +34,52 @@ namespace util {
 ripple::LedgerEntryType
 LedgerTypes::getLedgerEntryTypeFromStr(std::string const& entryName)
 {
-    static std::unordered_map<std::string, ripple::LedgerEntryType> kTYPE_MAP = []() {
-        std::unordered_map<std::string, ripple::LedgerEntryType> map;
-        std::ranges::for_each(kLEDGER_TYPES, [&map](auto const& item) { map[item.name_] = item.type_; });
-        return map;
-    }();
-
-    if (!kTYPE_MAP.contains(entryName))
-        return ripple::ltANY;
-
-    return kTYPE_MAP.at(entryName);
+    if (auto const result = getLedgerTypeAttributeFromStr(entryName); result.has_value()) {
+        return result->get().type_;
+    }
+    return ripple::ltANY;
+}
+
+ripple::LedgerEntryType
+LedgerTypes::getAccountOwnedLedgerTypeFromStr(std::string const& entryName)
+{
+    if (auto const result = getLedgerTypeAttributeFromStr(entryName);
+        result.has_value() && result->get().category_ != LedgerTypeAttribute::LedgerCategory::Chain) {
+        return result->get().type_;
+    }
+
+    return ripple::ltANY;
+}
+
+std::optional<std::reference_wrapper<impl::LedgerTypeAttribute const>>
+LedgerTypes::getLedgerTypeAttributeFromStr(std::string const& entryName)
+{
+    static std::unordered_map<std::string, std::reference_wrapper<impl::LedgerTypeAttribute const>> const kNAME_MAP =
+        []() {
+            std::unordered_map<std::string, std::reference_wrapper<impl::LedgerTypeAttribute const>> map;
+            std::ranges::for_each(kLEDGER_TYPES, [&map](auto const& item) {
+                map.insert({util::toLower(item.name_), item});
+            });
+            return map;
+        }();
+
+    static std::unordered_map<std::string, std::reference_wrapper<impl::LedgerTypeAttribute const>> const
+        kRPC_NAME_MAP = []() {
+            std::unordered_map<std::string, std::reference_wrapper<impl::LedgerTypeAttribute const>> map;
+            std::ranges::for_each(kLEDGER_TYPES, [&map](auto const& item) { map.insert({item.rpcName_, item}); });
+            return map;
+        }();
+
+    if (auto const it = kRPC_NAME_MAP.find(entryName); it != kRPC_NAME_MAP.end()) {
+        return it->second;
+    }
+
+    auto const entryNameLowercase = util::toLower(entryName);
+    if (auto const it = kNAME_MAP.find(entryNameLowercase); it != kNAME_MAP.end()) {
+        return it->second;
+    }
+
+    return std::nullopt;
 }

 } // namespace util
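
Note: as reconstructed above, lookups now try the exact RPC name first, then fall back to the canonical name case-insensitively. A standalone analogue of that two-map order (simplified types, not the clio attribute table):

#include <algorithm>
#include <cctype>
#include <iostream>
#include <optional>
#include <string>
#include <unordered_map>

enum class EntryType { AccountRoot, Offer };

std::string
toLower(std::string s)
{
    std::ranges::transform(s, s.begin(), [](unsigned char c) { return static_cast<char>(std::tolower(c)); });
    return s;
}

std::optional<EntryType>
lookup(std::string const& name)
{
    // exact match against the short RPC names
    static std::unordered_map<std::string, EntryType> const kRpcNames{
        {"account", EntryType::AccountRoot},
        {"offer", EntryType::Offer},
    };
    // canonical names, stored lowercased so the lookup is case-insensitive
    static std::unordered_map<std::string, EntryType> const kCanonicalLower{
        {"accountroot", EntryType::AccountRoot},
        {"offer", EntryType::Offer},
    };

    if (auto const it = kRpcNames.find(name); it != kRpcNames.end())
        return it->second;
    if (auto const it = kCanonicalLower.find(toLower(name)); it != kCanonicalLower.end())
        return it->second;
    return std::nullopt;
}

int main()
{
    std::cout << lookup("account").has_value()       // RPC name: found
              << lookup("AccountRoot").has_value()   // canonical name, any case: found
              << lookup("bogus").has_value() << '\n';  // prints: 110
}
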
@@ -31,6 +31,8 @@

 #include <algorithm>
 #include <array>
+#include <functional>
+#include <optional>
 #include <string>
 #include <unordered_set>
 #include <vector>
@@ -40,6 +42,7 @@ namespace util {

+class LedgerTypes;

 namespace impl {

 class LedgerTypeAttribute {
     enum class LedgerCategory {
         Invalid,
@@ -50,33 +53,40 @@ class LedgerTypeAttribute {

     ripple::LedgerEntryType type_ = ripple::ltANY;
     char const* name_ = nullptr;
+    char const* rpcName_ = nullptr;
     LedgerCategory category_ = LedgerCategory::Invalid;

-    constexpr LedgerTypeAttribute(char const* name, ripple::LedgerEntryType type, LedgerCategory category)
-        : type_(type), name_(name), category_(category)
+    constexpr LedgerTypeAttribute(
+        char const* name,
+        char const* rpcName,
+        ripple::LedgerEntryType type,
+        LedgerCategory category
+    )
+        : type_{type}, name_{name}, rpcName_{rpcName}, category_{category}
     {
     }

 public:
     static constexpr LedgerTypeAttribute
-    chainLedgerType(char const* name, ripple::LedgerEntryType type)
+    chainLedgerType(char const* name, char const* rpcName, ripple::LedgerEntryType type)
     {
-        return LedgerTypeAttribute(name, type, LedgerCategory::Chain);
+        return LedgerTypeAttribute(name, rpcName, type, LedgerCategory::Chain);
     }

     static constexpr LedgerTypeAttribute
-    accountOwnedLedgerType(char const* name, ripple::LedgerEntryType type)
+    accountOwnedLedgerType(char const* name, char const* rpcName, ripple::LedgerEntryType type)
     {
-        return LedgerTypeAttribute(name, type, LedgerCategory::AccountOwned);
+        return LedgerTypeAttribute(name, rpcName, type, LedgerCategory::AccountOwned);
     }

     static constexpr LedgerTypeAttribute
-    deletionBlockerLedgerType(char const* name, ripple::LedgerEntryType type)
+    deletionBlockerLedgerType(char const* name, char const* rpcName, ripple::LedgerEntryType type)
     {
-        return LedgerTypeAttribute(name, type, LedgerCategory::DeletionBlocker);
+        return LedgerTypeAttribute(name, rpcName, type, LedgerCategory::DeletionBlocker);
     }
+
+    friend class util::LedgerTypes;
 };

 } // namespace impl

 /**
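
Note: the change threads a second name through the existing named-constructor pattern: the real constructor stays private and each constexpr factory pins the category tag. A trimmed-down standalone version of the same idea:

#include <iostream>

class Attribute {
    enum class Category { Chain, AccountOwned, DeletionBlocker };

    char const* name_;
    Category category_;

    // private: only the named factories below can construct an Attribute
    constexpr Attribute(char const* name, Category category) : name_{name}, category_{category}
    {
    }

public:
    static constexpr Attribute
    chain(char const* name)
    {
        return Attribute(name, Category::Chain);
    }

    static constexpr Attribute
    accountOwned(char const* name)
    {
        return Attribute(name, Category::AccountOwned);
    }

    constexpr char const*
    name() const
    {
        return name_;
    }
};

int main()
{
    // the category is fixed by which factory was called, at compile time
    static constexpr Attribute kAmendments = Attribute::chain("amendments");
    std::cout << kAmendments.name() << '\n';
}
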
@@ -88,38 +98,51 @@ class LedgerTypes {
     using LedgerTypeAttributeList = LedgerTypeAttribute[];

     static constexpr LedgerTypeAttributeList const kLEDGER_TYPES{
-        LedgerTypeAttribute::accountOwnedLedgerType(JS(account), ripple::ltACCOUNT_ROOT),
-        LedgerTypeAttribute::chainLedgerType(JS(amendments), ripple::ltAMENDMENTS),
-        LedgerTypeAttribute::deletionBlockerLedgerType(JS(check), ripple::ltCHECK),
-        LedgerTypeAttribute::accountOwnedLedgerType(JS(deposit_preauth), ripple::ltDEPOSIT_PREAUTH),
+        LedgerTypeAttribute::accountOwnedLedgerType(JS(AccountRoot), JS(account), ripple::ltACCOUNT_ROOT),
+        LedgerTypeAttribute::chainLedgerType(JS(Amendments), JS(amendments), ripple::ltAMENDMENTS),
+        LedgerTypeAttribute::deletionBlockerLedgerType(JS(Check), JS(check), ripple::ltCHECK),
+        LedgerTypeAttribute::accountOwnedLedgerType(JS(DepositPreauth), JS(deposit_preauth), ripple::ltDEPOSIT_PREAUTH),
         // dir node belongs to account, but can not be filtered from account_objects
-        LedgerTypeAttribute::chainLedgerType(JS(directory), ripple::ltDIR_NODE),
-        LedgerTypeAttribute::deletionBlockerLedgerType(JS(escrow), ripple::ltESCROW),
-        LedgerTypeAttribute::chainLedgerType(JS(fee), ripple::ltFEE_SETTINGS),
-        LedgerTypeAttribute::chainLedgerType(JS(hashes), ripple::ltLEDGER_HASHES),
-        LedgerTypeAttribute::accountOwnedLedgerType(JS(offer), ripple::ltOFFER),
-        LedgerTypeAttribute::deletionBlockerLedgerType(JS(payment_channel), ripple::ltPAYCHAN),
-        LedgerTypeAttribute::accountOwnedLedgerType(JS(signer_list), ripple::ltSIGNER_LIST),
-        LedgerTypeAttribute::deletionBlockerLedgerType(JS(state), ripple::ltRIPPLE_STATE),
-        LedgerTypeAttribute::accountOwnedLedgerType(JS(ticket), ripple::ltTICKET),
-        LedgerTypeAttribute::accountOwnedLedgerType(JS(nft_offer), ripple::ltNFTOKEN_OFFER),
-        LedgerTypeAttribute::deletionBlockerLedgerType(JS(nft_page), ripple::ltNFTOKEN_PAGE),
-        LedgerTypeAttribute::accountOwnedLedgerType(JS(amm), ripple::ltAMM),
-        LedgerTypeAttribute::deletionBlockerLedgerType(JS(bridge), ripple::ltBRIDGE),
-        LedgerTypeAttribute::deletionBlockerLedgerType(JS(xchain_owned_claim_id), ripple::ltXCHAIN_OWNED_CLAIM_ID),
-        LedgerTypeAttribute::deletionBlockerLedgerType(
-            JS(xchain_owned_create_account_claim_id), ripple::ltXCHAIN_OWNED_CREATE_ACCOUNT_CLAIM_ID
-        ),
+        LedgerTypeAttribute::chainLedgerType(JS(DirectoryNode), JS(directory), ripple::ltDIR_NODE),
+        LedgerTypeAttribute::deletionBlockerLedgerType(JS(Escrow), JS(escrow), ripple::ltESCROW),
+        LedgerTypeAttribute::chainLedgerType(JS(FeeSettings), JS(fee), ripple::ltFEE_SETTINGS),
+        LedgerTypeAttribute::chainLedgerType(JS(LedgerHashes), JS(hashes), ripple::ltLEDGER_HASHES),
+        LedgerTypeAttribute::accountOwnedLedgerType(JS(Offer), JS(offer), ripple::ltOFFER),
+        LedgerTypeAttribute::deletionBlockerLedgerType(JS(PayChannel), JS(payment_channel), ripple::ltPAYCHAN),
+        LedgerTypeAttribute::accountOwnedLedgerType(JS(SignerList), JS(signer_list), ripple::ltSIGNER_LIST),
+        LedgerTypeAttribute::deletionBlockerLedgerType(JS(RippleState), JS(state), ripple::ltRIPPLE_STATE),
+        LedgerTypeAttribute::accountOwnedLedgerType(JS(Ticket), JS(ticket), ripple::ltTICKET),
+        LedgerTypeAttribute::accountOwnedLedgerType(JS(NFTokenOffer), JS(nft_offer), ripple::ltNFTOKEN_OFFER),
+        LedgerTypeAttribute::deletionBlockerLedgerType(JS(NFTokenPage), JS(nft_page), ripple::ltNFTOKEN_PAGE),
+        LedgerTypeAttribute::accountOwnedLedgerType(JS(AMM), JS(amm), ripple::ltAMM),
+        LedgerTypeAttribute::deletionBlockerLedgerType(JS(Bridge), JS(bridge), ripple::ltBRIDGE),
+        LedgerTypeAttribute::deletionBlockerLedgerType(
+            JS(XChainOwnedClaimID),
+            JS(xchain_owned_claim_id),
+            ripple::ltXCHAIN_OWNED_CLAIM_ID
+        ),
+        LedgerTypeAttribute::deletionBlockerLedgerType(
+            JS(XChainOwnedCreateAccountClaimID),
+            JS(xchain_owned_create_account_claim_id),
+            ripple::ltXCHAIN_OWNED_CREATE_ACCOUNT_CLAIM_ID
+        ),
-        LedgerTypeAttribute::accountOwnedLedgerType(JS(did), ripple::ltDID),
-        LedgerTypeAttribute::accountOwnedLedgerType(JS(oracle), ripple::ltORACLE),
-        LedgerTypeAttribute::accountOwnedLedgerType(JS(credential), ripple::ltCREDENTIAL),
-        LedgerTypeAttribute::accountOwnedLedgerType(JS(vault), ripple::ltVAULT),
-        LedgerTypeAttribute::chainLedgerType(JS(nunl), ripple::ltNEGATIVE_UNL),
-        LedgerTypeAttribute::deletionBlockerLedgerType(JS(mpt_issuance), ripple::ltMPTOKEN_ISSUANCE),
-        LedgerTypeAttribute::deletionBlockerLedgerType(JS(mptoken), ripple::ltMPTOKEN),
-        LedgerTypeAttribute::deletionBlockerLedgerType(JS(permissioned_domain), ripple::ltPERMISSIONED_DOMAIN),
-        LedgerTypeAttribute::accountOwnedLedgerType(JS(delegate), ripple::ltDELEGATE),
+        LedgerTypeAttribute::accountOwnedLedgerType(JS(DID), JS(did), ripple::ltDID),
+        LedgerTypeAttribute::accountOwnedLedgerType(JS(Oracle), JS(oracle), ripple::ltORACLE),
+        LedgerTypeAttribute::accountOwnedLedgerType(JS(Credential), JS(credential), ripple::ltCREDENTIAL),
+        LedgerTypeAttribute::accountOwnedLedgerType(JS(Vault), JS(vault), ripple::ltVAULT),
+        LedgerTypeAttribute::chainLedgerType(JS(NegativeUNL), JS(nunl), ripple::ltNEGATIVE_UNL),
+        LedgerTypeAttribute::deletionBlockerLedgerType(
+            JS(MPTokenIssuance),
+            JS(mpt_issuance),
+            ripple::ltMPTOKEN_ISSUANCE
+        ),
+        LedgerTypeAttribute::deletionBlockerLedgerType(JS(MPToken), JS(mptoken), ripple::ltMPTOKEN),
+        LedgerTypeAttribute::deletionBlockerLedgerType(
+            JS(PermissionedDomain),
+            JS(permissioned_domain),
+            ripple::ltPERMISSIONED_DOMAIN
+        ),
+        LedgerTypeAttribute::accountOwnedLedgerType(JS(Delegate), JS(delegate), ripple::ltDELEGATE),
     };

 public:
@@ -131,32 +154,7 @@ public:
     getLedgerEntryTypeStrList()
     {
         std::array<char const*, std::size(kLEDGER_TYPES)> res{};
-        std::ranges::transform(kLEDGER_TYPES, std::begin(res), [](auto const& item) { return item.name_; });
-        return res;
-    }
-
-    /**
-     * @brief Returns a list of all account owned ledger entry type as string.
-     *
-     * @return A list of all account owned ledger entry type as string.
-     */
-    static constexpr auto
-    getAccountOwnedLedgerTypeStrList()
-    {
-        constexpr auto kFILTER = [](auto const& item) {
-            return item.category_ != LedgerTypeAttribute::LedgerCategory::Chain;
-        };
-
-        constexpr auto kACCOUNT_OWNED_COUNT =
-            std::count_if(std::begin(kLEDGER_TYPES), std::end(kLEDGER_TYPES), kFILTER);
-        std::array<char const*, kACCOUNT_OWNED_COUNT> res{};
-        auto it = std::begin(res);
-        std::ranges::for_each(kLEDGER_TYPES, [&](auto const& item) {
-            if (kFILTER(item)) {
-                *it = item.name_;
-                ++it;
-            }
-        });
+        std::ranges::transform(kLEDGER_TYPES, std::begin(res), [](auto const& item) { return item.rpcName_; });
         return res;
     }
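
Note: the simplified list builder projects one field of the attribute table into a std::array sized from the table itself, and all of it is usable at compile time. A minimal standalone version of that projection:

#include <algorithm>
#include <array>
#include <iostream>
#include <iterator>

struct Item {
    char const* name;
    int type;
};

constexpr Item kItems[]{{"account", 1}, {"offer", 2}, {"ticket", 3}};

constexpr auto
names()
{
    // the array size tracks the table automatically via std::size
    std::array<char const*, std::size(kItems)> res{};
    std::ranges::transform(kItems, res.begin(), [](Item const& i) { return i.name; });
    return res;
}

int main()
{
    for (auto const* n : names())
        std::cout << n << '\n';
}
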
@@ -188,11 +186,25 @@ public:
     /**
      * @brief Returns the ripple::LedgerEntryType from the given string.
      *
-     * @param entryName The name of the ledger entry type
+     * @param entryName The name or canonical name (case-insensitive) of the ledger entry type for all categories
      * @return The ripple::LedgerEntryType of the given string, returns ltANY if not found.
      */
     static ripple::LedgerEntryType
     getLedgerEntryTypeFromStr(std::string const& entryName);
+
+    /**
+     * @brief Returns the ripple::LedgerEntryType from the given string.
+     *
+     * @param entryName The name or canonical name (case-insensitive) of the ledger entry type for account owned
+     * category
+     * @return The ripple::LedgerEntryType of the given string, returns ltANY if not found.
+     */
+    static ripple::LedgerEntryType
+    getAccountOwnedLedgerTypeFromStr(std::string const& entryName);
+
+private:
+    static std::optional<std::reference_wrapper<impl::LedgerTypeAttribute const>>
+    getLedgerTypeAttributeFromStr(std::string const& entryName);
 };

 /**
57 src/util/ScopeGuard.hpp Normal file
@@ -0,0 +1,57 @@
+//------------------------------------------------------------------------------
+/*
+    This file is part of clio: https://github.com/XRPLF/clio
+    Copyright (c) 2025, the clio developers.
+
+    Permission to use, copy, modify, and distribute this software for any
+    purpose with or without fee is hereby granted, provided that the above
+    copyright notice and this permission notice appear in all copies.
+
+    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+//==============================================================================
+
+#pragma once
+
+#include <utility>
+
+namespace util {
+
+/**
+ * @brief Run a function when the scope is exited
+ */
+template <typename Func>
+class ScopeGuard {
+public:
+    ScopeGuard(ScopeGuard const&) = delete;
+    ScopeGuard(ScopeGuard&&) = delete;
+    ScopeGuard&
+    operator=(ScopeGuard const&) = delete;
+    ScopeGuard&
+    operator=(ScopeGuard&&) = delete;
+
+    /**
+     * @brief Create ScopeGuard object.
+     *
+     * @param func The function to run when the scope is exited.
+     */
+    ScopeGuard(Func func) : func_(std::move(func))
+    {
+    }
+
+    ~ScopeGuard()
+    {
+        func_();
+    }
+
+private:
+    Func func_;
+};
+
+} // namespace util
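
Note: a usage sketch for the new util::ScopeGuard (the class below is condensed from the header above so the sketch compiles on its own; CTAD deduces Func from the lambda):

#include <cstdio>
#include <utility>

namespace util {

// condensed copy of ScopeGuard from src/util/ScopeGuard.hpp, for a runnable sketch
template <typename Func>
class ScopeGuard {
public:
    ScopeGuard(Func func) : func_(std::move(func))
    {
    }

    ~ScopeGuard()
    {
        func_();  // runs on every exit path, normal or exceptional
    }

private:
    Func func_;
};

}  // namespace util

int main()
{
    std::FILE* f = std::fopen("example.txt", "w");
    if (f == nullptr)
        return 1;
    util::ScopeGuard closeFile{[f] { std::fclose(f); }};  // CTAD deduces Func

    std::fputs("hello\n", f);  // the file is closed even if later code returns early
    return 0;
}
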
48 src/util/Shasum.cpp Normal file
@@ -0,0 +1,48 @@
+//------------------------------------------------------------------------------
+/*
+    This file is part of clio: https://github.com/XRPLF/clio
+    Copyright (c) 2025, the clio developers.
+
+    Permission to use, copy, modify, and distribute this software for any
+    purpose with or without fee is hereby granted, provided that the above
+    copyright notice and this permission notice appear in all copies.
+
+    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+//==============================================================================
+
+#include "util/Shasum.hpp"
+
+#include <xrpl/basics/base_uint.h>
+#include <xrpl/protocol/digest.h>
+
+#include <cstring>
+#include <string>
+#include <string_view>
+
+namespace util {
+
+ripple::uint256
+sha256sum(std::string_view s)
+{
+    ripple::sha256_hasher hasher;
+    hasher(s.data(), s.size());
+    auto const hashData = static_cast<ripple::sha256_hasher::result_type>(hasher);
+    ripple::uint256 sha256;
+    std::memcpy(sha256.data(), hashData.data(), hashData.size());
+    return sha256;
+}
+
+std::string
+sha256sumString(std::string_view s)
+{
+    return ripple::to_string(sha256sum(s));
+}
+
+} // namespace util
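
Note: a standalone cross-check of what these helpers compute, using OpenSSL's one-shot SHA256() rather than ripple::sha256_hasher (link with -lcrypto; the digest of "hello" is a standard SHA-256 test vector):

#include <openssl/sha.h>  // SHA256() is deprecated in OpenSSL 3.0 but still available

#include <cstdio>
#include <string_view>

int main()
{
    constexpr std::string_view kInput = "hello";
    unsigned char digest[SHA256_DIGEST_LENGTH];
    SHA256(reinterpret_cast<unsigned char const*>(kInput.data()), kInput.size(), digest);

    for (unsigned char byte : digest)
        std::printf("%02x", byte);
    // prints: 2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824
    // (ripple::to_string may differ only in hex casing)
    std::printf("\n");
}
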
@@ -1,7 +1,7 @@
 //------------------------------------------------------------------------------
 /*
     This file is part of clio: https://github.com/XRPLF/clio
-    Copyright (c) 2024, the clio developers.
+    Copyright (c) 2025, the clio developers.

     Permission to use, copy, modify, and distribute this software for any
     purpose with or without fee is hereby granted, provided that the above
@@ -19,31 +19,28 @@

 #pragma once

-#include "util/config/ConfigFileInterface.hpp"
-#include "util/config/Types.hpp"
-
-#include <boost/filesystem/path.hpp>
+#include <xrpl/basics/base_uint.h>

+#include <string>
 #include <string_view>
-#include <vector>

-// TODO: implement when we support yaml
-namespace util::config {
+namespace util {

-/** @brief Yaml representation of config */
-class ConfigFileYaml final : public ConfigFileInterface {
-public:
-    ConfigFileYaml() = default;
-
-    Value
-    getValue(std::string_view key) const override;
-
-    std::vector<Value>
-    getArray(std::string_view key) const override;
-
-    bool
-    containsKey(std::string_view key) const override;
-};
+/**
+ * @brief Calculates the SHA256 sum of a string.
+ *
+ * @param s The string to hash.
+ * @return The SHA256 sum as a ripple::uint256.
+ */
+ripple::uint256
+sha256sum(std::string_view s);
+
+/**
+ * @brief Calculates the SHA256 sum of a string and returns it as a hex string.
+ *
+ * @param s The string to hash.
+ * @return The SHA256 sum as a hex string.
+ */
+std::string
+sha256sumString(std::string_view s);

-} // namespace util::config
+} // namespace util
@@ -206,7 +206,7 @@ class TagDecoratorFactory final {
         if (boost::iequals(style, "uuid"))
             return TagDecoratorFactory::Type::UUID;

-        ASSERT(false, "log_tag_style does not have valid value");
+        ASSERT(false, "log.tag_style does not have valid value");
         std::unreachable();
     }

@@ -219,7 +219,7 @@ public:
     * @param config The configuration as a json object
     */
    explicit TagDecoratorFactory(util::config::ClioConfigDefinition const& config)
-        : type_{getLogTagType(config.get<std::string>("log_tag_style"))}
+        : type_{getLogTagType(config.get<std::string>("log.tag_style"))}
    {
    }
Some files were not shown because too many files have changed in this diff.