Compare commits

..

5 Commits

Author · SHA1 · Message · Date
Sergey Kuznetsov · 7558dfc7f5 · chore: Commits for 2.5.0 (#2352) · 2025-07-22 11:43:18 +01:00
Sergey Kuznetsov · d83be17ded · chore: Commits for 2.5.0 (#2330) · 2025-07-14 15:55:34 +01:00
Sergey Kuznetsov · f04d2a97ec · chore: Commits for 2.5.0 (#2268) · 2025-06-30 11:32:26 +01:00
Sergey Kuznetsov · b8b82e5dd9 · chore: Commits for 2.5.0-b3 (#2197) · 2025-06-09 11:48:12 +01:00
Sergey Kuznetsov · 764601e7fc · chore: Commits for 2.5.0-b2 (#2146) · 2025-05-20 16:53:06 +01:00
395 changed files with 8341 additions and 12735 deletions

View File

@@ -16,7 +16,6 @@ coverage:
#
# More info: https://github.com/XRPLF/clio/pull/2066
ignore:
- "benchmarks"
- "tests"
- "src/data/cassandra/"
- "src/data/CassandraBackend.hpp"

View File

@@ -24,7 +24,6 @@ inputs:
dockerhub_repo:
description: DockerHub repository name
required: false
default: ""
dockerhub_description:
description: Short description of the image
required: false
@@ -34,14 +33,14 @@ runs:
steps:
- name: Login to DockerHub
if: ${{ inputs.push_image == 'true' && inputs.dockerhub_repo != '' }}
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
username: ${{ env.DOCKERHUB_USER }}
password: ${{ env.DOCKERHUB_PW }}
- name: Login to GitHub Container Registry
if: ${{ inputs.push_image == 'true' }}
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
@@ -52,7 +51,7 @@ runs:
cache-image: false
- uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
- uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v5.8.0
- uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5.7.0
id: meta
with:
images: ${{ inputs.images }}

View File

@@ -1,73 +0,0 @@
name: Run CMake
description: Run CMake to generate build files
inputs:
build_dir:
description: Build directory
required: false
default: "build"
conan_profile:
description: Conan profile name
required: true
build_type:
description: Build type for third-party libraries and clio. Could be 'Release', 'Debug'
required: true
default: "Release"
integration_tests:
description: Whether to generate target integration tests
required: true
default: "true"
benchmark:
description: Whether to generate targets for benchmarks
required: true
default: "true"
code_coverage:
description: Whether to enable code coverage
required: true
default: "false"
static:
description: Whether Clio is to be statically linked
required: true
default: "false"
time_trace:
description: Whether to enable compiler trace reports
required: true
default: "false"
package:
description: Whether to generate Debian package
required: true
default: "false"
runs:
using: composite
steps:
- name: Run cmake
shell: bash
env:
BUILD_TYPE: "${{ inputs.build_type }}"
SANITIZER_OPTION: |-
${{ endsWith(inputs.conan_profile, '.asan') && '-Dsan=address' ||
endsWith(inputs.conan_profile, '.tsan') && '-Dsan=thread' ||
endsWith(inputs.conan_profile, '.ubsan') && '-Dsan=undefined' ||
'' }}
INTEGRATION_TESTS: "${{ inputs.integration_tests == 'true' && 'ON' || 'OFF' }}"
BENCHMARK: "${{ inputs.benchmark == 'true' && 'ON' || 'OFF' }}"
COVERAGE: "${{ inputs.code_coverage == 'true' && 'ON' || 'OFF' }}"
STATIC: "${{ inputs.static == 'true' && 'ON' || 'OFF' }}"
TIME_TRACE: "${{ inputs.time_trace == 'true' && 'ON' || 'OFF' }}"
PACKAGE: "${{ inputs.package == 'true' && 'ON' || 'OFF' }}"
run: |
cmake \
-B ${{inputs.build_dir}} \
-S . \
-G Ninja \
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
-DCMAKE_BUILD_TYPE="${BUILD_TYPE}" \
"${SANITIZER_OPTION}" \
-Dtests=ON \
-Dintegration_tests="${INTEGRATION_TESTS}" \
-Dbenchmark="${BENCHMARK}" \
-Dcoverage="${COVERAGE}" \
-Dstatic="${STATIC}" \
-Dtime_trace="${TIME_TRACE}" \
-Dpackage="${PACKAGE}"
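
The `SANITIZER_OPTION` expression above chains `&&`/`||` to emulate a ternary on the conan profile suffix. A minimal bash sketch of the same selection logic, assuming the `.asan`/`.tsan`/`.ubsan` profile naming convention shown (the helper name is hypothetical):

```bash
#!/usr/bin/env bash
# Hypothetical helper mirroring SANITIZER_OPTION: map a conan profile
# suffix to the matching -Dsan= flag (empty string if no sanitizer).
sanitizer_option() {
  case "$1" in
    *.asan)  echo "-Dsan=address" ;;
    *.tsan)  echo "-Dsan=thread" ;;
    *.ubsan) echo "-Dsan=undefined" ;;
    *)       echo "" ;;
  esac
}

sanitizer_option "gcc.ubsan"   # prints: -Dsan=undefined
sanitizer_option "clang"       # prints nothing
```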

View File

@@ -15,7 +15,6 @@ runs:
shell: bash
run: |
gcovr \
-e benchmarks \
-e tests \
-e src/data/cassandra \
-e src/data/CassandraBackend.hpp \

View File

@@ -1,38 +0,0 @@
name: Run Conan
description: Run conan to install dependencies
inputs:
build_dir:
description: Build directory
required: false
default: "build"
conan_profile:
description: Conan profile name
required: true
force_conan_source_build:
description: Whether conan should build all dependencies from source
required: true
default: "false"
build_type:
description: Build type for third-party libraries and clio. Could be 'Release', 'Debug'
required: true
default: "Release"
runs:
using: composite
steps:
- name: Create build directory
shell: bash
run: mkdir -p "${{ inputs.build_dir }}"
- name: Run conan
shell: bash
env:
CONAN_BUILD_OPTION: "${{ inputs.force_conan_source_build == 'true' && '*' || 'missing' }}"
run: |
conan \
install . \
-of build \
-b "$CONAN_BUILD_OPTION" \
-s "build_type=${{ inputs.build_type }}" \
--profile:all "${{ inputs.conan_profile }}"
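
The `CONAN_BUILD_OPTION` toggle above switches the `-b`/`--build` mode. A hedged sketch of the two invocations it produces (profile name assumed to be `gcc`):

```bash
# "missing" compiles only dependencies absent from the local cache;
# "*" rebuilds every dependency from source (the force_conan_source_build mode).
conan install . -of build -s build_type=Release --profile:all gcc -b missing
conan install . -of build -s build_type=Release --profile:all gcc -b "*"
```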

.github/actions/generate/action.yml (vendored, new file, +79)
View File

@@ -0,0 +1,79 @@
name: Run conan and cmake
description: Run conan and cmake
inputs:
conan_profile:
description: Conan profile name
required: true
force_conan_source_build:
description: Whether conan should build all dependencies from source
required: true
default: "false"
build_type:
description: Build type for third-party libraries and clio. Could be 'Release', 'Debug'
required: true
default: "Release"
build_integration_tests:
description: Whether to build integration tests
required: true
default: "true"
code_coverage:
description: Whether conan's coverage option should be on or not
required: true
default: "false"
static:
description: Whether Clio is to be statically linked
required: true
default: "false"
time_trace:
description: Whether to enable compiler trace reports
required: true
default: "false"
runs:
using: composite
steps:
- name: Create build directory
shell: bash
run: mkdir -p build
- name: Run conan
shell: bash
env:
CONAN_BUILD_OPTION: "${{ inputs.force_conan_source_build == 'true' && '*' || 'missing' }}"
CODE_COVERAGE: "${{ inputs.code_coverage == 'true' && 'True' || 'False' }}"
STATIC_OPTION: "${{ inputs.static == 'true' && 'True' || 'False' }}"
INTEGRATION_TESTS_OPTION: "${{ inputs.build_integration_tests == 'true' && 'True' || 'False' }}"
TIME_TRACE: "${{ inputs.time_trace == 'true' && 'True' || 'False' }}"
run: |
cd build
conan \
install .. \
-of . \
-b "$CONAN_BUILD_OPTION" \
-s "build_type=${{ inputs.build_type }}" \
-o "&:static=${STATIC_OPTION}" \
-o "&:tests=True" \
-o "&:integration_tests=${INTEGRATION_TESTS_OPTION}" \
-o "&:lint=False" \
-o "&:coverage=${CODE_COVERAGE}" \
-o "&:time_trace=${TIME_TRACE}" \
--profile:all "${{ inputs.conan_profile }}"
- name: Run cmake
shell: bash
env:
BUILD_TYPE: "${{ inputs.build_type }}"
SANITIZER_OPTION: |-
${{ endsWith(inputs.conan_profile, '.asan') && '-Dsan=address' ||
endsWith(inputs.conan_profile, '.tsan') && '-Dsan=thread' ||
endsWith(inputs.conan_profile, '.ubsan') && '-Dsan=undefined' ||
'' }}
run: |
cd build
cmake \
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
-DCMAKE_BUILD_TYPE="${BUILD_TYPE}" \
"${SANITIZER_OPTION}" \
.. \
-G Ninja
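
A sketch of reproducing the new `generate` action by hand, with option values picked for a plain static test build; the `gcc` profile name is an assumption, and the toolchain path is copied verbatim from the action:

```bash
# Sketch: local equivalent of the combined conan + cmake action above.
mkdir -p build && cd build
conan install .. -of . -b missing -s build_type=Release \
  -o '&:static=True' -o '&:tests=True' -o '&:integration_tests=True' \
  -o '&:lint=False' -o '&:coverage=False' -o '&:time_trace=False' \
  --profile:all gcc
# Toolchain path as written in the action (resolved from the build dir).
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
  -DCMAKE_BUILD_TYPE=Release "" .. -G Ninja
```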

View File

@@ -0,0 +1,77 @@
name: Prepare runner
description: Install packages, set environment variables, create directories
inputs:
disable_ccache:
description: Whether ccache should be disabled
required: true
runs:
using: composite
steps:
- name: Install packages on mac
if: ${{ runner.os == 'macOS' }}
shell: bash
run: |
brew install --quiet \
bison \
ca-certificates \
ccache \
clang-build-analyzer \
conan \
gh \
jq \
llvm@14 \
ninja \
pkg-config
echo "/opt/homebrew/opt/conan@2/bin" >> $GITHUB_PATH
- name: Install CMake 3.31.6 on mac
if: ${{ runner.os == 'macOS' }}
shell: bash
run: |
# Uninstall any existing cmake
brew uninstall --formula cmake --ignore-dependencies || true
# Download specific cmake formula
FORMULA_URL="https://raw.githubusercontent.com/Homebrew/homebrew-core/b4e46db74e74a8c1650b38b1da222284ce1ec5ce/Formula/c/cmake.rb"
FORMULA_EXPECTED_SHA256="c7ec95d86f0657638835441871e77541165e0a2581b53b3dd657cf13ad4228d4"
mkdir -p /tmp/homebrew-formula
curl -s -L "$FORMULA_URL" -o /tmp/homebrew-formula/cmake.rb
echo "$FORMULA_EXPECTED_SHA256 /tmp/homebrew-formula/cmake.rb" | shasum -a 256 -c
# Install cmake from the specific formula with force flag
brew install --formula --quiet --force /tmp/homebrew-formula/cmake.rb
- name: Fix git permissions on Linux
if: ${{ runner.os == 'Linux' }}
shell: bash
run: git config --global --add safe.directory "$PWD"
- name: Set env variables for macOS
if: ${{ runner.os == 'macOS' }}
shell: bash
run: |
echo "CCACHE_DIR=${{ github.workspace }}/.ccache" >> $GITHUB_ENV
echo "CONAN_HOME=${{ github.workspace }}/.conan2" >> $GITHUB_ENV
- name: Set env variables for Linux
if: ${{ runner.os == 'Linux' }}
shell: bash
run: |
echo "CCACHE_DIR=/root/.ccache" >> $GITHUB_ENV
echo "CONAN_HOME=/root/.conan2" >> $GITHUB_ENV
- name: Set CCACHE_DISABLE=1
if: ${{ inputs.disable_ccache == 'true' }}
shell: bash
run: |
echo "CCACHE_DISABLE=1" >> $GITHUB_ENV
- name: Create directories
shell: bash
run: |
mkdir -p "$CCACHE_DIR"
mkdir -p "$CONAN_HOME"

View File

@@ -39,19 +39,6 @@ updates:
prefix: "ci: [DEPENDABOT] "
target-branch: develop
- package-ecosystem: github-actions
directory: .github/actions/cmake/
schedule:
interval: weekly
day: monday
time: "04:00"
timezone: Etc/GMT
reviewers:
- XRPLF/clio-dev-team
commit-message:
prefix: "ci: [DEPENDABOT] "
target-branch: develop
- package-ecosystem: github-actions
directory: .github/actions/code_coverage/
schedule:
@@ -66,7 +53,7 @@ updates:
target-branch: develop
- package-ecosystem: github-actions
directory: .github/actions/conan/
directory: .github/actions/create_issue/
schedule:
interval: weekly
day: monday
@@ -79,7 +66,7 @@ updates:
target-branch: develop
- package-ecosystem: github-actions
directory: .github/actions/create_issue/
directory: .github/actions/generate/
schedule:
interval: weekly
day: monday
@@ -117,6 +104,19 @@ updates:
prefix: "ci: [DEPENDABOT] "
target-branch: develop
- package-ecosystem: github-actions
directory: .github/actions/prepare_runner/
schedule:
interval: weekly
day: monday
time: "04:00"
timezone: Etc/GMT
reviewers:
- XRPLF/clio-dev-team
commit-message:
prefix: "ci: [DEPENDABOT] "
target-branch: develop
- package-ecosystem: github-actions
directory: .github/actions/restore_cache/
schedule:

View File

@@ -0,0 +1,8 @@
[settings]
arch={{detect_api.detect_arch()}}
build_type=Release
compiler=apple-clang
compiler.cppstd=20
compiler.libcxx=libc++
compiler.version=16
os=Macos

View File

@@ -3,7 +3,7 @@ import itertools
import json
LINUX_OS = ["heavy", "heavy-arm64"]
LINUX_CONTAINERS = ['{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }']
LINUX_CONTAINERS = ['{ "image": "ghcr.io/xrplf/clio-ci:latest" }']
LINUX_COMPILERS = ["gcc", "clang"]
MACOS_OS = ["macos15"]
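
The script imports `itertools`, so it presumably expands the cross product of these lists into a job matrix. A rough bash/jq sketch of the assumed behavior for the Linux entries (output shape is a guess):

```bash
# Assumed expansion: OS x compiler, each Linux entry tagged with the CI image.
for os in heavy heavy-arm64; do
  for compiler in gcc clang; do
    jq -cn --arg os "$os" --arg compiler "$compiler" \
      '{os: $os, compiler: $compiler,
        container: {image: "ghcr.io/xrplf/clio-ci:latest"}}'
  done
done
```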

View File

@@ -8,11 +8,10 @@ REPO_DIR="$(cd "$CURRENT_DIR/../../../" && pwd)"
CONAN_DIR="${CONAN_HOME:-$HOME/.conan2}"
PROFILES_DIR="$CONAN_DIR/profiles"
# When developers' compilers are updated, these profiles might be different
if [[ -z "$CI" ]]; then
APPLE_CLANG_PROFILE="$CURRENT_DIR/apple-clang-17.profile"
APPLE_CLANG_PROFILE="$CURRENT_DIR/apple-clang-local.profile"
else
APPLE_CLANG_PROFILE="$CURRENT_DIR/apple-clang-17.profile"
APPLE_CLANG_PROFILE="$CURRENT_DIR/apple-clang-ci.profile"
fi
GCC_PROFILE="$REPO_DIR/docker/ci/conan/gcc.profile"
@@ -22,7 +21,7 @@ SANITIZER_TEMPLATE_FILE="$REPO_DIR/docker/ci/conan/sanitizer_template.profile"
rm -rf "$CONAN_DIR"
conan remote add --index 0 xrplf https://conan.ripplex.io
conan remote add --index 0 ripple http://18.143.149.228:8081/artifactory/api/conan/dev
cp "$REPO_DIR/docker/ci/conan/global.conf" "$CONAN_DIR/global.conf"

View File

@@ -28,9 +28,8 @@ on:
workflow_dispatch:
concurrency:
# Develop branch: Each run gets unique group (using run_number) for parallel execution
# Other branches: Shared group with cancel-in-progress to stop old runs when new commits are pushed
group: ${{ github.workflow }}-${{ github.ref }}-${{ github.ref == 'refs/heads/develop' && github.run_number || 'branch' }}
# Only cancel in-progress jobs or runs for the current workflow - matches against branch & tags
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
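
For comparison, a sketch of the two concurrency-group keys this hunk swaps between (sample values assumed):

```bash
# Sketch: group keys under the removed and restored schemes.
workflow="nightly"; ref="refs/heads/develop"; run_number=42
# removed scheme: develop runs never share a group, other refs share one
old_group="${workflow}-${ref}-$([ "$ref" = refs/heads/develop ] && echo "$run_number" || echo branch)"
# restored scheme: one group per ref, so older in-progress runs get cancelled
new_group="${workflow}-${ref}"
echo "$old_group"   # nightly-refs/heads/develop-42
echo "$new_group"   # nightly-refs/heads/develop
```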
@@ -43,10 +42,7 @@ jobs:
os: [heavy]
conan_profile: [gcc, clang]
build_type: [Release, Debug]
container:
[
'{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }',
]
container: ['{ "image": "ghcr.io/xrplf/clio-ci:latest" }']
static: [true]
include:
@@ -62,8 +58,6 @@ jobs:
container: ${{ matrix.container }}
conan_profile: ${{ matrix.conan_profile }}
build_type: ${{ matrix.build_type }}
download_ccache: true
upload_ccache: true
static: ${{ matrix.static }}
run_unit_tests: true
run_integration_tests: false
@@ -75,11 +69,10 @@ jobs:
uses: ./.github/workflows/build_impl.yml
with:
runs_on: heavy
container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
container: '{ "image": "ghcr.io/xrplf/clio-ci:latest" }'
conan_profile: gcc
build_type: Debug
download_ccache: true
upload_ccache: false
disable_cache: false
code_coverage: true
static: true
upload_clio_server: false
@@ -88,35 +81,17 @@ jobs:
secrets:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
package:
name: Build packages
uses: ./.github/workflows/build_impl.yml
with:
runs_on: heavy
container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
conan_profile: gcc
build_type: Release
download_ccache: true
upload_ccache: false
code_coverage: false
static: true
upload_clio_server: false
package: true
targets: package
analyze_build_time: false
check_config:
name: Check Config Description
needs: build-and-test
runs-on: heavy
container:
image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
image: ghcr.io/xrplf/clio-ci:latest
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v5
- uses: actions/download-artifact@v4
with:
name: clio_server_Linux_Release_gcc

View File

@@ -23,14 +23,8 @@ on:
required: true
type: string
download_ccache:
description: Whether to download ccache from the cache
required: false
type: boolean
default: true
upload_ccache:
description: Whether to upload ccache to the cache
disable_cache:
description: Whether ccache should be disabled
required: false
type: boolean
default: false
@@ -63,18 +57,6 @@ on:
type: string
default: all
expected_version:
description: Expected version of the clio_server binary
required: false
type: string
default: ""
package:
description: Whether to generate Debian package
required: false
type: boolean
default: false
jobs:
build:
uses: ./.github/workflows/build_impl.yml
@@ -83,15 +65,12 @@ jobs:
container: ${{ inputs.container }}
conan_profile: ${{ inputs.conan_profile }}
build_type: ${{ inputs.build_type }}
download_ccache: ${{ inputs.download_ccache }}
upload_ccache: ${{ inputs.upload_ccache }}
disable_cache: ${{ inputs.disable_cache }}
code_coverage: false
static: ${{ inputs.static }}
upload_clio_server: ${{ inputs.upload_clio_server }}
targets: ${{ inputs.targets }}
analyze_build_time: false
expected_version: ${{ inputs.expected_version }}
package: ${{ inputs.package }}
test:
needs: build

View File

@@ -48,7 +48,7 @@ jobs:
- name: Download Clio binary from artifact
if: ${{ inputs.artifact_name != null }}
uses: actions/download-artifact@v5
uses: actions/download-artifact@v4
with:
name: ${{ inputs.artifact_name }}
path: ./docker/clio/artifact/
@@ -73,8 +73,7 @@ jobs:
elif [[ $artifact == *.tar.gz ]]; then
tar -xvf $artifact
fi
chmod +x ./clio_server
mv ./clio_server ../
mv clio_server ../
cd ../
rm -rf ./artifact
@@ -83,11 +82,6 @@ jobs:
shell: bash
run: strip ./docker/clio/clio_server
- name: Set GHCR_REPO
id: set-ghcr-repo
run: |
echo "GHCR_REPO=$(echo ghcr.io/${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> ${GITHUB_OUTPUT}
- name: Build Docker image
uses: ./.github/actions/build_docker_image
env:
@@ -96,11 +90,11 @@ jobs:
DOCKERHUB_PW: ${{ secrets.DOCKERHUB_PW }}
with:
images: |
ghcr.io/${{ steps.set-ghcr-repo.outputs.GHCR_REPO }}/clio
${{ github.repository_owner == 'XRPLF' && 'rippleci/clio' || '' }}
ghcr.io/xrplf/clio
rippleci/clio
push_image: ${{ inputs.publish_image }}
directory: docker/clio
tags: ${{ inputs.tags }}
platforms: linux/amd64
dockerhub_repo: ${{ github.repository_owner == 'XRPLF' && 'rippleci/clio' || '' }}
dockerhub_repo: rippleci/clio
dockerhub_description: Clio is an XRP Ledger API server.

View File

@@ -23,17 +23,10 @@ on:
required: true
type: string
download_ccache:
description: Whether to download ccache from the cache
disable_cache:
description: Whether ccache should be disabled
required: false
type: boolean
default: true
upload_ccache:
description: Whether to upload ccache to the cache
required: false
type: boolean
default: false
code_coverage:
description: Whether to enable code coverage
@@ -60,17 +53,6 @@ on:
required: true
type: boolean
expected_version:
description: Expected version of the clio_server binary
required: false
type: string
default: ""
package:
description: Whether to generate Debian package
required: false
type: boolean
secrets:
CODECOV_TOKEN:
required: false
@@ -82,30 +64,26 @@ jobs:
container: ${{ inputs.container != '' && fromJson(inputs.container) || null }}
steps:
- name: Cleanup workspace
- name: Clean workdir
if: ${{ runner.os == 'macOS' }}
uses: XRPLF/actions/.github/actions/cleanup-workspace@ea9970b7c211b18f4c8bcdb28c29f5711752029f
uses: kuznetsss/workspace-cleanup@80b9863b45562c148927c3d53621ef354e5ae7ce # v1.0
- uses: actions/checkout@v4
with:
fetch-depth: 0
# We need to fetch tags to have correct version in the release
# The workaround is based on https://github.com/actions/checkout/issues/1467
fetch-tags: true
ref: ${{ github.ref }}
- name: Prepare runner
uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
uses: ./.github/actions/prepare_runner
with:
disable_ccache: ${{ !inputs.download_ccache }}
disable_ccache: ${{ inputs.disable_cache }}
- name: Setup conan on macOS
if: ${{ runner.os == 'macOS' }}
if: runner.os == 'macOS'
shell: bash
run: ./.github/scripts/conan/init.sh
- name: Restore cache
if: ${{ inputs.download_ccache }}
if: ${{ !inputs.disable_cache }}
uses: ./.github/actions/restore_cache
id: restore_cache
with:
@@ -114,21 +92,14 @@ jobs:
build_type: ${{ inputs.build_type }}
code_coverage: ${{ inputs.code_coverage }}
- name: Run conan
uses: ./.github/actions/conan
with:
conan_profile: ${{ inputs.conan_profile }}
build_type: ${{ inputs.build_type }}
- name: Run CMake
uses: ./.github/actions/cmake
- name: Run conan and cmake
uses: ./.github/actions/generate
with:
conan_profile: ${{ inputs.conan_profile }}
build_type: ${{ inputs.build_type }}
code_coverage: ${{ inputs.code_coverage }}
static: ${{ inputs.static }}
time_trace: ${{ inputs.analyze_build_time }}
package: ${{ inputs.package }}
- name: Build Clio
uses: ./.github/actions/build_clio
@@ -151,7 +122,7 @@ jobs:
path: build_time_report.txt
- name: Show ccache's statistics
if: ${{ inputs.download_ccache }}
if: ${{ !inputs.disable_cache }}
shell: bash
id: ccache_stats
run: |
@@ -169,35 +140,28 @@ jobs:
run: strip build/clio_integration_tests
- name: Upload clio_server
if: ${{ inputs.upload_clio_server && !inputs.code_coverage && !inputs.analyze_build_time }}
if: inputs.upload_clio_server && !inputs.code_coverage && !inputs.analyze_build_time
uses: actions/upload-artifact@v4
with:
name: clio_server_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
path: build/clio_server
- name: Upload clio_tests
if: ${{ !inputs.code_coverage && !inputs.analyze_build_time && !inputs.package }}
if: ${{ !inputs.code_coverage && !inputs.analyze_build_time }}
uses: actions/upload-artifact@v4
with:
name: clio_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
path: build/clio_tests
- name: Upload clio_integration_tests
if: ${{ !inputs.code_coverage && !inputs.analyze_build_time && !inputs.package }}
if: ${{ !inputs.code_coverage && !inputs.analyze_build_time }}
uses: actions/upload-artifact@v4
with:
name: clio_integration_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
path: build/clio_integration_tests
- name: Upload Clio Linux package
if: ${{ inputs.package }}
uses: actions/upload-artifact@v4
with:
name: clio_deb_package_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
path: build/*.deb
- name: Save cache
if: ${{ inputs.upload_ccache && github.ref == 'refs/heads/develop' }}
if: ${{ !inputs.disable_cache && github.ref == 'refs/heads/develop' }}
uses: ./.github/actions/save_cache
with:
conan_profile: ${{ inputs.conan_profile }}
@@ -210,6 +174,7 @@ jobs:
# This is run as part of the build job, because it requires the following:
# - source code
# - generated source code (Build.cpp)
# - conan packages
# - .gcno files in build directory
#
@@ -218,18 +183,6 @@ jobs:
if: ${{ inputs.code_coverage }}
uses: ./.github/actions/code_coverage
- name: Verify expected version
if: ${{ inputs.expected_version != '' }}
shell: bash
run: |
set -e
EXPECTED_VERSION="clio-${{ inputs.expected_version }}"
actual_version=$(./build/clio_server --version)
if [[ "$actual_version" != "$EXPECTED_VERSION" ]]; then
echo "Expected version '$EXPECTED_VERSION', but got '$actual_version'"
exit 1
fi
# `codecov/codecov-action` will rerun `gcov` if it's available and build directory is present
# To prevent this from happening, we run this action in a separate workflow
#

View File

@@ -17,36 +17,31 @@ jobs:
name: Build Clio / `libXRPL ${{ github.event.client_payload.version }}`
runs-on: heavy
container:
image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
image: ghcr.io/xrplf/clio-ci:latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Prepare runner
uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
with:
disable_ccache: true
- name: Update libXRPL version requirement
shell: bash
run: |
sed -i.bak -E "s|'xrpl/[a-zA-Z0-9\\.\\-]+'|'xrpl/${{ github.event.client_payload.conan_ref }}'|g" conanfile.py
sed -i.bak -E "s|'xrpl/[a-zA-Z0-9\\.\\-]+'|'xrpl/${{ github.event.client_payload.version }}'|g" conanfile.py
rm -f conanfile.py.bak
- name: Update conan lockfile
shell: bash
run: |
conan lock create . --profile:all ${{ env.CONAN_PROFILE }}
conan lock create . -o '&:tests=True' -o '&:benchmark=True'
- name: Run conan
uses: ./.github/actions/conan
- name: Prepare runner
uses: ./.github/actions/prepare_runner
with:
conan_profile: ${{ env.CONAN_PROFILE }}
disable_ccache: true
- name: Run CMake
uses: ./.github/actions/cmake
- name: Run conan and cmake
uses: ./.github/actions/generate
with:
conan_profile: ${{ env.CONAN_PROFILE }}
@@ -67,10 +62,10 @@ jobs:
needs: build
runs-on: heavy
container:
image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
image: ghcr.io/xrplf/clio-ci:latest
steps:
- uses: actions/download-artifact@v5
- uses: actions/download-artifact@v4
with:
name: clio_tests_check_libxrpl
@@ -100,7 +95,6 @@ jobs:
labels: "compatibility,bug"
title: "Proposed libXRPL check failed"
body: >
Clio build or tests failed against `libXRPL ${{ github.event.client_payload.conan_ref }}`.
Clio build or tests failed against `libXRPL ${{ github.event.client_payload.version }}`.
PR: ${{ github.event.client_payload.pr_url }}
Workflow run: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}/
Workflow: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}/

View File

@@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: ytanikin/pr-conventional-commits@b72758283dcbee706975950e96bc4bf323a8d8c0 # v1.4.2
- uses: ytanikin/pr-conventional-commits@8267db1bacc237419f9ed0228bb9d94e94271a1d # v1.4.1
with:
task_types: '["build","feat","fix","docs","test","ci","style","refactor","perf","chore"]'
add_label: false

View File

@@ -1,8 +1,6 @@
name: Clang-tidy check
on:
push:
branches: [develop]
schedule:
- cron: "0 9 * * 1-5"
workflow_dispatch:
@@ -24,10 +22,9 @@ env:
jobs:
clang_tidy:
if: github.event_name != 'push' || contains(github.event.head_commit.message, 'clang-tidy auto fixes')
runs-on: heavy
container:
image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
image: ghcr.io/xrplf/clio-ci:latest
permissions:
contents: write
@@ -40,7 +37,7 @@ jobs:
fetch-depth: 0
- name: Prepare runner
uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
uses: ./.github/actions/prepare_runner
with:
disable_ccache: true
@@ -51,13 +48,8 @@ jobs:
conan_profile: ${{ env.CONAN_PROFILE }}
ccache_dir: ${{ env.CCACHE_DIR }}
- name: Run conan
uses: ./.github/actions/conan
with:
conan_profile: ${{ env.CONAN_PROFILE }}
- name: Run CMake
uses: ./.github/actions/cmake
- name: Run conan and cmake
uses: ./.github/actions/generate
with:
conan_profile: ${{ env.CONAN_PROFILE }}

View File

@@ -0,0 +1,30 @@
name: Restart clang-tidy workflow
on:
push:
branches: [develop]
workflow_dispatch:
jobs:
restart_clang_tidy:
runs-on: ubuntu-latest
permissions:
actions: write
steps:
- uses: actions/checkout@v4
- name: Check last commit matches clang-tidy auto fixes
id: check
shell: bash
run: |
passed=$(if [[ "$(git log -1 --pretty=format:%s | grep 'style: clang-tidy auto fixes')" ]]; then echo 'true' ; else echo 'false' ; fi)
echo "passed=\"$passed\"" >> $GITHUB_OUTPUT
- name: Run clang-tidy workflow
if: ${{ contains(steps.check.outputs.passed, 'true') }}
shell: bash
env:
GH_TOKEN: ${{ github.token }}
GH_REPO: ${{ github.repository }}
run: gh workflow run clang-tidy.yml
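
Note that the `passed` output above is written with embedded quotes, which is why the consumer uses `contains(..., 'true')` rather than an equality check. A hypothetical simplification that emits a bare value instead:

```bash
# Hypothetical rewrite of the check step: emit plain true/false so the
# consumer could use an equality comparison instead of contains().
if git log -1 --pretty=format:%s | grep -q 'style: clang-tidy auto fixes'; then
  echo "passed=true" >> "$GITHUB_OUTPUT"
else
  echo "passed=false" >> "$GITHUB_OUTPUT"
fi
```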

View File

@@ -14,7 +14,7 @@ jobs:
build:
runs-on: ubuntu-latest
container:
image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
image: ghcr.io/xrplf/clio-ci:latest
steps:
- name: Checkout
@@ -23,7 +23,7 @@ jobs:
lfs: true
- name: Prepare runner
uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
uses: ./.github/actions/prepare_runner
with:
disable_ccache: true
@@ -42,7 +42,7 @@ jobs:
uses: actions/configure-pages@v5
- name: Upload artifact
uses: actions/upload-pages-artifact@v4
uses: actions/upload-pages-artifact@v3
with:
path: build_docs/html
name: docs-develop

View File

@@ -39,17 +39,17 @@ jobs:
conan_profile: gcc
build_type: Release
static: true
container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
container: '{ "image": "ghcr.io/xrplf/clio-ci:latest" }'
- os: heavy
conan_profile: gcc
build_type: Debug
static: true
container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
container: '{ "image": "ghcr.io/xrplf/clio-ci:latest" }'
- os: heavy
conan_profile: gcc.ubsan
build_type: Release
static: false
container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
container: '{ "image": "ghcr.io/xrplf/clio-ci:latest" }'
uses: ./.github/workflows/build_and_test.yml
with:
@@ -61,8 +61,7 @@ jobs:
run_unit_tests: true
run_integration_tests: true
upload_clio_server: true
download_ccache: false
upload_ccache: false
disable_cache: true
analyze_build_time:
name: Analyze Build Time
@@ -73,7 +72,7 @@ jobs:
include:
- os: heavy
conan_profile: clang
container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
container: '{ "image": "ghcr.io/xrplf/clio-ci:latest" }'
static: true
- os: macos15
conan_profile: apple-clang
@@ -85,8 +84,7 @@ jobs:
container: ${{ matrix.container }}
conan_profile: ${{ matrix.conan_profile }}
build_type: Release
download_ccache: false
upload_ccache: false
disable_cache: true
code_coverage: false
static: ${{ matrix.static }}
upload_clio_server: false
@@ -102,6 +100,8 @@ jobs:
title: "Clio development (nightly) build"
version: nightly
header: >
# Release notes
> **Note:** Please remember that this is a development release and it is not recommended for production use.
Changelog (including previous releases): <https://github.com/XRPLF/clio/commits/nightly>

View File

@@ -4,19 +4,47 @@ on:
# every first day of the month
schedule:
- cron: "0 0 1 * *"
pull_request:
branches: [release/*, develop]
paths:
- ".pre-commit-config.yaml"
# on demand
workflow_dispatch:
jobs:
auto-update:
uses: XRPLF/actions/.github/workflows/pre-commit-autoupdate.yml@afbcbdafbe0ce5439492fb87eda6441371086386
runs-on: ubuntu-latest
permissions:
contents: write
pull-requests: write
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
sign_commit: true
committer: "Clio CI <skuznetsov@ripple.com>"
python-version: 3.x
- run: pip install pre-commit
- run: pre-commit autoupdate --freeze
- run: pre-commit run --all-files || true
- uses: crazy-max/ghaction-import-gpg@e89d40939c28e39f97cf32126055eeae86ba74ec # v6.3.0
if: github.event_name != 'pull_request'
with:
gpg_private_key: ${{ secrets.ACTIONS_GPG_PRIVATE_KEY }}
passphrase: ${{ secrets.ACTIONS_GPG_PASSPHRASE }}
git_user_signingkey: true
git_commit_gpgsign: true
- uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
if: always()
env:
GH_REPO: ${{ github.repository }}
GH_TOKEN: ${{ github.token }}
with:
commit-message: "style: Update pre-commit hooks"
committer: Clio CI <skuznetsov@ripple.com>
branch: update/pre-commit-hooks
branch-suffix: timestamp
delete-branch: true
title: "style: Update pre-commit hooks"
body: Update versions of pre-commit hooks to latest version.
reviewers: "godexsoft,kuznetsss,PeterChen13579,mathbunnyru"
secrets:
GPG_PRIVATE_KEY: ${{ secrets.ACTIONS_GPG_PRIVATE_KEY }}
GPG_PASSPHRASE: ${{ secrets.ACTIONS_GPG_PASSPHRASE }}
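
The same update can be run locally before opening a PR; this mirrors the workflow steps above (GPG signing omitted):

```bash
pip install pre-commit
pre-commit autoupdate --freeze      # pin hooks to frozen commit SHAs
pre-commit run --all-files || true  # apply any fixes the updated hooks produce
```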

View File

@@ -8,7 +8,20 @@ on:
jobs:
run-hooks:
uses: XRPLF/actions/.github/workflows/pre-commit.yml@afbcbdafbe0ce5439492fb87eda6441371086386
runs-on: heavy
container:
image: ghcr.io/xrplf/clio-ci:latest
steps:
- name: Checkout Repo ⚡️
uses: actions/checkout@v4
with:
runs_on: heavy
container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
fetch-depth: 0
- name: Prepare runner
uses: ./.github/actions/prepare_runner
with:
disable_ccache: true
- name: Run pre-commit ✅
run: pre-commit run --all-files

View File

@@ -29,7 +29,7 @@ jobs:
conan_profile: gcc
build_type: Release
static: true
container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
container: '{ "image": "ghcr.io/xrplf/clio-ci:latest" }'
uses: ./.github/workflows/build_and_test.yml
with:
@@ -41,9 +41,7 @@ jobs:
run_unit_tests: true
run_integration_tests: true
upload_clio_server: true
download_ccache: false
upload_ccache: false
expected_version: ${{ github.event_name == 'push' && github.ref_name || '' }}
disable_cache: true
release:
needs: build-and-test
@@ -54,6 +52,6 @@ jobs:
title: "${{ github.ref_name}}"
version: "${{ github.ref_name }}"
header: >
${{ contains(github.ref_name, '-') && '> **Note:** Please remember that this is a release candidate and it is not recommended for production use.' || '' }}
generate_changelog: ${{ !contains(github.ref_name, '-') }}
# Introducing Clio version ${{ github.ref_name }}
generate_changelog: true
draft: true

View File

@@ -42,7 +42,7 @@ jobs:
release:
runs-on: heavy
container:
image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
image: ghcr.io/xrplf/clio-ci:latest
env:
GH_REPO: ${{ github.repository }}
GH_TOKEN: ${{ github.token }}
@@ -56,11 +56,11 @@ jobs:
fetch-depth: 0
- name: Prepare runner
uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
uses: ./.github/actions/prepare_runner
with:
disable_ccache: true
- uses: actions/download-artifact@v5
- uses: actions/download-artifact@v4
with:
path: release_artifacts
pattern: clio_server_*
@@ -68,24 +68,34 @@ jobs:
- name: Create release notes
shell: bash
run: |
echo "# Release notes" > "${RUNNER_TEMP}/release_notes.md"
echo "" >> "${RUNNER_TEMP}/release_notes.md"
printf '%s\n' "${{ inputs.header }}" >> "${RUNNER_TEMP}/release_notes.md"
printf '%s\n' "${{ inputs.header }}" > "${RUNNER_TEMP}/release_notes.md"
- name: Generate changelog
shell: bash
if: ${{ inputs.generate_changelog }}
run: |
LAST_TAG="$(gh release view --json tagName -q .tagName --repo XRPLF/clio)"
LAST_TAG="$(gh release view --json tagName -q .tagName)"
LAST_TAG_COMMIT="$(git rev-parse $LAST_TAG)"
BASE_COMMIT="$(git merge-base HEAD $LAST_TAG_COMMIT)"
git-cliff "${BASE_COMMIT}..HEAD" --ignore-tags "nightly|-b|-rc"
git-cliff "${BASE_COMMIT}..HEAD" --ignore-tags "nightly|-b"
cat CHANGELOG.md >> "${RUNNER_TEMP}/release_notes.md"
- name: Prepare release artifacts
shell: bash
run: .github/scripts/prepare-release-artifacts.sh release_artifacts
- name: Append sha256 checksums
shell: bash
working-directory: release_artifacts
run: |
{
echo '## SHA256 checksums'
echo
echo '```'
cat *.sha256sum
echo '```'
} >> "${RUNNER_TEMP}/release_notes.md"
- name: Upload release notes
uses: actions/upload-artifact@v4
with:
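
Once the `*.sha256sum` files are published in the release notes, a downstream user can verify a downloaded binary against them; a sketch, with the artifact name assumed from the upload steps earlier in this diff:

```bash
# Verify the download against the published checksum file:
sha256sum -c clio_server_Linux_Release_gcc.sha256sum
# or recompute manually and compare by eye:
sha256sum clio_server_Linux_Release_gcc
```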

View File

@@ -44,9 +44,8 @@ jobs:
uses: ./.github/workflows/build_and_test.yml
with:
runs_on: heavy
container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
download_ccache: false
upload_ccache: false
container: '{ "image": "ghcr.io/xrplf/clio-ci:latest" }'
disable_cache: true
conan_profile: ${{ matrix.compiler }}${{ matrix.sanitizer_ext }}
build_type: ${{ matrix.build_type }}
static: false

View File

@@ -39,22 +39,22 @@ jobs:
runs-on: ${{ inputs.runs_on }}
container: ${{ inputs.container != '' && fromJson(inputs.container) || null }}
if: ${{ inputs.run_unit_tests }}
if: inputs.run_unit_tests
env:
# TODO: remove completely when we have fixed all currently existing issues with sanitizers
SANITIZER_IGNORE_ERRORS: ${{ endsWith(inputs.conan_profile, '.tsan') || inputs.conan_profile == 'clang.asan' || (inputs.conan_profile == 'gcc.asan' && inputs.build_type == 'Release') }}
SANITIZER_IGNORE_ERRORS: ${{ endsWith(inputs.conan_profile, '.asan') || endsWith(inputs.conan_profile, '.tsan') }}
steps:
- name: Cleanup workspace
- name: Clean workdir
if: ${{ runner.os == 'macOS' }}
uses: XRPLF/actions/.github/actions/cleanup-workspace@ea9970b7c211b18f4c8bcdb28c29f5711752029f
uses: kuznetsss/workspace-cleanup@80b9863b45562c148927c3d53621ef354e5ae7ce # v1.0
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: actions/download-artifact@v5
- uses: actions/download-artifact@v4
with:
name: clio_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
@@ -63,15 +63,15 @@ jobs:
run: chmod +x ./clio_tests
- name: Run clio_tests (regular)
if: ${{ env.SANITIZER_IGNORE_ERRORS == 'false' }}
if: env.SANITIZER_IGNORE_ERRORS == 'false'
run: ./clio_tests
- name: Run clio_tests (sanitizer errors ignored)
if: ${{ env.SANITIZER_IGNORE_ERRORS == 'true' }}
if: env.SANITIZER_IGNORE_ERRORS == 'true'
run: ./.github/scripts/execute-tests-under-sanitizer ./clio_tests
- name: Check for sanitizer report
if: ${{ env.SANITIZER_IGNORE_ERRORS == 'true' }}
if: env.SANITIZER_IGNORE_ERRORS == 'true'
shell: bash
id: check_report
run: |
@@ -82,7 +82,7 @@ jobs:
fi
- name: Upload sanitizer report
if: ${{ env.SANITIZER_IGNORE_ERRORS == 'true' && steps.check_report.outputs.found_report == 'true' }}
if: env.SANITIZER_IGNORE_ERRORS == 'true' && steps.check_report.outputs.found_report == 'true'
uses: actions/upload-artifact@v4
with:
name: sanitizer_report_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
@@ -90,7 +90,7 @@ jobs:
include-hidden-files: true
- name: Create an issue
if: ${{ false && env.SANITIZER_IGNORE_ERRORS == 'true' && steps.check_report.outputs.found_report == 'true' }}
if: false && env.SANITIZER_IGNORE_ERRORS == 'true' && steps.check_report.outputs.found_report == 'true'
uses: ./.github/actions/create_issue
env:
GH_TOKEN: ${{ github.token }}
@@ -108,7 +108,7 @@ jobs:
runs-on: ${{ inputs.runs_on }}
container: ${{ inputs.container != '' && fromJson(inputs.container) || null }}
if: ${{ inputs.run_integration_tests }}
if: inputs.run_integration_tests
services:
scylladb:
@@ -120,9 +120,9 @@ jobs:
--health-retries 5
steps:
- name: Cleanup workspace
- name: Clean workdir
if: ${{ runner.os == 'macOS' }}
uses: XRPLF/actions/.github/actions/cleanup-workspace@ea9970b7c211b18f4c8bcdb28c29f5711752029f
uses: kuznetsss/workspace-cleanup@80b9863b45562c148927c3d53621ef354e5ae7ce # v1.0
- name: Spin up scylladb
if: ${{ runner.os == 'macOS' }}
@@ -144,7 +144,7 @@ jobs:
sleep 5
done
- uses: actions/download-artifact@v5
- uses: actions/download-artifact@v4
with:
name: clio_integration_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}

View File

@@ -30,8 +30,8 @@ concurrency:
env:
CLANG_MAJOR_VERSION: 19
GCC_MAJOR_VERSION: 15
GCC_VERSION: 15.2.0
GCC_MAJOR_VERSION: 14
GCC_VERSION: 14.3.0
jobs:
repo:
@@ -56,12 +56,12 @@ jobs:
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
with:
files: "docker/compilers/gcc/**"
- uses: ./.github/actions/build_docker_image
if: ${{ steps.changed-files.outputs.any_changed == 'true' }}
if: steps.changed-files.outputs.any_changed == 'true'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
@@ -69,7 +69,7 @@ jobs:
with:
images: |
${{ needs.repo.outputs.GHCR_REPO }}/clio-gcc
${{ github.repository_owner == 'XRPLF' && 'rippleci/clio_gcc' || '' }}
rippleci/clio_gcc
push_image: ${{ github.event_name != 'pull_request' }}
directory: docker/compilers/gcc
tags: |
@@ -81,7 +81,7 @@ jobs:
build_args: |
GCC_MAJOR_VERSION=${{ env.GCC_MAJOR_VERSION }}
GCC_VERSION=${{ env.GCC_VERSION }}
dockerhub_repo: ${{ github.repository_owner == 'XRPLF' && 'rippleci/clio_gcc' || '' }}
dockerhub_repo: rippleci/clio_gcc
dockerhub_description: GCC compiler for XRPLF/clio.
gcc-arm64:
@@ -94,12 +94,12 @@ jobs:
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
with:
files: "docker/compilers/gcc/**"
- uses: ./.github/actions/build_docker_image
if: ${{ steps.changed-files.outputs.any_changed == 'true' }}
if: steps.changed-files.outputs.any_changed == 'true'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
@@ -107,7 +107,7 @@ jobs:
with:
images: |
${{ needs.repo.outputs.GHCR_REPO }}/clio-gcc
${{ github.repository_owner == 'XRPLF' && 'rippleci/clio_gcc' || '' }}
rippleci/clio_gcc
push_image: ${{ github.event_name != 'pull_request' }}
directory: docker/compilers/gcc
tags: |
@@ -119,7 +119,7 @@ jobs:
build_args: |
GCC_MAJOR_VERSION=${{ env.GCC_MAJOR_VERSION }}
GCC_VERSION=${{ env.GCC_VERSION }}
dockerhub_repo: ${{ github.repository_owner == 'XRPLF' && 'rippleci/clio_gcc' || '' }}
dockerhub_repo: rippleci/clio_gcc
dockerhub_description: GCC compiler for XRPLF/clio.
gcc-merge:
@@ -132,7 +132,7 @@ jobs:
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
with:
files: "docker/compilers/gcc/**"
@@ -140,26 +140,24 @@ jobs:
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Container Registry
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
if: github.event_name != 'pull_request'
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to DockerHub
if: ${{ github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' }}
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
if: github.event_name != 'pull_request'
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USER }}
password: ${{ secrets.DOCKERHUB_PW }}
- name: Create and push multi-arch manifest
if: ${{ github.event_name != 'pull_request' && steps.changed-files.outputs.any_changed == 'true' }}
if: github.event_name != 'pull_request' && steps.changed-files.outputs.any_changed == 'true'
run: |
push_image() {
image=$1
for image in ${{ needs.repo.outputs.GHCR_REPO }}/clio-gcc rippleci/clio_gcc; do
docker buildx imagetools create \
-t $image:latest \
-t $image:${{ env.GCC_MAJOR_VERSION }} \
@@ -167,11 +165,7 @@ jobs:
-t $image:${{ github.sha }} \
$image:arm64-latest \
$image:amd64-latest
}
push_image ${{ needs.repo.outputs.GHCR_REPO }}/clio-gcc
if [[ ${{ github.repository_owner }} == 'XRPLF' ]]; then
push_image rippleci/clio_clang
fi
done
clang:
name: Build and push Clang docker image
@@ -183,12 +177,12 @@ jobs:
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
with:
files: "docker/compilers/clang/**"
- uses: ./.github/actions/build_docker_image
if: ${{ steps.changed-files.outputs.any_changed == 'true' }}
if: steps.changed-files.outputs.any_changed == 'true'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
@@ -196,7 +190,7 @@ jobs:
with:
images: |
${{ needs.repo.outputs.GHCR_REPO }}/clio-clang
${{ github.repository_owner == 'XRPLF' && 'rippleci/clio_clang' || '' }}
rippleci/clio_clang
push_image: ${{ github.event_name != 'pull_request' }}
directory: docker/compilers/clang
tags: |
@@ -206,7 +200,7 @@ jobs:
platforms: linux/amd64,linux/arm64
build_args: |
CLANG_MAJOR_VERSION=${{ env.CLANG_MAJOR_VERSION }}
dockerhub_repo: ${{ github.repository_owner == 'XRPLF' && 'rippleci/clio_clang' || '' }}
dockerhub_repo: rippleci/clio_clang
dockerhub_description: Clang compiler for XRPLF/clio.
tools-amd64:
@@ -219,12 +213,12 @@ jobs:
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
with:
files: "docker/tools/**"
- uses: ./.github/actions/build_docker_image
if: ${{ steps.changed-files.outputs.any_changed == 'true' }}
if: steps.changed-files.outputs.any_changed == 'true'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
@@ -250,12 +244,12 @@ jobs:
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
with:
files: "docker/tools/**"
- uses: ./.github/actions/build_docker_image
if: ${{ steps.changed-files.outputs.any_changed == 'true' }}
if: steps.changed-files.outputs.any_changed == 'true'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
@@ -281,7 +275,7 @@ jobs:
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
with:
files: "docker/tools/**"
@@ -289,15 +283,15 @@ jobs:
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Container Registry
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
if: github.event_name != 'pull_request'
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Create and push multi-arch manifest
if: ${{ github.event_name != 'pull_request' && steps.changed-files.outputs.any_changed == 'true' }}
if: github.event_name != 'pull_request' && steps.changed-files.outputs.any_changed == 'true'
run: |
image=${{ needs.repo.outputs.GHCR_REPO }}/clio-tools
docker buildx imagetools create \
@@ -321,7 +315,7 @@ jobs:
with:
images: |
${{ needs.repo.outputs.GHCR_REPO }}/clio-ci
${{ github.repository_owner == 'XRPLF' && 'rippleci/clio_ci' || '' }}
rippleci/clio_ci
push_image: ${{ github.event_name != 'pull_request' }}
directory: docker/ci
tags: |
@@ -334,5 +328,5 @@ jobs:
CLANG_MAJOR_VERSION=${{ env.CLANG_MAJOR_VERSION }}
GCC_MAJOR_VERSION=${{ env.GCC_MAJOR_VERSION }}
GCC_VERSION=${{ env.GCC_VERSION }}
dockerhub_repo: ${{ github.repository_owner == 'XRPLF' && 'rippleci/clio_ci' || '' }}
dockerhub_repo: rippleci/clio_ci
dockerhub_description: CI image for XRPLF/clio.

View File

@@ -10,18 +10,15 @@ on:
required: false
default: false
type: boolean
force_upload:
description: "Force upload of all dependencies"
required: false
default: false
type: boolean
pull_request:
branches: [develop]
paths:
- .github/workflows/upload_conan_deps.yml
- .github/actions/conan/action.yml
- .github/actions/generate/action.yml
- .github/actions/prepare_runner/action.yml
- ".github/scripts/conan/**"
- "!.github/scripts/conan/apple-clang-local.profile"
- conanfile.py
- conan.lock
@@ -30,8 +27,10 @@ on:
paths:
- .github/workflows/upload_conan_deps.yml
- .github/actions/conan/action.yml
- .github/actions/generate/action.yml
- .github/actions/prepare_runner/action.yml
- ".github/scripts/conan/**"
- "!.github/scripts/conan/apple-clang-local.profile"
- conanfile.py
- conan.lock
@@ -60,7 +59,6 @@ jobs:
strategy:
fail-fast: false
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
max-parallel: 10
runs-on: ${{ matrix.os }}
container: ${{ matrix.container != '' && fromJson(matrix.container) || null }}
@@ -72,20 +70,20 @@ jobs:
- uses: actions/checkout@v4
- name: Prepare runner
uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
uses: ./.github/actions/prepare_runner
with:
disable_ccache: true
- name: Setup conan on macOS
if: ${{ runner.os == 'macOS' }}
if: runner.os == 'macOS'
shell: bash
run: ./.github/scripts/conan/init.sh
- name: Show conan profile
run: conan profile show --profile:all ${{ env.CONAN_PROFILE }}
- name: Run conan
uses: ./.github/actions/conan
- name: Run conan and cmake
uses: ./.github/actions/generate
with:
conan_profile: ${{ env.CONAN_PROFILE }}
# We check that everything builds fine from source on scheduled runs
@@ -94,9 +92,9 @@ jobs:
build_type: ${{ matrix.build_type }}
- name: Login to Conan
if: ${{ github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' }}
run: conan remote login -p ${{ secrets.CONAN_PASSWORD }} xrplf ${{ secrets.CONAN_USERNAME }}
if: github.event_name != 'pull_request'
run: conan remote login -p ${{ secrets.CONAN_PASSWORD }} ripple ${{ secrets.CONAN_USERNAME }}
- name: Upload Conan packages
if: ${{ github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' && github.event_name != 'schedule' }}
run: conan upload "*" -r=xrplf --confirm ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }}
if: github.event_name != 'pull_request' && github.event_name != 'schedule'
run: conan upload "*" -r=ripple --confirm
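
A sketch of the login-and-upload flow this job performs, including the optional forced re-upload that appears on the other side of this hunk (credentials assumed to be in the environment):

```bash
conan remote login -p "$CONAN_PASSWORD" ripple "$CONAN_USERNAME"
conan upload "*" -r=ripple --confirm          # regular upload of cached packages
conan upload "*" -r=ripple --confirm --force  # forced re-upload (force_upload input)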

View File

@@ -18,14 +18,14 @@ jobs:
fetch-depth: 0
- name: Download report artifact
uses: actions/download-artifact@v5
uses: actions/download-artifact@v4
with:
name: coverage-report.xml
path: build
- name: Upload coverage report
if: ${{ hashFiles('build/coverage_report.xml') != '' }}
uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3
with:
files: build/coverage_report.xml
fail_ci_if_error: true

.gitignore (vendored, +1)
View File

@@ -9,3 +9,4 @@
.sanitizer-report
CMakeUserPresets.json
config.json
src/util/build/Build.cpp

View File

@@ -16,7 +16,7 @@ exclude: ^(docs/doxygen-awesome-theme/|conan\.lock$)
repos:
# `pre-commit sample-config` default hooks
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: 3e8a8703264a2f4a69428a0aa4dcb512790b2c8c # frozen: v6.0.0
rev: cef0300fd0fc4d2a87a85fa2093c6b283ea36f4b # frozen: v5.0.0
hooks:
- id: check-added-large-files
- id: check-executables-have-shebangs
@@ -37,13 +37,13 @@ repos:
exclude: LICENSE.md
- repo: https://github.com/hadolint/hadolint
rev: 4e697ba704fd23b2409b947a319c19c3ee54d24f # frozen: v2.14.0
rev: c3dc18df7a501f02a560a2cc7ba3c69a85ca01d3 # frozen: v2.13.1-beta
hooks:
- id: hadolint-docker
# hadolint-docker is a special hook that runs hadolint in a Docker container
# Docker is not installed in the environment where pre-commit is run
stages: [manual]
entry: hadolint/hadolint:v2.14 hadolint
entry: hadolint/hadolint:v2.12.1-beta hadolint
- repo: https://github.com/codespell-project/codespell
rev: 63c8f8312b7559622c0d82815639671ae42132ac # frozen: v2.4.1
@@ -55,6 +55,12 @@ repos:
--ignore-words=pre-commit-hooks/codespell_ignore.txt,
]
- repo: https://github.com/trufflesecurity/trufflehog
rev: 6641d4ba5b684fffe195b9820345de1bf19f3181 # frozen: v3.89.2
hooks:
- id: trufflehog
entry: trufflehog git file://. --since-commit HEAD --no-verification --fail
# Running some C++ hooks before clang-format
# to ensure that the style is consistent.
- repo: local
@@ -80,7 +86,7 @@ repos:
language: script
- repo: https://github.com/pre-commit/mirrors-clang-format
rev: 719856d56a62953b8d2839fb9e851f25c3cfeef8 # frozen: v21.1.2
rev: 6b9072cd80691b1b48d80046d884409fb1d962d1 # frozen: v20.1.7
hooks:
- id: clang-format
args: [--style=file]

View File

@@ -1,5 +1,7 @@
cmake_minimum_required(VERSION 3.20)
set(CMAKE_PROJECT_INCLUDE_BEFORE ${CMAKE_CURRENT_SOURCE_DIR}/cmake/ClioVersion.cmake)
project(clio VERSION ${CLIO_VERSION} HOMEPAGE_URL "https://github.com/XRPLF/clio"
DESCRIPTION "An XRP Ledger API Server"
)
@@ -11,7 +13,7 @@ option(integration_tests "Build integration tests" FALSE)
option(benchmark "Build benchmarks" FALSE)
option(docs "Generate doxygen docs" FALSE)
option(coverage "Build test coverage report" FALSE)
option(package "Create distribution packages" FALSE)
option(packaging "Create distribution packages" FALSE)
option(lint "Run clang-tidy checks during compilation" FALSE)
option(static "Statically linked Clio" FALSE)
option(snapshot "Build snapshot tool" FALSE)
@@ -29,7 +31,6 @@ set(CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH})
include(Ccache)
include(CheckCXXCompilerFlag)
include(ClangTidy)
include(Linker)
add_library(clio_options INTERFACE)
target_compile_features(clio_options INTERFACE cxx_std_23) # Clio needs c++23 but deps can remain c++20 for now
@@ -39,6 +40,11 @@ if (verbose)
set(CMAKE_VERBOSE_MAKEFILE TRUE)
endif ()
if (packaging)
add_definitions(-DPKG=1)
target_compile_definitions(clio_options INTERFACE PKG=1)
endif ()
# Clio tweaks and checks
include(CheckCompiler)
include(Settings)
@@ -52,7 +58,6 @@ include(deps/Threads)
include(deps/libfmt)
include(deps/cassandra)
include(deps/libbacktrace)
include(deps/spdlog)
add_subdirectory(src)
add_subdirectory(tests)
@@ -88,8 +93,8 @@ if (docs)
endif ()
include(install/install)
if (package)
include(ClioPackage)
if (packaging)
include(cmake/packaging.cmake) # This file exists only in build runner
endif ()
if (snapshot)

View File

@@ -7,12 +7,10 @@ target_sources(
Playground.cpp
# ExecutionContext
util/async/ExecutionContextBenchmarks.cpp
# Logger
util/log/LoggerBenchmark.cpp
)
include(deps/gbench)
target_include_directories(clio_benchmark PRIVATE .)
target_link_libraries(clio_benchmark PUBLIC clio_util benchmark::benchmark_main spdlog::spdlog)
target_link_libraries(clio_benchmark PUBLIC clio_etl benchmark::benchmark_main)
set_target_properties(clio_benchmark PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})

View File

@@ -1,149 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include "util/config/ConfigDefinition.hpp"
#include "util/log/Logger.hpp"
#include "util/prometheus/Prometheus.hpp"
#include <benchmark/benchmark.h>
#include <fmt/format.h>
#include <spdlog/async.h>
#include <spdlog/async_logger.h>
#include <spdlog/spdlog.h>
#include <barrier>
#include <chrono>
#include <cstddef>
#include <filesystem>
#include <memory>
#include <string>
#include <thread>
#include <utility>
#include <vector>
using namespace util;
static constexpr auto kLOG_FORMAT = "%Y-%m-%d %H:%M:%S.%f %^%3!l:%n%$ - %v";
struct BenchmarkLoggingInitializer {
[[nodiscard]] static std::shared_ptr<spdlog::sinks::sink>
createFileSink(LogService::FileLoggingParams const& params)
{
return LogService::createFileSink(params, kLOG_FORMAT);
}
static Logger
getLogger(std::shared_ptr<spdlog::logger> logger)
{
return Logger(std::move(logger));
}
};
namespace {
std::string
uniqueLogDir()
{
auto const epochTime = std::chrono::high_resolution_clock::now().time_since_epoch();
auto const tmpDir = std::filesystem::temp_directory_path();
std::string const dirName =
fmt::format("logs_{}", std::chrono::duration_cast<std::chrono::microseconds>(epochTime).count());
return tmpDir / "clio_benchmark" / dirName;
}
} // anonymous namespace
static void
benchmarkConcurrentFileLogging(benchmark::State& state)
{
auto const numThreads = static_cast<size_t>(state.range(0));
auto const messagesPerThread = static_cast<size_t>(state.range(1));
PrometheusService::init(config::getClioConfig());
auto const logDir = uniqueLogDir();
for (auto _ : state) {
state.PauseTiming();
std::filesystem::create_directories(logDir);
static constexpr size_t kQUEUE_SIZE = 8192;
static constexpr size_t kTHREAD_COUNT = 1;
spdlog::init_thread_pool(kQUEUE_SIZE, kTHREAD_COUNT);
auto fileSink = BenchmarkLoggingInitializer::createFileSink({
.logDir = logDir,
.rotationSizeMB = 5,
.dirMaxFiles = 25,
});
std::vector<std::thread> threads;
threads.reserve(numThreads);
std::chrono::high_resolution_clock::time_point start;
std::barrier barrier(numThreads, [&state, &start]() {
state.ResumeTiming();
start = std::chrono::high_resolution_clock::now();
});
for (size_t threadNum = 0; threadNum < numThreads; ++threadNum) {
threads.emplace_back([threadNum, messagesPerThread, fileSink, &barrier]() {
std::string const channel = fmt::format("Thread_{}", threadNum);
auto logger = std::make_shared<spdlog::async_logger>(
channel, fileSink, spdlog::thread_pool(), spdlog::async_overflow_policy::block
);
spdlog::register_logger(logger);
Logger const threadLogger = BenchmarkLoggingInitializer::getLogger(std::move(logger));
barrier.arrive_and_wait();
for (size_t messageNum = 0; messageNum < messagesPerThread; ++messageNum) {
LOG(threadLogger.info()) << "Test log message #" << messageNum;
}
});
}
for (auto& thread : threads) {
thread.join();
}
spdlog::shutdown();
auto const end = std::chrono::high_resolution_clock::now();
state.SetIterationTime(std::chrono::duration_cast<std::chrono::duration<double>>(end - start).count());
std::filesystem::remove_all(logDir);
}
auto const totalMessages = numThreads * messagesPerThread;
state.counters["TotalMessagesRate"] = benchmark::Counter(totalMessages, benchmark::Counter::kIsRate);
state.counters["Threads"] = numThreads;
state.counters["MessagesPerThread"] = messagesPerThread;
}
// One line of log message is around 110 bytes
// So, 100K messages is around 10.5MB
BENCHMARK(benchmarkConcurrentFileLogging)
->ArgsProduct({
// Number of threads
{1, 2, 4, 8},
// Messages per thread
{10'000, 100'000, 500'000, 1'000'000, 10'000'000},
})
->UseManualTime()
->Unit(benchmark::kMillisecond);

View File

@@ -23,15 +23,12 @@
namespace util::build {
#ifndef CLIO_VERSION
#error "CLIO_VERSION must be defined"
#endif
static constexpr char kVERSION_STRING[] = CLIO_VERSION;
static constexpr char versionString[] = "@CLIO_VERSION@"; // NOLINT(readability-identifier-naming)
std::string const&
getClioVersionString()
{
static std::string const value = kVERSION_STRING; // NOLINT(readability-identifier-naming)
static std::string const value = versionString; // NOLINT(readability-identifier-naming)
return value;
}
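
The two sides of this hunk feed the version string in different ways: the old code substitutes `@CLIO_VERSION@` via `configure_file` at CMake time, while the new code (with its `#error` guard) expects a compile definition. A sketch of the new mechanism, with the version value and include paths omitted for brevity:

```bash
# Sketch: the #error guard above is satisfied by a compile definition,
# presumably injected by CMake; standalone equivalent (includes omitted):
g++ -std=c++23 -DCLIO_VERSION='"2.5.0"' -c src/util/build/Build.cpp
```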

View File

@@ -1,8 +0,0 @@
include("${CMAKE_CURRENT_LIST_DIR}/ClioVersion.cmake")
set(CPACK_PACKAGING_INSTALL_PREFIX "/opt/clio")
set(CPACK_PACKAGE_VERSION "${CLIO_VERSION}")
set(CPACK_STRIP_FILES TRUE)
include(pkg/deb)
include(CPack)

View File

@@ -1,3 +1,7 @@
#[===================================================================[
write version to source
#]===================================================================]
find_package(Git REQUIRED)
set(GIT_COMMAND describe --tags --exact-match)
@@ -6,17 +10,15 @@ execute_process(
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
OUTPUT_VARIABLE TAG
RESULT_VARIABLE RC
ERROR_VARIABLE ERR
OUTPUT_STRIP_TRAILING_WHITESPACE ERROR_STRIP_TRAILING_WHITESPACE
OUTPUT_STRIP_TRAILING_WHITESPACE
)
if (RC EQUAL 0)
message(STATUS "Found tag '${TAG}' in git. Will use it as Clio version")
# if we are on a tag, use the tag name
set(CLIO_VERSION "${TAG}")
set(DOC_CLIO_VERSION "${TAG}")
else ()
message(STATUS "Error finding tag in git: ${ERR}")
message(STATUS "Will use 'YYYYMMDDHMS-<branch>-<git-rev>' as Clio version")
# if not, use YYYYMMDDHMS-<branch>-<git-rev>
set(GIT_COMMAND show -s --date=format:%Y%m%d%H%M%S --format=%cd)
execute_process(
@@ -45,3 +47,5 @@ if (CMAKE_BUILD_TYPE MATCHES Debug)
endif ()
message(STATUS "Build version: ${CLIO_VERSION}")
configure_file(${CMAKE_CURRENT_LIST_DIR}/Build.cpp.in ${CMAKE_CURRENT_LIST_DIR}/../src/util/build/Build.cpp)
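For reference, the tag-or-timestamp fallback above is equivalent to roughly this shell logic (a sketch; the branch and revision lookups are assumptions, since those hunks are not shown here):

```sh
# Sketch of the fallback the CMake code implements (assumes a git checkout).
if tag="$(git describe --tags --exact-match 2>/dev/null)"; then
  echo "CLIO_VERSION=$tag"                      # on a tag: use the tag name
else
  date="$(git show -s --date=format:%Y%m%d%H%M%S --format=%cd)"
  branch="$(git rev-parse --abbrev-ref HEAD)"   # branch name (assumption)
  rev="$(git rev-parse --short HEAD)"           # short commit hash (assumption)
  echo "CLIO_VERSION=${date}-${branch}-${rev}"  # YYYYMMDDHMS-<branch>-<git-rev>
fi
```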

View File

@@ -1,11 +0,0 @@
if (DEFINED CMAKE_LINKER_TYPE)
message(STATUS "Custom linker is already set: ${CMAKE_LINKER_TYPE}")
return()
endif ()
find_program(MOLD_PATH mold)
if (MOLD_PATH AND CMAKE_SYSTEM_NAME STREQUAL "Linux")
message(STATUS "Using Mold linker: ${MOLD_PATH}")
set(CMAKE_LINKER_TYPE MOLD)
endif ()

View File

@@ -1,5 +0,0 @@
find_package(spdlog REQUIRED)
if (NOT TARGET spdlog::spdlog)
message(FATAL_ERROR "spdlog::spdlog target not found")
endif ()

View File

@@ -0,0 +1,17 @@
[Unit]
Description=Clio XRPL API server
Documentation=https://github.com/XRPLF/clio.git
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
ExecStart=@CLIO_INSTALL_DIR@/bin/clio_server @CLIO_INSTALL_DIR@/etc/config.json
Restart=on-failure
User=clio
Group=clio
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
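Once the package installs this unit as `clio.service`, the usual systemd flow applies (a sketch, assuming a systemd-based host):

```sh
sudo systemctl daemon-reload          # pick up the newly installed unit
sudo systemctl enable --now clio      # start now and on every boot
systemctl status clio                 # verify it is running
```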

View File

@@ -1,13 +1,13 @@
set(CLIO_INSTALL_DIR "/opt/clio")
set(CMAKE_INSTALL_PREFIX "${CLIO_INSTALL_DIR}" CACHE PATH "Install prefix" FORCE)
set(CMAKE_INSTALL_PREFIX ${CLIO_INSTALL_DIR})
set(CPACK_PACKAGING_INSTALL_PREFIX "${CMAKE_INSTALL_PREFIX}")
include(GNUInstallDirs)
install(TARGETS clio_server DESTINATION "${CMAKE_INSTALL_BINDIR}")
install(TARGETS clio_server DESTINATION bin)
file(READ docs/examples/config/example-config.json config)
string(REGEX REPLACE "./clio_log" "/var/log/clio/" config "${config}")
file(WRITE ${CMAKE_BINARY_DIR}/install-config.json "${config}")
install(FILES ${CMAKE_BINARY_DIR}/install-config.json DESTINATION etc RENAME config.json)
configure_file("${CMAKE_SOURCE_DIR}/cmake/install/clio.service.in" "${CMAKE_BINARY_DIR}/clio.service")
install(FILES "${CMAKE_BINARY_DIR}/clio.service" DESTINATION /lib/systemd/system)

View File

@@ -1,12 +0,0 @@
set(CPACK_GENERATOR "DEB")
set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://github.com/XRPLF/clio")
set(CPACK_DEBIAN_PACKAGE_MAINTAINER "Ripple Labs Inc. <support@ripple.com>")
set(CPACK_DEBIAN_FILE_NAME DEB-DEFAULT)
set(CPACK_DEBIAN_PACKAGE_SHLIBDEPS ON)
set(CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA ${CMAKE_SOURCE_DIR}/cmake/pkg/postinst)
# We must replace "-" with "~" otherwise dpkg will sort "X.Y.Z-b1" as greater than "X.Y.Z"
string(REPLACE "-" "~" git "${CPACK_PACKAGE_VERSION}")
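The sorting behaviour described in the comment can be checked with `dpkg --compare-versions` (exit status 0 means the relation holds):

```sh
dpkg --compare-versions "2.5.0~b1" lt "2.5.0" && echo "pre-release sorts before release"
dpkg --compare-versions "2.5.0-b1" gt "2.5.0" && echo "with '-' it would sort after"
```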

View File

@@ -1,46 +0,0 @@
#!/bin/sh
set -e
USER_NAME=clio
GROUP_NAME="${USER_NAME}"
CLIO_EXECUTABLE="clio_server"
CLIO_PREFIX="/opt/clio"
CLIO_BIN="$CLIO_PREFIX/bin/${CLIO_EXECUTABLE}"
CLIO_CONFIG="$CLIO_PREFIX/etc/config.json"
case "$1" in
configure)
if ! id -u "$USER_NAME" >/dev/null 2>&1; then
# Users who should not have a home directory should have their home directory set to /nonexistent
# https://www.debian.org/doc/debian-policy/ch-opersys.html#non-existent-home-directories
useradd \
--system \
--home-dir /nonexistent \
--no-create-home \
--shell /usr/sbin/nologin \
--comment "system user for ${CLIO_EXECUTABLE}" \
--user-group \
${USER_NAME}
fi
install -d -o "$USER_NAME" -g "$GROUP_NAME" /var/log/clio
if [ -f "$CLIO_CONFIG" ]; then
chown "$USER_NAME:$GROUP_NAME" "$CLIO_CONFIG"
fi
chown -R "$USER_NAME:$GROUP_NAME" "$CLIO_PREFIX"
ln -sf "$CLIO_BIN" "/usr/bin/${CLIO_EXECUTABLE}"
;;
abort-upgrade|abort-remove|abort-deconfigure)
;;
*)
echo "postinst called with unknown argument \`$1'" >&2
exit 1
;;
esac
exit 0
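A few read-only commands to sanity-check what this maintainer script did after installation (not part of the package itself):

```sh
id clio                           # system user and group exist
ls -ld /var/log/clio              # log dir owned by clio:clio
readlink /usr/bin/clio_server     # symlink points into /opt/clio
```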

View File

@@ -1,49 +1,48 @@
{
"version": "0.5",
"requires": [
"zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1756234269.497",
"xxhash/0.8.3#681d36a0a6111fc56e5e45ea182c19cc%1756234289.683",
"xrpl/2.6.1#973af2bf9631f239941dd9f5a100bb84%1759275059.342",
"sqlite3/3.49.1#8631739a4c9b93bd3d6b753bac548a63%1756234266.869",
"spdlog/1.15.3#3ca0e9e6b83af4d0151e26541d140c86%1754401846.61",
"soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1756234262.318",
"re2/20230301#dfd6e2bf050eb90ddd8729cfb4c844a4%1756234257.976",
"rapidjson/cci.20220822#1b9d8c2256876a154172dc5cfbe447c6%1754325007.656",
"protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1756234251.614",
"openssl/1.1.1w#a8f0792d7c5121b954578a7149d23e03%1756223730.729",
"nudb/2.0.9#c62cfd501e57055a7e0d8ee3d5e5427d%1756234237.107",
"minizip/1.2.13#9e87d57804bd372d6d1e32b1871517a3%1754325004.374",
"lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1756234228.999",
"libuv/1.46.0#dc28c1f653fa197f00db5b577a6f6011%1754325003.592",
"libiconv/1.17#1e65319e945f2d31941a9d28cc13c058%1756223727.64",
"libbacktrace/cci.20210118#a7691bfccd8caaf66309df196790a5a1%1756230911.03",
"libarchive/3.8.1#5cf685686322e906cb42706ab7e099a8%1756234256.696",
"http_parser/2.9.4#98d91690d6fd021e9e624218a85d9d97%1754325001.385",
"gtest/1.14.0#f8f0757a574a8dd747d16af62d6eb1b7%1754325000.842",
"grpc/1.50.1#02291451d1e17200293a409410d1c4e1%1756234248.958",
"fmt/11.2.0#579bb2cdf4a7607621beea4eb4651e0f%1754324999.086",
"doctest/2.4.11#a4211dfc329a16ba9f280f9574025659%1756234220.819",
"date/3.0.4#f74bbba5a08fa388256688743136cb6f%1756234217.493",
"cassandra-cpp-driver/2.17.0#e50919efac8418c26be6671fd702540a%1754324997.363",
"c-ares/1.34.5#b78b91e7cfb1f11ce777a285bbf169c6%1756234217.915",
"bzip2/1.0.8#00b4a4658791c1f06914e087f0e792f5%1756234261.716",
"boost/1.83.0#5d975011d65b51abb2d2f6eb8386b368%1754325043.336",
"benchmark/1.9.4#ce4403f7a24d3e1f907cd9da4b678be4%1754578869.672",
"abseil/20230802.1#f0f91485b111dc9837a68972cb19ca7b%1756234220.907"
"zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1752006674.465",
"xxhash/0.8.2#7856c968c985b2981b707ee8f2413b2b%1752006674.334",
"xrpl/2.5.0#7880d1696f11fceb1d498570f1a184c8%1752006708.218",
"sqlite3/3.47.0#7a0904fd061f5f8a2366c294f9387830%1752006674.338",
"soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1752006674.465",
"re2/20230301#dfd6e2bf050eb90ddd8729cfb4c844a4%1752006674.077",
"rapidjson/cci.20220822#1b9d8c2256876a154172dc5cfbe447c6%1752006673.227",
"protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1752006673.172",
"openssl/1.1.1v#216374e4fb5b2e0f5ab1fb6f27b5b434%1752006673.069",
"nudb/2.0.8#63990d3e517038e04bf529eb8167f69f%1752006673.862",
"minizip/1.2.13#9e87d57804bd372d6d1e32b1871517a3%1752006672.983",
"lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1752006672.825",
"libuv/1.46.0#78565d142ac7102776256328a26cdf60%1752006672.827",
"libiconv/1.17#1e65319e945f2d31941a9d28cc13c058%1752006672.826",
"libbacktrace/cci.20210118#a7691bfccd8caaf66309df196790a5a1%1752006672.822",
"libarchive/3.7.6#e0453864b2a4d225f06b3304903cb2b7%1752006672.917",
"http_parser/2.9.4#98d91690d6fd021e9e624218a85d9d97%1752006672.658",
"gtest/1.14.0#f8f0757a574a8dd747d16af62d6eb1b7%1752006671.555",
"grpc/1.50.1#02291451d1e17200293a409410d1c4e1%1752006671.777",
"fmt/11.2.0#579bb2cdf4a7607621beea4eb4651e0f%1752006671.557",
"date/3.0.3#cf28fe9c0aab99fe12da08aa42df65e1%1752006671.553",
"cassandra-cpp-driver/2.17.0#e50919efac8418c26be6671fd702540a%1752006671.654",
"c-ares/1.34.5#b78b91e7cfb1f11ce777a285bbf169c6%1752006671.554",
"bzip2/1.0.8#00b4a4658791c1f06914e087f0e792f5%1752006671.549",
"boost/1.83.0#5bcb2a14a35875e328bf312e080d3562%1752006671.557",
"benchmark/1.8.3#1a2ce62c99e2b3feaa57b1f0c15a8c46%1752006671.408",
"abseil/20230802.1#f0f91485b111dc9837a68972cb19ca7b%1752006671.555"
],
"build_requires": [
"zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1756234269.497",
"protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1756234251.614",
"cmake/3.31.8#dde3bde00bb843687e55aea5afa0e220%1756234232.89",
"b2/5.3.3#107c15377719889654eb9a162a673975%1756234226.28"
"zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1752006674.465",
"protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1752006673.172",
"protobuf/3.21.9#64ce20e1d9ea24f3d6c504015d5f6fa8%1752006673.173",
"cmake/3.31.7#57c3e118bcf267552c0ea3f8bee1e7d5%1752006671.64",
"b2/5.3.2#7b5fabfe7088ae933fb3e78302343ea0%1752006671.407"
],
"python_requires": [],
"overrides": {
"boost/1.83.0": [
null,
"boost/1.83.0#5d975011d65b51abb2d2f6eb8386b368"
"boost/1.83.0#5bcb2a14a35875e328bf312e080d3562"
],
"protobuf/3.21.12": [
"protobuf/3.21.9": [
null,
"protobuf/3.21.12"
],
@@ -51,7 +50,7 @@
"lz4/1.10.0"
],
"sqlite3/3.44.2": [
"sqlite3/3.49.1"
"sqlite3/3.47.0"
]
},
"config_requires": []

View File

@@ -9,7 +9,19 @@ class ClioConan(ConanFile):
url = 'https://github.com/xrplf/clio'
description = 'Clio RPC server'
settings = 'os', 'compiler', 'build_type', 'arch'
options = {}
options = {
'static': [True, False], # static linkage
'verbose': [True, False],
'tests': [True, False], # build unit tests; create `clio_tests` binary
'integration_tests': [True, False], # build integration tests; create `clio_integration_tests` binary
'benchmark': [True, False], # build benchmarks; create `clio_benchmarks` binary
'docs': [True, False], # doxygen API docs; create custom target 'docs'
'packaging': [True, False], # create distribution packages
'coverage': [True, False], # build for test coverage report; create custom target `clio_tests-ccov`
'lint': [True, False], # run clang-tidy checks during compilation
'snapshot': [True, False], # build export/import snapshot tool
'time_trace': [True, False] # build using -ftime-trace to create compiler trace reports
}
requires = [
'boost/1.83.0',
@@ -17,14 +29,25 @@ class ClioConan(ConanFile):
'fmt/11.2.0',
'protobuf/3.21.12',
'grpc/1.50.1',
'openssl/1.1.1w',
'xrpl/2.6.1',
'openssl/1.1.1v',
'xrpl/2.5.0',
'zlib/1.3.1',
'libbacktrace/cci.20210118',
'spdlog/1.15.3',
'libbacktrace/cci.20210118'
]
default_options = {
'static': False,
'verbose': False,
'tests': False,
'integration_tests': False,
'benchmark': False,
'packaging': False,
'coverage': False,
'lint': False,
'docs': False,
'snapshot': False,
'time_trace': False,
'xrpl/*:tests': False,
'xrpl/*:rocksdb': False,
'cassandra-cpp-driver/*:shared': False,
@@ -45,8 +68,10 @@ class ClioConan(ConanFile):
)
def requirements(self):
if self.options.tests or self.options.integration_tests:
self.requires('gtest/1.14.0')
self.requires('benchmark/1.9.4')
if self.options.benchmark:
self.requires('benchmark/1.8.3')
def configure(self):
if self.settings.compiler == 'apple-clang':
@@ -62,6 +87,8 @@ class ClioConan(ConanFile):
def generate(self):
tc = CMakeToolchain(self)
for option_name, option_value in self.options.items():
tc.variables[option_name] = option_value
tc.generate()
def build(self):

View File

@@ -20,60 +20,43 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"]
USER root
WORKDIR /root
# Install common tools and dependencies
ARG LLVM_TOOLS_VERSION=20
# Add repositories
RUN apt-get update \
&& apt-get install -y --no-install-recommends --no-install-suggests \
curl \
dpkg-dev \
file \
git \
git-lfs \
gnupg \
graphviz \
jq \
# libgmp, libmpfr and libncurses are gdb dependencies
libgmp-dev \
libmpfr-dev \
libncurses-dev \
make \
ninja-build \
wget \
zip \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# Install Python tools
ARG PYTHON_VERSION=3.13
RUN add-apt-repository ppa:deadsnakes/ppa \
&& apt-get update \
&& apt-get install -y --no-install-recommends --no-install-suggests \
python${PYTHON_VERSION} \
python${PYTHON_VERSION}-venv \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* \
&& curl -sS https://bootstrap.pypa.io/get-pip.py | python${PYTHON_VERSION}
# Create a virtual environment for python tools
RUN python${PYTHON_VERSION} -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"
RUN pip install -q --no-cache-dir \
cmake \
conan==2.20.1 \
gcovr \
pre-commit
# Install LLVM tools
ARG LLVM_TOOLS_VERSION=20
RUN echo "deb http://apt.llvm.org/focal/ llvm-toolchain-focal-${LLVM_TOOLS_VERSION} main" >> /etc/apt/sources.list \
&& echo "deb http://apt.llvm.org/focal/ llvm-toolchain-focal-${LLVM_TOOLS_VERSION} main" >> /etc/apt/sources.list \
&& wget --progress=dot:giga -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add -
# Install packages
RUN apt-get update \
&& apt-get install -y --no-install-recommends --no-install-suggests \
clang-tidy-${LLVM_TOOLS_VERSION} \
clang-tools-${LLVM_TOOLS_VERSION} \
git \
git-lfs \
graphviz \
jq \
make \
ninja-build \
python3 \
python3-pip \
zip \
&& pip3 install -q --upgrade --no-cache-dir pip \
&& pip3 install -q --no-cache-dir \
# TODO: Remove this once we switch to newer Ubuntu base image
# lxml 6.0.0 is not compatible with our image
'lxml<6.0.0' \
\
cmake==3.31.6 \
conan==2.17.0 \
gcovr \
pre-commit \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
@@ -109,13 +92,12 @@ COPY --from=clio-tools \
/usr/local/bin/ClangBuildAnalyzer \
/usr/local/bin/git-cliff \
/usr/local/bin/gh \
/usr/local/bin/gdb \
/usr/local/bin/
WORKDIR /root
# Setup conan
RUN conan remote add --index 0 xrplf https://conan.ripplex.io
RUN conan remote add --index 0 ripple http://18.143.149.228:8081/artifactory/api/conan/dev
WORKDIR /root/.conan2
COPY conan/global.conf ./global.conf

View File

@@ -8,14 +8,12 @@ The image is based on Ubuntu 20.04 and contains:
- ccache 4.11.3
- Clang 19
- ClangBuildAnalyzer 1.6.0
- Conan 2.20.1
- Conan 2.17.0
- Doxygen 1.12
- GCC 15.2.0
- GDB 16.3
- GCC 14.3.0
- gh 2.74
- git-cliff 2.9.1
- mold 2.40.1
- Python 3.13
- and some other useful tools
Conan is set up to build Clio without any additional steps.

View File

@@ -4,8 +4,8 @@ build_type=Release
compiler=gcc
compiler.cppstd=20
compiler.libcxx=libstdc++11
compiler.version=15
compiler.version=14
os=Linux
[conf]
tools.build:compiler_executables={"c": "/usr/bin/gcc-15", "cpp": "/usr/bin/g++-15"}
tools.build:compiler_executables={"c": "/usr/bin/gcc-14", "cpp": "/usr/bin/g++-14"}

View File

@@ -1,20 +1,18 @@
FROM ubuntu:22.04
RUN apt-get update \
&& apt-get install -y --no-install-recommends --no-install-suggests \
libatomic1 \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
RUN groupadd -g 10001 clio \
&& useradd -u 10000 -g 10001 -s /bin/bash clio
COPY ./clio_server /opt/clio/bin/clio_server
RUN ln -s /opt/clio/bin/clio_server /usr/local/bin/clio_server \
&& mkdir -p /opt/clio/etc/ \
&& mkdir -p /opt/clio/log/ \
&& chown clio:clio /opt/clio/log
&& groupadd -g 10001 clio \
&& useradd -u 10000 -g 10001 -s /bin/bash clio \
&& chown clio:clio /opt/clio/log \
&& apt-get update \
&& apt-get install -y --no-install-recommends --no-install-suggests \
libatomic1 \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
USER clio
ENTRYPOINT ["/opt/clio/bin/clio_server"]
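A typical build-and-run of this image might look like the following (assumes a `clio_server` binary and a `config.json` next to the Dockerfile; names are illustrative):

```sh
docker build -t clio-server .
docker run --rm \
  -v "$PWD/config.json:/opt/clio/etc/config.json" \
  clio-server /opt/clio/etc/config.json
```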

View File

@@ -13,8 +13,8 @@ Clio repository provides an [example](https://github.com/XRPLF/clio/blob/develop
Config file recommendations:
- Set `log.enable_console` to `false` if you want to avoid logs being written to `stdout`.
- Set `log.directory` to `/opt/clio/log` to store logs in a volume.
- Set `log_to_console` to `false` if you want to avoid logs being written to `stdout`.
- Set `log_directory` to `/opt/clio/log` to store logs in a volume.
## Usage

View File

@@ -18,7 +18,7 @@ RUN apt-get update \
ARG CLANG_MAJOR_VERSION=invalid
# Bump this version to force rebuild of the image
ARG BUILD_VERSION=1
ARG BUILD_VERSION=0
RUN wget --progress=dot:giga https://apt.llvm.org/llvm.sh \
&& chmod +x llvm.sh \

View File

@@ -8,7 +8,7 @@ ARG UBUNTU_VERSION
ARG GCC_MAJOR_VERSION
ARG BUILD_VERSION=1
ARG BUILD_VERSION=0
ARG DEBIAN_FRONTEND=noninteractive
ARG TARGETARCH

View File

@@ -1,4 +1,4 @@
Package: gcc-15-ubuntu-UBUNTUVERSION
Package: gcc-14-ubuntu-UBUNTUVERSION
Version: VERSION
Architecture: TARGETARCH
Maintainer: Alex Kremer <akremer@ripple.com>

View File

@@ -1,6 +1,6 @@
services:
clio_develop:
image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
image: ghcr.io/xrplf/clio-ci:latest
volumes:
- clio_develop_conan_data:/root/.conan2/p
- clio_develop_ccache:/root/.ccache

View File

@@ -8,17 +8,19 @@ ARG TARGETARCH
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
ARG BUILD_VERSION=2
ARG BUILD_VERSION=1
RUN apt-get update \
&& apt-get install -y --no-install-recommends --no-install-suggests \
bison \
flex \
ninja-build \
python3 \
python3-pip \
software-properties-common \
wget \
&& pip3 install -q --no-cache-dir \
cmake \
cmake==3.31.6 \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
@@ -44,13 +46,6 @@ RUN wget --progress=dot:giga "https://github.com/ccache/ccache/releases/download
&& ninja install \
&& rm -rf /tmp/* /var/tmp/*
RUN apt-get update \
&& apt-get install -y --no-install-recommends --no-install-suggests \
bison \
flex \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
ARG DOXYGEN_VERSION=1.12.0
RUN wget --progress=dot:giga "https://github.com/doxygen/doxygen/releases/download/Release_${DOXYGEN_VERSION//./_}/doxygen-${DOXYGEN_VERSION}.src.tar.gz" \
&& tar xf "doxygen-${DOXYGEN_VERSION}.src.tar.gz" \
@@ -83,22 +78,4 @@ RUN wget --progress=dot:giga "https://github.com/cli/cli/releases/download/v${GH
&& mv gh_${GH_VERSION}_linux_${TARGETARCH}/bin/gh /usr/local/bin/gh \
&& rm -rf /tmp/* /var/tmp/*
RUN apt-get update \
&& apt-get install -y --no-install-recommends --no-install-suggests \
libgmp-dev \
libmpfr-dev \
libncurses-dev \
make \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
ARG GDB_VERSION=16.3
RUN wget --progress=dot:giga "https://sourceware.org/pub/gdb/releases/gdb-${GDB_VERSION}.tar.gz" \
&& tar xf "gdb-${GDB_VERSION}.tar.gz" \
&& cd "gdb-${GDB_VERSION}" \
&& ./configure --prefix=/usr/local \
&& make -j "$(nproc)" \
&& make install-gdb \
&& rm -rf /tmp/* /var/tmp/*
WORKDIR /root

View File

@@ -3,8 +3,6 @@ PROJECT_LOGO = ${SOURCE}/docs/img/xrpl-logo.svg
PROJECT_NUMBER = ${DOC_CLIO_VERSION}
PROJECT_BRIEF = The XRP Ledger API server.
DOT_GRAPH_MAX_NODES = 100
EXTRACT_ALL = NO
EXTRACT_PRIVATE = NO
EXTRACT_PACKAGE = YES
@@ -34,18 +32,12 @@ SORT_MEMBERS_CTORS_1ST = YES
GENERATE_TREEVIEW = YES
DISABLE_INDEX = NO
FULL_SIDEBAR = NO
HTML_HEADER = ${SOURCE}/docs/doxygen-awesome-theme/doxygen-custom/header.html
HTML_HEADER = ${SOURCE}/docs/doxygen-awesome-theme/header.html
HTML_EXTRA_STYLESHEET = ${SOURCE}/docs/doxygen-awesome-theme/doxygen-awesome.css \
${SOURCE}/docs/doxygen-awesome-theme/doxygen-awesome-sidebar-only.css \
${SOURCE}/docs/doxygen-awesome-theme/doxygen-awesome-sidebar-only-darkmode-toggle.css \
${SOURCE}/docs/doxygen-awesome-theme/doxygen-custom/custom.css \
${SOURCE}/docs/doxygen-awesome-theme/doxygen-custom/theme-robot.css \
${SOURCE}/docs/doxygen-awesome-theme/doxygen-custom/theme-round.css \
${SOURCE}/docs/github-corner-disable.css
${SOURCE}/docs/doxygen-awesome-theme/doxygen-awesome-sidebar-only-darkmode-toggle.css
HTML_EXTRA_FILES = ${SOURCE}/docs/doxygen-awesome-theme/doxygen-awesome-darkmode-toggle.js \
${SOURCE}/docs/doxygen-awesome-theme/doxygen-awesome-fragment-copy-button.js \
${SOURCE}/docs/doxygen-awesome-theme/doxygen-awesome-interactive-toc.js \
${SOURCE}/docs/doxygen-awesome-theme/doxygen-custom/toggle-alternative-theme.js
${SOURCE}/docs/doxygen-awesome-theme/doxygen-awesome-interactive-toc.js
HTML_COLORSTYLE = LIGHT
HTML_COLORSTYLE_HUE = 209

View File

@@ -6,22 +6,16 @@
## Minimum Requirements
- [Python 3.7](https://www.python.org/downloads/)
- [Conan 2.20.1](https://conan.io/downloads.html)
- [CMake 3.20](https://cmake.org/download/)
- [Conan 2.17.0](https://conan.io/downloads.html)
- [CMake 3.20, <4.0](https://cmake.org/download/)
- [**Optional**] [GCovr](https://gcc.gnu.org/onlinedocs/gcc/Gcov.html): needed for code coverage generation
- [**Optional**] [CCache](https://ccache.dev/): speeds up compilation if you are going to compile Clio often
We use our Docker image `ghcr.io/XRPLF/clio-ci` to build `Clio`; see [Building Clio with Docker](#building-clio-with-docker).
You can find information about the exact compiler versions and tools in the [image's README](https://github.com/XRPLF/clio/blob/develop/docker/ci/README.md).
The following compiler versions are guaranteed to work.
Any compiler with a lower version may not be able to build Clio:
| Compiler | Version |
| ----------- | ------- |
| GCC | 15.2 |
| Clang | 19 |
| Apple Clang | 17 |
| GCC | 12.3 |
| Clang | 16 |
| Apple Clang | 15 |
### Conan Configuration
@@ -90,7 +84,7 @@ core.upload:parallel={{os.cpu_count()}}
Make sure the artifactory remote is set up with Conan.
```sh
conan remote add --index 0 xrplf https://conan.ripplex.io
conan remote add --index 0 ripple http://18.143.149.228:8081/artifactory/api/conan/dev
```
Now you should be able to download the prebuilt dependencies (including `xrpl` package) on supported platforms.
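As an optional sanity check that the remote is reachable (the exact packages listed may differ):

```sh
conan search "xrpl/*" -r xrplf
```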
@@ -104,100 +98,79 @@ It is implicitly used when running `conan` commands, you don't need to specify i
You have to update this file every time you add a new dependency or change a revision or version of an existing dependency.
> [!NOTE]
> Conan uses the local cache by default when creating a lockfile.
>
> To ensure that lockfile creation works the same way on all developer machines, you should clear the local cache before creating a new lockfile.
To create a new lockfile, run the following commands in the repository root:
To do that, run the following command in the repository root:
```bash
conan remove '*' --confirm
rm conan.lock
# This ensures that the xrplf remote is the first to be consulted
conan remote add --force --index 0 xrplf https://conan.ripplex.io
conan lock create .
conan lock create . -o '&:tests=True' -o '&:benchmark=True'
```
> [!NOTE]
> If some dependencies are exclusive to a particular OS, you may need to rerun the last command for them, adding `--profile:all <PROFILE>`.
## Building Clio
1. Navigate to Clio's root directory and run:
Navigate to Clio's root directory and run:
```sh
mkdir build && cd build
```
```sh
mkdir build && cd build
# You can also specify profile explicitly by adding `--profile:all <PROFILE_NAME>`
conan install .. --output-folder . --build missing --settings build_type=Release -o '&:tests=True'
# You can also add -GNinja to use Ninja build system instead of Make
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release ..
cmake --build . --parallel 8 # or without the number if you feel extra adventurous
```
2. Install dependencies through conan.
> [!TIP]
> You can omit the `-o '&:tests=True'` if you don't want to build `clio_tests`.
```sh
conan install .. --output-folder . --build missing --settings build_type=Release
```
If successful, `conan install` will find the required packages and `cmake` will do the rest. You should see `clio_server` and `clio_tests` in the `build` directory (the current directory).
> You can add `--profile:all <PROFILE_NAME>` to choose a specific conan profile.
> [!TIP]
> To generate a Code Coverage report, include `-o '&:coverage=True'` in the `conan install` command above, along with `-o '&:tests=True'` to enable tests.
> After running the `cmake` commands, execute `make clio_tests-ccov`.
> The coverage report will be found at `clio_tests-llvm-cov/index.html`.
3. Configure and generate build files with CMake.
```sh
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release ..
```
> You can add `-GNinja` to use the Ninja build system (instead of Make).
4. Now, you can build all targets or specific ones:
```sh
# builds all targets
cmake --build . --parallel 8
# builds only clio_server target
cmake --build . --parallel 8 --target clio_server
```
You should see `clio_server` and `clio_tests` in the current directory.
<!-- markdownlint-disable-line MD028 -->
> [!NOTE]
> If you've built Clio before and the build is now failing, it's likely due to updated dependencies. Try deleting the build folder and then rerunning the Conan and CMake commands mentioned above.
### CMake options
There are several CMake options you can use to customize the build; a combined example follows the table:
| CMake Option | Default | CMake Target | Description |
| --------------------- | ------- | -------------------------------------------------------- | ------------------------------------- |
| `-Dcoverage` | OFF | `clio_tests-ccov` | Enables code coverage generation |
| `-Dtests` | OFF | `clio_tests` | Enables unit tests |
| `-Dintegration_tests` | OFF | `clio_integration_tests` | Enables integration tests |
| `-Dbenchmark` | OFF | `clio_benchmark` | Enables benchmark executable |
| `-Ddocs` | OFF | `docs` | Enables API documentation generation |
| `-Dlint` | OFF | See [#clang-tidy](#using-clang-tidy-for-static-analysis) | Enables `clang-tidy` static analysis |
| `-Dsan` | N/A | N/A | Enables Sanitizer (asan, tsan, ubsan) |
| `-Dpackage` | OFF | N/A | Creates a debian package |
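For example, to configure a build with unit tests and benchmarks enabled (flags from the table above; paths follow the earlier build-directory layout):

```sh
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
      -DCMAKE_BUILD_TYPE=Release -Dtests=ON -Dbenchmark=ON ..
cmake --build . --parallel 8 --target clio_tests clio_benchmark
```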
### Generating API docs for Clio
The API documentation for Clio is generated by [Doxygen](https://www.doxygen.nl/index.html). If you want to generate the API documentation when building Clio, make sure to install Doxygen 1.12.0 on your system.
To generate the API docs, please use CMake option `-Ddocs=ON` as described above and build the `docs` target.
To generate the API docs:
To view the generated files, go to `build/docs/html`.
Open the `index.html` file in your browser to see the documentation pages.
1. First, include `-o '&:docs=True'` in the conan install command. For example:
![API index page](./img/doxygen-docs-output.png "API index page")
```sh
mkdir build && cd build
conan install .. --output-folder . --build missing --settings build_type=Release -o '&:tests=True' -o '&:docs=True'
```
2. Once that has completed successfully, run the `cmake` command and add the `--target docs` option:
```sh
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release ..
cmake --build . --parallel 8 --target docs
```
3. Go to `build/docs/html` to view the generated files.
Open the `index.html` file in your browser to see the documentation pages.
![API index page](./img/doxygen-docs-output.png "API index page")
## Building Clio with Docker
It is also possible to build Clio using [Docker](https://www.docker.com/) if you don't want to install all the dependencies on your machine.
```sh
docker run -it ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
docker run -it ghcr.io/xrplf/clio-ci:latest
git clone https://github.com/XRPLF/clio
cd clio
mkdir build && cd build
conan install .. --output-folder . --build missing --settings build_type=Release -o '&:tests=True'
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release ..
cmake --build . --parallel 8 # or without the number if you feel extra adventurous
```
Follow the same steps in the [Building Clio](#building-clio) section. You can use `--profile:all gcc` or `--profile:all clang` with the `conan install` command to choose the desired compiler.
## Developing against `rippled` in standalone mode
If you wish to develop against a `rippled` instance running in standalone mode there are a few quirks of both Clio and `rippled` that you need to keep in mind. You must:
@@ -250,10 +223,10 @@ Sometimes, during development, you need to build against a custom version of `li
## Using `clang-tidy` for static analysis
Clang-tidy can be run by CMake when building the project.
To achieve this, you just need to provide the option `-Dlint=ON` when generating CMake files:
To achieve this, you just need to provide the option `-o '&:lint=True'` for the `conan install` command:
```sh
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release -Dlint=ON ..
conan install .. --output-folder . --build missing --settings build_type=Release -o '&:tests=True' -o '&:lint=True' --profile:all clang
```
By default, CMake will try to find `clang-tidy` automatically on your system.

View File

@@ -3,9 +3,7 @@
This document provides a list of all available Clio configuration properties in detail.
> [!NOTE]
> Dot notation in configuration key names represents nested fields.
> For example, **database.scylladb** refers to the _scylladb_ field inside the _database_ object.
> If a key name includes "[]", it indicates that the nested field is an array (e.g., etl_sources.[]).
> Dot notation in configuration key names represents nested fields. For example, **database.scylladb** refers to the _scylladb_ field inside the _database_ object. If a key name includes "[]", it indicates that the nested field is an array (e.g., etl_sources.[]).
## Configuration Details
@@ -89,14 +87,6 @@ This document provides a list of all available Clio configuration properties in
- **Constraints**: The minimum value is `1`. The maximum value is `4294967295`.
- **Description**: Represents the number of threads that will be used for database operations.
### database.cassandra.provider
- **Required**: True
- **Type**: string
- **Default value**: `cassandra`
- **Constraints**: The value must be one of the following: `cassandra`, `aws_keyspace`.
- **Description**: The specific database backend provider we are using.
### database.cassandra.core_connections_per_host
- **Required**: True
@@ -165,7 +155,7 @@ This document provides a list of all available Clio configuration properties in
- **Required**: True
- **Type**: boolean
- **Default value**: `False`
- **Default value**: `True`
- **Constraints**: None
- **Description**: If set to `True`, allows Clio to start without any ETL source.
@@ -337,27 +327,11 @@ This document provides a list of all available Clio configuration properties in
- **Constraints**: The minimum value is `1`. The maximum value is `4294967295`.
- **Description**: Maximum queue size for sending subscription data to clients. This queue buffers data when a client is slow to receive it, ensuring delivery once the client is ready.
### server.proxy.ips.[]
- **Required**: True
- **Type**: string
- **Default value**: None
- **Constraints**: None
- **Description**: List of proxy IP addresses. When Clio receives a request from a proxy, it will use the `Forwarded` value (if any) as the client IP. When this option is used together with `server.proxy.tokens`, Clio will identify a proxy by IP or by token.
### server.proxy.tokens.[]
- **Required**: True
- **Type**: string
- **Default value**: None
- **Constraints**: None
- **Description**: List of tokens identifying a request as coming from a proxy. The token should be provided in the `X-Proxy-Token` header, e.g. `X-Proxy-Token: <very_secret_token>`. When Clio receives a request from a proxy, it will use the `Forwarded` value (if any) to get the client IP. When this option is used together with `server.proxy.ips`, Clio will identify a proxy by IP or by token.
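For illustration, a proxied request carrying both headers might look like this (host, port, token, and client IP are hypothetical):

```sh
curl -H 'X-Proxy-Token: <very_secret_token>' \
     -H 'Forwarded: for=203.0.113.7' \
     -d '{"method": "server_info"}' \
     'http://clio.example.com:51233/'
```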
### prometheus.enabled
- **Required**: True
- **Type**: boolean
- **Default value**: `True`
- **Default value**: `False`
- **Constraints**: None
- **Description**: Enables or disables Prometheus metrics.
@@ -365,7 +339,7 @@ This document provides a list of all available Clio configuration properties in
- **Required**: True
- **Type**: boolean
- **Default value**: `True`
- **Default value**: `False`
- **Constraints**: None
- **Description**: Enables or disables compression of Prometheus responses.
@@ -441,7 +415,7 @@ This document provides a list of all available Clio configuration properties in
- **Constraints**: The value must be one of the following: `sync`, `async`, `none`.
- **Description**: The strategy used for Cache loading.
### log.channels.[].channel
### log_channels.[].channel
- **Required**: False
- **Type**: string
@@ -449,63 +423,39 @@ This document provides a list of all available Clio configuration properties in
- **Constraints**: The value must be one of the following: `General`, `WebServer`, `Backend`, `RPC`, `ETL`, `Subscriptions`, `Performance`, `Migration`.
- **Description**: The name of the log channel.
### log.channels.[].level
### log_channels.[].log_level
- **Required**: False
- **Type**: string
- **Default value**: None
- **Constraints**: The value must be one of the following: `trace`, `debug`, `info`, `warning`, `error`, `fatal`.
- **Constraints**: The value must be one of the following: `trace`, `debug`, `info`, `warning`, `error`, `fatal`, `count`.
- **Description**: The log level for the specific log channel.
### log.level
### log_level
- **Required**: True
- **Type**: string
- **Default value**: `info`
- **Constraints**: The value must be one of the following: `trace`, `debug`, `info`, `warning`, `error`, `fatal`.
- **Constraints**: The value must be one of the following: `trace`, `debug`, `info`, `warning`, `error`, `fatal`, `count`.
- **Description**: The general logging level of Clio. This level is applied to all log channels that do not have an explicitly defined logging level.
### log.format
### log_format
- **Required**: True
- **Type**: string
- **Default value**: `%Y-%m-%d %H:%M:%S.%f %^%3!l:%n%$ - %v`
- **Default value**: `%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% %Message%`
- **Constraints**: None
- **Description**: The format string for log messages using spdlog format patterns.
- **Description**: The format string for log messages. The format is described here: <https://www.boost.org/doc/libs/1_83_0/libs/log/doc/html/log/tutorial/formatters.html>.
Each of the variables expands like so:
- `%Y-%m-%d %H:%M:%S.%f`: The full date and time of the log entry with microsecond precision
- `%^`: Start color range
- `%3!l`: The severity (aka log level) the entry was sent at stripped to 3 characters
- `%n`: The logger name (channel) that this log entry was sent to
- `%$`: End color range
- `%v`: The actual log message
Some additional variables that might be useful:
- `%@`: A partial path to the C++ file and the line number in the said file (`src/file/path:linenumber`)
- `%t`: The ID of the thread the log entry is written from
Documentation can be found at: <https://github.com/gabime/spdlog/wiki/Custom-formatting>.
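Putting the default pattern together, an `info` entry on the `General` channel renders roughly like this (illustrative line, color codes omitted):

```
2025-07-22 11:43:18.123456 inf:General - Some log message
```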
### log.is_async
### log_to_console
- **Required**: True
- **Type**: boolean
- **Default value**: `True`
- **Constraints**: None
- **Description**: Whether spdlog is asynchronous or not.
### log.enable_console
- **Required**: True
- **Type**: boolean
- **Default value**: `False`
- **Constraints**: None
- **Description**: Enables or disables logging to the console.
### log.directory
### log_directory
- **Required**: False
- **Type**: string
@@ -513,7 +463,7 @@ Documentation can be found at: <https://github.com/gabime/spdlog/wiki/Custom-for
- **Constraints**: None
- **Description**: The directory path for the log files.
### log.rotation_size
### log_rotation_size
- **Required**: True
- **Type**: int
@@ -521,15 +471,23 @@ Documentation can be found at: <https://github.com/gabime/spdlog/wiki/Custom-for
- **Constraints**: The minimum value is `1`. The maximum value is `4294967295`.
- **Description**: The log rotation size in megabytes. When the log file reaches this particular size, a new log file starts.
### log.directory_max_files
### log_directory_max_size
- **Required**: True
- **Type**: int
- **Default value**: `25`
- **Default value**: `51200`
- **Constraints**: The minimum value is `1`. The maximum value is `4294967295`.
- **Description**: The maximum number of log files in the directory.
- **Description**: The maximum size of the log directory in megabytes.
### log.tag_style
### log_rotation_hour_interval
- **Required**: True
- **Type**: int
- **Default value**: `12`
- **Constraints**: The minimum value is `1`. The maximum value is `4294967295`.
- **Description**: Represents the interval (in hours) for log rotation. When the current log file has been in use for this long, a new log file starts.
### log_tag_style
- **Required**: True
- **Type**: string
@@ -549,7 +507,7 @@ Documentation can be found at: <https://github.com/gabime/spdlog/wiki/Custom-for
- **Required**: True
- **Type**: boolean
- **Default value**: `False`
- **Default value**: `True`
- **Constraints**: None
- **Description**: Indicates if the server is allowed to write data to the database.

View File

@@ -88,15 +88,13 @@ Exactly equal password gains admin rights for the request or a websocket connect
Clio can cache requests to ETL sources to reduce the load on the ETL source.
Only the following commands are cached: `server_info`, `server_state`, `server_definitions`, `fee`, `ledger_closed`.
By default the forwarding cache is off.
To enable the caching for a source, `forwarding.cache_timeout` value should be added to the configuration file, e.g.:
To enable the caching for a source, `forwarding_cache_timeout` value should be added to the configuration file, e.g.:
```json
"forwarding": {
"cache_timeout": 0.250,
}
"forwarding_cache_timeout": 0.250,
```
`forwarding.cache_timeout` defines for how long (in seconds) a cache entry will be valid after being placed into the cache.
`forwarding_cache_timeout` defines for how long (in seconds) a cache entry will be valid after being placed into the cache.
A zero value turns the cache feature off.
## Graceful shutdown (not fully implemented yet)

View File

@@ -1,10 +1,29 @@
// SPDX-License-Identifier: MIT
/**
Doxygen Awesome
https://github.com/jothepro/doxygen-awesome-css
Copyright (c) 2021 - 2025 jothepro
MIT License
Copyright (c) 2021 - 2023 jothepro
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/

View File

@@ -1,66 +0,0 @@
// SPDX-License-Identifier: MIT
/**
Doxygen Awesome
https://github.com/jothepro/doxygen-awesome-css
Copyright (c) 2022 - 2025 jothepro
*/
class DoxygenAwesomeFragmentCopyButton extends HTMLElement {
constructor() {
super();
this.onclick=this.copyContent
}
static title = "Copy to clipboard"
static copyIcon = `<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" width="24" height="24"><path d="M0 0h24v24H0V0z" fill="none"/><path d="M23.04,10.322c0,-2.582 -2.096,-4.678 -4.678,-4.678l-6.918,-0c-2.582,-0 -4.678,2.096 -4.678,4.678c0,-0 0,8.04 0,8.04c0,2.582 2.096,4.678 4.678,4.678c0,-0 6.918,-0 6.918,-0c2.582,-0 4.678,-2.096 4.678,-4.678c0,-0 0,-8.04 0,-8.04Zm-2.438,-0l-0,8.04c-0,1.236 -1.004,2.24 -2.24,2.24l-6.918,-0c-1.236,-0 -2.239,-1.004 -2.239,-2.24l-0,-8.04c-0,-1.236 1.003,-2.24 2.239,-2.24c0,0 6.918,0 6.918,0c1.236,0 2.24,1.004 2.24,2.24Z"/><path d="M5.327,16.748c-0,0.358 -0.291,0.648 -0.649,0.648c0,0 0,0 0,0c-2.582,0 -4.678,-2.096 -4.678,-4.678c0,0 0,-8.04 0,-8.04c0,-2.582 2.096,-4.678 4.678,-4.678l6.918,0c2.168,0 3.994,1.478 4.523,3.481c0.038,0.149 0.005,0.306 -0.09,0.428c-0.094,0.121 -0.239,0.191 -0.392,0.191c-0.451,0.005 -1.057,0.005 -1.457,0.005c-0.238,0 -0.455,-0.14 -0.553,-0.357c-0.348,-0.773 -1.128,-1.31 -2.031,-1.31c-0,0 -6.918,0 -6.918,0c-1.236,0 -2.24,1.004 -2.24,2.24l0,8.04c0,1.236 1.004,2.24 2.24,2.24l0,-0c0.358,-0 0.649,0.29 0.649,0.648c-0,0.353 -0,0.789 -0,1.142Z" style="fill-opacity:0.6;"/></svg>`
static successIcon = `<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" width="24" height="24"><path d="M8.084,16.111c-0.09,0.09 -0.212,0.141 -0.34,0.141c-0.127,-0 -0.249,-0.051 -0.339,-0.141c-0.746,-0.746 -2.538,-2.538 -3.525,-3.525c-0.375,-0.375 -0.983,-0.375 -1.357,0c-0.178,0.178 -0.369,0.369 -0.547,0.547c-0.375,0.375 -0.375,0.982 -0,1.357c1.135,1.135 3.422,3.422 4.75,4.751c0.27,0.27 0.637,0.421 1.018,0.421c0.382,0 0.749,-0.151 1.019,-0.421c2.731,-2.732 10.166,-10.167 12.454,-12.455c0.375,-0.375 0.375,-0.982 -0,-1.357c-0.178,-0.178 -0.369,-0.369 -0.547,-0.547c-0.375,-0.375 -0.982,-0.375 -1.357,0c-2.273,2.273 -9.567,9.567 -11.229,11.229Z"/></svg>`
static successDuration = 980
static init() {
$(function() {
$(document).ready(function() {
if(navigator.clipboard) {
const fragments = document.getElementsByClassName("fragment")
for(const fragment of fragments) {
const fragmentWrapper = document.createElement("div")
fragmentWrapper.className = "doxygen-awesome-fragment-wrapper"
const fragmentCopyButton = document.createElement("doxygen-awesome-fragment-copy-button")
fragmentCopyButton.innerHTML = DoxygenAwesomeFragmentCopyButton.copyIcon
fragmentCopyButton.title = DoxygenAwesomeFragmentCopyButton.title
fragment.parentNode.replaceChild(fragmentWrapper, fragment)
fragmentWrapper.appendChild(fragment)
fragmentWrapper.appendChild(fragmentCopyButton)
}
}
})
})
}
copyContent() {
const content = this.previousSibling.cloneNode(true)
// filter out line number from file listings
content.querySelectorAll(".lineno, .ttc").forEach((node) => {
node.remove()
})
let textContent = content.textContent
// remove trailing newlines that appear in file listings
let numberOfTrailingNewlines = 0
while(textContent.charAt(textContent.length - (numberOfTrailingNewlines + 1)) == '\n') {
numberOfTrailingNewlines++;
}
textContent = textContent.substring(0, textContent.length - numberOfTrailingNewlines)
navigator.clipboard.writeText(textContent);
this.classList.add("success")
this.innerHTML = DoxygenAwesomeFragmentCopyButton.successIcon
window.setTimeout(() => {
this.classList.remove("success")
this.innerHTML = DoxygenAwesomeFragmentCopyButton.copyIcon
}, DoxygenAwesomeFragmentCopyButton.successDuration);
}
}
customElements.define("doxygen-awesome-fragment-copy-button", DoxygenAwesomeFragmentCopyButton)

View File

@@ -1,10 +1,29 @@
// SPDX-License-Identifier: MIT
/**
Doxygen Awesome
https://github.com/jothepro/doxygen-awesome-css
Copyright (c) 2022 - 2025 jothepro
MIT License
Copyright (c) 2022 - 2023 jothepro
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
@@ -36,7 +55,9 @@ class DoxygenAwesomeInteractiveToc {
headerNode: document.getElementById(id)
})
document.getElementById("doc-content")?.addEventListener("scroll",this.throttle(DoxygenAwesomeInteractiveToc.update, 100))
document.getElementById("doc-content")?.addEventListener("scroll", () => {
DoxygenAwesomeInteractiveToc.update()
})
})
DoxygenAwesomeInteractiveToc.update()
}
@@ -57,16 +78,4 @@ class DoxygenAwesomeInteractiveToc {
active?.classList.add("active")
active?.classList.remove("aboveActive")
}
static throttle(func, delay) {
let lastCall = 0;
return function (...args) {
const now = new Date().getTime();
if (now - lastCall < delay) {
return;
}
lastCall = now;
return setTimeout(() => {func(...args)}, delay);
};
}
}

View File

@@ -1,10 +1,30 @@
/* SPDX-License-Identifier: MIT */
/**
Doxygen Awesome
https://github.com/jothepro/doxygen-awesome-css
Copyright (c) 2021 - 2025 jothepro
MIT License
Copyright (c) 2021 - 2023 jothepro
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/

View File

@@ -1,10 +1,29 @@
/* SPDX-License-Identifier: MIT */
/**
Doxygen Awesome
https://github.com/jothepro/doxygen-awesome-css
Copyright (c) 2021 - 2025 jothepro
MIT License
Copyright (c) 2021 - 2023 jothepro
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
@@ -41,6 +60,10 @@ html {
height: calc(100vh - var(--top-height)) !important;
}
#nav-tree {
padding: 0;
}
#top {
display: block;
border-bottom: none;
@@ -50,24 +73,22 @@ html {
overflow: hidden;
background: var(--side-nav-background);
}
#main-nav {
float: left;
padding-right: 0;
}
.ui-resizable-handle {
display: none;
}
.ui-resizable-e {
width: 0;
cursor: default;
width: 1px !important;
background: var(--separator-color);
box-shadow: 0 calc(-2 * var(--top-height)) 0 0 var(--separator-color);
}
#nav-path {
position: fixed;
right: 0;
left: calc(var(--side-nav-fixed-width) + 1px);
left: var(--side-nav-fixed-width);
bottom: 0;
width: auto;
}
@@ -92,14 +113,4 @@ html {
left: var(--spacing-medium) !important;
right: auto;
}
#nav-sync {
bottom: 4px;
right: auto;
left: 300px;
width: 35px;
top: auto !important;
user-select: none;
position: fixed
}
}

View File

@@ -1,10 +1,29 @@
/* SPDX-License-Identifier: MIT */
/**
Doxygen Awesome
https://github.com/jothepro/doxygen-awesome-css
Copyright (c) 2021 - 2025 jothepro
MIT License
Copyright (c) 2021 - 2023 jothepro
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
@@ -13,9 +32,6 @@ html {
--primary-color: #1779c4;
--primary-dark-color: #335c80;
--primary-light-color: #70b1e9;
--on-primary-color: #ffffff;
--link-color: var(--primary-color);
/* page base colors */
--page-background-color: #ffffff;
@@ -26,15 +42,14 @@ html {
--separator-color: #dedede;
/* border radius for all rounded components. Will affect many components, like dropdowns, memitems, codeblocks, ... */
--border-radius-large: 10px;
--border-radius-small: 5px;
--border-radius-medium: 8px;
--border-radius-large: 8px;
--border-radius-small: 4px;
--border-radius-medium: 6px;
/* default spacings. Most components reference these values for spacing, to provide uniform spacing on the page. */
--spacing-small: 5px;
--spacing-medium: 10px;
--spacing-large: 16px;
--spacing-xlarge: 20px;
/* default box shadow used for raising an element above the normal content. Used in dropdowns, search result, ... */
--box-shadow: 0 2px 8px 0 rgba(0,0,0,.075);
@@ -98,7 +113,7 @@ html {
*/
--menu-display: block;
--menu-focus-foreground: var(--on-primary-color);
--menu-focus-foreground: var(--page-background-color);
--menu-focus-background: var(--primary-color);
--menu-selected-background: rgba(0,0,0,.05);
@@ -295,11 +310,10 @@ body {
font-size: var(--page-font-size);
}
body, table, div, p, dl, #nav-tree .label, #nav-tree a, .title,
body, table, div, p, dl, #nav-tree .label, .title,
.sm-dox a, .sm-dox a:hover, .sm-dox a:focus, #projectname,
.SelectItem, #MSearchField, .navpath li.navelem a,
.navpath li.navelem a:hover, p.reference, p.definition, div.toc li, div.toc h3,
#page-nav ul.page-outline li a {
.navpath li.navelem a:hover, p.reference, p.definition, div.toc li, div.toc h3 {
font-family: var(--font-family);
}
@@ -318,13 +332,8 @@ p.reference, p.definition {
}
a:link, a:visited, a:hover, a:focus, a:active {
color: var(--link-color) !important;
color: var(--primary-color) !important;
font-weight: 500;
background: none;
}
a:hover {
text-decoration: underline;
}
a.anchor {
@@ -339,8 +348,6 @@ a.anchor {
#top {
background: var(--header-background);
border-bottom: 1px solid var(--separator-color);
position: relative;
z-index: 99;
}
@media screen and (min-width: 768px) {
@@ -355,7 +362,6 @@ a.anchor {
#main-nav {
flex-grow: 5;
padding: var(--spacing-small) var(--spacing-medium);
border-bottom: 0;
}
#titlearea {
@@ -435,36 +441,19 @@ a.anchor {
}
.sm-dox a span.sub-arrow {
top: 15px;
right: 10px;
box-sizing: content-box;
padding: 0;
margin: 0;
display: inline-block;
width: 5px;
height: 5px;
transform: rotate(45deg);
border-width: 0;
border-right: 2px solid var(--header-foreground);
border-bottom: 2px solid var(--header-foreground);
background: none;
border-color: var(--header-foreground) transparent transparent transparent;
}
.sm-dox a:hover span.sub-arrow {
border-color: var(--menu-focus-foreground);
background: none;
border-color: var(--menu-focus-foreground) transparent transparent transparent;
}
.sm-dox ul a span.sub-arrow {
transform: rotate(-45deg);
border-width: 0;
border-right: 2px solid var(--header-foreground);
border-bottom: 2px solid var(--header-foreground);
border-color: transparent transparent transparent var(--page-foreground-color);
}
.sm-dox ul a:hover span.sub-arrow {
border-color: var(--menu-focus-foreground);
background: none;
border-color: transparent transparent transparent var(--menu-focus-foreground);
}
}
@@ -491,7 +480,7 @@ a.anchor {
.sm-dox ul a {
color: var(--page-foreground-color) !important;
background: none;
background: var(--page-background-color);
font-size: var(--navigation-font-size);
}
@@ -563,13 +552,6 @@ a.anchor {
box-shadow: none;
display: block;
margin-top: 0;
margin-right: 0;
}
@media (min-width: 768px) {
.sm-dox li {
padding: 0;
}
}
/* until Doxygen 1.9.4 */
@@ -591,17 +573,6 @@ a.anchor {
padding-left: 0
}
/* Doxygen 1.14.0 */
.search-icon::before {
background: none;
top: 5px;
}
.search-icon::after {
background: none;
top: 12px;
}
.SelectionMark {
user-select: none;
}
@@ -805,15 +776,12 @@ html.dark-mode iframe#MSearchResults {
*/
#side-nav {
padding: 0 !important;
background: var(--side-nav-background);
min-width: 8px;
max-width: 50vw;
}
#nav-tree, #top {
border-right: 1px solid var(--separator-color);
}
@media screen and (max-width: 767px) {
#side-nav {
display: none;
@@ -822,95 +790,34 @@ html.dark-mode iframe#MSearchResults {
#doc-content {
margin-left: 0 !important;
}
#top {
border-right: none;
}
}
#nav-tree {
background: var(--side-nav-background);
margin-right: -1px;
padding: 0;
background: transparent;
margin-right: 1px;
}
#nav-tree .label {
font-size: var(--navigation-font-size);
line-height: var(--tree-item-height);
}
#nav-tree span.label a:hover {
background: none;
}
#nav-tree .item {
height: var(--tree-item-height);
line-height: var(--tree-item-height);
overflow: hidden;
text-overflow: ellipsis;
margin: 0;
padding: 0;
}
#nav-tree-contents {
margin: 0;
}
#main-menu > li:last-child {
height: auto;
}
#nav-tree .item > a:focus {
outline: none;
}
#nav-sync {
bottom: var(--spacing-medium);
right: var(--spacing-medium) !important;
bottom: 12px;
right: 12px;
top: auto !important;
user-select: none;
}
div.nav-sync-icon {
border: 1px solid var(--separator-color);
border-radius: var(--border-radius-medium);
background: var(--page-background-color);
width: 30px;
height: 20px;
}
div.nav-sync-icon:hover {
background: var(--page-background-color);
}
span.sync-icon-left, div.nav-sync-icon:hover span.sync-icon-left {
border-left: 2px solid var(--primary-color);
border-top: 2px solid var(--primary-color);
top: 5px;
left: 6px;
}
span.sync-icon-right, div.nav-sync-icon:hover span.sync-icon-right {
border-right: 2px solid var(--primary-color);
border-bottom: 2px solid var(--primary-color);
top: 5px;
left: initial;
right: 6px;
}
div.nav-sync-icon.active::after, div.nav-sync-icon.active:hover::after {
border-top: 2px solid var(--primary-color);
top: 9px;
left: 6px;
width: 19px;
}
#nav-tree .selected {
text-shadow: none;
background-image: none;
background-color: transparent;
position: relative;
color: var(--primary-color) !important;
font-weight: 500;
}
#nav-tree .selected::after {
@@ -936,27 +843,9 @@ div.nav-sync-icon.active::after, div.nav-sync-icon.active:hover::after {
#nav-tree .arrow {
opacity: var(--side-nav-arrow-opacity);
background: none;
}
#nav-tree span.arrowhead {
margin: 0 0 1px 2px;
}
span.arrowhead {
border-color: var(--primary-light-color);
}
.selected span.arrowhead {
border-color: var(--primary-color);
}
#nav-tree ul li:first-child > div > a {
opacity: 0;
pointer-events: none;
}
.contents .arrow {
.arrow {
color: inherit;
cursor: pointer;
font-size: 45%;
@@ -964,7 +853,7 @@ span.arrowhead {
margin-right: 2px;
font-family: serif;
height: auto;
padding-bottom: 4px;
text-align: right;
}
#nav-tree div.item:hover .arrow, #nav-tree a:focus .arrow {
@@ -978,11 +867,9 @@ span.arrowhead {
}
.ui-resizable-e {
background: none;
}
.ui-resizable-e:hover {
background: var(--separator-color);
width: 4px;
background: transparent;
box-shadow: inset -1px 0 0 0 var(--separator-color);
}
/*
@@ -991,7 +878,7 @@ span.arrowhead {
div.header {
border-bottom: 1px solid var(--separator-color);
background: none;
background-color: var(--page-background-color);
background-image: none;
}
@@ -1030,7 +917,7 @@ div.headertitle {
div.header .title {
font-weight: 600;
font-size: 225%;
padding: var(--spacing-medium) var(--spacing-xlarge);
padding: var(--spacing-medium) var(--spacing-large);
word-break: break-word;
}
@@ -1047,10 +934,9 @@ td.memSeparator {
span.mlabel {
background: var(--primary-color);
color: var(--on-primary-color);
border: none;
padding: 4px 9px;
border-radius: var(--border-radius-large);
border-radius: 12px;
margin-right: var(--spacing-medium);
}
@@ -1059,7 +945,7 @@ span.mlabel:last-of-type {
}
div.contents {
padding: 0 var(--spacing-xlarge);
padding: 0 var(--spacing-large);
}
div.contents p, div.contents li {
@@ -1070,16 +956,6 @@ div.contents div.dyncontent {
margin: var(--spacing-medium) 0;
}
@media screen and (max-width: 767px) {
div.contents {
padding: 0 var(--spacing-large);
}
div.header .title {
padding: var(--spacing-medium) var(--spacing-large);
}
}
@media (prefers-color-scheme: dark) {
html:not(.light-mode) div.contents div.dyncontent img,
html:not(.light-mode) div.contents center img,
@@ -1103,7 +979,7 @@ html.dark-mode div.contents .dotgraph iframe
filter: brightness(89%) hue-rotate(180deg) invert();
}
td h2.groupheader, h2.groupheader {
h2.groupheader {
border-bottom: 0px;
color: var(--page-foreground-color);
box-shadow:
@@ -1164,7 +1040,7 @@ blockquote::after {
blockquote p {
margin: var(--spacing-small) 0 var(--spacing-medium) 0;
}
.paramname, .paramname em {
.paramname {
font-weight: 600;
color: var(--primary-dark-color);
}
@@ -1214,7 +1090,7 @@ div.contents .toc {
border: 0;
border-left: 1px solid var(--separator-color);
border-radius: 0;
background-color: var(--page-background-color);
background-color: transparent;
box-shadow: none;
position: sticky;
top: var(--toc-sticky-top);
@@ -1322,115 +1198,24 @@ div.toc li a.aboveActive {
}
}
/*
Page Outline (Doxygen >= 1.14.0)
*/
#page-nav {
background: var(--page-background-color);
border-left: 1px solid var(--separator-color);
}
#page-nav #page-nav-resize-handle {
background: var(--separator-color);
}
#page-nav #page-nav-resize-handle::after {
border-left: 1px solid var(--primary-color);
border-right: 1px solid var(--primary-color);
}
#page-nav #page-nav-tree #page-nav-contents {
top: var(--spacing-large);
}
#page-nav ul.page-outline {
margin: 0;
padding: 0;
}
#page-nav ul.page-outline li a {
font-size: var(--toc-font-size) !important;
color: var(--page-secondary-foreground-color) !important;
display: inline-block;
line-height: calc(2 * var(--toc-font-size));
}
#page-nav ul.page-outline li a a.anchorlink {
display: none;
}
#page-nav ul.page-outline li.vis ~ * a {
color: var(--page-foreground-color) !important;
}
#page-nav ul.page-outline li.vis:not(.vis ~ .vis) a, #page-nav ul.page-outline li a:hover {
color: var(--primary-color) !important;
}
#page-nav ul.page-outline .vis {
background: var(--page-background-color);
position: relative;
}
#page-nav ul.page-outline .vis::after {
content: "";
position: absolute;
top: 0;
bottom: 0;
left: 0;
width: 4px;
background: var(--page-secondary-foreground-color);
}
#page-nav ul.page-outline .vis:not(.vis ~ .vis)::after {
top: 1px;
border-top-right-radius: var(--border-radius-small);
}
#page-nav ul.page-outline .vis:not(:has(~ .vis))::after {
bottom: 1px;
border-bottom-right-radius: var(--border-radius-small);
}
#page-nav ul.page-outline .arrow {
display: inline-block;
}
#page-nav ul.page-outline .arrow span {
display: none;
}
@media screen and (max-width: 767px) {
#container {
grid-template-columns: initial !important;
}
#page-nav {
display: none;
}
}
/*
Code & Fragments
*/
code, div.fragment, pre.fragment, span.tt {
code, div.fragment, pre.fragment {
border-radius: var(--border-radius-small);
border: 1px solid var(--separator-color);
overflow: hidden;
}
code, span.tt {
code {
display: inline;
background: var(--code-background);
color: var(--code-foreground);
padding: 2px 6px;
border-radius: var(--border-radius-small);
}
div.fragment, pre.fragment {
border-radius: var(--border-radius-medium);
margin: var(--spacing-medium) 0;
padding: calc(var(--spacing-large) - (var(--spacing-large) / 6)) var(--spacing-large);
background: var(--fragment-background);
@@ -1488,7 +1273,7 @@ div.fragment, pre.fragment {
}
}
code, code a, pre.fragment, div.fragment, div.fragment .line, div.fragment span, div.fragment .line a, div.fragment .line span, span.tt {
code, code a, pre.fragment, div.fragment, div.fragment .line, div.fragment span, div.fragment .line a, div.fragment .line span {
font-family: var(--font-family-monospace);
font-size: var(--code-font-size) !important;
}
@@ -1562,10 +1347,6 @@ div.line.glow {
dl warning, attention, note, deprecated, bug, ...
*/
dl {
line-height: calc(1.65 * var(--page-font-size));
}
dl.bug dt a, dl.deprecated dt a, dl.todo dt a {
font-weight: bold !important;
}
@@ -1731,7 +1512,6 @@ div.memitem {
border-top-right-radius: var(--border-radius-medium);
border-bottom-right-radius: var(--border-radius-medium);
border-bottom-left-radius: var(--border-radius-medium);
border-top-left-radius: 0;
overflow: hidden;
display: block !important;
}
@@ -1963,7 +1743,7 @@ table.fieldtable th {
color: var(--tablehead-foreground);
}
table.fieldtable td.fieldtype, .fieldtable td.fieldname, .fieldtable td.fieldinit, .fieldtable td.fielddoc, .fieldtable th {
table.fieldtable td.fieldtype, .fieldtable td.fieldname, .fieldtable td.fielddoc, .fieldtable th {
border-bottom: 1px solid var(--separator-color);
border-right: 1px solid var(--separator-color);
}
@@ -1998,10 +1778,8 @@ table.memberdecls tr[class^='memitem'] .memTemplParams {
white-space: normal;
}
table.memberdecls tr.heading + tr[class^='memitem'] td.memItemLeft,
table.memberdecls tr.heading + tr[class^='memitem'] td.memItemRight,
table.memberdecls td.memItemLeft,
table.memberdecls td.memItemRight,
table.memberdecls .memItemLeft,
table.memberdecls .memItemRight,
table.memberdecls .memTemplItemLeft,
table.memberdecls .memTemplItemRight,
table.memberdecls .memTemplParams {
@@ -2013,34 +1791,8 @@ table.memberdecls .memTemplParams {
background-color: var(--fragment-background);
}
@media screen and (min-width: 768px) {
tr.heading + tr[class^='memitem'] td.memItemRight, tr.groupHeader + tr[class^='memitem'] td.memItemRight, tr.inherit_header + tr[class^='memitem'] td.memItemRight {
border-top-right-radius: var(--border-radius-small);
}
table.memberdecls tr:last-child td.memItemRight, table.memberdecls tr:last-child td.mdescRight, table.memberdecls tr[class^='memitem']:has(+ tr.groupHeader) td.memItemRight, table.memberdecls tr[class^='memitem']:has(+ tr.inherit_header) td.memItemRight, table.memberdecls tr[class^='memdesc']:has(+ tr.groupHeader) td.mdescRight, table.memberdecls tr[class^='memdesc']:has(+ tr.inherit_header) td.mdescRight {
border-bottom-right-radius: var(--border-radius-small);
}
table.memberdecls tr:last-child td.memItemLeft, table.memberdecls tr:last-child td.mdescLeft, table.memberdecls tr[class^='memitem']:has(+ tr.groupHeader) td.memItemLeft, table.memberdecls tr[class^='memitem']:has(+ tr.inherit_header) td.memItemLeft, table.memberdecls tr[class^='memdesc']:has(+ tr.groupHeader) td.mdescLeft, table.memberdecls tr[class^='memdesc']:has(+ tr.inherit_header) td.mdescLeft {
border-bottom-left-radius: var(--border-radius-small);
}
tr.heading + tr[class^='memitem'] td.memItemLeft, tr.groupHeader + tr[class^='memitem'] td.memItemLeft, tr.inherit_header + tr[class^='memitem'] td.memItemLeft {
border-top-left-radius: var(--border-radius-small);
}
}
table.memname td.memname {
font-size: var(--memname-font-size);
}
table.memberdecls .memTemplItemLeft,
table.memberdecls .template .memItemLeft,
table.memberdecls .memTemplItemRight,
table.memberdecls .template .memItemRight {
table.memberdecls .memTemplItemRight {
padding-top: 2px;
}
@@ -2052,13 +1804,13 @@ table.memberdecls .memTemplParams {
padding-bottom: var(--spacing-small);
}
table.memberdecls .memTemplItemLeft, table.memberdecls .template .memItemLeft {
table.memberdecls .memTemplItemLeft {
border-radius: 0 0 0 var(--border-radius-small);
border-left: 1px solid var(--separator-color);
border-top: 0;
}
table.memberdecls .memTemplItemRight, table.memberdecls .template .memItemRight {
table.memberdecls .memTemplItemRight {
border-radius: 0 0 var(--border-radius-small) 0;
border-right: 1px solid var(--separator-color);
padding-left: 0;
@@ -2084,14 +1836,8 @@ table.memberdecls .mdescLeft, table.memberdecls .mdescRight {
background: none;
color: var(--page-foreground-color);
padding: var(--spacing-small) 0;
border: 0;
}
table.memberdecls [class^="memdesc"] {
box-shadow: none;
}
table.memberdecls .memItemLeft,
table.memberdecls .memTemplItemLeft {
padding-right: var(--spacing-medium);
@@ -2114,10 +1860,6 @@ table.memberdecls .inherit_header td {
color: var(--page-secondary-foreground-color);
}
table.memberdecls span.dynarrow {
left: 10px;
}
table.memberdecls img[src="closed.png"],
table.memberdecls img[src="open.png"],
div.dynheader img[src="open.png"],
@@ -2134,10 +1876,6 @@ div.dynheader img[src="closed.png"] {
transition: transform var(--animation-duration) ease-out;
}
tr.heading + tr[class^='memitem'] td.memItemLeft, tr.groupHeader + tr[class^='memitem'] td.memItemLeft, tr.inherit_header + tr[class^='memitem'] td.memItemLeft, tr.heading + tr[class^='memitem'] td.memItemRight, tr.groupHeader + tr[class^='memitem'] td.memItemRight, tr.inherit_header + tr[class^='memitem'] td.memItemRight {
border-top: 1px solid var(--separator-color);
}
table.memberdecls img {
margin-right: 10px;
}
@@ -2162,10 +1900,7 @@ div.dynheader img[src="closed.png"] {
table.memberdecls .mdescRight,
table.memberdecls .memTemplItemLeft,
table.memberdecls .memTemplItemRight,
table.memberdecls .memTemplParams,
table.memberdecls .template .memItemLeft,
table.memberdecls .template .memItemRight,
table.memberdecls .template .memParams {
table.memberdecls .memTemplParams {
display: block;
text-align: left;
padding-left: var(--spacing-large);
@@ -2178,14 +1913,12 @@ div.dynheader img[src="closed.png"] {
table.memberdecls .memItemLeft,
table.memberdecls .mdescLeft,
table.memberdecls .memTemplItemLeft,
table.memberdecls .template .memItemLeft {
border-bottom: 0 !important;
padding-bottom: 0 !important;
table.memberdecls .memTemplItemLeft {
border-bottom: 0;
padding-bottom: 0;
}
table.memberdecls .memTemplItemLeft,
table.memberdecls .template .memItemLeft {
table.memberdecls .memTemplItemLeft {
padding-top: 0;
}
@@ -2195,12 +1928,10 @@ div.dynheader img[src="closed.png"] {
table.memberdecls .memItemRight,
table.memberdecls .mdescRight,
table.memberdecls .memTemplItemRight,
table.memberdecls .template .memItemRight {
border-top: 0 !important;
padding-top: 0 !important;
table.memberdecls .memTemplItemRight {
border-top: 0;
padding-top: 0;
padding-right: var(--spacing-large);
padding-bottom: var(--spacing-medium);
overflow-x: auto;
}
@@ -2235,22 +1966,6 @@ div.dynheader img[src="closed.png"] {
max-height: 200px;
}
}
tr.heading + tr[class^='memitem'] td.memItemRight, tr.groupHeader + tr[class^='memitem'] td.memItemRight, tr.inherit_header + tr[class^='memitem'] td.memItemRight {
border-top-right-radius: 0;
}
table.memberdecls tr:last-child td.memItemRight, table.memberdecls tr:last-child td.mdescRight, table.memberdecls tr[class^='memitem']:has(+ tr.groupHeader) td.memItemRight, table.memberdecls tr[class^='memitem']:has(+ tr.inherit_header) td.memItemRight, table.memberdecls tr[class^='memdesc']:has(+ tr.groupHeader) td.mdescRight, table.memberdecls tr[class^='memdesc']:has(+ tr.inherit_header) td.mdescRight {
border-bottom-right-radius: 0;
}
table.memberdecls tr:last-child td.memItemLeft, table.memberdecls tr:last-child td.mdescLeft, table.memberdecls tr[class^='memitem']:has(+ tr.groupHeader) td.memItemLeft, table.memberdecls tr[class^='memitem']:has(+ tr.inherit_header) td.memItemLeft, table.memberdecls tr[class^='memdesc']:has(+ tr.groupHeader) td.mdescLeft, table.memberdecls tr[class^='memdesc']:has(+ tr.inherit_header) td.mdescLeft {
border-bottom-left-radius: 0;
}
tr.heading + tr[class^='memitem'] td.memItemLeft, tr.groupHeader + tr[class^='memitem'] td.memItemLeft, tr.inherit_header + tr[class^='memitem'] td.memItemLeft {
border-top-left-radius: 0;
}
}
@@ -2267,16 +1982,14 @@ hr {
}
.contents hr {
box-shadow: 100px 0 var(--separator-color),
-100px 0 var(--separator-color),
500px 0 var(--separator-color),
-500px 0 var(--separator-color),
900px 0 var(--separator-color),
-900px 0 var(--separator-color),
1400px 0 var(--separator-color),
-1400px 0 var(--separator-color),
1900px 0 var(--separator-color),
-1900px 0 var(--separator-color);
box-shadow: 100px 0 0 var(--separator-color),
-100px 0 0 var(--separator-color),
500px 0 0 var(--separator-color),
-500px 0 0 var(--separator-color),
1500px 0 0 var(--separator-color),
-1500px 0 0 var(--separator-color),
2000px 0 0 var(--separator-color),
-2000px 0 0 var(--separator-color);
}
.contents img, .contents .center, .contents center, .contents div.image object {
@@ -2439,7 +2152,9 @@ div.qindex {
background: var(--page-background-color);
border: none;
border-top: 1px solid var(--separator-color);
border-bottom: 1px solid var(--separator-color);
border-bottom: 0;
box-shadow: 0 0.75px 0 var(--separator-color);
font-size: var(--navigation-font-size);
}
@@ -2468,10 +2183,6 @@ address.footer {
color: var(--primary-color) !important;
}
.navpath li.navelem a:hover {
text-shadow: none;
}
.navpath li.navelem b {
color: var(--primary-dark-color);
font-weight: 500;
@@ -2490,11 +2201,7 @@ li.navelem:first-child:before {
display: none;
}
#nav-path ul {
padding-left: 0;
}
#nav-path li.navelem:has(.el):after {
#nav-path li.navelem:after {
content: '';
border: 5px solid var(--page-background-color);
border-bottom-color: transparent;
@@ -2505,21 +2212,7 @@ li.navelem:first-child:before {
margin-left: 6px;
}
#nav-path li.navelem:not(:has(.el)):after {
background: var(--page-background-color);
box-shadow: 1px -1px 0 1px var(--separator-color);
border-radius: 0 var(--border-radius-medium) 0 50px;
}
#nav-path li.navelem:not(:has(.el)) {
margin-left: 0;
}
#nav-path li.navelem:not(:has(.el)):hover, #nav-path li.navelem:not(:has(.el)):hover:after {
background-color: var(--separator-color);
}
#nav-path li.navelem:has(.el):before {
#nav-path li.navelem:before {
content: '';
border: 5px solid var(--separator-color);
border-bottom-color: transparent;
@@ -2645,7 +2338,7 @@ doxygen-awesome-dark-mode-toggle {
height: var(--searchbar-height);
background: none;
border: none;
border-radius: var(--searchbar-border-radius);
border-radius: var(--searchbar-height);
vertical-align: middle;
text-align: center;
line-height: var(--searchbar-height);
@@ -2830,7 +2523,6 @@ h2:hover a.anchorlink, h1:hover a.anchorlink, h3:hover a.anchorlink, h4:hover a.
float: left;
white-space: nowrap;
font-weight: normal;
font-family: var(--font-family);
padding: calc(var(--spacing-large) / 2) var(--spacing-large);
border-radius: var(--border-radius-medium);
transition: background-color var(--animation-duration) ease-in-out, font-weight var(--animation-duration) ease-in-out;
@@ -2975,46 +2667,3 @@ h2:hover a.anchorlink, h1:hover a.anchorlink, h3:hover a.anchorlink, h4:hover a.
border-radius: 0 var(--border-radius-medium) var(--border-radius-medium) 0;
}
}
/*
Bordered image
*/
html.dark-mode .darkmode_inverted_image img, /* < doxygen 1.9.3 */
html.dark-mode .darkmode_inverted_image object[type="image/svg+xml"] /* doxygen 1.9.3 */ {
filter: brightness(89%) hue-rotate(180deg) invert();
}
.bordered_image {
border-radius: var(--border-radius-small);
border: 1px solid var(--separator-color);
display: inline-block;
overflow: hidden;
}
.bordered_image:empty {
border: none;
}
html.dark-mode .bordered_image img, /* < doxygen 1.9.3 */
html.dark-mode .bordered_image object[type="image/svg+xml"] /* doxygen 1.9.3 */ {
border-radius: var(--border-radius-small);
}
/*
Button
*/
.primary-button {
display: inline-block;
cursor: pointer;
background: var(--primary-color);
color: var(--page-background-color) !important;
border-radius: var(--border-radius-medium);
padding: var(--spacing-small) var(--spacing-medium);
text-decoration: none;
}
.primary-button:hover {
background: var(--primary-dark-color);
}

View File

@@ -1,129 +0,0 @@
.github-corner svg {
fill: var(--primary-light-color);
color: var(--page-background-color);
width: 72px;
height: 72px;
}
@media screen and (max-width: 767px) {
.github-corner svg {
width: 50px;
height: 50px;
}
#projectnumber {
margin-right: 22px;
}
}
.title_screenshot {
filter: drop-shadow(0px 3px 10px rgba(0,0,0,0.22));
max-width: 500px;
margin: var(--spacing-large) 0;
}
.title_screenshot .caption {
display: none;
}
#theme-selection {
position: fixed;
bottom: 0;
left: 0;
background: var(--side-nav-background);
padding: 5px 2px 5px 8px;
box-shadow: 0 -4px 4px -2px var(--side-nav-background);
display: flex;
}
#theme-selection label {
border: 1px solid var(--separator-color);
border-right: 0;
color: var(--page-foreground-color);
font-size: var(--toc-font-size);
padding: 0 8px;
display: inline-block;
height: 22px;
box-sizing: border-box;
border-radius: var(--border-radius-medium) 0 0 var(--border-radius-medium);
line-height: 20px;
background: var(--page-background-color);
opacity: 0.7;
}
@media (prefers-color-scheme: dark) {
html:not(.light-mode) #theme-select {
background: url("data:image/svg+xml;utf8,<svg xmlns='http://www.w3.org/2000/svg' width='100' height='100' fill='%23aaaaaa'><polygon points='0,0 100,0 50,50'/></svg>") no-repeat;
background-size: 8px;
background-position: calc(100% - 6px) 65%;
background-color: var(--page-background-color);
}
}
html.dark-mode #theme-select {
background: url("data:image/svg+xml;utf8,<svg xmlns='http://www.w3.org/2000/svg' width='100' height='100' fill='%23aaaaaa'><polygon points='0,0 100,0 50,50'/></svg>") no-repeat;
background-size: 8px;
background-position: calc(100% - 6px) 65%;
background-color: var(--page-background-color);
}
#theme-select {
border: 1px solid var(--separator-color);
border-radius: 0 var(--border-radius-medium) var(--border-radius-medium) 0;
padding: 0;
height: 22px;
font-size: var(--toc-font-size);
font-family: var(--font-family);
width: 215px;
color: var(--primary-color);
border-left: 0;
display: inline-block;
opacity: 0.7;
outline: none;
-webkit-appearance: none;
-moz-appearance: none;
appearance: none;
background: url("data:image/svg+xml;utf8,<svg xmlns='http://www.w3.org/2000/svg' width='100' height='100' fill='%23888888'><polygon points='0,0 100,0 50,50'/></svg>") no-repeat;
background-size: 8px;
background-position: calc(100% - 6px) 65%;
background-repeat: no-repeat;
background-color: var(--page-background-color);
}
#theme-selection:hover #theme-select, #theme-selection:hover label {
opacity: 1;
}
#nav-tree-contents {
margin-bottom: 30px;
}
@media screen and (max-width: 767px) {
#theme-selection {
box-shadow: none;
background: none;
height: 20px;
}
#theme-select {
width: 80px;
opacity: 1;
}
#theme-selection label {
opacity: 1;
}
#nav-path ul li.navelem:first-child {
margin-left: 160px;
}
ul li.footer:not(:first-child) {
display: none;
}
#nav-path {
position: fixed;
bottom: 0;
background: var(--page-background-color);
}
}

View File

@@ -1,98 +0,0 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<meta name="generator" content="Doxygen $doxygenversion"/>
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<!-- BEGIN opengraph metadata -->
<meta property="og:title" content="Doxygen Awesome" />
<meta property="og:image" content="https://repository-images.githubusercontent.com/348492097/4f16df80-88fb-11eb-9d31-4015ff22c452" />
<meta property="og:description" content="Custom CSS theme for doxygen html-documentation with lots of customization parameters." />
<meta property="og:url" content="https://jothepro.github.io/doxygen-awesome-css/" />
<!-- END opengraph metadata -->
<!-- BEGIN twitter metadata -->
<meta name="twitter:image:src" content="https://repository-images.githubusercontent.com/348492097/4f16df80-88fb-11eb-9d31-4015ff22c452" />
<meta name="twitter:title" content="Doxygen Awesome" />
<meta name="twitter:description" content="Custom CSS theme for doxygen html-documentation with lots of customization parameters." />
<!-- END twitter metadata -->
<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
<link href="$relpath^tabs.css" rel="stylesheet" type="text/css"/>
<link rel="icon" type="image/svg+xml" href="logo.drawio.svg"/>
<script type="text/javascript" src="$relpath^jquery.js"></script>
<script type="text/javascript" src="$relpath^dynsections.js"></script>
<script type="text/javascript" src="$relpath^doxygen-awesome-darkmode-toggle.js"></script>
<script type="text/javascript" src="$relpath^doxygen-awesome-fragment-copy-button.js"></script>
<script type="text/javascript" src="$relpath^doxygen-awesome-paragraph-link.js"></script>
<script type="text/javascript" src="$relpath^doxygen-awesome-interactive-toc.js"></script>
<script type="text/javascript" src="$relpath^doxygen-awesome-tabs.js"></script>
<script type="text/javascript" src="$relpath^toggle-alternative-theme.js"></script>
<script type="text/javascript">
DoxygenAwesomeFragmentCopyButton.init()
DoxygenAwesomeDarkModeToggle.init()
DoxygenAwesomeParagraphLink.init()
DoxygenAwesomeInteractiveToc.init()
DoxygenAwesomeTabs.init()
</script>
$treeview
$search
$mathjax
<link href="$relpath^$stylesheet" rel="stylesheet" type="text/css" />
$extrastylesheet
</head>
<body>
<!-- https://tholman.com/github-corners/ -->
<a href="https://github.com/jothepro/doxygen-awesome-css" class="github-corner" title="View source on GitHub" target="_blank" rel="noopener noreferrer">
<svg viewBox="0 0 250 250" width="40" height="40" style="position: absolute; top: 0; border: 0; right: 0; z-index: 99;" aria-hidden="true">
<path d="M0,0 L115,115 L130,115 L142,142 L250,250 L250,0 Z"></path><path d="M128.3,109.0 C113.8,99.7 119.0,89.6 119.0,89.6 C122.0,82.7 120.5,78.6 120.5,78.6 C119.2,72.0 123.4,76.3 123.4,76.3 C127.3,80.9 125.5,87.3 125.5,87.3 C122.9,97.6 130.6,101.9 134.4,103.2" fill="currentColor" style="transform-origin: 130px 106px;" class="octo-arm"></path><path d="M115.0,115.0 C114.9,115.1 118.7,116.5 119.8,115.4 L133.7,101.6 C136.9,99.2 139.9,98.4 142.2,98.6 C133.8,88.0 127.5,74.4 143.8,58.0 C148.5,53.4 154.0,51.2 159.7,51.0 C160.3,49.4 163.2,43.6 171.4,40.1 C171.4,40.1 176.1,42.5 178.8,56.2 C183.1,58.6 187.2,61.8 190.9,65.4 C194.5,69.0 197.7,73.2 200.1,77.6 C213.8,80.2 216.3,84.9 216.3,84.9 C212.7,93.1 206.9,96.0 205.4,96.6 C205.1,102.4 203.0,107.8 198.3,112.5 C181.9,128.9 168.3,122.5 157.7,114.1 C157.9,116.9 156.7,120.9 152.7,124.9 L141.0,136.5 C139.8,137.7 141.6,141.9 141.8,141.8 Z" fill="currentColor" class="octo-body"></path></svg></a><style>.github-corner:hover .octo-arm{animation:octocat-wave 560ms ease-in-out}@keyframes octocat-wave{0%,100%{transform:rotate(0)}20%,60%{transform:rotate(-25deg)}40%,80%{transform:rotate(10deg)}}@media (max-width:500px){.github-corner:hover .octo-arm{animation:none}.github-corner .octo-arm{animation:octocat-wave 560ms ease-in-out}}</style>
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<!--BEGIN TITLEAREA-->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
<tbody>
<tr style="height: 56px;">
<!--BEGIN PROJECT_LOGO-->
<td id="projectlogo"><img alt="Logo" src="$relpath^$projectlogo"/></td>
<!--END PROJECT_LOGO-->
<!--BEGIN PROJECT_NAME-->
<td id="projectalign" style="padding-left: 0.5em;">
<div id="projectname">$projectname
<!--BEGIN PROJECT_NUMBER-->&#160;<span id="projectnumber">$projectnumber</span><!--END PROJECT_NUMBER-->
</div>
<!--BEGIN PROJECT_BRIEF--><div id="projectbrief">$projectbrief</div><!--END PROJECT_BRIEF-->
</td>
<!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME-->
<!--BEGIN PROJECT_BRIEF-->
<td style="padding-left: 0.5em;">
<div id="projectbrief">$projectbrief</div>
</td>
<!--END PROJECT_BRIEF-->
<!--END !PROJECT_NAME-->
<!--BEGIN DISABLE_INDEX-->
<!--BEGIN SEARCHENGINE-->
<td>$searchbox</td>
<!--END SEARCHENGINE-->
<!--END DISABLE_INDEX-->
</tr>
</tbody>
</table>
<div id="theme-selection">
<label for="theme-select">Theme:</label>
<select id="theme-select">
<option value="theme-default">Default</option>
<option value="theme-round">Round</option>
<option value="theme-robot">Robot</option>
</select>
</div>
</div>
<!--END TITLEAREA-->
<!-- end header part -->

View File

@@ -1,62 +0,0 @@
html.theme-robot {
/* primary theme color. This will affect the entire website's color scheme: links, arrows, labels, ... */
--primary-color: #1c89a4;
--primary-dark-color: #1a6f84;
--primary-light-color: #5abcd4;
--primary-lighter-color: #cae1f1;
--primary-lightest-color: #e9f1f8;
--fragment-background: #ececec;
--code-background: #ececec;
/* page base colors */
--page-background-color: white;
--page-foreground-color: #2c3e50;
--page-secondary-foreground-color: #67727e;
--border-radius-large: 0px;
--border-radius-small: 0px;
--border-radius-medium: 0px;
--spacing-small: 3px;
--spacing-medium: 6px;
--spacing-large: 12px;
--top-height: 125px;
--side-nav-background: var(--page-background-color);
--side-nav-foreground: var(--page-foreground-color);
--header-foreground: var(--side-nav-foreground);
--searchbar-border-radius: var(--border-radius-medium);
--header-background: var(--side-nav-background);
--header-foreground: var(--side-nav-foreground);
--toc-background: rgb(243, 240, 252);
--toc-foreground: var(--page-foreground-color);
--font-family: ui-monospace,SFMono-Regular,SF Mono,Menlo,Consolas,Liberation Mono,monospace;
--page-font-size: 14px;
--box-shadow: none;
--separator-color: #cdcdcd;
}
html.theme-robot.dark-mode {
color-scheme: dark;
--primary-color: #49cad3;
--primary-dark-color: #8ed2d7;
--primary-light-color: #377479;
--primary-lighter-color: #191e21;
--primary-lightest-color: #191a1c;
--fragment-background: #000000;
--code-background: #000000;
--page-background-color: #161616;
--page-foreground-color: #d2dbde;
--page-secondary-foreground-color: #555555;
--separator-color: #545454;
--toc-background: #20142C;
}

View File

@@ -1,55 +0,0 @@
html.theme-round {
/* primary theme color. This will affect the entire website's color scheme: links, arrows, labels, ... */
--primary-color: #AF7FE4;
--primary-dark-color: #9270E4;
--primary-light-color: #d2b7ef;
--primary-lighter-color: #cae1f1;
--primary-lightest-color: #e9f1f8;
/* page base colors */
--page-background-color: white;
--page-foreground-color: #2c3e50;
--page-secondary-foreground-color: #67727e;
--border-radius-large: 22px;
--border-radius-small: 9px;
--border-radius-medium: 14px;
--spacing-small: 8px;
--spacing-medium: 14px;
--spacing-large: 19px;
--spacing-xlarge: 21px;
--top-height: 125px;
--side-nav-background: #324067;
--side-nav-foreground: #F1FDFF;
--header-foreground: var(--side-nav-foreground);
--searchbar-background: var(--side-nav-foreground);
--searchbar-border-radius: var(--border-radius-medium);
--header-background: var(--side-nav-background);
--header-foreground: var(--side-nav-foreground);
--toc-background: rgb(243, 240, 252);
--toc-foreground: var(--page-foreground-color);
}
html.theme-round.dark-mode {
color-scheme: dark;
--primary-color: #AF7FE4;
--primary-dark-color: #715292;
--primary-light-color: #ae97c7;
--primary-lighter-color: #191e21;
--primary-lightest-color: #191a1c;
--page-background-color: #1C1D1F;
--page-foreground-color: #d2dbde;
--page-secondary-foreground-color: #859399;
--separator-color: #3a3246;
--side-nav-background: #171D32;
--side-nav-foreground: #F1FDFF;
--toc-background: #20142C;
--searchbar-background: var(--page-background-color);
}

View File

@@ -1,52 +0,0 @@
// Toggle between three theme states and persist the choice in localStorage
const THEME_CLASSES = ['theme-default', 'theme-round', 'theme-robot'];
// Allow switching via a button/function (e.g. for onclick in the HTML)
function toggleThemeVariant() {
let idx = getCurrentThemeIndex();
idx = (idx + 1) % THEME_CLASSES.length;
applyThemeClass(idx);
}
// Make the function globally available
window.toggleThemeVariant = toggleThemeVariant;
function getCurrentThemeIndex() {
const stored = localStorage.getItem('theme-variant');
if (stored === null) return 0;
const idx = THEME_CLASSES.indexOf(stored);
return idx === -1 ? 0 : idx;
}
function applyThemeClass(idx) {
document.documentElement.classList.remove(...THEME_CLASSES);
if (THEME_CLASSES[idx] && THEME_CLASSES[idx] !== 'theme-default') {
document.documentElement.classList.add(THEME_CLASSES[idx]);
}
localStorage.setItem('theme-variant', THEME_CLASSES[idx] || 'theme-default');
// Synchronize the select element, if present
const select = document.getElementById('theme-select');
if (select) select.value = THEME_CLASSES[idx];
}
function setThemeByName(themeName) {
const idx = THEME_CLASSES.indexOf(themeName);
applyThemeClass(idx === -1 ? 0 : idx);
}
document.addEventListener('DOMContentLoaded', () => {
const select = document.getElementById('theme-select');
if (select) {
// Initialize the selection from localStorage
const idx = getCurrentThemeIndex();
select.value = THEME_CLASSES[idx];
applyThemeClass(idx);
// Change the theme on selection
select.addEventListener('change', e => {
setThemeByName(e.target.value);
});
} else {
// Fallback: apply the theme anyway
applyThemeClass(getCurrentThemeIndex());
}
});

View File

@@ -0,0 +1,82 @@
<!-- HTML header for doxygen 1.9.7-->
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "https://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" lang="$langISO">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=11"/>
<meta name="generator" content="Doxygen $doxygenversion"/>
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
<link href="$relpath^tabs.css" rel="stylesheet" type="text/css"/>
<!--BEGIN DISABLE_INDEX-->
<!--BEGIN FULL_SIDEBAR-->
<script type="text/javascript">var page_layout=1;</script>
<!--END FULL_SIDEBAR-->
<!--END DISABLE_INDEX-->
<script type="text/javascript" src="$relpath^jquery.js"></script>
<script type="text/javascript" src="$relpath^dynsections.js"></script>
$treeview
$search
$mathjax
$darkmode
<link href="$relpath^$stylesheet" rel="stylesheet" type="text/css" />
$extrastylesheet
<script type="text/javascript" src="$relpath^doxygen-awesome-darkmode-toggle.js"></script>
<script type="text/javascript">
DoxygenAwesomeDarkModeToggle.init()
</script>
<script type="text/javascript" src="$relpath^doxygen-awesome-interactive-toc.js"></script>
<script type="text/javascript">
DoxygenAwesomeInteractiveToc.init()
</script>
</head>
<body>
<!--BEGIN DISABLE_INDEX-->
<!--BEGIN FULL_SIDEBAR-->
<div id="side-nav" class="ui-resizable side-nav-resizable"><!-- do not remove this div, it is closed by doxygen! -->
<!--END FULL_SIDEBAR-->
<!--END DISABLE_INDEX-->
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<!--BEGIN TITLEAREA-->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
<tbody>
<tr id="projectrow">
<!--BEGIN PROJECT_LOGO-->
<td id="projectlogo"><img alt="Logo" src="$relpath^$projectlogo"/></td>
<!--END PROJECT_LOGO-->
<!--BEGIN PROJECT_NAME-->
<td id="projectalign">
<div id="projectname">$projectname<!--BEGIN PROJECT_NUMBER--><span id="projectnumber">&#160;$projectnumber</span><!--END PROJECT_NUMBER-->
</div>
<!--BEGIN PROJECT_BRIEF--><div id="projectbrief">$projectbrief</div><!--END PROJECT_BRIEF-->
</td>
<!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME-->
<!--BEGIN PROJECT_BRIEF-->
<td>
<div id="projectbrief">$projectbrief</div>
</td>
<!--END PROJECT_BRIEF-->
<!--END !PROJECT_NAME-->
<!--BEGIN DISABLE_INDEX-->
<!--BEGIN SEARCHENGINE-->
<!--BEGIN !FULL_SIDEBAR-->
<td>$searchbox</td>
<!--END !FULL_SIDEBAR-->
<!--END SEARCHENGINE-->
<!--END DISABLE_INDEX-->
</tr>
<!--BEGIN SEARCHENGINE-->
<!--BEGIN FULL_SIDEBAR-->
<tr><td colspan="2">$searchbox</td></tr>
<!--END FULL_SIDEBAR-->
<!--END SEARCHENGINE-->
</tbody>
</table>
</div>
<!--END TITLEAREA-->
<!-- end header part -->

View File

@@ -31,7 +31,7 @@
"etl_sources": [
{
"ip": "127.0.0.1",
"ws_port": "6006",
"ws_port": "6005",
"grpc_port": "50051"
}
],
@@ -76,60 +76,38 @@
"parallel_requests_limit": 10, // Optional parameter, used only if "processing_strategy" is "parallel". It limits the number of requests for one client connection processed in parallel. Infinite if not specified.
// Max number of responses to queue up before they are sent successfully. If a client's waiting queue is too long, the server will close the connection.
"ws_max_sending_queue_size": 1500,
"__ng_web_server": false, // Use ng web server. This is a temporary setting which will be deleted after switching to ng web server
"proxy": {
"ips": [],
"tokens": []
}
"__ng_web_server": false // Use ng web server. This is a temporary setting which will be deleted after switching to ng web server
},
// Time in seconds for graceful shutdown. Defaults to 10 seconds. Not fully implemented yet.
"graceful_period": 10.0,
"log": {
// Overrides the log level per logging channel.
// Defaults to global "log.level" for each unspecified channel.
"channels": [
// Defaults to global "log_level" for each unspecified channel.
"log_channels": [
{
"channel": "Backend",
"level": "fatal"
"log_level": "fatal"
},
{
"channel": "WebServer",
"level": "info"
"log_level": "info"
},
{
"channel": "Subscriptions",
"level": "info"
"log_level": "info"
},
{
"channel": "RPC",
"level": "error"
"log_level": "error"
},
{
"channel": "ETL",
"level": "debug"
"log_level": "debug"
},
{
"channel": "Performance",
"level": "trace"
"log_level": "trace"
}
],
// The general logging level of Clio. This level is applied to all log channels that do not have an explicitly defined logging level.
"level": "info",
// Log format using spdlog format patterns (this is the default format)
"format": "%Y-%m-%d %H:%M:%S.%f %^%3!l:%n%$ - %v",
// Whether spdlog is asynchronous or not.
"is_async": true,
// Enables or disables logging to the console.
"enable_console": true,
// Clio logs to file in the specified directory only if "log.directory" is set
// "directory": "./clio_log",
// The log rotation size in megabytes. When the log file reaches this size, a new log file is started.
"rotation_size": 2048,
// The maximum number of log files in the directory.
"directory_max_files": 25,
// Log tag style to use
"tag_style": "uint"
},
"cache": {
// Configure this to use either "num_diffs", "num_cursors_from_diff", or "num_cursors_from_account". By default, Clio uses "num_diffs".
"num_diffs": 32, // Generate the cursors from the latest ledger diff, then use the cursors to partition the ledger to load concurrently. The cursors number is affected by the busyness of the network.
@@ -143,6 +121,16 @@
"enabled": true,
"compress_reply": true
},
"log_level": "info",
// Log format (this is the default format)
"log_format": "%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% %Message%",
"log_to_console": true,
// Clio logs to file in the specified directory only if "log_directory" is set
// "log_directory": "./clio_log",
"log_rotation_size": 2048,
"log_directory_max_size": 51200,
"log_rotation_hour_interval": 12,
"log_tag_style": "uint",
"extractor_threads": 8,
"read_only": false,
// "start_sequence": [integer] the ledger index to start from,

View File

@@ -1,3 +0,0 @@
.github-corner {
display: none !important;
}

docs/logging.md Normal file
View File

@@ -0,0 +1,76 @@
# Logging
Clio provides several logging options, all of which are configurable via the config file. They are detailed in the following sections.
## `log_level`
The minimum severity at which a log message is output by default. Severity options are `trace`, `debug`, `info`, `warning`, `error` and `fatal`. Defaults to `info`.
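For instance, to make Clio more verbose globally, the config file could set the following (a minimal sketch showing only this key):
```json
{
    "log_level": "debug"
}
```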
## `log_format`
The format of log lines produced by Clio. Defaults to `"%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% %Message%"`.
Each of the variables expands like so:
- `TimeStamp`: The full date and time of the log entry
- `SourceLocation`: A partial path to the C++ file and the line number in that file (`source/file/path:linenumber`)
- `ThreadID`: The ID of the thread the log entry is written from
- `Channel`: The channel that this log entry was sent to
- `Severity`: The severity (i.e. the log level) at which the entry was sent
- `Message`: The actual log message
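As an illustrative sketch, a shorter format that drops the source location and channel could be configured like so (the placeholders are the ones listed above; how each one renders is up to the logging backend):
```json
{
    "log_format": "%TimeStamp% [%ThreadID%] %Severity% %Message%"
}
```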
## `log_channels`
An array of JSON objects, each overriding properties for a logging `channel`.
> [!IMPORTANT]
> At the time of writing, only `log_level` can be overridden using this mechanism.
Each object is of this format:
```json
{
"channel": "Backend",
"log_level": "fatal"
}
```
If no override is present for a given channel, that channel will log at the severity specified by the global `log_level`.
The log channels that can be overridden are: `Backend`, `WebServer`, `Subscriptions`, `RPC`, `ETL` and `Performance`.
> [!NOTE]
> See [example-config.json](../docs/examples/config/example-config.json) for more details.
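Putting this together, a hypothetical `log_channels` array that silences the `Backend` channel while turning up `ETL` verbosity would look like:
```json
"log_channels": [
    {
        "channel": "Backend",
        "log_level": "fatal"
    },
    {
        "channel": "ETL",
        "log_level": "debug"
    }
]
```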
## `log_to_console`
Enables or disables log output to the console. Options are `true`/`false`. Defaults to `true`.
## `log_directory`
Path to the directory where log files are stored. If the directory doesn't exist, Clio will create it.
If the option is not specified, the logs are not written to a file.
## `log_rotation_size`
The maximum size of the log file in **megabytes** before it is rotated into a new file. Defaults to 2 GB.
## `log_directory_max_size`
The maximum size of the log directory in **megabytes** before old log files are deleted to free up space. Defaults to 50 GB.
## `log_rotation_hour_interval`
The time interval in **hours** after the last log rotation at which the current log file is automatically rotated. Defaults to 12 hours.
> [!NOTE]
> Log rotation based on time occurs in conjunction with size-based log rotation. For example, if a size-based log rotation occurs, the timer for the time-based rotation will reset.
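Taken together, the file-logging options above combine as in this sketch, with values mirroring the example config:
```json
{
    "log_directory": "./clio_log",
    "log_rotation_size": 2048,
    "log_directory_max_size": 51200,
    "log_rotation_hour_interval": 12
}
```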
## `log_tag_style`
Tag implementation to use. Must be one of:
- `uint`: Lock-free and thread-safe, but outputs just a simple unsigned integer
- `uuid`: Thread-safe; outputs a UUID tag
- `none`: Doesn't use tagging at all
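For example, to switch to UUID-based tags (one of the three options above):
```json
{
    "log_tag_style": "uuid"
}
```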

View File

@@ -5,14 +5,11 @@ import re
from pathlib import Path
PATTERN = r'R"JSON\((.*?)\)JSON"'
def use_uppercase(cpp_content: str) -> str:
return cpp_content.replace('R"json(', 'R"JSON(').replace(')json"', ')JSON"')
def fix_json_style(cpp_content: str) -> str:
cpp_content = cpp_content.replace('R"json(', 'R"JSON(').replace(')json"', ')JSON"')
pattern = r'R"JSON\((.*?)\)JSON"'
def replace_json(match):
raw_json = match.group(1)
@@ -32,51 +29,12 @@ def fix_json_style(cpp_content: str) -> str:
raw_json = raw_json.replace(f'":{digit}', f'": {digit}')
return f'R"JSON({raw_json})JSON"'
return re.sub(PATTERN, replace_json, cpp_content, flags=re.DOTALL)
def fix_colon_spacing(cpp_content: str) -> str:
def replace_json(match):
raw_json = match.group(1)
raw_json = re.sub(r'":\n\s*(\[|\{)', r'": \1', raw_json)
return f'R"JSON({raw_json})JSON"'
return re.sub(PATTERN, replace_json, cpp_content, flags=re.DOTALL)
def fix_indentation(cpp_content: str) -> str:
lines = cpp_content.splitlines()
def find_indentation(line: str) -> int:
return len(line) - len(line.lstrip())
for (line_num, (line, next_line)) in enumerate(zip(lines[:-1], lines[1:])):
if "JSON(" in line and ")JSON" not in line:
indent = find_indentation(line)
next_indent = find_indentation(next_line)
by_how_much = next_indent - (indent + 4)
if by_how_much != 0:
print(
f"Indentation error at line: {line_num + 2}: expected {indent + 4} spaces, found {next_indent} spaces"
)
for i in range(line_num + 1, len(lines)):
if ")JSON" in lines[i]:
lines[i] = " " * indent + lines[i].lstrip()
break
lines[i] = lines[i][by_how_much:] if by_how_much > 0 else " " * (-by_how_much) + lines[i]
return "\n".join(lines) + "\n"
return re.sub(pattern, replace_json, cpp_content, flags=re.DOTALL)
def process_file(file_path: Path, dry_run: bool) -> bool:
content = file_path.read_text(encoding="utf-8")
new_content = content
new_content = use_uppercase(new_content)
new_content = fix_json_style(new_content)
new_content = fix_colon_spacing(new_content)
new_content = fix_indentation(new_content)
new_content = fix_json_style(content)
if new_content != content:
print(f"Processing file: {file_path}")

View File

@@ -23,7 +23,6 @@
#include "util/build/Build.hpp"
#include "util/config/ConfigDescription.hpp"
#include <boost/program_options/errors.hpp>
#include <boost/program_options/options_description.hpp>
#include <boost/program_options/parsers.hpp>
#include <boost/program_options/positional_options.hpp>
@@ -57,22 +56,12 @@ CliArgs::parse(int argc, char const* argv[])
po::positional_options_description positional;
positional.add("conf", 1);
auto const printHelp = [&description]() {
std::cout << "Clio server " << util::build::getClioFullVersionString() << "\n\n" << description;
};
po::variables_map parsed;
try {
po::store(po::command_line_parser(argc, argv).options(description).positional(positional).run(), parsed);
po::notify(parsed);
} catch (po::error const& e) {
std::cerr << "Error: " << e.what() << std::endl << std::endl;
printHelp();
return Action{Action::Exit{EXIT_FAILURE}};
}
if (parsed.contains("help")) {
printHelp();
std::cout << "Clio server " << util::build::getClioFullVersionString() << "\n\n" << description;
return Action{Action::Exit{EXIT_SUCCESS}};
}

View File

@@ -178,9 +178,7 @@ ClioApplication::run(bool const useNgWebServer)
}
auto const adminVerifier = std::move(expectedAdminVerifier).value();
auto httpServer = web::ng::makeServer(
config_, OnConnectCheck{dosGuard}, IpChangeHook{dosGuard}, DisconnectHook{dosGuard}, ioc
);
auto httpServer = web::ng::makeServer(config_, OnConnectCheck{dosGuard}, DisconnectHook{dosGuard}, ioc);
if (not httpServer.has_value()) {
LOG(util::LogService::error()) << "Error creating web server: " << httpServer.error();
@@ -189,7 +187,6 @@ ClioApplication::run(bool const useNgWebServer)
httpServer->onGet("/metrics", MetricsHandler{adminVerifier});
httpServer->onGet("/health", HealthCheckHandler{});
httpServer->onGet("/cache_state", CacheStateHandler{cache});
auto requestHandler = RequestHandler{adminVerifier, handler};
httpServer->onPost("/", requestHandler);
httpServer->onWs(std::move(requestHandler));
@@ -215,7 +212,7 @@ ClioApplication::run(bool const useNgWebServer)
// Init the web server
auto handler = std::make_shared<web::RPCServerHandler<RPCEngineType>>(config_, backend, rpcEngine, etl, dosGuard);
auto const httpServer = web::makeHttpServer(config_, ioc, dosGuard, handler, cache);
auto const httpServer = web::makeHttpServer(config_, ioc, dosGuard, handler);
// Blocks until stopped.
// When stopped, shared_ptrs fall out of scope

View File

@@ -19,8 +19,6 @@
#include "app/Stopper.hpp"
#include "util/Spawn.hpp"
#include <boost/asio/spawn.hpp>
#include <functional>
@@ -38,7 +36,7 @@ Stopper::~Stopper()
void
Stopper::setOnStop(std::function<void(boost::asio::yield_context)> cb)
{
util::spawn(ctx_, std::move(cb));
boost::asio::spawn(ctx_, std::move(cb));
}
void

View File

@@ -108,8 +108,6 @@ public:
ioc.stop();
LOG(util::LogService::info()) << "io_context stopped";
util::LogService::shutdown();
};
}
};

View File

@@ -44,7 +44,7 @@ parseConfig(std::string_view configPath)
std::cerr << "Error parsing json from config: " << configPath << "\n" << json.error().error << std::endl;
return false;
}
auto const errors = getClioConfig().parse(json.value());
auto const errors = gClioConfig.parse(json.value());
if (errors.has_value()) {
for (auto const& err : errors.value()) {
std::cerr << "Issues found in provided config '" << configPath << "':\n";

View File

@@ -33,7 +33,6 @@
#include <memory>
#include <optional>
#include <string>
#include <utility>
namespace app {
@@ -55,17 +54,6 @@ OnConnectCheck::operator()(web::ng::Connection const& connection)
return {};
}
IpChangeHook::IpChangeHook(web::dosguard::DOSGuardInterface& dosguard) : dosguard_(dosguard)
{
}
void
IpChangeHook::operator()(std::string const& oldIp, std::string const& newIp)
{
dosguard_.get().decrement(oldIp);
dosguard_.get().increment(newIp);
}
DisconnectHook::DisconnectHook(web::dosguard::DOSGuardInterface& dosguard) : dosguard_{dosguard}
{
}
@@ -120,34 +108,4 @@ HealthCheckHandler::operator()(
return web::ng::Response{boost::beast::http::status::ok, kHEALTH_CHECK_HTML, request};
}
web::ng::Response
CacheStateHandler::operator()(
web::ng::Request const& request,
web::ng::ConnectionMetadata&,
web::SubscriptionContextPtr,
boost::asio::yield_context
)
{
static constexpr auto kCACHE_CHECK_LOADED_HTML = R"html(
<!DOCTYPE html>
<html>
<head><title>Cache state</title></head>
<body><h1>Cache state</h1><p>Cache is fully loaded</p></body>
</html>
)html";
static constexpr auto kCACHE_CHECK_NOT_LOADED_HTML = R"html(
<!DOCTYPE html>
<html>
<head><title>Cache state</title></head>
<body><h1>Cache state</h1><p>Cache is not yet loaded</p></body>
</html>
)html";
if (cache_.get().isFull())
return web::ng::Response{boost::beast::http::status::ok, kCACHE_CHECK_LOADED_HTML, request};
return web::ng::Response{boost::beast::http::status::service_unavailable, kCACHE_CHECK_NOT_LOADED_HTML, request};
}
} // namespace app

View File

@@ -19,7 +19,6 @@
#pragma once
#include "data/LedgerCacheInterface.hpp"
#include "rpc/Errors.hpp"
#include "util/log/Logger.hpp"
#include "web/AdminVerificationStrategy.hpp"
@@ -37,7 +36,6 @@
#include <exception>
#include <functional>
#include <memory>
#include <string>
#include <utility>
namespace app {
@@ -66,31 +64,6 @@ public:
operator()(web::ng::Connection const& connection);
};
/**
* @brief A function object that is called when the IP of a connection changes (usually if proxy detected).
* This is used to update the DOS guard.
*/
class IpChangeHook {
std::reference_wrapper<web::dosguard::DOSGuardInterface> dosguard_;
public:
/**
* @brief Construct a new IpChangeHook object.
*
* @param dosguard The DOS guard to use.
*/
IpChangeHook(web::dosguard::DOSGuardInterface& dosguard);
/**
* @brief The call of the function object.
*
* @param oldIp The old IP of the connection.
* @param newIp The new IP of the connection.
*/
void
operator()(std::string const& oldIp, std::string const& newIp);
};
/**
* @brief A function object to be called when a connection is disconnected.
*/
@@ -164,37 +137,6 @@ public:
);
};
/**
* @brief A function object that handles the cache state check endpoint.
*/
class CacheStateHandler {
std::reference_wrapper<data::LedgerCacheInterface const> cache_;
public:
/**
* @brief Construct a new CacheStateHandler object.
*
* @param cache The ledger cache to use.
*/
CacheStateHandler(data::LedgerCacheInterface const& cache) : cache_{cache}
{
}
/**
* @brief The call of the function object.
*
* @param request The request to handle.
* @return The response to the request
*/
web::ng::Response
operator()(
web::ng::Request const& request,
web::ng::ConnectionMetadata&,
web::SubscriptionContextPtr,
boost::asio::yield_context
);
};
/**
* @brief A function object that handles the websocket endpoint.
*

View File

@@ -21,16 +21,10 @@
#include "cluster/ClioNode.hpp"
#include "data/BackendInterface.hpp"
#include "util/Assert.hpp"
#include "util/Spawn.hpp"
#include "util/log/Logger.hpp"
#include <boost/asio/bind_cancellation_slot.hpp>
#include <boost/asio/cancellation_type.hpp>
#include <boost/asio/error.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/use_future.hpp>
#include <boost/json/parse.hpp>
#include <boost/json/serialize.hpp>
#include <boost/json/value.hpp>
@@ -41,16 +35,11 @@
#include <chrono>
#include <ctime>
#include <latch>
#include <memory>
#include <string>
#include <utility>
#include <vector>
namespace {
constexpr auto kTOTAL_WORKERS = 2uz; // 1 reading and 1 writing worker (coroutines)
} // namespace
namespace cluster {
ClusterCommunicationService::ClusterCommunicationService(
@@ -61,7 +50,6 @@ ClusterCommunicationService::ClusterCommunicationService(
: backend_(std::move(backend))
, readInterval_(readInterval)
, writeInterval_(writeInterval)
, finishedCountdown_(kTOTAL_WORKERS)
, selfData_{ClioNode{
.uuid = std::make_shared<boost::uuids::uuid>(boost::uuids::random_generator{}()),
.updateTime = std::chrono::system_clock::time_point{}
@@ -74,42 +62,22 @@ ClusterCommunicationService::ClusterCommunicationService(
void
ClusterCommunicationService::run()
{
ASSERT(not running_ and not stopped_, "Can only be ran once");
running_ = true;
util::spawn(strand_, [this](boost::asio::yield_context yield) {
boost::asio::spawn(strand_, [this](boost::asio::yield_context yield) {
boost::asio::steady_timer timer(yield.get_executor());
boost::system::error_code ec;
while (running_) {
while (true) {
timer.expires_after(readInterval_);
auto token = cancelSignal_.slot();
timer.async_wait(boost::asio::bind_cancellation_slot(token, yield[ec]));
if (ec == boost::asio::error::operation_aborted or not running_)
break;
timer.async_wait(yield);
doRead(yield);
}
finishedCountdown_.count_down(1);
});
util::spawn(strand_, [this](boost::asio::yield_context yield) {
boost::asio::spawn(strand_, [this](boost::asio::yield_context yield) {
boost::asio::steady_timer timer(yield.get_executor());
boost::system::error_code ec;
while (running_) {
while (true) {
doWrite();
timer.expires_after(writeInterval_);
auto token = cancelSignal_.slot();
timer.async_wait(boost::asio::bind_cancellation_slot(token, yield[ec]));
if (ec == boost::asio::error::operation_aborted or not running_)
break;
timer.async_wait(yield);
}
finishedCountdown_.count_down(1);
});
}
@@ -124,19 +92,9 @@ ClusterCommunicationService::stop()
if (stopped_)
return;
stopped_ = true;
// for ASAN to see through concurrency correctly we need to exit all coroutines before joining the ctx
running_ = false;
// cancelSignal_ is not thread safe so we execute emit on the same strand
boost::asio::spawn(
strand_, [this](auto&&) { cancelSignal_.emit(boost::asio::cancellation_type::all); }, boost::asio::use_future
)
.wait();
finishedCountdown_.wait();
ctx_.stop();
ctx_.join();
stopped_ = true;
}
std::shared_ptr<boost::uuids::uuid>
@@ -150,7 +108,7 @@ ClioNode
ClusterCommunicationService::selfData() const
{
ClioNode result{};
util::spawn(strand_, [this, &result](boost::asio::yield_context) { result = selfData_; });
boost::asio::spawn(strand_, [this, &result](boost::asio::yield_context) { result = selfData_; });
return result;
}
@@ -161,7 +119,7 @@ ClusterCommunicationService::clusterData() const
return std::unexpected{"Service is not healthy"};
}
std::vector<ClioNode> result;
util::spawn(strand_, [this, &result](boost::asio::yield_context) {
boost::asio::spawn(strand_, [this, &result](boost::asio::yield_context) {
result = otherNodesData_;
result.push_back(selfData_);
});

View File

@@ -27,15 +27,12 @@
#include "util/prometheus/Gauge.hpp"
#include "util/prometheus/Prometheus.hpp"
#include <boost/asio/cancellation_signal.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/strand.hpp>
#include <boost/asio/thread_pool.hpp>
#include <boost/uuid/uuid.hpp>
#include <atomic>
#include <chrono>
#include <latch>
#include <memory>
#include <string>
#include <vector>
@@ -68,14 +65,11 @@ class ClusterCommunicationService : public ClusterCommunicationServiceInterface
std::chrono::steady_clock::duration readInterval_;
std::chrono::steady_clock::duration writeInterval_;
boost::asio::cancellation_signal cancelSignal_;
std::latch finishedCountdown_;
std::atomic_bool running_ = false;
bool stopped_ = false;
ClioNode selfData_;
std::vector<ClioNode> otherNodesData_;
bool stopped_ = false;
public:
static constexpr std::chrono::milliseconds kDEFAULT_READ_INTERVAL{2100};
static constexpr std::chrono::milliseconds kDEFAULT_WRITE_INTERVAL{1200};

View File

@@ -68,6 +68,7 @@ struct Amendments {
/** @cond */
// NOLINTBEGIN(readability-identifier-naming)
REGISTER(OwnerPaysFee);
REGISTER(Flow);
REGISTER(FlowCross);
REGISTER(fix1513);
@@ -144,9 +145,6 @@ struct Amendments {
REGISTER(TokenEscrow);
REGISTER(fixAMMv1_3);
REGISTER(fixEnforceNFTokenTrustlineV2);
REGISTER(fixAMMClawbackRounding);
REGISTER(fixMPTDeliveredAmount);
REGISTER(fixPriceOracleOrder);
// Obsolete but supported by libxrpl
REGISTER(CryptoConditionsSuite);
@@ -155,7 +153,6 @@ struct Amendments {
REGISTER(fixNFTokenNegOffer);
// Retired amendments
REGISTER(OwnerPaysFee); // Removed in xrpl 2.6.0 (https://github.com/XRPLF/rippled/pull/5435)
REGISTER(MultiSign);
REGISTER(TrustSetAuth);
REGISTER(FeeEscalation);

View File

@@ -21,7 +21,6 @@
#include "data/BackendInterface.hpp"
#include "data/CassandraBackend.hpp"
#include "data/KeyspaceBackend.hpp"
#include "data/LedgerCacheInterface.hpp"
#include "data/cassandra/SettingsProvider.hpp"
#include "util/config/ConfigDefinition.hpp"
@@ -56,16 +55,10 @@ makeBackend(util::config::ClioConfigDefinition const& config, data::LedgerCacheI
if (boost::iequals(type, "cassandra")) {
auto const cfg = config.getObject("database." + type);
if (cfg.getValueView("provider").asString() == toString(cassandra::impl::Provider::Keyspace)) {
backend = std::make_shared<data::cassandra::KeyspaceBackend>(
data::cassandra::SettingsProvider{cfg}, cache, readOnly
);
} else {
backend = std::make_shared<data::cassandra::CassandraBackend>(
data::cassandra::SettingsProvider{cfg}, cache, readOnly
);
}
}
if (!backend)
throw std::runtime_error("Invalid database type");

View File

@@ -43,6 +43,11 @@
#include <utility>
#include <vector>
// local to compilation unit loggers
namespace {
util::Logger gLog{"Backend"};
} // namespace
/**
* @brief This namespace implements the data access layer and related components.
*
@@ -53,10 +58,10 @@ namespace data {
bool
BackendInterface::finishWrites(std::uint32_t const ledgerSequence)
{
LOG(log_.debug()) << "Want finish writes for " << ledgerSequence;
LOG(gLog.debug()) << "Want finish writes for " << ledgerSequence;
auto commitRes = doFinishWrites();
if (commitRes) {
LOG(log_.debug()) << "Successfully committed. Updating range now to " << ledgerSequence;
LOG(gLog.debug()) << "Successfully committed. Updating range now to " << ledgerSequence;
updateRange(ledgerSequence);
}
return commitRes;
@@ -84,15 +89,15 @@ BackendInterface::fetchLedgerObject(
{
auto obj = cache_.get().get(key, sequence);
if (obj) {
LOG(log_.trace()) << "Cache hit - " << ripple::strHex(key);
LOG(gLog.trace()) << "Cache hit - " << ripple::strHex(key);
return obj;
}
auto dbObj = doFetchLedgerObject(key, sequence, yield);
if (!dbObj) {
LOG(log_.trace()) << "Missed cache and missed in db";
LOG(gLog.trace()) << "Missed cache and missed in db";
} else {
LOG(log_.trace()) << "Missed cache but found in db";
LOG(gLog.trace()) << "Missed cache but found in db";
}
return dbObj;
}
@@ -106,7 +111,7 @@ BackendInterface::fetchLedgerObjectSeq(
{
auto seq = doFetchLedgerObjectSeq(key, sequence, yield);
if (!seq)
LOG(log_.trace()) << "Missed in db";
LOG(gLog.trace()) << "Missed in db";
return seq;
}
@@ -128,7 +133,7 @@ BackendInterface::fetchLedgerObjects(
misses.push_back(keys[i]);
}
}
LOG(log_.trace()) << "Cache hits = " << keys.size() - misses.size() << " - cache misses = " << misses.size();
LOG(gLog.trace()) << "Cache hits = " << keys.size() - misses.size() << " - cache misses = " << misses.size();
if (!misses.empty()) {
auto objs = doFetchLedgerObjects(misses, sequence, yield);
@@ -153,9 +158,9 @@ BackendInterface::fetchSuccessorKey(
{
auto succ = cache_.get().getSuccessor(key, ledgerSequence);
if (succ) {
LOG(log_.trace()) << "Cache hit - " << ripple::strHex(key);
LOG(gLog.trace()) << "Cache hit - " << ripple::strHex(key);
} else {
LOG(log_.trace()) << "Cache miss - " << ripple::strHex(key);
LOG(gLog.trace()) << "Cache miss - " << ripple::strHex(key);
}
return succ ? succ->key : doFetchSuccessorKey(key, ledgerSequence, yield);
}
@@ -205,7 +210,7 @@ BackendInterface::fetchBookOffers(
numSucc++;
succMillis += getMillis(mid2 - mid1);
if (!offerDir || offerDir->key >= bookEnd) {
LOG(log_.trace()) << "offerDir.has_value() " << offerDir.has_value() << " breaking";
LOG(gLog.trace()) << "offerDir.has_value() " << offerDir.has_value() << " breaking";
break;
}
uTipIndex = offerDir->key;
@@ -218,7 +223,7 @@ BackendInterface::fetchBookOffers(
keys.insert(keys.end(), indexes.begin(), indexes.end());
auto next = sle.getFieldU64(ripple::sfIndexNext);
if (next == 0u) {
LOG(log_.trace()) << "Next is empty. breaking";
LOG(gLog.trace()) << "Next is empty. breaking";
break;
}
auto nextKey = ripple::keylet::page(uTipIndex, next);
@@ -233,13 +238,13 @@ BackendInterface::fetchBookOffers(
auto mid = std::chrono::system_clock::now();
auto objs = fetchLedgerObjects(keys, ledgerSequence, yield);
for (size_t i = 0; i < keys.size() && i < limit; ++i) {
LOG(log_.trace()) << "Key = " << ripple::strHex(keys[i]) << " blob = " << ripple::strHex(objs[i])
LOG(gLog.trace()) << "Key = " << ripple::strHex(keys[i]) << " blob = " << ripple::strHex(objs[i])
<< " ledgerSequence = " << ledgerSequence;
ASSERT(!objs[i].empty(), "Ledger object can't be empty");
page.offers.push_back({keys[i], objs[i]});
}
auto end = std::chrono::system_clock::now();
LOG(log_.debug()) << "Fetching " << std::to_string(keys.size()) << " offers took "
LOG(gLog.debug()) << "Fetching " << std::to_string(keys.size()) << " offers took "
<< std::to_string(getMillis(mid - begin)) << " milliseconds. Fetching next dir took "
<< std::to_string(succMillis) << " milliseconds. Fetched next dir " << std::to_string(numSucc)
<< " times"
@@ -270,17 +275,14 @@ BackendInterface::updateRange(uint32_t newMax)
{
std::scoped_lock const lck(rngMtx_);
if (range_.has_value() && newMax < range_->maxSequence) {
ASSERT(
false,
"Range shouldn't exist yet or newMax should be at least range->maxSequence. newMax = {}, "
"range->maxSequence = {}",
!range_ || newMax >= range_->maxSequence,
"Range shouldn't exist yet or newMax should be greater. newMax = {}, range->maxSequence = {}",
newMax,
range_->maxSequence
);
}
if (!range_.has_value()) {
if (!range_) {
range_ = {.minSequence = newMax, .maxSequence = newMax};
} else {
range_->maxSequence = newMax;
@@ -336,13 +338,13 @@ BackendInterface::fetchLedgerPage(
if (!objects[i].empty()) {
page.objects.push_back({keys[i], std::move(objects[i])});
} else if (!outOfOrder) {
LOG(log_.error()) << "Deleted or non-existent object in successor table. key = " << ripple::strHex(keys[i])
LOG(gLog.error()) << "Deleted or non-existent object in successor table. key = " << ripple::strHex(keys[i])
<< " - seq = " << ledgerSequence;
std::stringstream msg;
for (size_t j = 0; j < objects.size(); ++j) {
msg << " - " << ripple::strHex(keys[j]);
}
LOG(log_.error()) << msg.str();
LOG(gLog.error()) << msg.str();
if (corruptionDetector_.has_value())
corruptionDetector_->onCorruptionDetected();
@@ -363,7 +365,7 @@ BackendInterface::fetchFees(std::uint32_t const seq, boost::asio::yield_context
auto bytes = fetchLedgerObject(key, seq, yield);
if (!bytes) {
LOG(log_.error()) << "Could not find fees";
LOG(gLog.error()) << "Could not find fees";
return {};
}

View File

@@ -23,7 +23,6 @@
#include "data/LedgerCacheInterface.hpp"
#include "data/Types.hpp"
#include "etl/CorruptionDetector.hpp"
#include "util/Spawn.hpp"
#include "util/log/Logger.hpp"
#include <boost/asio/executor_work_guard.hpp>
@@ -109,12 +108,14 @@ synchronous(FnType&& func)
using R = typename boost::result_of<FnType(boost::asio::yield_context)>::type;
if constexpr (!std::is_same_v<R, void>) {
R res;
util::spawn(ctx, [_ = boost::asio::make_work_guard(ctx), &func, &res](auto yield) { res = func(yield); });
boost::asio::spawn(ctx, [_ = boost::asio::make_work_guard(ctx), &func, &res](auto yield) {
res = func(yield);
});
ctx.run();
return res;
} else {
util::spawn(ctx, [_ = boost::asio::make_work_guard(ctx), &func](auto yield) { func(yield); });
boost::asio::spawn(ctx, [_ = boost::asio::make_work_guard(ctx), &func](auto yield) { func(yield); });
ctx.run();
}
}
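For context, this helper is what lets plain synchronous code drive a coroutine-based backend call to completion: the work guard keeps ctx.run() busy until the spawned coroutine finishes. A hypothetical call site, where backend, key and seq are placeholders:
auto const object = synchronous([&](boost::asio::yield_context yield) {
    return backend.fetchLedgerObject(key, seq, yield);  // runs inside the spawned coroutine
});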
@@ -138,7 +139,6 @@ synchronousAndRetryOnTimeout(FnType&& func)
*/
class BackendInterface {
protected:
util::Logger log_{"Backend"};
mutable std::shared_mutex rngMtx_;
std::optional<LedgerRange> range_;
std::reference_wrapper<LedgerCacheInterface> cache_;
@@ -295,7 +295,7 @@ public:
* @param account The account to fetch transactions for
* @param limit The maximum number of transactions per result page
* @param forward Whether to fetch the page forwards or backwards from the given cursor
* @param txnCursor The cursor to resume fetching from
* @param cursor The cursor to resume fetching from
* @param yield The coroutine context
* @return Results and a cursor to resume from
*/
@@ -304,7 +304,7 @@ public:
ripple::AccountID const& account,
std::uint32_t limit,
bool forward,
std::optional<TransactionsCursor> const& txnCursor,
std::optional<TransactionsCursor> const& cursor,
boost::asio::yield_context yield
) const = 0;

View File

@@ -19,15 +19,20 @@
#pragma once
#include "data/BackendInterface.hpp"
#include "data/DBHelpers.hpp"
#include "data/LedgerCacheInterface.hpp"
#include "data/LedgerHeaderCache.hpp"
#include "data/Types.hpp"
#include "data/cassandra/CassandraBackendFamily.hpp"
#include "data/cassandra/CassandraSchema.hpp"
#include "data/cassandra/Concepts.hpp"
#include "data/cassandra/Handle.hpp"
#include "data/cassandra/Schema.hpp"
#include "data/cassandra/SettingsProvider.hpp"
#include "data/cassandra/Types.hpp"
#include "data/cassandra/impl/ExecutionStrategy.hpp"
#include "util/Assert.hpp"
#include "util/LedgerUtils.hpp"
#include "util/Profiler.hpp"
#include "util/log/Logger.hpp"
#include <boost/asio/spawn.hpp>
@@ -45,18 +50,27 @@
#include <xrpl/protocol/nft.h>
#include <algorithm>
#include <atomic>
#include <chrono>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <limits>
#include <optional>
#include <stdexcept>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
class CacheBackendCassandraTest;
namespace data::cassandra {
/**
* @brief Implements @ref CassandraBackendFamily for Cassandra/ScyllaDB.
* @brief Implements @ref BackendInterface for Cassandra/ScyllaDB.
*
* Note: This is a safer and more correct rewrite of the original implementation of the backend.
*
* @tparam SettingsProviderType The settings provider type to use
* @tparam ExecutionStrategyType The execution strategy type to use
@@ -66,46 +80,156 @@ template <
SomeSettingsProvider SettingsProviderType,
SomeExecutionStrategy ExecutionStrategyType,
typename FetchLedgerCacheType = FetchLedgerCache>
class BasicCassandraBackend : public CassandraBackendFamily<
SettingsProviderType,
ExecutionStrategyType,
CassandraSchema<SettingsProviderType>,
FetchLedgerCacheType> {
using DefaultCassandraFamily = CassandraBackendFamily<
SettingsProviderType,
ExecutionStrategyType,
CassandraSchema<SettingsProviderType>,
FetchLedgerCacheType>;
class BasicCassandraBackend : public BackendInterface {
util::Logger log_{"Backend"};
SettingsProviderType settingsProvider_;
Schema<SettingsProviderType> schema_;
std::atomic_uint32_t ledgerSequence_ = 0u;
friend class ::CacheBackendCassandraTest;
// protected because CassandraMigrationBackend inherits from this class
protected:
using DefaultCassandraFamily::executor_;
using DefaultCassandraFamily::ledgerSequence_;
using DefaultCassandraFamily::log_;
using DefaultCassandraFamily::range_;
using DefaultCassandraFamily::schema_;
Handle handle_;
// has to be mutable because of BackendInterface constness :(
mutable ExecutionStrategyType executor_;
// TODO: move to interface level
mutable FetchLedgerCacheType ledgerCache_{};
public:
/**
* @brief Inherit the constructors of the base class.
* @brief Create a new cassandra/scylla backend instance.
*
* @param settingsProvider The settings provider to use
* @param cache The ledger cache to use
* @param readOnly Whether the database should be in readonly mode
*/
using DefaultCassandraFamily::DefaultCassandraFamily;
BasicCassandraBackend(SettingsProviderType settingsProvider, data::LedgerCacheInterface& cache, bool readOnly)
: BackendInterface(cache)
, settingsProvider_{std::move(settingsProvider)}
, schema_{settingsProvider_}
, handle_{settingsProvider_.getSettings()}
, executor_{settingsProvider_.getSettings(), handle_}
{
if (auto const res = handle_.connect(); not res)
throw std::runtime_error("Could not connect to database: " + res.error());
if (not readOnly) {
if (auto const res = handle_.execute(schema_.createKeyspace); not res) {
// On DataStax, creation of keyspaces can be configured to only be done through
// the admin interface. This does not mean that the keyspace does not already exist, though.
if (res.error().code() != CASS_ERROR_SERVER_UNAUTHORIZED)
throw std::runtime_error("Could not create keyspace: " + res.error());
}
if (auto const res = handle_.executeEach(schema_.createSchema); not res)
throw std::runtime_error("Could not create schema: " + res.error());
}
try {
schema_.prepareStatements(handle_);
} catch (std::runtime_error const& ex) {
auto const error = fmt::format(
"Failed to prepare the statements: {}; readOnly: {}. ReadOnly should be turned off or another Clio "
"node with write access to DB should be started first.",
ex.what(),
readOnly
);
LOG(log_.error()) << error;
throw std::runtime_error(error);
}
LOG(log_.info()) << "Created (revamped) CassandraBackend";
}
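A hypothetical construction of this backend via the CassandraBackend alias declared at the bottom of this file; settingsProvider, cache and the read-only flag are placeholders:
// Throws std::runtime_error if the DB is unreachable or, in read-only mode,
// if no writer has prepared the schema yet (see the constructor above).
CassandraBackend backend{std::move(settingsProvider), cache, /*readOnly=*/false};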
/**
* @brief Move constructor is deleted because handle_ is shared by reference with executor
*/
BasicCassandraBackend(BasicCassandraBackend&&) = delete;
TransactionsAndCursor
fetchAccountTransactions(
ripple::AccountID const& account,
std::uint32_t const limit,
bool forward,
std::optional<TransactionsCursor> const& cursorIn,
boost::asio::yield_context yield
) const override
{
auto rng = fetchLedgerRange();
if (!rng)
return {.txns = {}, .cursor = {}};
Statement const statement = [this, forward, &account]() {
if (forward)
return schema_->selectAccountTxForward.bind(account);
return schema_->selectAccountTx.bind(account);
}();
auto cursor = cursorIn;
if (cursor) {
statement.bindAt(1, cursor->asTuple());
LOG(log_.debug()) << "account = " << ripple::strHex(account) << " tuple = " << cursor->ledgerSequence
<< cursor->transactionIndex;
} else {
auto const seq = forward ? rng->minSequence : rng->maxSequence;
auto const placeHolder = forward ? 0u : std::numeric_limits<std::uint32_t>::max();
statement.bindAt(1, std::make_tuple(placeHolder, placeHolder));
LOG(log_.debug()) << "account = " << ripple::strHex(account) << " idx = " << seq
<< " tuple = " << placeHolder;
}
// FIXME: Limit is a hack to support uint32_t properly for the time
// being. Should be removed later and schema updated to use proper
// types.
statement.bindAt(2, Limit{limit});
auto const res = executor_.read(yield, statement);
auto const& results = res.value();
if (not results.hasRows()) {
LOG(log_.debug()) << "No rows returned";
return {};
}
std::vector<ripple::uint256> hashes = {};
auto numRows = results.numRows();
LOG(log_.info()) << "num_rows = " << numRows;
for (auto [hash, data] : extract<ripple::uint256, std::tuple<uint32_t, uint32_t>>(results)) {
hashes.push_back(hash);
if (--numRows == 0) {
LOG(log_.debug()) << "Setting cursor";
cursor = data;
}
}
auto const txns = fetchTransactions(hashes, yield);
LOG(log_.debug()) << "Txns = " << txns.size();
if (txns.size() == limit) {
LOG(log_.debug()) << "Returning cursor";
return {txns, cursor};
}
return {txns, {}};
}
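The returned cursor makes this call resumable: a full page (txns.size() == limit) comes back with a cursor to continue from, while a short page signals the end of the scan. A hypothetical paging loop, with backend, account, process and yield as placeholders:
std::optional<TransactionsCursor> cursor;
while (true) {
    auto const [txns, next] = backend.fetchAccountTransactions(account, 50, /*forward=*/true, cursor, yield);
    for (auto const& txn : txns)
        process(txn);  // placeholder consumer
    if (not next.has_value())
        break;  // short page: nothing left to fetch
    cursor = next;  // resume from here on the next iteration
}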
void
waitForWritesToFinish() override
{
executor_.sync();
}
bool
doFinishWrites() override
{
this->waitForWritesToFinish();
waitForWritesToFinish();
if (!range_) {
executor_.writeSync(schema_->updateLedgerRange, ledgerSequence_, false, ledgerSequence_);
}
if (not this->executeSyncUpdate(schema_->updateLedgerRange.bind(ledgerSequence_, true, ledgerSequence_ - 1))) {
if (not executeSyncUpdate(schema_->updateLedgerRange.bind(ledgerSequence_, true, ledgerSequence_ - 1))) {
LOG(log_.warn()) << "Update failed for ledger " << ledgerSequence_;
return false;
}
@@ -114,6 +238,271 @@ public:
return true;
}
void
writeLedger(ripple::LedgerHeader const& ledgerHeader, std::string&& blob) override
{
executor_.write(schema_->insertLedgerHeader, ledgerHeader.seq, std::move(blob));
executor_.write(schema_->insertLedgerHash, ledgerHeader.hash, ledgerHeader.seq);
ledgerSequence_ = ledgerHeader.seq;
}
std::optional<std::uint32_t>
fetchLatestLedgerSequence(boost::asio::yield_context yield) const override
{
if (auto const res = executor_.read(yield, schema_->selectLatestLedger); res) {
if (auto const& result = res.value(); result) {
if (auto const maybeValue = result.template get<uint32_t>(); maybeValue)
return maybeValue;
LOG(log_.error()) << "Could not fetch latest ledger - no rows";
return std::nullopt;
}
LOG(log_.error()) << "Could not fetch latest ledger - no result";
} else {
LOG(log_.error()) << "Could not fetch latest ledger: " << res.error();
}
return std::nullopt;
}
std::optional<ripple::LedgerHeader>
fetchLedgerBySequence(std::uint32_t const sequence, boost::asio::yield_context yield) const override
{
if (auto const lock = ledgerCache_.get(); lock.has_value() && lock->seq == sequence)
return lock->ledger;
auto const res = executor_.read(yield, schema_->selectLedgerBySeq, sequence);
if (res) {
if (auto const& result = res.value(); result) {
if (auto const maybeValue = result.template get<std::vector<unsigned char>>(); maybeValue) {
auto const header = util::deserializeHeader(ripple::makeSlice(*maybeValue));
ledgerCache_.put(FetchLedgerCache::CacheEntry{header, sequence});
return header;
}
LOG(log_.error()) << "Could not fetch ledger by sequence - no rows";
return std::nullopt;
}
LOG(log_.error()) << "Could not fetch ledger by sequence - no result";
} else {
LOG(log_.error()) << "Could not fetch ledger by sequence: " << res.error();
}
return std::nullopt;
}
std::optional<ripple::LedgerHeader>
fetchLedgerByHash(ripple::uint256 const& hash, boost::asio::yield_context yield) const override
{
if (auto const res = executor_.read(yield, schema_->selectLedgerByHash, hash); res) {
if (auto const& result = res.value(); result) {
if (auto const maybeValue = result.template get<uint32_t>(); maybeValue)
return fetchLedgerBySequence(*maybeValue, yield);
LOG(log_.error()) << "Could not fetch ledger by hash - no rows";
return std::nullopt;
}
LOG(log_.error()) << "Could not fetch ledger by hash - no result";
} else {
LOG(log_.error()) << "Could not fetch ledger by hash: " << res.error();
}
return std::nullopt;
}
std::optional<LedgerRange>
hardFetchLedgerRange(boost::asio::yield_context yield) const override
{
auto const res = executor_.read(yield, schema_->selectLedgerRange);
if (res) {
auto const& results = res.value();
if (not results.hasRows()) {
LOG(log_.debug()) << "Could not fetch ledger range - no rows";
return std::nullopt;
}
// TODO: this is probably a good place to use user type in
// cassandra instead of having two rows with bool flag. or maybe at
// least use tuple<int, int>?
LedgerRange range;
std::size_t idx = 0;
for (auto [seq] : extract<uint32_t>(results)) {
if (idx == 0) {
range.maxSequence = range.minSequence = seq;
} else if (idx == 1) {
range.maxSequence = seq;
}
++idx;
}
if (range.minSequence > range.maxSequence)
std::swap(range.minSequence, range.maxSequence);
LOG(log_.debug()) << "After hardFetchLedgerRange range is " << range.minSequence << ":"
<< range.maxSequence;
return range;
}
LOG(log_.error()) << "Could not fetch ledger range: " << res.error();
return std::nullopt;
}
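For context, the loop above reflects the two-row layout of the ledger_range table, where the is_latest flag distinguishes the minimum from the maximum sequence; the swap guards against the rows arriving in the other order. An illustrative sketch of the table contents (values made up):
// is_latest | sequence
// ----------+-----------
// False     | 32570       <- becomes range.minSequence
// True      | 95000000    <- becomes range.maxSequence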
std::vector<TransactionAndMetadata>
fetchAllTransactionsInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
{
auto hashes = fetchAllTransactionHashesInLedger(ledgerSequence, yield);
return fetchTransactions(hashes, yield);
}
std::vector<ripple::uint256>
fetchAllTransactionHashesInLedger(
std::uint32_t const ledgerSequence,
boost::asio::yield_context yield
) const override
{
auto start = std::chrono::system_clock::now();
auto const res = executor_.read(yield, schema_->selectAllTransactionHashesInLedger, ledgerSequence);
if (not res) {
LOG(log_.error()) << "Could not fetch all transaction hashes: " << res.error();
return {};
}
auto const& result = res.value();
if (not result.hasRows()) {
LOG(log_.warn()) << "Could not fetch all transaction hashes - no rows; ledger = "
<< std::to_string(ledgerSequence);
return {};
}
std::vector<ripple::uint256> hashes;
for (auto [hash] : extract<ripple::uint256>(result))
hashes.push_back(std::move(hash));
auto end = std::chrono::system_clock::now();
LOG(log_.debug()) << "Fetched " << hashes.size() << " transaction hashes from database in "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
<< " milliseconds";
return hashes;
}
std::optional<NFT>
fetchNFT(
ripple::uint256 const& tokenID,
std::uint32_t const ledgerSequence,
boost::asio::yield_context yield
) const override
{
auto const res = executor_.read(yield, schema_->selectNFT, tokenID, ledgerSequence);
if (not res)
return std::nullopt;
if (auto const maybeRow = res->template get<uint32_t, ripple::AccountID, bool>(); maybeRow) {
auto [seq, owner, isBurned] = *maybeRow;
auto result = std::make_optional<NFT>(tokenID, seq, owner, isBurned);
// Now fetch the URI. Usually we will have the URI even for burned NFTs,
// but if the first ledger on this clio instance included NFTokenBurn
// transactions we will not have the URIs for any of those tokens.
// In any other case, not having the URI indicates something went
// wrong with our data.
//
// TODO - in the future it would be great if any handlers that use
// this could inject a warning in this case (the case of not having
// a URI because it was burned in the first ledger) to indicate that
// even though we are returning a blank URI, the NFT might have had
// one.
auto uriRes = executor_.read(yield, schema_->selectNFTURI, tokenID, ledgerSequence);
if (uriRes) {
if (auto const maybeUri = uriRes->template get<ripple::Blob>(); maybeUri)
result->uri = *maybeUri;
}
return result;
}
LOG(log_.error()) << "Could not fetch NFT - no rows";
return std::nullopt;
}
TransactionsAndCursor
fetchNFTTransactions(
ripple::uint256 const& tokenID,
std::uint32_t const limit,
bool const forward,
std::optional<TransactionsCursor> const& cursorIn,
boost::asio::yield_context yield
) const override
{
auto rng = fetchLedgerRange();
if (!rng)
return {.txns = {}, .cursor = {}};
Statement const statement = [this, forward, &tokenID]() {
if (forward)
return schema_->selectNFTTxForward.bind(tokenID);
return schema_->selectNFTTx.bind(tokenID);
}();
auto cursor = cursorIn;
if (cursor) {
statement.bindAt(1, cursor->asTuple());
LOG(log_.debug()) << "token_id = " << ripple::strHex(tokenID) << " tuple = " << cursor->ledgerSequence
<< cursor->transactionIndex;
} else {
auto const seq = forward ? rng->minSequence : rng->maxSequence;
auto const placeHolder = forward ? 0 : std::numeric_limits<std::uint32_t>::max();
statement.bindAt(1, std::make_tuple(placeHolder, placeHolder));
LOG(log_.debug()) << "token_id = " << ripple::strHex(tokenID) << " idx = " << seq
<< " tuple = " << placeHolder;
}
statement.bindAt(2, Limit{limit});
auto const res = executor_.read(yield, statement);
auto const& results = res.value();
if (not results.hasRows()) {
LOG(log_.debug()) << "No rows returned";
return {};
}
std::vector<ripple::uint256> hashes = {};
auto numRows = results.numRows();
LOG(log_.info()) << "num_rows = " << numRows;
for (auto [hash, data] : extract<ripple::uint256, std::tuple<uint32_t, uint32_t>>(results)) {
hashes.push_back(hash);
if (--numRows == 0) {
LOG(log_.debug()) << "Setting cursor";
cursor = data;
// forward queries compare by ledger/tx sequence using `>=`,
// so we have to advance the index by one
if (forward)
++cursor->transactionIndex;
}
}
auto const txns = fetchTransactions(hashes, yield);
LOG(log_.debug()) << "NFT Txns = " << txns.size();
if (txns.size() == limit) {
LOG(log_.debug()) << "Returning cursor";
return {txns, cursor};
}
return {txns, {}};
}
NFTsAndCursor
fetchNFTsByIssuer(
ripple::AccountID const& issuer,
@@ -200,6 +589,208 @@ public:
return ret;
}
MPTHoldersAndCursor
fetchMPTHolders(
ripple::uint192 const& mptID,
std::uint32_t const limit,
std::optional<ripple::AccountID> const& cursorIn,
std::uint32_t const ledgerSequence,
boost::asio::yield_context yield
) const override
{
auto const holderEntries = executor_.read(
yield, schema_->selectMPTHolders, mptID, cursorIn.value_or(ripple::AccountID(0)), Limit{limit}
);
auto const& holderResults = holderEntries.value();
if (not holderResults.hasRows()) {
LOG(log_.debug()) << "No rows returned";
return {};
}
std::vector<ripple::uint256> mptKeys;
std::optional<ripple::AccountID> cursor;
for (auto const [holder] : extract<ripple::AccountID>(holderResults)) {
mptKeys.push_back(ripple::keylet::mptoken(mptID, holder).key);
cursor = holder;
}
auto mptObjects = doFetchLedgerObjects(mptKeys, ledgerSequence, yield);
auto it = std::remove_if(mptObjects.begin(), mptObjects.end(), [](Blob const& mpt) { return mpt.empty(); });
mptObjects.erase(it, mptObjects.end());
ASSERT(mptKeys.size() <= limit, "Number of keys can't exceed the limit");
if (mptKeys.size() == limit)
return {mptObjects, cursor};
return {mptObjects, {}};
}
std::optional<Blob>
doFetchLedgerObject(
ripple::uint256 const& key,
std::uint32_t const sequence,
boost::asio::yield_context yield
) const override
{
LOG(log_.debug()) << "Fetching ledger object for seq " << sequence << ", key = " << ripple::to_string(key);
if (auto const res = executor_.read(yield, schema_->selectObject, key, sequence); res) {
if (auto const result = res->template get<Blob>(); result) {
if (result->size())
return result;
} else {
LOG(log_.debug()) << "Could not fetch ledger object - no rows";
}
} else {
LOG(log_.error()) << "Could not fetch ledger object: " << res.error();
}
return std::nullopt;
}
std::optional<std::uint32_t>
doFetchLedgerObjectSeq(
ripple::uint256 const& key,
std::uint32_t const sequence,
boost::asio::yield_context yield
) const override
{
LOG(log_.debug()) << "Fetching ledger object for seq " << sequence << ", key = " << ripple::to_string(key);
if (auto const res = executor_.read(yield, schema_->selectObject, key, sequence); res) {
if (auto const result = res->template get<Blob, std::uint32_t>(); result) {
auto [_, seq] = result.value();
return seq;
}
LOG(log_.debug()) << "Could not fetch ledger object sequence - no rows";
} else {
LOG(log_.error()) << "Could not fetch ledger object sequence: " << res.error();
}
return std::nullopt;
}
std::optional<TransactionAndMetadata>
fetchTransaction(ripple::uint256 const& hash, boost::asio::yield_context yield) const override
{
if (auto const res = executor_.read(yield, schema_->selectTransaction, hash); res) {
if (auto const maybeValue = res->template get<Blob, Blob, uint32_t, uint32_t>(); maybeValue) {
auto [transaction, meta, seq, date] = *maybeValue;
return std::make_optional<TransactionAndMetadata>(transaction, meta, seq, date);
}
LOG(log_.debug()) << "Could not fetch transaction - no rows";
} else {
LOG(log_.error()) << "Could not fetch transaction: " << res.error();
}
return std::nullopt;
}
std::optional<ripple::uint256>
doFetchSuccessorKey(
ripple::uint256 key,
std::uint32_t const ledgerSequence,
boost::asio::yield_context yield
) const override
{
if (auto const res = executor_.read(yield, schema_->selectSuccessor, key, ledgerSequence); res) {
if (auto const result = res->template get<ripple::uint256>(); result) {
if (*result == kLAST_KEY)
return std::nullopt;
return result;
}
LOG(log_.debug()) << "Could not fetch successor - no rows";
} else {
LOG(log_.error()) << "Could not fetch successor: " << res.error();
}
return std::nullopt;
}
std::vector<TransactionAndMetadata>
fetchTransactions(std::vector<ripple::uint256> const& hashes, boost::asio::yield_context yield) const override
{
if (hashes.empty())
return {};
auto const numHashes = hashes.size();
std::vector<TransactionAndMetadata> results;
results.reserve(numHashes);
std::vector<Statement> statements;
statements.reserve(numHashes);
auto const timeDiff = util::timed([this, yield, &results, &hashes, &statements]() {
// TODO: seems like a job for "hash IN (list of hashes)" instead?
std::transform(
std::cbegin(hashes), std::cend(hashes), std::back_inserter(statements), [this](auto const& hash) {
return schema_->selectTransaction.bind(hash);
}
);
auto const entries = executor_.readEach(yield, statements);
std::transform(
std::cbegin(entries),
std::cend(entries),
std::back_inserter(results),
[](auto const& res) -> TransactionAndMetadata {
if (auto const maybeRow = res.template get<Blob, Blob, uint32_t, uint32_t>(); maybeRow)
return *maybeRow;
return {};
}
);
});
ASSERT(numHashes == results.size(), "Number of hashes and results must match");
LOG(log_.debug()) << "Fetched " << numHashes << " transactions from database in " << timeDiff
<< " milliseconds";
return results;
}
std::vector<Blob>
doFetchLedgerObjects(
std::vector<ripple::uint256> const& keys,
std::uint32_t const sequence,
boost::asio::yield_context yield
) const override
{
if (keys.empty())
return {};
auto const numKeys = keys.size();
LOG(log_.trace()) << "Fetching " << numKeys << " objects";
std::vector<Blob> results;
results.reserve(numKeys);
std::vector<Statement> statements;
statements.reserve(numKeys);
// TODO: seems like a job for "key IN (list of keys)" instead?
std::transform(
std::cbegin(keys), std::cend(keys), std::back_inserter(statements), [this, &sequence](auto const& key) {
return schema_->selectObject.bind(key, sequence);
}
);
auto const entries = executor_.readEach(yield, statements);
std::transform(
std::cbegin(entries), std::cend(entries), std::back_inserter(results), [](auto const& res) -> Blob {
if (auto const maybeValue = res.template get<Blob>(); maybeValue)
return *maybeValue;
return {};
}
);
LOG(log_.trace()) << "Fetched " << numKeys << " objects";
return results;
}
std::vector<ripple::uint256>
fetchAccountRoots(
std::uint32_t number,
@@ -228,7 +819,7 @@ public:
fullAccounts.push_back(ripple::keylet::account(account).key);
lastItem = account;
}
auto const objs = this->doFetchLedgerObjects(fullAccounts, seq, yield);
auto const objs = doFetchLedgerObjects(fullAccounts, seq, yield);
for (auto i = 0u; i < fullAccounts.size(); i++) {
if (not objs[i].empty()) {
@@ -247,6 +838,284 @@ public:
return liveAccounts;
}
std::vector<LedgerObject>
fetchLedgerDiff(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
{
auto const [keys, timeDiff] = util::timed([this, &ledgerSequence, yield]() -> std::vector<ripple::uint256> {
auto const res = executor_.read(yield, schema_->selectDiff, ledgerSequence);
if (not res) {
LOG(log_.error()) << "Could not fetch ledger diff: " << res.error() << "; ledger = " << ledgerSequence;
return {};
}
auto const& results = res.value();
if (not results) {
LOG(log_.error()) << "Could not fetch ledger diff - no rows; ledger = " << ledgerSequence;
return {};
}
std::vector<ripple::uint256> resultKeys;
for (auto [key] : extract<ripple::uint256>(results))
resultKeys.push_back(key);
return resultKeys;
});
// one of the above errors must have happened
if (keys.empty())
return {};
LOG(log_.debug()) << "Fetched " << keys.size() << " diff hashes from database in " << timeDiff
<< " milliseconds";
auto const objs = fetchLedgerObjects(keys, ledgerSequence, yield);
std::vector<LedgerObject> results;
results.reserve(keys.size());
std::transform(
std::cbegin(keys),
std::cend(keys),
std::cbegin(objs),
std::back_inserter(results),
[](auto const& key, auto const& obj) { return LedgerObject{key, obj}; }
);
return results;
}
std::optional<std::string>
fetchMigratorStatus(std::string const& migratorName, boost::asio::yield_context yield) const override
{
auto const res = executor_.read(yield, schema_->selectMigratorStatus, Text(migratorName));
if (not res) {
LOG(log_.error()) << "Could not fetch migrator status: " << res.error();
return {};
}
auto const& results = res.value();
if (not results) {
return {};
}
for (auto [statusString] : extract<std::string>(results))
return statusString;
return {};
}
std::expected<std::vector<std::pair<boost::uuids::uuid, std::string>>, std::string>
fetchClioNodesData(boost::asio::yield_context yield) const override
{
auto const readResult = executor_.read(yield, schema_->selectClioNodesData);
if (not readResult)
return std::unexpected{readResult.error().message()};
std::vector<std::pair<boost::uuids::uuid, std::string>> result;
for (auto [uuid, message] : extract<boost::uuids::uuid, std::string>(*readResult)) {
result.emplace_back(uuid, std::move(message));
}
return result;
}
void
doWriteLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob) override
{
LOG(log_.trace()) << " Writing ledger object " << key.size() << ":" << seq << " [" << blob.size() << " bytes]";
if (range_)
executor_.write(schema_->insertDiff, seq, key);
executor_.write(schema_->insertObject, std::move(key), seq, std::move(blob));
}
void
writeSuccessor(std::string&& key, std::uint32_t const seq, std::string&& successor) override
{
LOG(log_.trace()) << "Writing successor. key = " << key.size() << " bytes. "
<< " seq = " << std::to_string(seq) << " successor = " << successor.size() << " bytes.";
ASSERT(!key.empty(), "Key must not be empty");
ASSERT(!successor.empty(), "Successor must not be empty");
executor_.write(schema_->insertSuccessor, std::move(key), seq, std::move(successor));
}
void
writeAccountTransactions(std::vector<AccountTransactionsData> data) override
{
std::vector<Statement> statements;
statements.reserve(data.size() * 10); // assume 10 transactions avg
for (auto& record : data) {
std::ranges::transform(record.accounts, std::back_inserter(statements), [this, &record](auto&& account) {
return schema_->insertAccountTx.bind(
std::forward<decltype(account)>(account),
std::make_tuple(record.ledgerSequence, record.transactionIndex),
record.txHash
);
});
}
executor_.write(std::move(statements));
}
void
writeAccountTransaction(AccountTransactionsData record) override
{
std::vector<Statement> statements;
statements.reserve(record.accounts.size());
std::ranges::transform(record.accounts, std::back_inserter(statements), [this, &record](auto&& account) {
return schema_->insertAccountTx.bind(
std::forward<decltype(account)>(account),
std::make_tuple(record.ledgerSequence, record.transactionIndex),
record.txHash
);
});
executor_.write(std::move(statements));
}
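For context, each AccountTransactionsData record fans out into one insertAccountTx statement per affected account, keyed by the account plus a (ledgerSequence, transactionIndex) tuple and the transaction hash. A hypothetical record showing the fan-out; the values and account IDs are made up:
// One transaction touching two accounts produces two statements.
AccountTransactionsData record;
record.ledgerSequence = 95000000;  // illustrative
record.transactionIndex = 7;       // illustrative
record.txHash = someHash;          // placeholder
record.accounts = {alice, bob};    // placeholders: the two affected accounts
backend.writeAccountTransaction(std::move(record));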
void
writeNFTTransactions(std::vector<NFTTransactionsData> const& data) override
{
std::vector<Statement> statements;
statements.reserve(data.size());
std::ranges::transform(data, std::back_inserter(statements), [this](auto const& record) {
return schema_->insertNFTTx.bind(
record.tokenID, std::make_tuple(record.ledgerSequence, record.transactionIndex), record.txHash
);
});
executor_.write(std::move(statements));
}
void
writeTransaction(
std::string&& hash,
std::uint32_t const seq,
std::uint32_t const date,
std::string&& transaction,
std::string&& metadata
) override
{
LOG(log_.trace()) << "Writing txn to database";
executor_.write(schema_->insertLedgerTransaction, seq, hash);
executor_.write(
schema_->insertTransaction, std::move(hash), seq, date, std::move(transaction), std::move(metadata)
);
}
void
writeNFTs(std::vector<NFTsData> const& data) override
{
std::vector<Statement> statements;
statements.reserve(data.size() * 3);
for (NFTsData const& record : data) {
if (!record.onlyUriChanged) {
statements.push_back(
schema_->insertNFT.bind(record.tokenID, record.ledgerSequence, record.owner, record.isBurned)
);
// If `uri` is set (and it can be set to an empty uri), we know this
// is a net-new NFT. That is, this NFT has not been seen before by
// us _OR_ it is in the extreme edge case of a re-minted NFT ID with
// the same NFT ID as an already-burned token. In this case, we need
// to record the URI and link to the issuer_nf_tokens table.
if (record.uri) {
statements.push_back(schema_->insertIssuerNFT.bind(
ripple::nft::getIssuer(record.tokenID),
static_cast<uint32_t>(ripple::nft::getTaxon(record.tokenID)),
record.tokenID
));
statements.push_back(
schema_->insertNFTURI.bind(record.tokenID, record.ledgerSequence, record.uri.value())
);
}
} else {
// only uri changed, we update the uri table only
statements.push_back(
schema_->insertNFTURI.bind(record.tokenID, record.ledgerSequence, record.uri.value())
);
}
}
executor_.writeEach(std::move(statements));
}
void
writeMPTHolders(std::vector<MPTHolderData> const& data) override
{
std::vector<Statement> statements;
statements.reserve(data.size());
for (auto [mptId, holder] : data)
statements.push_back(schema_->insertMPTHolder.bind(mptId, holder));
executor_.write(std::move(statements));
}
void
startWrites() const override
{
// Note: this is a no-op in the original implementation too;
// it was probably used in the PostgreSQL backend to start a transaction or something similar.
}
void
writeMigratorStatus(std::string const& migratorName, std::string const& status) override
{
executor_.writeSync(
schema_->insertMigratorStatus, data::cassandra::Text{migratorName}, data::cassandra::Text(status)
);
}
void
writeNodeMessage(boost::uuids::uuid const& uuid, std::string message) override
{
executor_.writeSync(schema_->updateClioNodeMessage, data::cassandra::Text{std::move(message)}, uuid);
}
bool
isTooBusy() const override
{
return executor_.isTooBusy();
}
boost::json::object
stats() const override
{
return executor_.stats();
}
private:
bool
executeSyncUpdate(Statement statement)
{
auto const res = executor_.writeSync(statement);
auto maybeSuccess = res->template get<bool>();
if (not maybeSuccess) {
LOG(log_.error()) << "executeSyncUpdate - error getting result - no row";
return false;
}
if (not maybeSuccess.value()) {
LOG(log_.warn()) << "Update failed. Checking if DB state is what we expect";
// error may indicate that another writer wrote something.
// in this case let's just compare the current state of things
// against what we were trying to write in the first place and
// use that as the source of truth for the result.
auto rng = hardFetchLedgerRangeNoThrow();
return rng && rng->maxSequence == ledgerSequence_;
}
return true;
}
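For context, writeSync above executes a Cassandra lightweight transaction, and get<bool>() reads the [applied] column of the single row the server returns; on a lost race the code re-reads the range and treats the database already holding the desired value as success. An illustrative round-trip (values made up):
// UPDATE ledger_range SET sequence = 101 WHERE is_latest = True IF sequence = 100
//
// [applied] | sequence
// ----------+----------
// False     | 101   <- another writer advanced the range first;
//                      hardFetchLedgerRangeNoThrow() then confirms the state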
};
using CassandraBackend = BasicCassandraBackend<SettingsProvider, impl::DefaultExecutionStrategy<>>;

View File

@@ -1,308 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include "data/LedgerHeaderCache.hpp"
#include "data/Types.hpp"
#include "data/cassandra/CassandraBackendFamily.hpp"
#include "data/cassandra/Concepts.hpp"
#include "data/cassandra/KeyspaceSchema.hpp"
#include "data/cassandra/SettingsProvider.hpp"
#include "data/cassandra/Types.hpp"
#include "data/cassandra/impl/ExecutionStrategy.hpp"
#include "util/Assert.hpp"
#include "util/log/Logger.hpp"
#include <boost/asio/spawn.hpp>
#include <boost/json/object.hpp>
#include <boost/uuid/string_generator.hpp>
#include <boost/uuid/uuid.hpp>
#include <cassandra.h>
#include <fmt/format.h>
#include <xrpl/basics/Blob.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/basics/strHex.h>
#include <xrpl/protocol/AccountID.h>
#include <xrpl/protocol/Indexes.h>
#include <xrpl/protocol/LedgerHeader.h>
#include <xrpl/protocol/nft.h>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <optional>
#include <stdexcept>
#include <utility>
#include <vector>
namespace data::cassandra {
/**
* @brief Implements @ref CassandraBackendFamily for Keyspace
*
* @tparam SettingsProviderType The settings provider type to use
* @tparam ExecutionStrategyType The execution strategy type to use
* @tparam FetchLedgerCacheType The ledger header cache type to use
*/
template <
SomeSettingsProvider SettingsProviderType,
SomeExecutionStrategy ExecutionStrategyType,
typename FetchLedgerCacheType = FetchLedgerCache>
class BasicKeyspaceBackend : public CassandraBackendFamily<
SettingsProviderType,
ExecutionStrategyType,
KeyspaceSchema<SettingsProviderType>,
FetchLedgerCacheType> {
using DefaultCassandraFamily = CassandraBackendFamily<
SettingsProviderType,
ExecutionStrategyType,
KeyspaceSchema<SettingsProviderType>,
FetchLedgerCacheType>;
using DefaultCassandraFamily::executor_;
using DefaultCassandraFamily::ledgerSequence_;
using DefaultCassandraFamily::log_;
using DefaultCassandraFamily::range_;
using DefaultCassandraFamily::schema_;
public:
/**
* @brief Inherit the constructors of the base class.
*/
using DefaultCassandraFamily::DefaultCassandraFamily;
/**
* @brief Move constructor is deleted because handle_ is shared by reference with executor
*/
BasicKeyspaceBackend(BasicKeyspaceBackend&&) = delete;
bool
doFinishWrites() override
{
this->waitForWritesToFinish();
// !range_.has_value() means the table 'ledger_range' is not populated;
// This would be the first write to the table.
// In this case, insert both the min_sequence and max_sequence rows into the table.
if (not(range_.has_value())) {
executor_.writeSync(schema_->insertLedgerRange, false, ledgerSequence_);
executor_.writeSync(schema_->insertLedgerRange, true, ledgerSequence_);
}
if (not this->executeSyncUpdate(schema_->updateLedgerRange.bind(ledgerSequence_, true, ledgerSequence_ - 1))) {
log_.warn() << "Update failed for ledger " << ledgerSequence_;
return false;
}
log_.info() << "Committed ledger " << ledgerSequence_;
return true;
}
NFTsAndCursor
fetchNFTsByIssuer(
ripple::AccountID const& issuer,
std::optional<std::uint32_t> const& taxon,
std::uint32_t const ledgerSequence,
std::uint32_t const limit,
std::optional<ripple::uint256> const& cursorIn,
boost::asio::yield_context yield
) const override
{
std::vector<ripple::uint256> nftIDs;
if (taxon.has_value()) {
// Keyspaces and ScyllaDB use the same logic for taxon-filtered queries
nftIDs = fetchNFTIDsByTaxon(issuer, *taxon, limit, cursorIn, yield);
} else {
// --- Amazon Keyspaces Workflow for non-taxon queries ---
auto const startTaxon = cursorIn.has_value() ? ripple::nft::toUInt32(ripple::nft::getTaxon(*cursorIn)) : 0;
auto const startTokenID = cursorIn.value_or(ripple::uint256(0));
Statement firstQuery = schema_->selectNFTIDsByIssuerTaxon.bind(issuer);
firstQuery.bindAt(1, startTaxon);
firstQuery.bindAt(2, startTokenID);
firstQuery.bindAt(3, Limit{limit});
auto const firstRes = executor_.read(yield, firstQuery);
if (firstRes) {
for (auto const [nftID] : extract<ripple::uint256>(firstRes.value()))
nftIDs.push_back(nftID);
}
if (nftIDs.size() < limit) {
auto const remainingLimit = limit - nftIDs.size();
Statement secondQuery = schema_->selectNFTsAfterTaxonKeyspaces.bind(issuer);
secondQuery.bindAt(1, startTaxon);
secondQuery.bindAt(2, Limit{remainingLimit});
auto const secondRes = executor_.read(yield, secondQuery);
if (secondRes) {
for (auto const [nftID] : extract<ripple::uint256>(secondRes.value()))
nftIDs.push_back(nftID);
}
}
}
return populateNFTsAndCreateCursor(nftIDs, ledgerSequence, limit, yield);
}
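A note on the non-taxon branch above: the plain Cassandra schema pages with a tuple comparison ((taxon, token_id) > ?), which Amazon Keyspaces does not support, so the same page is assembled from two queries instead; this is an inference from the two schemas in this diff. A comment sketch of the emulation, assuming the first statement filters taxon = ? AND token_id > ? (its body is not shown here):
// Emulating "(taxon, token_id) > (t, id)" without tuple comparisons:
//   query 1: taxon = t AND token_id > id  -- drain the cursor's taxon first
//   query 2: taxon > t                    -- then strictly later taxons,
//                                            limited to whatever is left of `limit`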
/**
* @brief (Unsupported in Keyspaces) Fetches account root object indexes by page.
*
* @note Loading the cache by enumerating all accounts is currently unsupported by the AWS Keyspaces backend.
* This function's logic relies on "PER PARTITION LIMIT 1", which Keyspaces does not support, and there is
* no efficient alternative. This is acceptable as the cache is primarily loaded via diffs. Calling this
* function will throw an exception.
*
* @param number The total number of accounts to fetch.
* @param pageSize The maximum number of accounts per page.
* @param seq The accounts need to exist at this ledger sequence.
* @param yield The coroutine context.
* @return A vector of ripple::uint256 representing the account root hashes.
*/
std::vector<ripple::uint256>
fetchAccountRoots(
[[maybe_unused]] std::uint32_t number,
[[maybe_unused]] std::uint32_t pageSize,
[[maybe_unused]] std::uint32_t seq,
[[maybe_unused]] boost::asio::yield_context yield
) const override
{
ASSERT(false, "Fetching account roots is not supported by the Keyspaces backend.");
std::unreachable();
}
private:
std::vector<ripple::uint256>
fetchNFTIDsByTaxon(
ripple::AccountID const& issuer,
std::uint32_t const taxon,
std::uint32_t const limit,
std::optional<ripple::uint256> const& cursorIn,
boost::asio::yield_context yield
) const
{
std::vector<ripple::uint256> nftIDs;
Statement statement = schema_->selectNFTIDsByIssuerTaxon.bind(issuer);
statement.bindAt(1, taxon);
statement.bindAt(2, cursorIn.value_or(ripple::uint256(0)));
statement.bindAt(3, Limit{limit});
auto const res = executor_.read(yield, statement);
if (res && res.value().hasRows()) {
for (auto const [nftID] : extract<ripple::uint256>(res.value()))
nftIDs.push_back(nftID);
}
return nftIDs;
}
std::vector<ripple::uint256>
fetchNFTIDsWithoutTaxon(
ripple::AccountID const& issuer,
std::uint32_t const limit,
std::optional<ripple::uint256> const& cursorIn,
boost::asio::yield_context yield
) const
{
std::vector<ripple::uint256> nftIDs;
auto const startTaxon = cursorIn.has_value() ? ripple::nft::toUInt32(ripple::nft::getTaxon(*cursorIn)) : 0;
auto const startTokenID = cursorIn.value_or(ripple::uint256(0));
Statement firstQuery = schema_->selectNFTIDsByIssuerTaxon.bind(issuer);
firstQuery.bindAt(1, startTaxon);
firstQuery.bindAt(2, startTokenID);
firstQuery.bindAt(3, Limit{limit});
auto const firstRes = executor_.read(yield, firstQuery);
if (firstRes) {
for (auto const [nftID] : extract<ripple::uint256>(firstRes.value()))
nftIDs.push_back(nftID);
}
if (nftIDs.size() < limit) {
auto const remainingLimit = limit - nftIDs.size();
Statement secondQuery = schema_->selectNFTsAfterTaxonKeyspaces.bind(issuer);
secondQuery.bindAt(1, startTaxon);
secondQuery.bindAt(2, Limit{remainingLimit});
auto const secondRes = executor_.read(yield, secondQuery);
if (secondRes) {
for (auto const [nftID] : extract<ripple::uint256>(secondRes.value()))
nftIDs.push_back(nftID);
}
}
return nftIDs;
}
/**
* @brief Takes a list of NFT IDs, fetches their full data, and assembles the final result with a cursor.
*/
NFTsAndCursor
populateNFTsAndCreateCursor(
std::vector<ripple::uint256> const& nftIDs,
std::uint32_t const ledgerSequence,
std::uint32_t const limit,
boost::asio::yield_context yield
) const
{
if (nftIDs.empty()) {
LOG(log_.debug()) << "No rows returned";
return {};
}
NFTsAndCursor ret;
if (nftIDs.size() == limit)
ret.cursor = nftIDs.back();
// Prepare and execute queries to fetch NFT info and URIs in parallel.
std::vector<Statement> selectNFTStatements;
selectNFTStatements.reserve(nftIDs.size());
std::transform(
std::cbegin(nftIDs), std::cend(nftIDs), std::back_inserter(selectNFTStatements), [&](auto const& nftID) {
return schema_->selectNFT.bind(nftID, ledgerSequence);
}
);
std::vector<Statement> selectNFTURIStatements;
selectNFTURIStatements.reserve(nftIDs.size());
std::transform(
std::cbegin(nftIDs), std::cend(nftIDs), std::back_inserter(selectNFTURIStatements), [&](auto const& nftID) {
return schema_->selectNFTURI.bind(nftID, ledgerSequence);
}
);
auto const nftInfos = executor_.readEach(yield, selectNFTStatements);
auto const nftUris = executor_.readEach(yield, selectNFTURIStatements);
// Combine the results into final NFT objects.
for (auto i = 0u; i < nftIDs.size(); ++i) {
if (auto const maybeRow = nftInfos[i].template get<uint32_t, ripple::AccountID, bool>(); maybeRow) {
auto [seq, owner, isBurned] = *maybeRow;
NFT nft(nftIDs[i], seq, owner, isBurned);
if (auto const maybeUri = nftUris[i].template get<ripple::Blob>(); maybeUri)
nft.uri = *maybeUri;
ret.nfts.push_back(nft);
}
}
return ret;
}
};
using KeyspaceBackend = BasicKeyspaceBackend<SettingsProvider, impl::DefaultExecutionStrategy<>>;
} // namespace data::cassandra

File diff suppressed because it is too large

View File

@@ -1,178 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include "data/cassandra/Concepts.hpp"
#include "data/cassandra/Handle.hpp"
#include "data/cassandra/Schema.hpp"
#include "data/cassandra/SettingsProvider.hpp"
#include "data/cassandra/Types.hpp"
#include "util/log/Logger.hpp"
#include <boost/json/string.hpp>
#include <fmt/compile.h>
#include <functional>
#include <memory>
namespace data::cassandra {
/**
* @brief Manages the DB schema and provides access to prepared statements.
*/
template <SomeSettingsProvider SettingsProviderType>
class CassandraSchema : public Schema<SettingsProvider> {
using Schema::Schema;
public:
/**
* @brief Construct a new Cassandra Schema object
*
* @param settingsProvider The settings provider
*/
struct CassandraStatements : public Schema<SettingsProvider>::Statements {
using Schema<SettingsProvider>::Statements::Statements;
//
// Update (and "delete") queries
//
PreparedStatement updateLedgerRange = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
UPDATE {}
SET sequence = ?
WHERE is_latest = ?
IF sequence IN (?, null)
)",
qualifiedTableName(settingsProvider_.get(), "ledger_range")
)
);
}();
//
// Select queries
//
PreparedStatement selectNFTIDsByIssuer = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
SELECT token_id
FROM {}
WHERE issuer = ?
AND (taxon, token_id) > ?
ORDER BY taxon ASC, token_id ASC
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")
)
);
}();
PreparedStatement selectAccountFromBeginning = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
SELECT account
FROM {}
WHERE token(account) > 0
PER PARTITION LIMIT 1
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "account_tx")
)
);
}();
PreparedStatement selectAccountFromToken = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
SELECT account
FROM {}
WHERE token(account) > token(?)
PER PARTITION LIMIT 1
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "account_tx")
)
);
}();
PreparedStatement selectLedgerPageKeys = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
SELECT key
FROM {}
WHERE TOKEN(key) >= ?
AND sequence <= ?
PER PARTITION LIMIT 1
LIMIT ?
ALLOW FILTERING
)",
qualifiedTableName(settingsProvider_.get(), "objects")
)
);
}();
PreparedStatement selectLedgerPage = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
SELECT object, key
FROM {}
WHERE TOKEN(key) >= ?
AND sequence <= ?
PER PARTITION LIMIT 1
LIMIT ?
ALLOW FILTERING
)",
qualifiedTableName(settingsProvider_.get(), "objects")
)
);
}();
};
void
prepareStatements(Handle const& handle) override
{
LOG(log_.info()) << "Preparing cassandra statements";
statements_ = std::make_unique<CassandraStatements>(settingsProvider_, handle);
LOG(log_.info()) << "Finished preparing statements";
}
/**
* @brief Provides access to statements.
*
* @return The statements
*/
std::unique_ptr<CassandraStatements> const&
operator->() const
{
return statements_;
}
private:
std::unique_ptr<CassandraStatements> statements_{nullptr};
};
} // namespace data::cassandra

View File

@@ -1,140 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include "data/cassandra/Concepts.hpp"
#include "data/cassandra/Handle.hpp"
#include "data/cassandra/Schema.hpp"
#include "data/cassandra/SettingsProvider.hpp"
#include "data/cassandra/Types.hpp"
#include "util/log/Logger.hpp"
#include <boost/json/string.hpp>
#include <fmt/compile.h>
#include <functional>
#include <memory>
namespace data::cassandra {
/**
* @brief Manages the DB schema and provides access to prepared statements.
*/
template <SomeSettingsProvider SettingsProviderType>
class KeyspaceSchema : public Schema<SettingsProvider> {
public:
using Schema::Schema;
/**
* @brief Construct a new Keyspace Schema object
*
* @param settingsProvider The settings provider
*/
struct KeyspaceStatements : public Schema<SettingsProvider>::Statements {
using Schema<SettingsProvider>::Statements::Statements;
//
// Insert queries
//
PreparedStatement insertLedgerRange = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
INSERT INTO {} (is_latest, sequence) VALUES (?, ?) IF NOT EXISTS
)",
qualifiedTableName(settingsProvider_.get(), "ledger_range")
)
);
}();
//
// Update (and "delete") queries
//
PreparedStatement updateLedgerRange = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
UPDATE {}
SET sequence = ?
WHERE is_latest = ?
IF sequence = ?
)",
qualifiedTableName(settingsProvider_.get(), "ledger_range")
)
);
}();
PreparedStatement selectLedgerRange = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
SELECT sequence
FROM {}
WHERE is_latest in (True, False)
)",
qualifiedTableName(settingsProvider_.get(), "ledger_range")
)
);
}();
//
// Select queries
//
PreparedStatement selectNFTsAfterTaxonKeyspaces = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
SELECT token_id
FROM {}
WHERE issuer = ?
AND taxon > ?
ORDER BY taxon ASC, token_id ASC
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")
)
);
}();
};
void
prepareStatements(Handle const& handle) override
{
LOG(log_.info()) << "Preparing aws keyspace statements";
statements_ = std::make_unique<KeyspaceStatements>(settingsProvider_, handle);
LOG(log_.info()) << "Finished preparing statements";
}
/**
* @brief Provides access to statements.
*
* @return The statements
*/
std::unique_ptr<KeyspaceStatements> const&
operator->() const
{
return statements_;
}
private:
std::unique_ptr<KeyspaceStatements> statements_{nullptr};
};
} // namespace data::cassandra
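A note on the two updateLedgerRange variants in this diff: the Cassandra schema conditions on IF sequence IN (?, null), so one statement covers both the first-ever write and later advances, whereas the Keyspaces schema splits that into an INSERT ... IF NOT EXISTS plus an UPDATE ... IF sequence = ? (matching doFinishWrites in BasicKeyspaceBackend above), presumably because Keyspaces does not accept the IN (?, null) lightweight-transaction condition; that last point is an inference from the split, not something the diff states. Side by side:
// Cassandra: UPDATE ledger_range SET sequence = ? WHERE is_latest = ? IF sequence IN (?, null)
// Keyspaces: INSERT INTO ledger_range (is_latest, sequence) VALUES (?, ?) IF NOT EXISTS
//            UPDATE ledger_range SET sequence = ? WHERE is_latest = ? IF sequence = ?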

View File

@@ -24,10 +24,10 @@
#include "data/cassandra/Types.hpp"
#include "util/log/Logger.hpp"
#include <boost/json/string.hpp>
#include <fmt/compile.h>
#include <functional>
#include <memory>
#include <string>
#include <string_view>
#include <vector>
@@ -53,15 +53,12 @@ template <SomeSettingsProvider SettingsProviderType>
*/
template <SomeSettingsProvider SettingsProviderType>
class Schema {
protected:
util::Logger log_{"Backend"};
std::reference_wrapper<SettingsProviderType const> settingsProvider_;
public:
virtual ~Schema() = default;
/**
* @brief Shared Schema's between all Schema classes (Cassandra and Keyspace)
* @brief Construct a new Schema object
*
* @param settingsProvider The settings provider
*/
@@ -337,7 +334,6 @@ public:
* @brief Prepared statements holder.
*/
class Statements {
protected:
std::reference_wrapper<SettingsProviderType const> settingsProvider_;
std::reference_wrapper<Handle const> handle_;
@@ -530,6 +526,20 @@ public:
// Update (and "delete") queries
//
PreparedStatement updateLedgerRange = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
UPDATE {}
SET sequence = ?
WHERE is_latest = ?
IF sequence IN (?, null)
)",
qualifiedTableName(settingsProvider_.get(), "ledger_range")
)
);
}();
PreparedStatement deleteLedgerRange = [this]() {
return handle_.get().prepare(
fmt::format(
@@ -644,6 +654,40 @@ public:
);
}();
PreparedStatement selectLedgerPageKeys = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
SELECT key
FROM {}
WHERE TOKEN(key) >= ?
AND sequence <= ?
PER PARTITION LIMIT 1
LIMIT ?
ALLOW FILTERING
)",
qualifiedTableName(settingsProvider_.get(), "objects")
)
);
}();
PreparedStatement selectLedgerPage = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
SELECT object, key
FROM {}
WHERE TOKEN(key) >= ?
AND sequence <= ?
PER PARTITION LIMIT 1
LIMIT ?
ALLOW FILTERING
)",
qualifiedTableName(settingsProvider_.get(), "objects")
)
);
}();
PreparedStatement getToken = [this]() {
return handle_.get().prepare(
fmt::format(
@@ -673,6 +717,36 @@ public:
);
}();
PreparedStatement selectAccountFromBeginning = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
SELECT account
FROM {}
WHERE token(account) > 0
PER PARTITION LIMIT 1
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "account_tx")
)
);
}();
PreparedStatement selectAccountFromToken = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
SELECT account
FROM {}
WHERE token(account) > token(?)
PER PARTITION LIMIT 1
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "account_tx")
)
);
}();
PreparedStatement selectAccountTxForward = [this]() {
return handle_.get().prepare(
fmt::format(
@@ -753,6 +827,22 @@ public:
);
}();
PreparedStatement selectNFTIDsByIssuer = [this]() {
return handle_.get().prepare(
fmt::format(
R"(
SELECT token_id
FROM {}
WHERE issuer = ?
AND (taxon, token_id) > ?
ORDER BY taxon ASC, token_id ASC
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")
)
);
}();
PreparedStatement selectNFTIDsByIssuerTaxon = [this]() {
return handle_.get().prepare(
fmt::format(
@@ -870,8 +960,27 @@ public:
*
* @param handle The handle to the DB
*/
virtual void
prepareStatements(Handle const& handle) = 0;
void
prepareStatements(Handle const& handle)
{
LOG(log_.info()) << "Preparing cassandra statements";
statements_ = std::make_unique<Statements>(settingsProvider_, handle);
LOG(log_.info()) << "Finished preparing statements";
}
/**
* @brief Provides access to statements.
*
* @return The statements
*/
std::unique_ptr<Statements> const&
operator->() const
{
return statements_;
}
private:
std::unique_ptr<Statements> statements_{nullptr};
};
} // namespace data::cassandra

View File

@@ -97,7 +97,6 @@ SettingsProvider::parseSettings() const
settings.coreConnectionsPerHost = config_.get<uint32_t>("core_connections_per_host");
settings.queueSizeIO = config_.maybeValue<uint32_t>("queue_size_io");
settings.writeBatchSize = config_.get<std::size_t>("write_batch_size");
settings.provider = config_.get<std::string>("provider");
if (config_.getValueView("connect_timeout").hasValue()) {
auto const connectTimeoutSecond = config_.get<uint32_t>("connect_timeout");

View File

@@ -25,8 +25,8 @@
#include <utility>
namespace data::cassandra {
namespace impl {
namespace impl {
struct Settings;
class Session;
class Cluster;
@@ -36,7 +36,6 @@ struct Result;
class Statement;
class PreparedStatement;
struct Batch;
} // namespace impl
using Settings = impl::Settings;

View File

@@ -36,18 +36,9 @@ constexpr auto kBATCH_DELETER = [](CassBatch* ptr) { cass_batch_free(ptr); };
namespace data::cassandra::impl {
/*
* There are 2 main types of batches for Cassandra statements:
* LOGGED: Ensures all updates in the batch succeed together, or none do.
* Use this for critical, related changes (e.g., for the same user), but it is slower.
*
* UNLOGGED: For performance. Sends many separate updates in one network trip to be fast.
* Use this for bulk-loading unrelated data, but know there's NO all-or-nothing guarantee.
*
* More info here: https://docs.datastax.com/en/developer/cpp-driver-dse/1.10/features/basics/batches/index.html
*/
// TODO: Use an appropriate value instead of CASS_BATCH_TYPE_LOGGED for different use cases
Batch::Batch(std::vector<Statement> const& statements)
: ManagedObject{cass_batch_new(CASS_BATCH_TYPE_UNLOGGED), kBATCH_DELETER}
: ManagedObject{cass_batch_new(CASS_BATCH_TYPE_LOGGED), kBATCH_DELETER}
{
cass_batch_set_is_idempotent(*this, cass_true);
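A hedged sketch of how the batch type could be parameterized if the TODO above is picked up; CASS_BATCH_TYPE_LOGGED and CASS_BATCH_TYPE_UNLOGGED are real driver enums, but the helper itself is hypothetical:
#include <cassandra.h>
// LOGGED for related critical writes that must be atomic,
// UNLOGGED for bulk loads where all-or-nothing is not required.
CassBatch*
makeBatch(bool needAtomicity)
{
    return cass_batch_new(needAtomicity ? CASS_BATCH_TYPE_LOGGED : CASS_BATCH_TYPE_UNLOGGED);
}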

View File

@@ -60,17 +60,6 @@ Cluster::Cluster(Settings const& settings) : ManagedObject{cass_cluster_new(), k
cass_cluster_set_connect_timeout(*this, settings.connectionTimeout.count());
cass_cluster_set_request_timeout(*this, settings.requestTimeout.count());
// TODO: AWS keyspace reads should be local_one to save cost
if (settings.provider == toString(cassandra::impl::Provider::Keyspace)) {
if (auto const rc = cass_cluster_set_consistency(*this, CASS_CONSISTENCY_LOCAL_QUORUM); rc != CASS_OK) {
throw std::runtime_error(fmt::format("Error setting keyspace consistency: {}", cass_error_desc(rc)));
}
} else {
if (auto const rc = cass_cluster_set_consistency(*this, CASS_CONSISTENCY_QUORUM); rc != CASS_OK) {
throw std::runtime_error(fmt::format("Error setting cassandra consistency: {}", cass_error_desc(rc)));
}
}
if (auto const rc = cass_cluster_set_core_connections_per_host(*this, settings.coreConnectionsPerHost);
rc != CASS_OK) {
throw std::runtime_error(fmt::format("Could not set core connections per host: {}", cass_error_desc(rc)));
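Regarding the TODO above about cheaper Keyspaces reads: the driver also allows overriding consistency per statement rather than cluster-wide, which is one way the read path could drop to LOCAL_ONE while writes stay at LOCAL_QUORUM. A hedged sketch; cass_statement_set_consistency is a real driver call, but the statement here is a placeholder:
// Hypothetical read-path override on an individual statement:
if (auto const rc = cass_statement_set_consistency(statement, CASS_CONSISTENCY_LOCAL_ONE); rc != CASS_OK)
    throw std::runtime_error(fmt::format("Error setting statement consistency: {}", cass_error_desc(rc)));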

Some files were not shown because too many files have changed in this diff