Mirror of https://github.com/XRPLF/clio.git (synced 2025-11-04 20:05:51 +00:00)

Compare commits

95 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 7558dfc7f5 | |
| | 00333a8d16 | |
| | 61c17400fe | |
| | d43002b49a | |
| | 30880ad627 | |
| | 25e55ef952 | |
| | 579e6030ca | |
| | d93b23206e | |
| | 1b63c3c315 | |
| | a8e61204da | |
| | bef24c1387 | |
| | d83be17ded | |
| | b6c1e2578b | |
| | e0496aff5a | |
| | 2f7adfb883 | |
| | 0f1895947d | |
| | fa693b2aff | |
| | 1825ea701f | |
| | 2ae5b13fb9 | |
| | 686a732fa8 | |
| | 4919b57466 | |
| | 44d39f335e | |
| | bfe5b52a64 | |
| | 413b823976 | |
| | e664f0b9ce | |
| | 907bd7a58f | |
| | f94a9864f0 | |
| | 36ea0389e2 | |
| | 12640de22d | |
| | ae4f2d9023 | |
| | b7b61ef61d | |
| | f391c3c899 | |
| | 562ea41a64 | |
| | 687b1e8887 | |
| | cc506fd094 | |
| | 1fe323190a | |
| | 379a44641b | |
| | 18b8fc7e5c | |
| | be2d915df7 | |
| | 57dda8ac50 | |
| | 5cdd8a642f | |
| | 8abc9c6645 | |
| | 24e1aa9ae5 | |
| | 9bee023105 | |
| | 4ee3ef94d9 | |
| | f04d2a97ec | |
| | 8fcc2dfa19 | |
| | 123e09695e | |
| | 371237487b | |
| | d97f19ba1d | |
| | e92dbc8fce | |
| | 769fdab6b7 | |
| | 363344d36e | |
| | 4f7e8194f0 | |
| | 04c80c62f5 | |
| | 92fdebf590 | |
| | b054a8424d | |
| | 162b1305e0 | |
| | bdaa04d1ec | |
| | 6c69453bda | |
| | 7661ee6a3b | |
| | 6cabe89601 | |
| | 70f7635dda | |
| | c8574ea42a | |
| | e4fbf5131f | |
| | 87ee358297 | |
| | 27e29d0421 | |
| | 63ec563135 | |
| | 2c6f52a0ed | |
| | 97956b1718 | |
| | ebfe4e6468 | |
| | 534518f13e | |
| | 4ed51c22d0 | |
| | 4364c07f1e | |
| | f20efae75a | |
| | 67b27ee344 | |
| | 082f2fe21e | |
| | 7584a683dd | |
| | f58c85d203 | |
| | 95698ee2de | |
| | 3d3db68508 | |
| | 7fcabd1ce7 | |
| | 59bb9a11ab | |
| | ac5fcc7f4b | |
| | 3d0e722176 | |
| | 93add775b2 | |
| | 0273ba0da3 | |
| | 276477c494 | |
| | d0b2a24a30 | |
| | e44a058b13 | |
| | 743c9b92de | |
| | 35c90e64ec | |
| | 6e0d7a0fac | |
| | d3c98ab2a8 | |
| | 2d172f470d | |
.github/actions/build_docker_image/action.yml (16 changes, vendored)

@@ -17,6 +17,9 @@ inputs:
   platforms:
     description: Platforms to build the image for (e.g. linux/amd64,linux/arm64)
     required: true
+  build_args:
+    description: List of build-time variables
+    required: false

   dockerhub_repo:
     description: DockerHub repository name
@@ -46,7 +49,7 @@ runs:
     - uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
       with:
         cache-image: false
-    - uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
+    - uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

     - uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5.7.0
       id: meta
@@ -61,13 +64,4 @@ runs:
         platforms: ${{ inputs.platforms }}
         push: ${{ inputs.push_image == 'true' }}
         tags: ${{ steps.meta.outputs.tags }}

     - name: Update DockerHub description
       if: ${{ inputs.push_image == 'true' && inputs.dockerhub_repo != '' }}
       uses: peter-evans/dockerhub-description@432a30c9e07499fd01da9f8a49f0faf9e0ca5b77 # v4.0.2
       with:
         username: ${{ env.DOCKERHUB_USER }}
         password: ${{ env.DOCKERHUB_PW }}
         repository: ${{ inputs.dockerhub_repo }}
         short-description: ${{ inputs.dockerhub_description }}
         readme-filepath: ${{ inputs.directory }}/README.md
+        build-args: ${{ inputs.build_args }}
.github/actions/generate/action.yml (23 changes, vendored)

@@ -5,8 +5,8 @@ inputs:
   conan_profile:
     description: Conan profile name
     required: true
-  conan_cache_hit:
-    description: Whether conan cache has been downloaded
+  force_conan_source_build:
+    description: Whether conan should build all dependencies from source
     required: true
     default: "false"
   build_type:
@@ -25,15 +25,6 @@ inputs:
     description: Whether Clio is to be statically linked
     required: true
     default: "false"
-  sanitizer:
-    description: Sanitizer to use
-    required: true
-    default: "false"
-    choices:
-      - "false"
-      - "tsan"
-      - "asan"
-      - "ubsan"
   time_trace:
     description: Whether to enable compiler trace reports
     required: true
@@ -49,7 +40,7 @@ runs:
     - name: Run conan
      shell: bash
      env:
-        BUILD_OPTION: "${{ inputs.conan_cache_hit == 'true' && 'missing' || '*' }}"
+        CONAN_BUILD_OPTION: "${{ inputs.force_conan_source_build == 'true' && '*' || 'missing' }}"
        CODE_COVERAGE: "${{ inputs.code_coverage == 'true' && 'True' || 'False' }}"
        STATIC_OPTION: "${{ inputs.static == 'true' && 'True' || 'False' }}"
        INTEGRATION_TESTS_OPTION: "${{ inputs.build_integration_tests == 'true' && 'True' || 'False' }}"
@@ -59,7 +50,7 @@ runs:
        conan \
          install .. \
          -of . \
-          -b "$BUILD_OPTION" \
+          -b "$CONAN_BUILD_OPTION" \
          -s "build_type=${{ inputs.build_type }}" \
          -o "&:static=${STATIC_OPTION}" \
          -o "&:tests=True" \
@@ -74,9 +65,9 @@ runs:
      env:
        BUILD_TYPE: "${{ inputs.build_type }}"
        SANITIZER_OPTION: |-
-          ${{ inputs.sanitizer == 'tsan' && '-Dsan=thread' ||
-             inputs.sanitizer == 'ubsan' && '-Dsan=undefined' ||
-             inputs.sanitizer == 'asan' && '-Dsan=address' ||
+          ${{ endsWith(inputs.conan_profile, '.asan') && '-Dsan=address' ||
+             endsWith(inputs.conan_profile, '.tsan') && '-Dsan=thread' ||
+             endsWith(inputs.conan_profile, '.ubsan') && '-Dsan=undefined' ||
             '' }}
      run: |
        cd build
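The SANITIZER_OPTION expression above maps a conan profile suffix to a CMake `-Dsan` flag. For reference, the same lookup can be expressed compactly; this is a hypothetical Python sketch of that mapping, not code from the repository:

```python
# Hypothetical sketch: maps a conan profile suffix to the CMake sanitizer
# flag, mirroring the SANITIZER_OPTION expression in the action above.
SUFFIX_TO_FLAG = {
    ".asan": "-Dsan=address",
    ".tsan": "-Dsan=thread",
    ".ubsan": "-Dsan=undefined",
}

def sanitizer_option(conan_profile: str) -> str:
    """Return the CMake flag for a profile like 'gcc.asan', or '' if none."""
    for suffix, flag in SUFFIX_TO_FLAG.items():
        if conan_profile.endswith(suffix):
            return flag
    return ""

assert sanitizer_option("gcc.asan") == "-Dsan=address"
assert sanitizer_option("apple-clang") == ""
```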
.github/actions/prepare_runner/action.yml (6 changes, vendored)

@@ -13,7 +13,7 @@ runs:
     if: ${{ runner.os == 'macOS' }}
     shell: bash
     run: |
-      brew install \
+      brew install --quiet \
        bison \
        ca-certificates \
        ccache \
@@ -31,7 +31,7 @@ runs:
     shell: bash
     run: |
       # Uninstall any existing cmake
-      brew uninstall cmake --ignore-dependencies || true
+      brew uninstall --formula cmake --ignore-dependencies || true

       # Download specific cmake formula
       FORMULA_URL="https://raw.githubusercontent.com/Homebrew/homebrew-core/b4e46db74e74a8c1650b38b1da222284ce1ec5ce/Formula/c/cmake.rb"
@@ -43,7 +43,7 @@ runs:
       echo "$FORMULA_EXPECTED_SHA256 /tmp/homebrew-formula/cmake.rb" | shasum -a 256 -c

       # Install cmake from the specific formula with force flag
-      brew install --formula --force /tmp/homebrew-formula/cmake.rb
+      brew install --formula --quiet --force /tmp/homebrew-formula/cmake.rb

   - name: Fix git permissions on Linux
     if: ${{ runner.os == 'Linux' }}
.github/actions/restore_cache/action.yml (30 changes, vendored)

@@ -1,10 +1,7 @@
 name: Restore cache
-description: Find and restores conan and ccache cache
+description: Find and restores ccache cache

 inputs:
-  conan_dir:
-    description: Path to Conan directory
-    required: true
   conan_profile:
     description: Conan profile name
     required: true
@@ -19,13 +16,8 @@ inputs:
     description: Whether code coverage is on
     required: true
     default: "false"

 outputs:
-  conan_hash:
-    description: Hash to use as a part of conan cache key
-    value: ${{ steps.conan_hash.outputs.hash }}
-  conan_cache_hit:
-    description: True if conan cache has been downloaded
-    value: ${{ steps.conan_cache.outputs.cache-hit }}
   ccache_cache_hit:
     description: True if ccache cache has been downloaded
     value: ${{ steps.ccache_cache.outputs.cache-hit }}
@@ -37,24 +29,6 @@ runs:
     id: git_common_ancestor
     uses: ./.github/actions/git_common_ancestor

-  - name: Calculate conan hash
-    id: conan_hash
-    shell: bash
-    run: |
-      conan graph info . --format json --out-file info.json -o '&:tests=True' --profile:all ${{ inputs.conan_profile }}
-      packages_info="$(cat info.json | jq -r '.graph.nodes[]?.ref' | grep -v 'clio')"
-      echo "$packages_info"
-      hash="$(echo "$packages_info" | shasum -a 256 | cut -d ' ' -f 1)"
-      rm info.json
-      echo "hash=$hash" >> $GITHUB_OUTPUT
-
-  - name: Restore conan cache
-    uses: actions/cache/restore@v4
-    id: conan_cache
-    with:
-      path: ${{ inputs.conan_dir }}/p
-      key: clio-conan_data-${{ runner.os }}-${{ inputs.build_type }}-${{ inputs.conan_profile }}-develop-${{ steps.conan_hash.outputs.hash }}
-
   - name: Restore ccache cache
     uses: actions/cache/restore@v4
     id: ccache_cache
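The removed "Calculate conan hash" step derived a cache key from the dependency graph: it listed every package reference except clio itself and hashed the result. A minimal Python sketch of the same idea, assuming the refs have already been extracted from the `conan graph info` JSON:

```python
import hashlib

# Sketch of the removed cache-key computation: hash the package references
# (excluding clio itself), as the shell pipeline
# `jq -r '.graph.nodes[]?.ref' | grep -v 'clio' | shasum -a 256` did.
def conan_cache_hash(refs: list[str]) -> str:
    packages_info = "\n".join(r for r in refs if "clio" not in r) + "\n"
    return hashlib.sha256(packages_info.encode()).hexdigest()

# Hypothetical refs, for illustration only:
print(conan_cache_hash(["zlib/1.3.1#b8bc26", "xrpl/2.5.0#7880d1", "clio/dev"]))
```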
.github/actions/save_cache/action.yml (35 changes, vendored)

@@ -1,27 +1,13 @@
 name: Save cache
-description: Save conan and ccache cache for develop branch
+description: Save ccache cache for develop branch

 inputs:
-  conan_dir:
-    description: Path to .conan directory
-    required: true
   conan_profile:
     description: Conan profile name
     required: true
-  conan_hash:
-    description: Hash to use as a part of conan cache key
-    required: true
-  conan_cache_hit:
-    description: Whether conan cache has been downloaded
-    required: true
   ccache_dir:
     description: Path to .ccache directory
     required: true
-  ccache_cache_hit:
-    description: Whether conan cache has been downloaded
-    required: true
-  ccache_cache_miss_rate:
-    description: How many cache misses happened
   build_type:
     description: Current build type (e.g. Release, Debug)
     required: true
@@ -31,6 +17,12 @@ inputs:
     required: true
     default: "false"

+  ccache_cache_hit:
+    description: Whether ccache cache has been downloaded
+    required: true
+  ccache_cache_miss_rate:
+    description: How many ccache cache misses happened
+
 runs:
   using: composite
   steps:
@@ -38,19 +30,6 @@ runs:
     id: git_common_ancestor
     uses: ./.github/actions/git_common_ancestor

-  - name: Cleanup conan directory from extra data
-    if: ${{ inputs.conan_cache_hit != 'true' }}
-    shell: bash
-    run: |
-      conan cache clean --source --build --temp
-
-  - name: Save conan cache
-    if: ${{ inputs.conan_cache_hit != 'true' }}
-    uses: actions/cache/save@v4
-    with:
-      path: ${{ inputs.conan_dir }}/p
-      key: clio-conan_data-${{ runner.os }}-${{ inputs.build_type }}-${{ inputs.conan_profile }}-develop-${{ inputs.conan_hash }}
-
   - name: Save ccache cache
     if: ${{ inputs.ccache_cache_hit != 'true' || inputs.ccache_cache_miss_rate == '100.0' }}
     uses: actions/cache/save@v4
.github/actions/setup_conan/action.yml (22 changes, vendored)

@@ -1,22 +0,0 @@
-name: Setup conan
-description: Setup conan profile and artifactory
-
-inputs:
-  conan_profile:
-    description: Conan profile name
-    required: true
-
-runs:
-  using: composite
-  steps:
-    - name: Create conan profile on macOS
-      if: ${{ runner.os == 'macOS' }}
-      shell: bash
-      run: |
-        conan profile detect --name "${{ inputs.conan_profile }}" --force
-        sed -i '' 's/compiler.cppstd=[^ ]*/compiler.cppstd=20/' "${{ env.CONAN_HOME }}/profiles/${{ inputs.conan_profile }}"
-
-    - name: Add artifactory remote
-      shell: bash
-      run: |
-        conan remote add --index 0 --force ripple http://18.143.149.228:8081/artifactory/api/conan/dev
.github/dependabot.yml (13 changes, vendored)

@@ -142,16 +142,3 @@ updates:
     commit-message:
       prefix: "ci: [DEPENDABOT] "
     target-branch: develop
-
-  - package-ecosystem: github-actions
-    directory: .github/actions/setup_conan/
-    schedule:
-      interval: weekly
-      day: monday
-      time: "04:00"
-      timezone: Etc/GMT
-    reviewers:
-      - XRPLF/clio-dev-team
-    commit-message:
-      prefix: "ci: [DEPENDABOT] "
-    target-branch: develop
.github/scripts/conan/apple-clang-ci.profile (8 additions, vendored, new file)

@@ -0,0 +1,8 @@
+[settings]
+arch={{detect_api.detect_arch()}}
+build_type=Release
+compiler=apple-clang
+compiler.cppstd=20
+compiler.libcxx=libc++
+compiler.version=16
+os=Macos
.github/scripts/conan/apple-clang-local.profile (11 additions, vendored, new file)

@@ -0,0 +1,11 @@
+[settings]
+arch={{detect_api.detect_arch()}}
+build_type=Release
+compiler=apple-clang
+compiler.cppstd=20
+compiler.libcxx=libc++
+compiler.version=17
+os=Macos
+
+[conf]
+grpc/1.50.1:tools.build:cxxflags+=["-Wno-missing-template-arg-list-after-template-kw"]
.github/scripts/conan/generate_matrix.py (39 additions, vendored, new executable file)

@@ -0,0 +1,39 @@
+#!/usr/bin/env python3
+import itertools
+import json
+
+LINUX_OS = ["heavy", "heavy-arm64"]
+LINUX_CONTAINERS = ['{ "image": "ghcr.io/xrplf/clio-ci:latest" }']
+LINUX_COMPILERS = ["gcc", "clang"]
+
+MACOS_OS = ["macos15"]
+MACOS_CONTAINERS = [""]
+MACOS_COMPILERS = ["apple-clang"]
+
+BUILD_TYPES = ["Release", "Debug"]
+SANITIZER_EXT = [".asan", ".tsan", ".ubsan", ""]
+
+
+def generate_matrix():
+    configurations = []
+
+    for os, container, compiler in itertools.chain(
+        itertools.product(LINUX_OS, LINUX_CONTAINERS, LINUX_COMPILERS),
+        itertools.product(MACOS_OS, MACOS_CONTAINERS, MACOS_COMPILERS),
+    ):
+        for sanitizer_ext, build_type in itertools.product(SANITIZER_EXT, BUILD_TYPES):
+            configurations.append(
+                {
+                    "os": os,
+                    "container": container,
+                    "compiler": compiler,
+                    "sanitizer_ext": sanitizer_ext,
+                    "build_type": build_type,
+                }
+            )
+
+    return {"include": configurations}
+
+
+if __name__ == "__main__":
+    print(f"matrix={json.dumps(generate_matrix())}")
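From the constants in the new script, the matrix expands to (2 Linux runners × 1 container × 2 compilers + 1 macOS runner × 1 × 1) × 4 sanitizer extensions × 2 build types = 40 configurations; a quick check of that arithmetic:

```python
# Quick sanity check of the matrix size produced by generate_matrix.py:
# 5 (os, container, compiler) combinations x 4 sanitizer_ext x 2 build types.
linux = 2 * 1 * 2  # LINUX_OS x LINUX_CONTAINERS x LINUX_COMPILERS
macos = 1 * 1 * 1  # MACOS_OS x MACOS_CONTAINERS x MACOS_COMPILERS
assert (linux + macos) * 4 * 2 == 40
```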
.github/scripts/conan/init.sh (47 additions, vendored, new executable file)

@@ -0,0 +1,47 @@
+#!/bin/bash
+
+set -ex
+
+CURRENT_DIR="$(cd "$(dirname "$0")" && pwd)"
+REPO_DIR="$(cd "$CURRENT_DIR/../../../" && pwd)"
+
+CONAN_DIR="${CONAN_HOME:-$HOME/.conan2}"
+PROFILES_DIR="$CONAN_DIR/profiles"
+
+if [[ -z "$CI" ]]; then
+  APPLE_CLANG_PROFILE="$CURRENT_DIR/apple-clang-local.profile"
+else
+  APPLE_CLANG_PROFILE="$CURRENT_DIR/apple-clang-ci.profile"
+fi
+
+GCC_PROFILE="$REPO_DIR/docker/ci/conan/gcc.profile"
+CLANG_PROFILE="$REPO_DIR/docker/ci/conan/clang.profile"
+
+SANITIZER_TEMPLATE_FILE="$REPO_DIR/docker/ci/conan/sanitizer_template.profile"
+
+rm -rf "$CONAN_DIR"
+
+conan remote add --index 0 ripple http://18.143.149.228:8081/artifactory/api/conan/dev
+
+cp "$REPO_DIR/docker/ci/conan/global.conf" "$CONAN_DIR/global.conf"
+
+create_profile_with_sanitizers() {
+  profile_name="$1"
+  profile_source="$2"
+
+  cp "$profile_source" "$PROFILES_DIR/$profile_name"
+  cp "$SANITIZER_TEMPLATE_FILE" "$PROFILES_DIR/$profile_name.asan"
+  cp "$SANITIZER_TEMPLATE_FILE" "$PROFILES_DIR/$profile_name.tsan"
+  cp "$SANITIZER_TEMPLATE_FILE" "$PROFILES_DIR/$profile_name.ubsan"
+}
+
+mkdir -p "$PROFILES_DIR"
+
+if [[ "$(uname)" == "Darwin" ]]; then
+  create_profile_with_sanitizers "apple-clang" "$APPLE_CLANG_PROFILE"
+  echo "include(apple-clang)" > "$PROFILES_DIR/default"
+else
+  create_profile_with_sanitizers "clang" "$CLANG_PROFILE"
+  create_profile_with_sanitizers "gcc" "$GCC_PROFILE"
+  echo "include(gcc)" > "$PROFILES_DIR/default"
+fi
.github/scripts/update-libxrpl-version (28 changes, vendored)

@@ -1,28 +0,0 @@
-#!/bin/bash
-
-# Note: This script is intended to be run from the root of the repository.
-#
-# This script modifies conanfile.py such that the specified version of libXRPL is used.
-
-if [[ -z "$1" ]]; then
-  cat <<EOF
-
-  ERROR
-  -----------------------------------------------------------------------------
-  Version should be passed as first argument to the script.
-  -----------------------------------------------------------------------------
-
-EOF
-  exit 1
-fi
-
-VERSION=$1
-GNU_SED=$(sed --version 2>&1 | grep -q 'GNU' && echo true || echo false)
-
-echo "+ Updating required libXRPL version to $VERSION"
-
-if [[ "$GNU_SED" == "false" ]]; then
-  sed -i '' -E "s|'xrpl/[a-zA-Z0-9\\.\\-]+'|'xrpl/$VERSION'|g" conanfile.py
-else
-  sed -i -E "s|'xrpl/[a-zA-Z0-9\\.\\-]+'|'xrpl/$VERSION'|g" conanfile.py
-fi
.github/workflows/build.yml (26 changes, vendored)

@@ -2,9 +2,9 @@ name: Build

 on:
   push:
-    branches: [master, release/*, develop]
+    branches: [release/*, develop]
   pull_request:
-    branches: [master, release/*, develop]
+    branches: [release/*, develop]
     paths:
       - .github/workflows/build.yml
@@ -19,6 +19,7 @@ on:

       - CMakeLists.txt
       - conanfile.py
+      - conan.lock
       - "cmake/**"
       - "src/**"
       - "tests/**"
@@ -46,7 +47,7 @@ jobs:

       include:
         - os: macos15
-          conan_profile: default_apple_clang
+          conan_profile: apple-clang
          build_type: Release
          container: ""
          static: false
@@ -76,7 +77,6 @@ jobs:
      static: true
      upload_clio_server: false
      targets: all
-      sanitizer: "false"
      analyze_build_time: false
    secrets:
      CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
@@ -99,23 +99,9 @@ jobs:
      shell: bash
      run: |
        repoConfigFile=docs/config-description.md
        if ! [ -f "${repoConfigFile}" ]; then
          echo "Config Description markdown file is missing in docs folder"
          exit 1
        fi
-        configDescriptionFile=config_description_new.md
-
        chmod +x ./clio_server
+        configDescriptionFile=config_description_new.md
        ./clio_server -d "${configDescriptionFile}"

-        configDescriptionHash=$(sha256sum "${configDescriptionFile}" | cut -d' ' -f1)
-        repoConfigHash=$(sha256sum "${repoConfigFile}" | cut -d' ' -f1)
-
-        if [ "${configDescriptionHash}" != "${repoConfigHash}" ]; then
-          echo "Markdown file is not up to date"
-          diff -u "${repoConfigFile}" "${configDescriptionFile}"
-          rm -f "${configDescriptionFile}"
-          exit 1
-        fi
-        rm -f "${configDescriptionFile}"
-        exit 0
+        diff -u "${repoConfigFile}" "${configDescriptionFile}"
.github/workflows/build_and_test.yml (10 changes, vendored)

@@ -24,7 +24,7 @@ on:
       type: string

     disable_cache:
-      description: Whether ccache and conan cache should be disabled
+      description: Whether ccache should be disabled
       required: false
       type: boolean
       default: false
@@ -57,12 +57,6 @@ on:
       type: string
       default: all

-    sanitizer:
-      description: Sanitizer to use
-      required: false
-      type: string
-      default: "false"
-
 jobs:
   build:
     uses: ./.github/workflows/build_impl.yml
@@ -76,7 +70,6 @@ jobs:
       static: ${{ inputs.static }}
       upload_clio_server: ${{ inputs.upload_clio_server }}
       targets: ${{ inputs.targets }}
-      sanitizer: ${{ inputs.sanitizer }}
       analyze_build_time: false

   test:
@@ -89,4 +82,3 @@ jobs:
       build_type: ${{ inputs.build_type }}
       run_unit_tests: ${{ inputs.run_unit_tests }}
       run_integration_tests: ${{ inputs.run_integration_tests }}
-      sanitizer: ${{ inputs.sanitizer }}
.github/workflows/build_impl.yml (34 changes, vendored)

@@ -24,7 +24,7 @@ on:
       type: string

     disable_cache:
-      description: Whether ccache and conan cache should be disabled
+      description: Whether ccache should be disabled
       required: false
       type: boolean
@@ -48,11 +48,6 @@ on:
       required: true
       type: string

-    sanitizer:
-      description: Sanitizer to use
-      required: true
-      type: string
-
     analyze_build_time:
       description: Whether to enable build time analysis
       required: true
@@ -64,7 +59,7 @@ on:

 jobs:
   build:
-    name: Build ${{ inputs.container != '' && 'in container' || 'natively' }}
+    name: Build
     runs-on: ${{ inputs.runs_on }}
     container: ${{ inputs.container != '' && fromJson(inputs.container) || null }}

@@ -82,17 +77,16 @@ jobs:
       with:
         disable_ccache: ${{ inputs.disable_cache }}

-    - name: Setup conan
-      uses: ./.github/actions/setup_conan
-      with:
-        conan_profile: ${{ inputs.conan_profile }}
+    - name: Setup conan on macOS
+      if: runner.os == 'macOS'
+      shell: bash
+      run: ./.github/scripts/conan/init.sh

     - name: Restore cache
       if: ${{ !inputs.disable_cache }}
       uses: ./.github/actions/restore_cache
       id: restore_cache
       with:
-        conan_dir: ${{ env.CONAN_HOME }}
         conan_profile: ${{ inputs.conan_profile }}
         ccache_dir: ${{ env.CCACHE_DIR }}
         build_type: ${{ inputs.build_type }}
@@ -102,11 +96,9 @@ jobs:
       uses: ./.github/actions/generate
       with:
         conan_profile: ${{ inputs.conan_profile }}
-        conan_cache_hit: ${{ !inputs.disable_cache && steps.restore_cache.outputs.conan_cache_hit }}
         build_type: ${{ inputs.build_type }}
         code_coverage: ${{ inputs.code_coverage }}
         static: ${{ inputs.static }}
-        sanitizer: ${{ inputs.sanitizer }}
         time_trace: ${{ inputs.analyze_build_time }}

     - name: Build Clio
@@ -140,11 +132,11 @@ jobs:
         cat /tmp/ccache.stats

     - name: Strip unit_tests
-      if: inputs.sanitizer == 'false' && !inputs.code_coverage && !inputs.analyze_build_time
+      if: ${{ !endsWith(inputs.conan_profile, 'san') && !inputs.code_coverage && !inputs.analyze_build_time }}
       run: strip build/clio_tests

     - name: Strip integration_tests
-      if: inputs.sanitizer == 'false' && !inputs.code_coverage && !inputs.analyze_build_time
+      if: ${{ !endsWith(inputs.conan_profile, 'san') && !inputs.code_coverage && !inputs.analyze_build_time }}
       run: strip build/clio_integration_tests

     - name: Upload clio_server
@@ -172,15 +164,13 @@ jobs:
       if: ${{ !inputs.disable_cache && github.ref == 'refs/heads/develop' }}
       uses: ./.github/actions/save_cache
       with:
-        conan_dir: ${{ env.CONAN_HOME }}
-        conan_hash: ${{ steps.restore_cache.outputs.conan_hash }}
-        conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
-        conan_profile: ${{ inputs.conan_profile }}
         ccache_dir: ${{ env.CCACHE_DIR }}
-        ccache_cache_hit: ${{ steps.restore_cache.outputs.ccache_cache_hit }}
-        ccache_cache_miss_rate: ${{ steps.ccache_stats.outputs.miss_rate }}
         build_type: ${{ inputs.build_type }}
         code_coverage: ${{ inputs.code_coverage }}
+        conan_profile: ${{ inputs.conan_profile }}
+
+        ccache_cache_hit: ${{ steps.restore_cache.outputs.ccache_cache_hit }}
+        ccache_cache_miss_rate: ${{ steps.ccache_stats.outputs.miss_rate }}

     # This is run as part of the build job, because it requires the following:
     # - source code
.github/workflows/check_libxrpl.yml (19 changes, vendored)

@@ -15,7 +15,7 @@ env:
 jobs:
   build:
     name: Build Clio / `libXRPL ${{ github.event.client_payload.version }}`
-    runs-on: [self-hosted, heavy]
+    runs-on: heavy
     container:
       image: ghcr.io/xrplf/clio-ci:latest

@@ -27,24 +27,23 @@ jobs:
     - name: Update libXRPL version requirement
       shell: bash
       run: |
-        ./.github/scripts/update-libxrpl-version ${{ github.event.client_payload.version }}
+        sed -i.bak -E "s|'xrpl/[a-zA-Z0-9\\.\\-]+'|'xrpl/${{ github.event.client_payload.version }}'|g" conanfile.py
+        rm -f conanfile.py.bak

+    - name: Update conan lockfile
+      shell: bash
+      run: |
+        conan lock create . -o '&:tests=True' -o '&:benchmark=True'
+
     - name: Prepare runner
       uses: ./.github/actions/prepare_runner
       with:
         disable_ccache: true

-    - name: Setup conan
-      uses: ./.github/actions/setup_conan
-      with:
-        conan_profile: ${{ env.CONAN_PROFILE }}
-
     - name: Run conan and cmake
       uses: ./.github/actions/generate
       with:
         conan_profile: ${{ env.CONAN_PROFILE }}
-        conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
         build_type: Release

     - name: Build Clio
       uses: ./.github/actions/build_clio
@@ -61,7 +60,7 @@ jobs:
   run_tests:
     name: Run tests
     needs: build
-    runs-on: [self-hosted, heavy]
+    runs-on: heavy
     container:
       image: ghcr.io/xrplf/clio-ci:latest
.github/workflows/clang-tidy.yml (13 changes, vendored)

@@ -18,6 +18,7 @@ concurrency:

 env:
   CONAN_PROFILE: clang
+  LLVM_TOOLS_VERSION: 20

 jobs:
   clang_tidy:
@@ -40,25 +41,17 @@ jobs:
       with:
         disable_ccache: true

-    - name: Setup conan
-      uses: ./.github/actions/setup_conan
-      with:
-        conan_profile: ${{ env.CONAN_PROFILE }}
-
     - name: Restore cache
       uses: ./.github/actions/restore_cache
       id: restore_cache
       with:
-        conan_dir: ${{ env.CONAN_HOME }}
-        ccache_dir: ${{ env.CCACHE_DIR }}
         conan_profile: ${{ env.CONAN_PROFILE }}
+        ccache_dir: ${{ env.CCACHE_DIR }}

     - name: Run conan and cmake
       uses: ./.github/actions/generate
       with:
         conan_profile: ${{ env.CONAN_PROFILE }}
-        conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
         build_type: Release

     - name: Get number of threads
       uses: ./.github/actions/get_number_of_threads
@@ -69,7 +62,7 @@ jobs:
       shell: bash
       id: run_clang_tidy
       run: |
-        run-clang-tidy-19 -p build -j "${{ steps.number_of_threads.outputs.threads_number }}" -fix -quiet 1>output.txt
+        run-clang-tidy-${{ env.LLVM_TOOLS_VERSION }} -p build -j "${{ steps.number_of_threads.outputs.threads_number }}" -fix -quiet 1>output.txt

     - name: Fix local includes and clang-format style
       if: ${{ steps.run_clang_tidy.outcome != 'success' }}
.github/workflows/docs.yml (5 changes, vendored)

@@ -22,6 +22,11 @@ jobs:
       with:
         lfs: true

+    - name: Prepare runner
+      uses: ./.github/actions/prepare_runner
+      with:
+        disable_ccache: true
+
     - name: Create build directory
       run: mkdir build_docs
.github/workflows/nightly.yml (11 changes, vendored)

@@ -32,28 +32,24 @@ jobs:
     matrix:
       include:
         - os: macos15
-          conan_profile: default_apple_clang
+          conan_profile: apple-clang
          build_type: Release
          static: false
-          sanitizer: "false"
        - os: heavy
          conan_profile: gcc
          build_type: Release
          static: true
          container: '{ "image": "ghcr.io/xrplf/clio-ci:latest" }'
-          sanitizer: "false"
        - os: heavy
          conan_profile: gcc
          build_type: Debug
          static: true
          container: '{ "image": "ghcr.io/xrplf/clio-ci:latest" }'
-          sanitizer: "false"
        - os: heavy
          conan_profile: gcc.ubsan
          build_type: Release
          static: false
          container: '{ "image": "ghcr.io/xrplf/clio-ci:latest" }'
-          sanitizer: "ubsan"

    uses: ./.github/workflows/build_and_test.yml
    with:
@@ -66,7 +62,6 @@ jobs:
      run_integration_tests: true
      upload_clio_server: true
      disable_cache: true
-      sanitizer: ${{ matrix.sanitizer }}

  analyze_build_time:
    name: Analyze Build Time
@@ -80,7 +75,7 @@ jobs:
          container: '{ "image": "ghcr.io/xrplf/clio-ci:latest" }'
          static: true
        - os: macos15
-          conan_profile: default_apple_clang
+          conan_profile: apple-clang
          container: ""
          static: false
    uses: ./.github/workflows/build_impl.yml
@@ -94,7 +89,6 @@ jobs:
      static: ${{ matrix.static }}
      upload_clio_server: false
      targets: all
-      sanitizer: "false"
      analyze_build_time: true

  nightly_release:
@@ -102,6 +96,7 @@ jobs:
    uses: ./.github/workflows/release_impl.yml
    with:
      overwrite_release: true
+      prerelease: true
      title: "Clio development (nightly) build"
      version: nightly
      header: >
.github/workflows/pre-commit-autoupdate.yml (15 changes, vendored)

@@ -26,14 +26,25 @@ jobs:
     - run: pre-commit autoupdate --freeze
     - run: pre-commit run --all-files || true

+    - uses: crazy-max/ghaction-import-gpg@e89d40939c28e39f97cf32126055eeae86ba74ec # v6.3.0
+      if: github.event_name != 'pull_request'
+      with:
+        gpg_private_key: ${{ secrets.ACTIONS_GPG_PRIVATE_KEY }}
+        passphrase: ${{ secrets.ACTIONS_GPG_PASSPHRASE }}
+        git_user_signingkey: true
+        git_commit_gpgsign: true
+
     - uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
+      if: always()
-      env:
-        GH_REPO: ${{ github.repository }}
-        GH_TOKEN: ${{ github.token }}
       with:
-        branch: update/pre-commit-hooks
-        title: "style: Update pre-commit hooks"
         commit-message: "style: Update pre-commit hooks"
+        committer: Clio CI <skuznetsov@ripple.com>
+        branch: update/pre-commit-hooks
+        branch-suffix: timestamp
+        delete-branch: true
+        title: "style: Update pre-commit hooks"
         body: Update versions of pre-commit hooks to latest version.
         reviewers: "godexsoft,kuznetsss,PeterChen13579,mathbunnyru"
.github/workflows/pre-commit.yml (3 changes, vendored)

@@ -3,8 +3,7 @@ name: Run pre-commit hooks
 on:
   pull_request:
   push:
-    branches:
-      - develop
+    branches: [develop]
   workflow_dispatch:

 jobs:
.github/workflows/release.yml (3 changes, vendored)

@@ -22,7 +22,7 @@ jobs:
     matrix:
       include:
         - os: macos15
-          conan_profile: default_apple_clang
+          conan_profile: apple-clang
          build_type: Release
          static: false
        - os: heavy
@@ -48,6 +48,7 @@ jobs:
    uses: ./.github/workflows/release_impl.yml
    with:
      overwrite_release: false
+      prerelease: ${{ contains(github.ref_name, '-') }}
      title: "${{ github.ref_name}}"
      version: "${{ github.ref_name }}"
      header: >
.github/workflows/release_impl.yml (21 changes, vendored)

@@ -8,6 +8,11 @@ on:
       required: true
       type: boolean

+    prerelease:
+      description: "Create a prerelease"
+      required: true
+      type: boolean
+
     title:
       description: "Release title"
       required: true
@@ -25,12 +30,12 @@ on:

     generate_changelog:
       description: "Generate changelog"
-      required: false
+      required: true
       type: boolean

     draft:
       description: "Create a draft release"
-      required: false
+      required: true
       type: boolean

 jobs:
@@ -69,9 +74,9 @@ jobs:
       shell: bash
       if: ${{ inputs.generate_changelog }}
       run: |
-        LAST_TAG=$(gh release view --json tagName -q .tagName)
-        LAST_TAG_COMMIT=$(git rev-parse $LAST_TAG)
-        BASE_COMMIT=$(git merge-base HEAD $LAST_TAG_COMMIT)
+        LAST_TAG="$(gh release view --json tagName -q .tagName)"
+        LAST_TAG_COMMIT="$(git rev-parse $LAST_TAG)"
+        BASE_COMMIT="$(git merge-base HEAD $LAST_TAG_COMMIT)"
         git-cliff "${BASE_COMMIT}..HEAD" --ignore-tags "nightly|-b"
         cat CHANGELOG.md >> "${RUNNER_TEMP}/release_notes.md"
@@ -108,10 +113,10 @@ jobs:
       if: ${{ github.event_name != 'pull_request' }}
       shell: bash
       run: |
-        gh release create ${{ inputs.version }} \
-          ${{ inputs.overwrite_release && '--prerelease' || '' }} \
+        gh release create "${{ inputs.version }}" \
+          ${{ inputs.prerelease && '--prerelease' || '' }} \
          --title "${{ inputs.title }}" \
-          --target $GITHUB_SHA \
+          --target "${GITHUB_SHA}" \
          ${{ inputs.draft && '--draft' || '' }} \
          --notes-file "${RUNNER_TEMP}/release_notes.md" \
          ./release_artifacts/clio_server*
.github/workflows/sanitizers.yml (19 changes, vendored)

@@ -19,6 +19,7 @@ on:

       - CMakeLists.txt
       - conanfile.py
+      - conan.lock
       - "cmake/**"
       # We don't run sanitizer on code change, because it takes too long
       # - "src/**"
@@ -36,24 +37,20 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        include:
-          - sanitizer: tsan
-            compiler: gcc
-          - sanitizer: asan
-            compiler: gcc
-          - sanitizer: ubsan
-            compiler: gcc
+        compiler: [gcc, clang]
+        sanitizer_ext: [.asan, .tsan, .ubsan]
+        build_type: [Release, Debug]

     uses: ./.github/workflows/build_and_test.yml
     with:
       runs_on: heavy
       container: '{ "image": "ghcr.io/xrplf/clio-ci:latest" }'
       disable_cache: true
-      conan_profile: ${{ matrix.compiler }}.${{ matrix.sanitizer }}
-      build_type: Release
+      conan_profile: ${{ matrix.compiler }}${{ matrix.sanitizer_ext }}
+      build_type: ${{ matrix.build_type }}
       static: false
-      run_unit_tests: true
+      # Currently, both gcc.tsan and clang.tsan unit tests hang
+      run_unit_tests: ${{ matrix.sanitizer_ext != '.tsan' }}
       run_integration_tests: false
       upload_clio_server: false
       targets: clio_tests clio_integration_tests
-      sanitizer: ${{ matrix.sanitizer }}
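With the hand-written include list replaced by full matrix axes, the sanitizer workflow now expands to 2 compilers × 3 sanitizer extensions × 2 build types = 12 jobs; a quick check of that count:

```python
# Quick check of the expanded sanitizer matrix: every combination of
# compiler, sanitizer extension, and build type is now built.
from itertools import product

jobs = list(product(["gcc", "clang"], [".asan", ".tsan", ".ubsan"], ["Release", "Debug"]))
assert len(jobs) == 12
```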
.github/workflows/test_impl.yml (15 changes, vendored)

@@ -33,22 +33,17 @@ on:
       required: true
       type: boolean

-    sanitizer:
-      description: Sanitizer to use
-      required: true
-      type: string
-
 jobs:
   unit_tests:
-    name: Unit testing ${{ inputs.container != '' && 'in container' || 'natively' }}
+    name: Unit testing
     runs-on: ${{ inputs.runs_on }}
     container: ${{ inputs.container != '' && fromJson(inputs.container) || null }}

     if: inputs.run_unit_tests

     env:
-      # TODO: remove when we have fixed all currently existing issues from sanitizers
-      SANITIZER_IGNORE_ERRORS: ${{ inputs.sanitizer != 'false' && inputs.sanitizer != 'ubsan' }}
+      # TODO: remove completely when we have fixed all currently existing issues with sanitizers
+      SANITIZER_IGNORE_ERRORS: ${{ endsWith(inputs.conan_profile, '.asan') || endsWith(inputs.conan_profile, '.tsan') }}

     steps:
       - name: Clean workdir
@@ -90,7 +85,7 @@ jobs:
       if: env.SANITIZER_IGNORE_ERRORS == 'true' && steps.check_report.outputs.found_report == 'true'
       uses: actions/upload-artifact@v4
       with:
-        name: ${{ inputs.conan_profile }}_report
+        name: sanitizer_report_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
        path: .sanitizer-report/*
        include-hidden-files: true
@@ -109,7 +104,7 @@ jobs:
        Reports are available as artifacts.

  integration_tests:
-    name: Integration testing ${{ inputs.container != '' && 'in container' || 'natively' }}
+    name: Integration testing
    runs-on: ${{ inputs.runs_on }}
    container: ${{ inputs.container != '' && fromJson(inputs.container) || null }}
.github/workflows/update_docker_ci.yml (236 changes, vendored)

@@ -23,14 +23,33 @@ on:
   workflow_dispatch:

 concurrency:
-  # Only cancel in-progress jobs or runs for the current workflow - matches against branch & tags
+  # Only matches runs for the current workflow - matches against branch & tags
   group: ${{ github.workflow }}-${{ github.ref }}
-  cancel-in-progress: true
+  # We want to execute all builds sequentially in develop
+  cancel-in-progress: false

+env:
+  CLANG_MAJOR_VERSION: 19
+  GCC_MAJOR_VERSION: 14
+  GCC_VERSION: 14.3.0
+
 jobs:
-  gcc:
-    name: Build and push GCC docker image
-    runs-on: [self-hosted, heavy]
+  repo:
+    name: Calculate repo name
+    runs-on: ubuntu-latest
+    outputs:
+      GHCR_REPO: ${{ steps.set-ghcr-repo.outputs.GHCR_REPO }}

     steps:
+      - name: Set GHCR_REPO
+        id: set-ghcr-repo
+        run: |
+          echo "GHCR_REPO=$(echo ghcr.io/${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> ${GITHUB_OUTPUT}
+
+  gcc-amd64:
+    name: Build and push GCC docker image (amd64)
+    runs-on: heavy
+    needs: repo
+
+    steps:
       - uses: actions/checkout@v4
@@ -42,30 +61,116 @@ jobs:
           files: "docker/compilers/gcc/**"

       - uses: ./.github/actions/build_docker_image
-        # Skipping this build for now, because CI environment is not stable
-        if: false && steps.changed-files.outputs.any_changed == 'true'
+        if: steps.changed-files.outputs.any_changed == 'true'
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
           DOCKERHUB_PW: ${{ secrets.DOCKERHUB_PW }}
         with:
           images: |
-            ghcr.io/xrplf/clio-gcc
+            ${{ needs.repo.outputs.GHCR_REPO }}/clio-gcc
             rippleci/clio_gcc
           push_image: ${{ github.event_name != 'pull_request' }}
           directory: docker/compilers/gcc
           tags: |
-            type=raw,value=latest
-            type=raw,value=12
-            type=raw,value=12.3.0
-            type=raw,value=${{ github.sha }}
-          platforms: linux/amd64,linux/arm64
+            type=raw,value=amd64-latest
+            type=raw,value=amd64-${{ env.GCC_MAJOR_VERSION }}
+            type=raw,value=amd64-${{ env.GCC_VERSION }}
+            type=raw,value=amd64-${{ github.sha }}
+          platforms: linux/amd64
+          build_args: |
+            GCC_MAJOR_VERSION=${{ env.GCC_MAJOR_VERSION }}
+            GCC_VERSION=${{ env.GCC_VERSION }}
           dockerhub_repo: rippleci/clio_gcc
           dockerhub_description: GCC compiler for XRPLF/clio.

+  gcc-arm64:
+    name: Build and push GCC docker image (arm64)
+    runs-on: heavy-arm64
+    needs: repo
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Get changed files
+        id: changed-files
+        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        with:
+          files: "docker/compilers/gcc/**"
+
+      - uses: ./.github/actions/build_docker_image
+        if: steps.changed-files.outputs.any_changed == 'true'
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
+          DOCKERHUB_PW: ${{ secrets.DOCKERHUB_PW }}
+        with:
+          images: |
+            ${{ needs.repo.outputs.GHCR_REPO }}/clio-gcc
+            rippleci/clio_gcc
+          push_image: ${{ github.event_name != 'pull_request' }}
+          directory: docker/compilers/gcc
+          tags: |
+            type=raw,value=arm64-latest
+            type=raw,value=arm64-${{ env.GCC_MAJOR_VERSION }}
+            type=raw,value=arm64-${{ env.GCC_VERSION }}
+            type=raw,value=arm64-${{ github.sha }}
+          platforms: linux/arm64
+          build_args: |
+            GCC_MAJOR_VERSION=${{ env.GCC_MAJOR_VERSION }}
+            GCC_VERSION=${{ env.GCC_VERSION }}
+          dockerhub_repo: rippleci/clio_gcc
+          dockerhub_description: GCC compiler for XRPLF/clio.
+
+  gcc-merge:
+    name: Merge and push multi-arch GCC docker image
+    runs-on: heavy
+    needs: [repo, gcc-amd64, gcc-arm64]
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Get changed files
+        id: changed-files
+        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        with:
+          files: "docker/compilers/gcc/**"
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+
+      - name: Login to GitHub Container Registry
+        if: github.event_name != 'pull_request'
+        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
+        with:
+          registry: ghcr.io
+          username: ${{ github.repository_owner }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Login to DockerHub
+        if: github.event_name != 'pull_request'
+        uses: docker/login-action@v3
+        with:
+          username: ${{ secrets.DOCKERHUB_USER }}
+          password: ${{ secrets.DOCKERHUB_PW }}
+
+      - name: Create and push multi-arch manifest
+        if: github.event_name != 'pull_request' && steps.changed-files.outputs.any_changed == 'true'
+        run: |
+          for image in ${{ needs.repo.outputs.GHCR_REPO }}/clio-gcc rippleci/clio_gcc; do
+            docker buildx imagetools create \
+              -t $image:latest \
+              -t $image:${{ env.GCC_MAJOR_VERSION }} \
+              -t $image:${{ env.GCC_VERSION }} \
+              -t $image:${{ github.sha }} \
+              $image:arm64-latest \
+              $image:amd64-latest
+          done
+
   clang:
     name: Build and push Clang docker image
-    runs-on: [self-hosted, heavy]
+    runs-on: heavy
+    needs: repo

     steps:
       - uses: actions/checkout@v4
@@ -84,21 +189,24 @@ jobs:
           DOCKERHUB_PW: ${{ secrets.DOCKERHUB_PW }}
         with:
           images: |
-            ghcr.io/xrplf/clio-clang
+            ${{ needs.repo.outputs.GHCR_REPO }}/clio-clang
             rippleci/clio_clang
           push_image: ${{ github.event_name != 'pull_request' }}
           directory: docker/compilers/clang
           tags: |
             type=raw,value=latest
-            type=raw,value=16
+            type=raw,value=${{ env.CLANG_MAJOR_VERSION }}
             type=raw,value=${{ github.sha }}
           platforms: linux/amd64,linux/arm64
+          build_args: |
+            CLANG_MAJOR_VERSION=${{ env.CLANG_MAJOR_VERSION }}
           dockerhub_repo: rippleci/clio_clang
           dockerhub_description: Clang compiler for XRPLF/clio.

-  tools:
-    name: Build and push tools docker image
-    runs-on: [self-hosted, heavy]
+  tools-amd64:
+    name: Build and push tools docker image (amd64)
+    runs-on: heavy
+    needs: [repo, gcc-merge]

     steps:
       - uses: actions/checkout@v4
@@ -115,18 +223,87 @@ jobs:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         with:
           images: |
-            ghcr.io/xrplf/clio-tools
+            ${{ needs.repo.outputs.GHCR_REPO }}/clio-tools
           push_image: ${{ github.event_name != 'pull_request' }}
           directory: docker/tools
           tags: |
-            type=raw,value=latest
-            type=raw,value=${{ github.sha }}
-          platforms: linux/amd64,linux/arm64
+            type=raw,value=amd64-latest
+            type=raw,value=amd64-${{ github.sha }}
+          platforms: linux/amd64
+          build_args: |
+            GHCR_REPO=${{ needs.repo.outputs.GHCR_REPO }}
+            GCC_VERSION=${{ env.GCC_VERSION }}

+  tools-arm64:
+    name: Build and push tools docker image (arm64)
+    runs-on: heavy-arm64
+    needs: [repo, gcc-merge]
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Get changed files
+        id: changed-files
+        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        with:
+          files: "docker/tools/**"
+
+      - uses: ./.github/actions/build_docker_image
+        if: steps.changed-files.outputs.any_changed == 'true'
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          images: |
+            ${{ needs.repo.outputs.GHCR_REPO }}/clio-tools
+          push_image: ${{ github.event_name != 'pull_request' }}
+          directory: docker/tools
+          tags: |
+            type=raw,value=arm64-latest
+            type=raw,value=arm64-${{ github.sha }}
+          platforms: linux/arm64
+          build_args: |
+            GHCR_REPO=${{ needs.repo.outputs.GHCR_REPO }}
+            GCC_VERSION=${{ env.GCC_VERSION }}
+
+  tools-merge:
+    name: Merge and push multi-arch tools docker image
+    runs-on: heavy
+    needs: [repo, tools-amd64, tools-arm64]
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Get changed files
+        id: changed-files
+        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        with:
+          files: "docker/tools/**"
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+
+      - name: Login to GitHub Container Registry
+        if: github.event_name != 'pull_request'
+        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
+        with:
+          registry: ghcr.io
+          username: ${{ github.repository_owner }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Create and push multi-arch manifest
+        if: github.event_name != 'pull_request' && steps.changed-files.outputs.any_changed == 'true'
+        run: |
+          image=${{ needs.repo.outputs.GHCR_REPO }}/clio-tools
+          docker buildx imagetools create \
+            -t $image:latest \
+            -t $image:${{ github.sha }} \
+            $image:arm64-latest \
+            $image:amd64-latest
+
   ci:
     name: Build and push CI docker image
-    runs-on: [self-hosted, heavy]
-    needs: [gcc, clang, tools]
+    runs-on: heavy
+    needs: [repo, gcc-merge, clang, tools-merge]

     steps:
       - uses: actions/checkout@v4
@@ -137,14 +314,19 @@ jobs:
           DOCKERHUB_PW: ${{ secrets.DOCKERHUB_PW }}
         with:
           images: |
+            ${{ needs.repo.outputs.GHCR_REPO }}/clio-ci
             rippleci/clio_ci
-            ghcr.io/xrplf/clio-ci
           push_image: ${{ github.event_name != 'pull_request' }}
           directory: docker/ci
           tags: |
             type=raw,value=latest
-            type=raw,value=gcc_12_clang_16
+            type=raw,value=gcc_${{ env.GCC_MAJOR_VERSION }}_clang_${{ env.CLANG_MAJOR_VERSION }}
             type=raw,value=${{ github.sha }}
           platforms: linux/amd64,linux/arm64
+          build_args: |
+            GHCR_REPO=${{ needs.repo.outputs.GHCR_REPO }}
+            CLANG_MAJOR_VERSION=${{ env.CLANG_MAJOR_VERSION }}
+            GCC_MAJOR_VERSION=${{ env.GCC_MAJOR_VERSION }}
+            GCC_VERSION=${{ env.GCC_VERSION }}
           dockerhub_repo: rippleci/clio_ci
           dockerhub_description: CI image for XRPLF/clio.
.github/workflows/upload_conan_deps.yml (100 additions, vendored, new file)

@@ -0,0 +1,100 @@
+name: Upload Conan Dependencies
+
+on:
+  schedule:
+    - cron: "0 9 * * 1-5"
+  workflow_dispatch:
+    inputs:
+      force_source_build:
+        description: "Force source build of all dependencies"
+        required: false
+        default: false
+        type: boolean
+  pull_request:
+    branches: [develop]
+    paths:
+      - .github/workflows/upload_conan_deps.yml
+
+      - .github/actions/generate/action.yml
+      - .github/actions/prepare_runner/action.yml
+      - ".github/scripts/conan/**"
+      - "!.github/scripts/conan/apple-clang-local.profile"
+
+      - conanfile.py
+      - conan.lock
+  push:
+    branches: [develop]
+    paths:
+      - .github/workflows/upload_conan_deps.yml
+
+      - .github/actions/generate/action.yml
+      - .github/actions/prepare_runner/action.yml
+      - ".github/scripts/conan/**"
+      - "!.github/scripts/conan/apple-clang-local.profile"
+
+      - conanfile.py
+      - conan.lock
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  generate-matrix:
+    runs-on: ubuntu-latest
+    outputs:
+      matrix: ${{ steps.set-matrix.outputs.matrix }}
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Calculate conan matrix
+        id: set-matrix
+        run: .github/scripts/conan/generate_matrix.py >> "${GITHUB_OUTPUT}"
+
+  upload-conan-deps:
+    name: Build ${{ matrix.compiler }}${{ matrix.sanitizer_ext }} ${{ matrix.build_type }}
+
+    needs: generate-matrix
+
+    strategy:
+      fail-fast: false
+      matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
+
+    runs-on: ${{ matrix.os }}
+    container: ${{ matrix.container != '' && fromJson(matrix.container) || null }}
+
+    env:
+      CONAN_PROFILE: ${{ matrix.compiler }}${{ matrix.sanitizer_ext }}
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Prepare runner
+        uses: ./.github/actions/prepare_runner
+        with:
+          disable_ccache: true
+
+      - name: Setup conan on macOS
+        if: runner.os == 'macOS'
+        shell: bash
+        run: ./.github/scripts/conan/init.sh
+
+      - name: Show conan profile
+        run: conan profile show --profile:all ${{ env.CONAN_PROFILE }}
+
+      - name: Run conan and cmake
+        uses: ./.github/actions/generate
+        with:
+          conan_profile: ${{ env.CONAN_PROFILE }}
+          # We check that everything builds fine from source on scheduled runs
+          # But we do build and upload packages with build=missing by default
+          force_conan_source_build: ${{ github.event_name == 'schedule' || github.event.inputs.force_source_build == 'true' }}
+          build_type: ${{ matrix.build_type }}
+
+      - name: Login to Conan
+        if: github.event_name != 'pull_request'
+        run: conan remote login -p ${{ secrets.CONAN_PASSWORD }} ripple ${{ secrets.CONAN_USERNAME }}
+
+      - name: Upload Conan packages
+        if: github.event_name != 'pull_request' && github.event_name != 'schedule'
+        run: conan upload "*" -r=ripple --confirm
.pre-commit-config.yaml

@@ -11,7 +11,7 @@
 #
 # See https://pre-commit.com for more information
 # See https://pre-commit.com/hooks.html for more hooks
-exclude: ^docs/doxygen-awesome-theme/
+exclude: ^(docs/doxygen-awesome-theme/|conan\.lock$)

 repos:
   # `pre-commit sample-config` default hooks
@@ -26,12 +26,12 @@ repos:

   # Autoformat: YAML, JSON, Markdown, etc.
   - repo: https://github.com/rbubley/mirrors-prettier
-    rev: 787fb9f542b140ba0b2aced38e6a3e68021647a3 # frozen: v3.5.3
+    rev: 5ba47274f9b181bce26a5150a725577f3c336011 # frozen: v3.6.2
     hooks:
       - id: prettier

   - repo: https://github.com/igorshubovych/markdownlint-cli
-    rev: 586c3ea3f51230da42bab657c6a32e9e66c364f0 # frozen: v0.44.0
+    rev: 192ad822316c3a22fb3d3cc8aa6eafa0b8488360 # frozen: v0.45.0
     hooks:
       - id: markdownlint-fix
         exclude: LICENSE.md
@@ -55,17 +55,38 @@ repos:
         --ignore-words=pre-commit-hooks/codespell_ignore.txt,
       ]

-  # Running fix-local-includes before clang-format
-  # to ensure that the include order is correct.
+  - repo: https://github.com/trufflesecurity/trufflehog
+    rev: 6641d4ba5b684fffe195b9820345de1bf19f3181 # frozen: v3.89.2
+    hooks:
+      - id: trufflehog
+        entry: trufflehog git file://. --since-commit HEAD --no-verification --fail
+
+  # Running some C++ hooks before clang-format
+  # to ensure that the style is consistent.
   - repo: local
     hooks:
+      - id: json-in-cpp
+        name: Fix JSON style in C++
+        entry: pre-commit-hooks/json_in_cpp.py
+        types: [c++]
+        language: python
+        exclude: |
+          (?x)^(
+            tests/unit/etl/SubscriptionSourceTests.cpp|
+            tests/unit/web/ServerTests.cpp|
+            tests/unit/web/impl/ErrorHandlingTests.cpp|
+            tests/unit/web/ng/ServerTests.cpp|
+            tests/unit/web/ng/impl/ErrorHandlingTests.cpp
+          )$
+
       - id: fix-local-includes
         name: Fix Local Includes
         entry: pre-commit-hooks/fix-local-includes.sh
        types: [c++]
        language: script

  - repo: https://github.com/pre-commit/mirrors-clang-format
-    rev: f9a52e87b6cdcb01b0a62b8611d9ba9f2dad0067 # frozen: v19.1.7
+    rev: 6b9072cd80691b1b48d80046d884409fb1d962d1 # frozen: v20.1.7
    hooks:
      - id: clang-format
        args: [--style=file]
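The new json-in-cpp hook uses a verbose `(?x)` regex to exempt a handful of test files; a small Python sketch showing how such a pattern matches, with one assumed non-excluded path for illustration:

```python
import re

# Sketch: the (?x) verbose pattern from the json-in-cpp hook's exclude list.
EXCLUDE = re.compile(
    r"""(?x)^(
        tests/unit/etl/SubscriptionSourceTests.cpp|
        tests/unit/web/ServerTests.cpp|
        tests/unit/web/impl/ErrorHandlingTests.cpp|
        tests/unit/web/ng/ServerTests.cpp|
        tests/unit/web/ng/impl/ErrorHandlingTests.cpp
    )$"""
)

assert EXCLUDE.match("tests/unit/web/ServerTests.cpp")
assert not EXCLUDE.match("src/main.cpp")  # hypothetical path, for illustration
```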
@@ -69,15 +69,17 @@
 # Enable selected sanitizer if enabled via `san`
 if (san)
   set(SUPPORTED_SANITIZERS "address" "thread" "memory" "undefined")
-  list(FIND SUPPORTED_SANITIZERS "${san}" INDEX)
-  if (INDEX EQUAL -1)
+  if (NOT san IN_LIST SUPPORTED_SANITIZERS)
     message(FATAL_ERROR "Error: Unsupported sanitizer '${san}'. Supported values are: ${SUPPORTED_SANITIZERS}.")
   endif ()

-  target_compile_options(
-    clio_options INTERFACE # Sanitizers recommend minimum of -O1 for reasonable performance
-                           $<$<CONFIG:Debug>:-O1> ${SAN_FLAG} -fno-omit-frame-pointer
-  )
+  # Sanitizers recommend minimum of -O1 for reasonable performance so we enable it for debug builds
+  set(SAN_OPTIMIZATION_FLAG "")
+  if (CMAKE_BUILD_TYPE STREQUAL "Debug")
+    set(SAN_OPTIMIZATION_FLAG -O1)
+  endif ()
+  target_compile_options(clio_options INTERFACE ${SAN_OPTIMIZATION_FLAG} ${SAN_FLAG} -fno-omit-frame-pointer)

   target_compile_definitions(
     clio_options INTERFACE $<$<STREQUAL:${san},address>:SANITIZER=ASAN> $<$<STREQUAL:${san},thread>:SANITIZER=TSAN>
                            $<$<STREQUAL:${san},memory>:SANITIZER=MSAN> $<$<STREQUAL:${san},undefined>:SANITIZER=UBSAN>
@@ -8,7 +8,7 @@ if (lint)
   endif ()
   message(STATUS "Using clang-tidy from CLIO_CLANG_TIDY_BIN")
 else ()
-  find_program(_CLANG_TIDY_BIN NAMES "clang-tidy-19" "clang-tidy" REQUIRED)
+  find_program(_CLANG_TIDY_BIN NAMES "clang-tidy-20" "clang-tidy" REQUIRED)
 endif ()

 if (NOT _CLANG_TIDY_BIN)
@@ -4,39 +4,42 @@

find_package(Git REQUIRED)

set(GIT_COMMAND rev-parse --short HEAD)
set(GIT_COMMAND describe --tags --exact-match)
execute_process(
  COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE REV
  COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND}
  WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
  OUTPUT_VARIABLE TAG
  RESULT_VARIABLE RC
  OUTPUT_STRIP_TRAILING_WHITESPACE
)

set(GIT_COMMAND branch --show-current)
execute_process(
  COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE BRANCH
  OUTPUT_STRIP_TRAILING_WHITESPACE
)
if (RC EQUAL 0)
  # if we are on a tag, use the tag name
  set(CLIO_VERSION "${TAG}")
  set(DOC_CLIO_VERSION "${TAG}")
else ()
  # if not, use YYYYMMDDHMS-<branch>-<git-rev>

  if (BRANCH STREQUAL "")
    set(BRANCH "dev")
  endif ()

  if (NOT (BRANCH MATCHES master OR BRANCH MATCHES release/*)) # for develop and any other branch name
    # YYYYMMDDHMS-<branch>-<git-rev>
    set(GIT_COMMAND show -s --date=format:%Y%m%d%H%M%S --format=%cd)
    execute_process(
      COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} OUTPUT_VARIABLE DATE
      OUTPUT_STRIP_TRAILING_WHITESPACE
      COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE DATE
      OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY
    )

    set(GIT_COMMAND branch --show-current)
    execute_process(
      COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE BRANCH
      OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY
    )

    set(GIT_COMMAND rev-parse --short HEAD)
    execute_process(
      COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE REV
      OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY
    )

    set(CLIO_VERSION "${DATE}-${BRANCH}-${REV}")
    set(DOC_CLIO_VERSION "develop")
  else ()
    set(GIT_COMMAND describe --tags)
    execute_process(
      COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE CLIO_TAG_VERSION
      OUTPUT_STRIP_TRAILING_WHITESPACE
    )
    set(CLIO_VERSION "${CLIO_TAG_VERSION}")
    set(DOC_CLIO_VERSION "${CLIO_TAG_VERSION}")
  endif ()
endif ()

if (CMAKE_BUILD_TYPE MATCHES Debug)
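The new version logic reduces to the following shell sketch, restated directly from the git commands in the hunk above:

```sh
# On an exact tag, the tag is the version; otherwise fall back to
# <commit-date>-<branch>-<short-rev>
git describe --tags --exact-match 2>/dev/null \
  || echo "$(git show -s --date=format:%Y%m%d%H%M%S --format=%cd)-$(git branch --show-current)-$(git rev-parse --short HEAD)"
```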
@@ -1,21 +1,22 @@
set(COMPILER_FLAGS
    -pedantic
    -Wall
    -Wcast-align
    -Wdouble-promotion
    -Wextra
    -Werror
    -Wextra
    -Wformat=2
    -Wimplicit-fallthrough
    -Wmisleading-indentation
    -Wno-narrowing
    -Wno-deprecated-declarations
    -Wno-dangling-else
    -Wno-deprecated-declarations
    -Wno-narrowing
    -Wno-unused-but-set-variable
    -Wnon-virtual-dtor
    -Wnull-dereference
    -Wold-style-cast
    -pedantic
    -Wpedantic
    -Wunreachable-code
    -Wunused
    # FIXME: The following bunch are needed for gcc12 atm.
    -Wno-missing-requires
57  conan.lock  Normal file
@@ -0,0 +1,57 @@
{
  "version": "0.5",
  "requires": [
    "zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1752006674.465",
    "xxhash/0.8.2#7856c968c985b2981b707ee8f2413b2b%1752006674.334",
    "xrpl/2.5.0#7880d1696f11fceb1d498570f1a184c8%1752006708.218",
    "sqlite3/3.47.0#7a0904fd061f5f8a2366c294f9387830%1752006674.338",
    "soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1752006674.465",
    "re2/20230301#dfd6e2bf050eb90ddd8729cfb4c844a4%1752006674.077",
    "rapidjson/cci.20220822#1b9d8c2256876a154172dc5cfbe447c6%1752006673.227",
    "protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1752006673.172",
    "openssl/1.1.1v#216374e4fb5b2e0f5ab1fb6f27b5b434%1752006673.069",
    "nudb/2.0.8#63990d3e517038e04bf529eb8167f69f%1752006673.862",
    "minizip/1.2.13#9e87d57804bd372d6d1e32b1871517a3%1752006672.983",
    "lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1752006672.825",
    "libuv/1.46.0#78565d142ac7102776256328a26cdf60%1752006672.827",
    "libiconv/1.17#1e65319e945f2d31941a9d28cc13c058%1752006672.826",
    "libbacktrace/cci.20210118#a7691bfccd8caaf66309df196790a5a1%1752006672.822",
    "libarchive/3.7.6#e0453864b2a4d225f06b3304903cb2b7%1752006672.917",
    "http_parser/2.9.4#98d91690d6fd021e9e624218a85d9d97%1752006672.658",
    "gtest/1.14.0#f8f0757a574a8dd747d16af62d6eb1b7%1752006671.555",
    "grpc/1.50.1#02291451d1e17200293a409410d1c4e1%1752006671.777",
    "fmt/11.2.0#579bb2cdf4a7607621beea4eb4651e0f%1752006671.557",
    "date/3.0.3#cf28fe9c0aab99fe12da08aa42df65e1%1752006671.553",
    "cassandra-cpp-driver/2.17.0#e50919efac8418c26be6671fd702540a%1752006671.654",
    "c-ares/1.34.5#b78b91e7cfb1f11ce777a285bbf169c6%1752006671.554",
    "bzip2/1.0.8#00b4a4658791c1f06914e087f0e792f5%1752006671.549",
    "boost/1.83.0#5bcb2a14a35875e328bf312e080d3562%1752006671.557",
    "benchmark/1.8.3#1a2ce62c99e2b3feaa57b1f0c15a8c46%1752006671.408",
    "abseil/20230802.1#f0f91485b111dc9837a68972cb19ca7b%1752006671.555"
  ],
  "build_requires": [
    "zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1752006674.465",
    "protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1752006673.172",
    "protobuf/3.21.9#64ce20e1d9ea24f3d6c504015d5f6fa8%1752006673.173",
    "cmake/3.31.7#57c3e118bcf267552c0ea3f8bee1e7d5%1752006671.64",
    "b2/5.3.2#7b5fabfe7088ae933fb3e78302343ea0%1752006671.407"
  ],
  "python_requires": [],
  "overrides": {
    "boost/1.83.0": [
      null,
      "boost/1.83.0#5bcb2a14a35875e328bf312e080d3562"
    ],
    "protobuf/3.21.9": [
      null,
      "protobuf/3.21.12"
    ],
    "lz4/1.9.4": [
      "lz4/1.10.0"
    ],
    "sqlite3/3.44.2": [
      "sqlite3/3.47.0"
    ]
  },
  "config_requires": []
}
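A sketch of checking that a local dependency graph still resolves against this lockfile (Conan 2 command; the options mirror those used elsewhere on this page):

```sh
# Resolve the graph strictly against the committed conan.lock
conan graph info . --lockfile=conan.lock -o '&:tests=True'
```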
23  conanfile.py
@@ -11,7 +11,6 @@ class ClioConan(ConanFile):
    settings = 'os', 'compiler', 'build_type', 'arch'
    options = {
        'static': [True, False],  # static linkage
        'fPIC': [True, False],  # unused?
        'verbose': [True, False],
        'tests': [True, False],  # build unit tests; create `clio_tests` binary
        'integration_tests': [True, False],  # build integration tests; create `clio_integration_tests` binary
@@ -27,18 +26,17 @@ class ClioConan(ConanFile):
    requires = [
        'boost/1.83.0',
        'cassandra-cpp-driver/2.17.0',
        'fmt/10.1.1',
        'fmt/11.2.0',
        'protobuf/3.21.12',
        'grpc/1.50.1',
        'openssl/1.1.1v',
        'xrpl/2.5.0-rc1',
        'xrpl/2.5.0',
        'zlib/1.3.1',
        'libbacktrace/cci.20210118'
    ]

    default_options = {
        'static': False,
        'fPIC': True,
        'verbose': False,
        'tests': False,
        'integration_tests': False,
@@ -89,21 +87,8 @@ class ClioConan(ConanFile):

    def generate(self):
        tc = CMakeToolchain(self)
        tc.variables['verbose'] = self.options.verbose
        tc.variables['static'] = self.options.static
        tc.variables['tests'] = self.options.tests
        tc.variables['integration_tests'] = self.options.integration_tests
        tc.variables['coverage'] = self.options.coverage
        tc.variables['lint'] = self.options.lint
        tc.variables['docs'] = self.options.docs
        tc.variables['packaging'] = self.options.packaging
        tc.variables['benchmark'] = self.options.benchmark
        tc.variables['snapshot'] = self.options.snapshot
        tc.variables['time_trace'] = self.options.time_trace

        if self.settings.compiler == 'clang' and self.settings.compiler.version == 16:
            tc.extra_cxxflags = ["-DBOOST_ASIO_DISABLE_CONCEPTS"]

        for option_name, option_value in self.options.items():
            tc.variables[option_name] = option_value
        tc.generate()

    def build(self):
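With the `generate()` loop above, every recipe option is forwarded to CMake automatically, so adding a new option no longer needs a dedicated `tc.variables` line. A sketch of exercising one such option from the command line:

```sh
# Any -o '&:<option>=...' now reaches CMake without recipe changes
conan install . --output-folder build --build missing -o '&:snapshot=True'
```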
@@ -1,8 +1,11 @@
# TODO: change this when we are able to push gcc image to ghcr.io
FROM rippleci/clio_gcc:12.3.0 AS clio-gcc
FROM ghcr.io/xrplf/clio-tools:latest AS clio-tools
ARG GHCR_REPO=invalid
ARG CLANG_MAJOR_VERSION=invalid
ARG GCC_VERSION=invalid

FROM ghcr.io/xrplf/clio-clang:16
FROM ${GHCR_REPO}/clio-gcc:${GCC_VERSION} AS clio-gcc
FROM ${GHCR_REPO}/clio-tools:latest AS clio-tools

FROM ${GHCR_REPO}/clio-clang:${CLANG_MAJOR_VERSION}

ARG DEBIAN_FRONTEND=noninteractive

@@ -17,7 +20,7 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"]
USER root
WORKDIR /root

ARG LLVM_TOOLS_VERSION=19
ARG LLVM_TOOLS_VERSION=20

# Add repositories
RUN apt-get update \
@@ -33,10 +36,8 @@ RUN apt-get update \
# Install packages
RUN apt-get update \
    && apt-get install -y --no-install-recommends --no-install-suggests \
    bison \
    clang-tidy-${LLVM_TOOLS_VERSION} \
    clang-tools-${LLVM_TOOLS_VERSION} \
    flex \
    git \
    git-lfs \
    graphviz \
@@ -48,6 +49,10 @@ RUN apt-get update \
    zip \
    && pip3 install -q --upgrade --no-cache-dir pip \
    && pip3 install -q --no-cache-dir \
    # TODO: Remove this once we switch to newer Ubuntu base image
    # lxml 6.0.0 is not compatible with our image
    'lxml<6.0.0' \
    \
    cmake==3.31.6 \
    conan==2.17.0 \
    gcovr \
@@ -55,29 +60,33 @@ RUN apt-get update \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# Install gcc-12 and make ldconfig aware of the new libstdc++ location (for gcc)
ARG GCC_MAJOR_VERSION=invalid

# Install custom-built gcc and make ldconfig aware of the new libstdc++ location (for gcc)
# Note: Clang is using libc++ instead
COPY --from=clio-gcc /gcc12.deb /
COPY --from=clio-gcc /gcc${GCC_MAJOR_VERSION}.deb /
RUN apt-get update \
    && apt-get install -y --no-install-recommends --no-install-suggests \
    binutils \
    libc6-dev \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* \
    && dpkg -i /gcc12.deb \
    && rm -rf /gcc12.deb \
    && dpkg -i /gcc${GCC_MAJOR_VERSION}.deb \
    && rm -rf /gcc${GCC_MAJOR_VERSION}.deb \
    && ldconfig

# Rewire to use gcc-12 as default compiler
RUN update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-12 100 \
    && update-alternatives --install /usr/bin/c++ c++ /usr/bin/g++-12 100 \
    && update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 100 \
    && update-alternatives --install /usr/bin/cc cc /usr/bin/gcc-12 100 \
    && update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-12 100 \
    && update-alternatives --install /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-12 100 \
    && update-alternatives --install /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-12 100
# Rewire to use our custom-built gcc as default compiler
RUN update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-${GCC_MAJOR_VERSION} 100 \
    && update-alternatives --install /usr/bin/c++ c++ /usr/bin/g++-${GCC_MAJOR_VERSION} 100 \
    && update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-${GCC_MAJOR_VERSION} 100 \
    && update-alternatives --install /usr/bin/cc cc /usr/bin/gcc-${GCC_MAJOR_VERSION} 100 \
    && update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-${GCC_MAJOR_VERSION} 100 \
    && update-alternatives --install /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-${GCC_MAJOR_VERSION} 100 \
    && update-alternatives --install /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-${GCC_MAJOR_VERSION} 100

COPY --from=clio-tools \
    /usr/local/bin/mold \
    /usr/local/bin/ld.mold \
    /usr/local/bin/ccache \
    /usr/local/bin/doxygen \
    /usr/local/bin/ClangBuildAnalyzer \
@@ -90,6 +99,9 @@ WORKDIR /root
# Setup conan
RUN conan remote add --index 0 ripple http://18.143.149.228:8081/artifactory/api/conan/dev

WORKDIR /root/.conan2
COPY conan/global.conf ./global.conf

WORKDIR /root/.conan2/profiles

COPY conan/clang.profile ./clang
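A hypothetical local build of this CI image; the build-arg names come from the Dockerfile above, while the values and context path are illustrative (versions taken from the README below):

```sh
# Build the CI image locally, pointing the base stages at the GHCR images
docker buildx build \
  --build-arg GHCR_REPO=ghcr.io/xrplf \
  --build-arg CLANG_MAJOR_VERSION=19 \
  --build-arg GCC_MAJOR_VERSION=14 \
  --build-arg GCC_VERSION=14.3.0 \
  --tag clio-ci:local .
```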
@@ -6,13 +6,14 @@ It is used in [Clio Github Actions](https://github.com/XRPLF/clio/actions) but c
The image is based on Ubuntu 20.04 and contains:

- ccache 4.11.3
- clang 16.0.6
- Clang 19
- ClangBuildAnalyzer 1.6.0
- conan 2.17.0
- doxygen 1.12
- gcc 12.3.0
- Conan 2.17.0
- Doxygen 1.12
- GCC 14.3.0
- gh 2.74
- git-cliff 2.9.1
- mold 2.40.1
- and some other useful tools

Conan is set up to build Clio without any additional steps.
@@ -1,11 +1,12 @@
[settings]
arch=x86_64
arch={{detect_api.detect_arch()}}
build_type=Release
compiler=clang
compiler.cppstd=20
compiler.libcxx=libc++
compiler.version=16
compiler.version=19
os=Linux

[conf]
tools.build:compiler_executables={'c': '/usr/bin/clang-16', 'cpp': '/usr/bin/clang++-16'}
tools.build:compiler_executables={"c": "/usr/bin/clang-19", "cpp": "/usr/bin/clang++-19"}
grpc/1.50.1:tools.build:cxxflags+=["-Wno-missing-template-arg-list-after-template-kw"]
@@ -1,11 +1,11 @@
[settings]
arch=x86_64
arch={{detect_api.detect_arch()}}
build_type=Release
compiler=gcc
compiler.cppstd=20
compiler.libcxx=libstdc++11
compiler.version=12
compiler.version=14
os=Linux

[conf]
tools.build:compiler_executables={'c': '/usr/bin/gcc-12', 'cpp': '/usr/bin/g++-12'}
tools.build:compiler_executables={"c": "/usr/bin/gcc-14", "cpp": "/usr/bin/g++-14"}
2  docker/ci/conan/global.conf  Normal file
@@ -0,0 +1,2 @@
core.download:parallel={{os.cpu_count()}}
core.upload:parallel={{os.cpu_count()}}
@@ -1,15 +1,23 @@
|
||||
{% set compiler, sani = profile_name.split('.') %}
|
||||
|
||||
{% set sanitizer_opt_map = {'asan': 'address', 'tsan': 'thread', 'ubsan': 'undefined'} %}
|
||||
{% set sanitizer_opt_map = {"asan": "address", "tsan": "thread", "ubsan": "undefined"} %}
|
||||
{% set sanitizer = sanitizer_opt_map[sani] %}
|
||||
|
||||
{% set sanitizer_build_flags_str = "-fsanitize=" ~ sanitizer ~ " -g -O1 -fno-omit-frame-pointer" %}
|
||||
{% set sanitizer_build_flags = sanitizer_build_flags_str.split(' ') %}
|
||||
{% set sanitizer_link_flags_str = "-fsanitize=" ~ sanitizer %}
|
||||
{% set sanitizer_link_flags = sanitizer_link_flags_str.split(' ') %}
|
||||
|
||||
include({{ compiler }})
|
||||
|
||||
[options]
|
||||
boost/*:extra_b2_flags="cxxflags=\"-fsanitize={{ sanitizer }}\" linkflags=\"-fsanitize={{ sanitizer }}\""
|
||||
boost/*:extra_b2_flags="cxxflags=\"{{ sanitizer_build_flags_str }}\" linkflags=\"{{ sanitizer_link_flags_str }}\""
|
||||
boost/*:without_stacktrace=True
|
||||
|
||||
[conf]
|
||||
tools.build:cflags+=["-fsanitize={{ sanitizer }}"]
|
||||
tools.build:cxxflags+=["-fsanitize={{ sanitizer }}"]
|
||||
tools.build:exelinkflags+=["-fsanitize={{ sanitizer }}"]
|
||||
tools.build:cflags+={{ sanitizer_build_flags }}
|
||||
tools.build:cxxflags+={{ sanitizer_build_flags }}
|
||||
tools.build:exelinkflags+={{ sanitizer_link_flags }}
|
||||
tools.build:sharedlinkflags+={{ sanitizer_link_flags }}
|
||||
|
||||
tools.info.package_id:confs+=["tools.build:cflags", "tools.build:cxxflags", "tools.build:exelinkflags", "tools.build:sharedlinkflags"]
|
||||
|
||||
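Since the template splits `profile_name` on the dot, the profile file must be named `<compiler>.<sanitizer>`; a sketch of using it (options mirror the build docs later on this page):

```sh
# "clang.asan" includes the clang profile and injects -fsanitize=address flags
conan install .. --output-folder . --build missing --profile:all clang.asan -o '&:tests=True'
```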
@@ -8,8 +8,6 @@ SHELL ["/bin/bash", "-c"]
USER root
WORKDIR /root

ARG CLANG_VERSION=16

RUN apt-get update \
    && apt-get install -y --no-install-recommends --no-install-suggests \
    wget \
@@ -18,13 +16,17 @@ RUN apt-get update \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

ARG CLANG_MAJOR_VERSION=invalid
# Bump this version to force rebuild of the image
ARG BUILD_VERSION=0

RUN wget --progress=dot:giga https://apt.llvm.org/llvm.sh \
    && chmod +x llvm.sh \
    && ./llvm.sh ${CLANG_VERSION} \
    && ./llvm.sh ${CLANG_MAJOR_VERSION} \
    && rm -rf llvm.sh \
    && apt-get update \
    && apt-get install -y --no-install-recommends --no-install-suggests \
    libc++-${CLANG_VERSION}-dev \
    libc++abi-${CLANG_VERSION}-dev \
    libc++-${CLANG_MAJOR_VERSION}-dev \
    libc++abi-${CLANG_MAJOR_VERSION}-dev \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
@@ -1,10 +1,17 @@
FROM ubuntu:20.04 AS build
ARG UBUNTU_VERSION=20.04

ARG GCC_MAJOR_VERSION=invalid

FROM ubuntu:$UBUNTU_VERSION AS build

ARG UBUNTU_VERSION

ARG GCC_MAJOR_VERSION

ARG BUILD_VERSION=0

ARG DEBIAN_FRONTEND=noninteractive
ARG TARGETARCH
ARG UBUNTU_VERSION=20.04
ARG GCC_VERSION=12.3.0
ARG BUILD_VERSION=2

RUN apt-get update \
    && apt-get install -y --no-install-recommends --no-install-suggests \
@@ -18,18 +25,23 @@ RUN apt-get update \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

RUN wget --progress=dot:giga https://gcc.gnu.org/pub/gcc/releases/gcc-$GCC_VERSION/gcc-$GCC_VERSION.tar.gz \
    && tar xf gcc-$GCC_VERSION.tar.gz \
    && cd /gcc-$GCC_VERSION && ./contrib/download_prerequisites
ARG GCC_VERSION

RUN mkdir /${TARGETARCH}-gcc-12
WORKDIR /${TARGETARCH}-gcc-12
WORKDIR /
RUN wget --progress=dot:giga https://gcc.gnu.org/pub/gcc/releases/gcc-$GCC_VERSION/gcc-$GCC_VERSION.tar.gz \
    && tar xf gcc-$GCC_VERSION.tar.gz

WORKDIR /gcc-$GCC_VERSION
RUN ./contrib/download_prerequisites

RUN mkdir /gcc-build
WORKDIR /gcc-build
RUN /gcc-$GCC_VERSION/configure \
    --with-pkgversion="clio-build-$BUILD_VERSION https://github.com/XRPLF/clio" \
    --enable-languages=c,c++ \
    --prefix=/usr \
    --with-gcc-major-version-only \
    --program-suffix=-12 \
    --program-suffix=-${GCC_MAJOR_VERSION} \
    --enable-shared \
    --enable-linker-build-id \
    --libexecdir=/usr/lib \
@@ -53,38 +65,55 @@ RUN /gcc-$GCC_VERSION/configure \
    --enable-cet \
    --disable-multilib \
    --without-cuda-driver \
    --enable-checking=release \
    && make -j "$(nproc)" \
    && make install-strip DESTDIR=/gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION \
    && mkdir -p /gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION/usr/share/gdb/auto-load/usr/lib64 \
    && mv /gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION/usr/lib64/libstdc++.so.6.0.30-gdb.py /gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION/usr/share/gdb/auto-load/usr/lib64/libstdc++.so.6.0.30-gdb.py
    --enable-checking=release

RUN make -j "$(nproc)"
RUN make install-strip DESTDIR=/gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION

RUN export GDB_AUTOLOAD_DIR="/gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION/usr/share/gdb/auto-load/usr/lib64" \
    && mkdir -p "$GDB_AUTOLOAD_DIR" \
    && mv \
    /gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION/usr/lib64/libstdc++.so.*-gdb.py \
    $GDB_AUTOLOAD_DIR/

# Generate deb
WORKDIR /
COPY control.m4 /
COPY ld.so.conf /gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION/etc/ld.so.conf.d/1-gcc-12.conf
COPY ld.so.conf /gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION/etc/ld.so.conf.d/1-gcc-${GCC_MAJOR_VERSION}.conf

RUN mkdir /gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION/DEBIAN \
    && m4 -P -DUBUNTU_VERSION=$UBUNTU_VERSION -DVERSION=$GCC_VERSION-$BUILD_VERSION -DTARGETARCH=$TARGETARCH control.m4 > /gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION/DEBIAN/control \
    && dpkg-deb --build --root-owner-group /gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION /gcc12.deb
    && m4 \
    -P \
    -DUBUNTU_VERSION=$UBUNTU_VERSION \
    -DVERSION=$GCC_VERSION-$BUILD_VERSION \
    -DTARGETARCH=$TARGETARCH \
    control.m4 > /gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION/DEBIAN/control \
    && dpkg-deb \
    --build \
    --root-owner-group \
    /gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION \
    /gcc${GCC_MAJOR_VERSION}.deb

# Create final image
FROM ubuntu:20.04
COPY --from=build /gcc12.deb /
FROM ubuntu:$UBUNTU_VERSION

# Make gcc-12 available but also leave gcc12.deb for others to copy if needed
ARG GCC_MAJOR_VERSION

COPY --from=build /gcc${GCC_MAJOR_VERSION}.deb /

# Install gcc-${GCC_MAJOR_VERSION}, but also leave gcc${GCC_MAJOR_VERSION}.deb for others to copy if needed
RUN apt-get update \
    && apt-get install -y --no-install-recommends --no-install-suggests \
    binutils \
    libc6-dev \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* \
    && dpkg -i /gcc12.deb
    && dpkg -i /gcc${GCC_MAJOR_VERSION}.deb

RUN update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-12 100 \
    && update-alternatives --install /usr/bin/c++ c++ /usr/bin/g++-12 100 \
    && update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 100 \
    && update-alternatives --install /usr/bin/cc cc /usr/bin/gcc-12 100 \
    && update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-12 100 \
    && update-alternatives --install /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-12 100 \
    && update-alternatives --install /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-12 100
RUN update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-${GCC_MAJOR_VERSION} 100 \
    && update-alternatives --install /usr/bin/c++ c++ /usr/bin/g++-${GCC_MAJOR_VERSION} 100 \
    && update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-${GCC_MAJOR_VERSION} 100 \
    && update-alternatives --install /usr/bin/cc cc /usr/bin/gcc-${GCC_MAJOR_VERSION} 100 \
    && update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-${GCC_MAJOR_VERSION} 100 \
    && update-alternatives --install /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-${GCC_MAJOR_VERSION} 100 \
    && update-alternatives --install /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-${GCC_MAJOR_VERSION} 100
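A sketch of verifying the `update-alternatives` rewiring inside the resulting image (the image tag is an assumption):

```sh
# The custom-built GCC should answer as the default compiler
docker run --rm ghcr.io/xrplf/clio-gcc:14.3.0 \
  bash -c 'gcc --version && update-alternatives --display gcc'
```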
@@ -1,3 +1,3 @@
# gcc compiler
# GCC compiler

This image contains gcc compiler to build <https://github.com/XRPLF/clio>.
This image contains GCC compiler to build <https://github.com/XRPLF/clio>.
@@ -1,6 +1,7 @@
Package: gcc-12-ubuntu-UBUNTUVERSION
Package: gcc-14-ubuntu-UBUNTUVERSION
Version: VERSION
Architecture: TARGETARCH
Maintainer: Alex Kremer <akremer@ripple.com>
Description: Gcc VERSION build for ubuntu UBUNTUVERSION
Uploaders: Ayaz Salikhov <asalikhov@ripple.com>
Description: GCC VERSION build for ubuntu UBUNTUVERSION
Depends: binutils, libc6-dev
@@ -2,7 +2,7 @@ services:
  clio_develop:
    image: ghcr.io/xrplf/clio-ci:latest
    volumes:
      - clio_develop_conan_data:/root/.conan/data
      - clio_develop_conan_data:/root/.conan2/p
      - clio_develop_ccache:/root/.ccache
      - ../../:/root/clio
      - clio_develop_build:/root/clio/build_docker
@@ -1,24 +1,41 @@
FROM ubuntu:20.04
ARG GHCR_REPO=invalid
ARG GCC_VERSION=invalid

FROM ${GHCR_REPO}/clio-gcc:${GCC_VERSION}

ARG DEBIAN_FRONTEND=noninteractive
ARG TARGETARCH

SHELL ["/bin/bash", "-o", "pipefail", "-c"]

ARG BUILD_VERSION=1

RUN apt-get update \
    && apt-get install -y --no-install-recommends --no-install-suggests \
    bison \
    build-essential \
    cmake \
    flex \
    ninja-build \
    python3 \
    python3-pip \
    software-properties-common \
    wget \
    && pip3 install -q --no-cache-dir \
    cmake==3.31.6 \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /tmp

ARG MOLD_VERSION=2.40.1
RUN wget --progress=dot:giga "https://github.com/rui314/mold/archive/refs/tags/v${MOLD_VERSION}.tar.gz" \
    && tar xf "v${MOLD_VERSION}.tar.gz" \
    && cd "mold-${MOLD_VERSION}" \
    && mkdir build \
    && cd build \
    && cmake -GNinja -DCMAKE_BUILD_TYPE=Release .. \
    && ninja install \
    && rm -rf /tmp/* /var/tmp/*

ARG CCACHE_VERSION=4.11.3
RUN wget --progress=dot:giga "https://github.com/ccache/ccache/releases/download/v${CCACHE_VERSION}/ccache-${CCACHE_VERSION}.tar.gz" \
    && tar xf "ccache-${CCACHE_VERSION}.tar.gz" \
@@ -26,7 +43,7 @@ RUN wget --progress=dot:giga "https://github.com/ccache/ccache/releases/download
    && mkdir build \
    && cd build \
    && cmake -GNinja -DCMAKE_BUILD_TYPE=Release -DENABLE_TESTING=False .. \
    && cmake --build . --target install \
    && ninja install \
    && rm -rf /tmp/* /var/tmp/*

ARG DOXYGEN_VERSION=1.12.0
@@ -36,7 +53,7 @@ RUN wget --progress=dot:giga "https://github.com/doxygen/doxygen/releases/downlo
    && mkdir build \
    && cd build \
    && cmake -GNinja -DCMAKE_BUILD_TYPE=Release .. \
    && cmake --build . --target install \
    && ninja install \
    && rm -rf /tmp/* /var/tmp/*

ARG CLANG_BUILD_ANALYZER_VERSION=1.6.0
@@ -46,7 +63,7 @@ RUN wget --progress=dot:giga "https://github.com/aras-p/ClangBuildAnalyzer/archi
    && mkdir build \
    && cd build \
    && cmake -GNinja -DCMAKE_BUILD_TYPE=Release .. \
    && cmake --build . --target install \
    && ninja install \
    && rm -rf /tmp/* /var/tmp/*

ARG GIT_CLIFF_VERSION=2.9.1
@@ -6,7 +6,7 @@
## Minimum Requirements

- [Python 3.7](https://www.python.org/downloads/)
- [Conan 1.55, <2.0](https://conan.io/downloads.html)
- [Conan 2.17.0](https://conan.io/downloads.html)
- [CMake 3.20, <4.0](https://cmake.org/download/)
- [**Optional**] [GCovr](https://gcc.gnu.org/onlinedocs/gcc/Gcov.html): needed for code coverage generation
- [**Optional**] [CCache](https://ccache.dev/): speeds up compilation if you are going to compile Clio often
@@ -19,32 +19,43 @@

### Conan Configuration

Clio requires `compiler.cppstd=20` in your Conan profile (`~/.conan/profiles/default`).
By default, Conan uses `~/.conan2` as its home folder.
You can change it by using the `$CONAN_HOME` env variable.
[More info about Conan home](https://docs.conan.io/2/reference/environment.html#conan-home).

> [!NOTE]
> Although Clio is built using C++23, it's required to set `compiler.cppstd=20` for the time being as some of Clio's dependencies are not yet capable of building under C++23.
> [!TIP]
> To set up Conan automatically, you can run `.github/scripts/conan/init.sh`.
> This will delete the Conan home directory (if it exists), set up profiles and add the Artifactory remote.

**Mac apple-clang 16 example**:
The instructions below assume that `$CONAN_HOME` is not set.

#### Profiles

The default profile is the file in `~/.conan2/profiles/default`.

Here are some examples of possible profiles:

**Mac apple-clang 17 example**:

```text
[settings]
arch=armv8
arch={{detect_api.detect_arch()}}
build_type=Release
compiler=apple-clang
compiler.cppstd=20
compiler.libcxx=libc++
compiler.version=16
compiler.version=17
os=Macos

[conf]
tools.build:cxxflags+=["-Wno-missing-template-arg-list-after-template-kw"]
grpc/1.50.1:tools.build:cxxflags+=["-Wno-missing-template-arg-list-after-template-kw"]
```

**Linux gcc-12 example**:

```text
[settings]
arch=x86_64
arch={{detect_api.detect_arch()}}
build_type=Release
compiler=gcc
compiler.cppstd=20
@@ -53,7 +64,19 @@ compiler.version=12
os=Linux

[conf]
tools.build:compiler_executables={'c': '/usr/bin/gcc-12', 'cpp': '/usr/bin/g++-12'}
tools.build:compiler_executables={"c": "/usr/bin/gcc-12", "cpp": "/usr/bin/g++-12"}
```

> [!NOTE]
> Although Clio is built using C++23, it's required to set `compiler.cppstd=20` in your profile for the time being as some of Clio's dependencies are not yet capable of building under C++23.

#### global.conf file

To increase the speed of downloading and uploading packages, add the following to the `~/.conan2/global.conf` file:

```text
core.download:parallel={{os.cpu_count()}}
core.upload:parallel={{os.cpu_count()}}
```

#### Artifactory

@@ -64,15 +87,21 @@ Make sure artifactory is setup with Conan.

conan remote add --index 0 ripple http://18.143.149.228:8081/artifactory/api/conan/dev
```

Now you should be able to download the prebuilt `xrpl` package on some platforms.
Now you should be able to download the prebuilt dependencies (including the `xrpl` package) on supported platforms.

> [!NOTE]
> You may need to edit the `~/.conan/remotes.json` file to ensure that this newly added artifactory is listed last. Otherwise, you could see compilation errors when building the project with gcc version 13 (or newer).
#### Conan lockfile

Remove old packages you may have cached.
To achieve reproducible dependencies, we use a [Conan lockfile](https://docs.conan.io/2/tutorial/versioning/lockfiles.html).

```sh
conan remove -f xrpl
The `conan.lock` file in the repository contains a "snapshot" of the current dependencies.
It is implicitly used when running `conan` commands; you don't need to specify it.

You have to update this file every time you add a new dependency or change a revision or version of an existing dependency.

To do that, run the following command in the repository root:

```bash
conan lock create . -o '&:tests=True' -o '&:benchmark=True'
```

## Building Clio

@@ -81,6 +110,7 @@ Navigate to Clio's root directory and run:

```sh
mkdir build && cd build
# You can also specify profile explicitly by adding `--profile:all <PROFILE_NAME>`
conan install .. --output-folder . --build missing --settings build_type=Release -o '&:tests=True'
# You can also add -GNinja to use Ninja build system instead of Make
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release ..
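A sketch of the usual next step once configuration succeeds (not part of the diff above):

```sh
# Compile with all available cores
cmake --build . --parallel "$(nproc)"
```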
@@ -152,24 +182,24 @@ If you wish to develop against a `rippled` instance running in standalone mode t

Sometimes, during development, you need to build against a custom version of `libxrpl`. (For example, you may be developing compatibility for a proposed amendment that is not yet merged to the main `rippled` codebase.) To build Clio with compatibility for a custom fork or branch of `rippled`, follow these steps:

1. First, pull/clone the appropriate `rippled` fork and switch to the branch you want to build.
   The following example uses an in-development build with [XLS-33d Multi-Purpose Tokens](https://github.com/XRPLF/XRPL-Standards/tree/master/XLS-0033d-multi-purpose-tokens):
1. First, pull/clone the appropriate `rippled` version and switch to the branch you want to build.
   The following example uses a `2.5.0-rc1` tag of rippled in the main branch:

   ```sh
   git clone https://github.com/shawnxie999/rippled/
   git clone https://github.com/XRPLF/rippled/
   cd rippled
   git switch mpt-1.1
   git checkout 2.5.0-rc1
   ```

2. Export a custom package to your local Conan store using a user/channel:

   ```sh
   conan export . my/feature
   conan export . --user=my --channel=feature
   ```

3. Patch your local Clio build to use the right package.

   Edit `conanfile.py` (from the Clio repository root). Replace the `xrpl` requirement with the custom package version from the previous step. This must also include the current version number from your `rippled` branch. For example:
   Edit `conanfile.py` in the Clio repository root. Replace the `xrpl` requirement with the custom package version from the previous step. This must also include the current version number from your `rippled` branch. For example:

   ```py
   # ... (excerpt from conanfile.py)
@@ -180,7 +210,7 @@ Sometimes, during development, you need to build against a custom version of `li
   'protobuf/3.21.9',
   'grpc/1.50.1',
   'openssl/1.1.1v',
   'xrpl/2.3.0-b1@my/feature', # Update this line
   'xrpl/2.5.0-rc1@my/feature', # Use your exported version here
   'zlib/1.3.1',
   'libbacktrace/cci.20210118'
   ]
@@ -192,18 +222,16 @@ Sometimes, during development, you need to build against a custom version of `li

## Using `clang-tidy` for static analysis

The minimum [clang-tidy](https://clang.llvm.org/extra/clang-tidy/) version required is 19.0.

Clang-tidy can be run by CMake when building the project.
To achieve this, you just need to provide the option `-o '&:lint=True'` for the `conan install` command:

```sh
conan install .. --output-folder . --build missing --settings build_type=Release -o '&:tests=True' -o '&:lint=True'
conan install .. --output-folder . --build missing --settings build_type=Release -o '&:tests=True' -o '&:lint=True' --profile:all clang
```

By default CMake will try to find `clang-tidy` automatically in your system.
To force CMake to use your desired binary, set the `CLIO_CLANG_TIDY_BIN` environment variable to the path of the `clang-tidy` binary. For example:

```sh
export CLIO_CLANG_TIDY_BIN=/opt/homebrew/opt/llvm@19/bin/clang-tidy
export CLIO_CLANG_TIDY_BIN=/opt/homebrew/opt/llvm/bin/clang-tidy
```
@@ -5,7 +5,6 @@
Clio needs access to a `rippled` server in order to work. The following configurations are required for Clio and `rippled` to communicate:

1. In the Clio config file, provide the following:

   - The IP of the `rippled` server

   - The port on which `rippled` is accepting unencrypted WebSocket connections
@@ -13,7 +12,6 @@ Clio needs access to a `rippled` server in order to work. The following configur
   - The port on which `rippled` is handling gRPC requests

2. In the `rippled` config file, you need to open:

   - A port to accept unencrypted WebSocket connections

   - A port to handle gRPC requests, with the IP(s) of Clio specified in the `secure_gateway` entry
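For orientation, a hypothetical sketch of the two sides of this handshake; every field, section name, and port below is an assumption to adapt to your deployment, not part of the diff:

```sh
cat <<'EOF'
# Clio config (JSON), one entry per rippled source -- names/ports assumed:
#   "etl_sources": [{ "ip": "127.0.0.1", "ws_port": "6006", "grpc_port": "50051" }]
#
# rippled.cfg, gRPC port with Clio allowed through secure_gateway -- assumed:
#   [port_grpc]
#   port = 50051
#   ip = 0.0.0.0
#   secure_gateway = 127.0.0.1
EOF
```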
76  pre-commit-hooks/json_in_cpp.py  Executable file
@@ -0,0 +1,76 @@
#!/usr/bin/env python3

import argparse
import re
from pathlib import Path


def fix_json_style(cpp_content: str) -> str:
    cpp_content = cpp_content.replace('R"json(', 'R"JSON(').replace(')json"', ')JSON"')

    pattern = r'R"JSON\((.*?)\)JSON"'

    def replace_json(match):
        raw_json = match.group(1)

        raw_json = (
            raw_json.replace(" :", ":")
            .replace(" ,", ",")
            .replace(" null", "null")
            .replace(':"', ': "')
            .replace(',"', ', "')
            .replace('":{', '": {')
            .replace('":[', '": [')
            .replace('":true', '": true')
            .replace('":false', '": false')
            .replace('":null', '": null')
        )
        for digit in range(10):
            raw_json = raw_json.replace(f'":{digit}', f'": {digit}')
        return f'R"JSON({raw_json})JSON"'

    return re.sub(pattern, replace_json, cpp_content, flags=re.DOTALL)


def process_file(file_path: Path, dry_run: bool) -> bool:
    content = file_path.read_text(encoding="utf-8")
    new_content = fix_json_style(content)

    if new_content != content:
        print(f"Processing file: {file_path}")
        if dry_run:
            print("Dry run: changes won't be written to the file.")
        else:
            print("Writing changes to file.")
            file_path.write_text(new_content, encoding="utf-8")
    return new_content == content


def main():
    parser = argparse.ArgumentParser(
        description="Fix JSON style in C++ files",
    )
    parser.add_argument(
        "--dry-run",
        default=False,
        action="store_true",
        help="Don't modify files, just print what would be changed",
    )
    parser.add_argument(
        "files",
        nargs="*",
        help="Specific files to process",
    )

    args = parser.parse_args()

    success = True
    for file in args.files:
        success = success and process_file(Path(file), dry_run=args.dry_run)
    if not success:
        print("Errors occurred while processing files.")
        exit(1)


if __name__ == "__main__":
    main()
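A sketch of running the new hook by hand; the script path and flag come from the file above, while the target file is illustrative:

```sh
# Report what the hook would rewrite, without touching the file
./pre-commit-hooks/json_in_cpp.py --dry-run tests/unit/rpc/SomeTests.cpp
```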
@@ -10,4 +10,5 @@ target_link_libraries(
  clio_web
  clio_rpc
  clio_migration
  PRIVATE Boost::program_options
)
@@ -60,17 +60,17 @@ CliArgs::parse(int argc, char const* argv[])
    po::store(po::command_line_parser(argc, argv).options(description).positional(positional).run(), parsed);
    po::notify(parsed);

    if (parsed.count("help") != 0u) {
    if (parsed.contains("help")) {
        std::cout << "Clio server " << util::build::getClioFullVersionString() << "\n\n" << description;
        return Action{Action::Exit{EXIT_SUCCESS}};
    }

    if (parsed.count("version") != 0u) {
    if (parsed.contains("version")) {
        std::cout << util::build::getClioFullVersionString() << '\n';
        return Action{Action::Exit{EXIT_SUCCESS}};
    }

    if (parsed.count("config-description") != 0u) {
    if (parsed.contains("config-description")) {
        std::filesystem::path const filePath = parsed["config-description"].as<std::string>();

        auto const res = util::config::ClioConfigDescription::generateConfigDescriptionToFile(filePath);
@@ -83,18 +83,17 @@ CliArgs::parse(int argc, char const* argv[])

    auto configPath = parsed["conf"].as<std::string>();

    if (parsed.count("migrate") != 0u) {
    if (parsed.contains("migrate")) {
        auto const opt = parsed["migrate"].as<std::string>();
        if (opt == "status")
            return Action{Action::Migrate{.configPath = std::move(configPath), .subCmd = MigrateSubCmd::status()}};
        return Action{Action::Migrate{.configPath = std::move(configPath), .subCmd = MigrateSubCmd::migration(opt)}};
    }

    if (parsed.count("verify") != 0u)
    if (parsed.contains("verify"))
        return Action{Action::VerifyConfig{.configPath = std::move(configPath)}};

    return Action{Action::Run{.configPath = std::move(configPath), .useNgWebServer = parsed.count("ng-web-server") != 0}
    };
    return Action{Action::Run{.configPath = std::move(configPath), .useNgWebServer = parsed.contains("ng-web-server")}};
}

} // namespace app
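The `contains()` calls are behaviorally equivalent to the old `count(...) != 0u` checks (`po::variables_map` is a `std::map`, which gained `contains` in C++20). A sketch of the CLI surface the parser handles, with the binary name assumed:

```sh
# Flags handled by the parse() function above
./clio_server --help
./clio_server --version
```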
@@ -41,7 +41,6 @@
#include "util/build/Build.hpp"
#include "util/config/ConfigDefinition.hpp"
#include "util/log/Logger.hpp"
#include "util/prometheus/Prometheus.hpp"
#include "web/AdminVerificationStrategy.hpp"
#include "web/RPCServerHandler.hpp"
#include "web/Server.hpp"
@@ -91,7 +90,6 @@ ClioApplication::ClioApplication(util::config::ClioConfigDefinition const& confi
    : config_(config), signalsHandler_{config_}
{
    LOG(util::LogService::info()) << "Clio version: " << util::build::getClioFullVersionString();
    PrometheusService::init(config);
    signalsHandler_.subscribeToStop([this]() { appStopper_.stop(); });
}
@@ -97,7 +97,7 @@ HealthCheckHandler::operator()(
    boost::asio::yield_context
)
{
    static auto constexpr kHEALTH_CHECK_HTML = R"html(
    static constexpr auto kHEALTH_CHECK_HTML = R"html(
<!DOCTYPE html>
<html>
<head><title>Test page for Clio</title></head>
@@ -78,17 +78,20 @@ WritingAmendmentKey::WritingAmendmentKey(std::string amendmentName) : AmendmentK

} // namespace impl

AmendmentKey::operator std::string const&() const
AmendmentKey::
operator std::string const&() const
{
    return name;
}

AmendmentKey::operator std::string_view() const
AmendmentKey::
operator std::string_view() const
{
    return name;
}

AmendmentKey::operator ripple::uint256() const
AmendmentKey::
operator ripple::uint256() const
{
    return Amendment::getAmendmentId(name);
}
@@ -49,35 +49,45 @@ durationInMillisecondsSince(std::chrono::steady_clock::time_point const startTim
using namespace util::prometheus;

BackendCounters::BackendCounters()
    : tooBusyCounter_(PrometheusService::counterInt(
          "backend_too_busy_total_number",
          Labels(),
          "The total number of times the backend was too busy to process a request"
      ))
    , writeSyncCounter_(PrometheusService::counterInt(
          "backend_operations_total_number",
          Labels({Label{"operation", "write_sync"}}),
          "The total number of times the backend had to write synchronously"
      ))
    , writeSyncRetryCounter_(PrometheusService::counterInt(
          "backend_operations_total_number",
          Labels({Label{"operation", "write_sync_retry"}}),
          "The total number of times the backend had to retry a synchronous write"
      ))
    : tooBusyCounter_(
          PrometheusService::counterInt(
              "backend_too_busy_total_number",
              Labels(),
              "The total number of times the backend was too busy to process a request"
          )
      )
    , writeSyncCounter_(
          PrometheusService::counterInt(
              "backend_operations_total_number",
              Labels({Label{"operation", "write_sync"}}),
              "The total number of times the backend had to write synchronously"
          )
      )
    , writeSyncRetryCounter_(
          PrometheusService::counterInt(
              "backend_operations_total_number",
              Labels({Label{"operation", "write_sync_retry"}}),
              "The total number of times the backend had to retry a synchronous write"
          )
      )
    , asyncWriteCounters_{"write_async"}
    , asyncReadCounters_{"read_async"}
    , readDurationHistogram_(PrometheusService::histogramInt(
          "backend_duration_milliseconds_histogram",
          Labels({Label{"operation", "read"}}),
          kHISTOGRAM_BUCKETS,
          "The duration of backend read operations including retries"
      ))
    , writeDurationHistogram_(PrometheusService::histogramInt(
          "backend_duration_milliseconds_histogram",
          Labels({Label{"operation", "write"}}),
          kHISTOGRAM_BUCKETS,
          "The duration of backend write operations including retries"
      ))
    , readDurationHistogram_(
          PrometheusService::histogramInt(
              "backend_duration_milliseconds_histogram",
              Labels({Label{"operation", "read"}}),
              kHISTOGRAM_BUCKETS,
              "The duration of backend read operations including retries"
          )
      )
    , writeDurationHistogram_(
          PrometheusService::histogramInt(
              "backend_duration_milliseconds_histogram",
              Labels({Label{"operation", "write"}}),
              kHISTOGRAM_BUCKETS,
              "The duration of backend write operations including retries"
          )
      )
{
}

@@ -170,26 +180,34 @@ BackendCounters::report() const

BackendCounters::AsyncOperationCounters::AsyncOperationCounters(std::string name)
    : name_(std::move(name))
    , pendingCounter_(PrometheusService::gaugeInt(
          "backend_operations_current_number",
          Labels({{"operation", name_}, {"status", "pending"}}),
          "The current number of pending " + name_ + " operations"
      ))
    , completedCounter_(PrometheusService::counterInt(
          "backend_operations_total_number",
          Labels({{"operation", name_}, {"status", "completed"}}),
          "The total number of completed " + name_ + " operations"
      ))
    , retryCounter_(PrometheusService::counterInt(
          "backend_operations_total_number",
          Labels({{"operation", name_}, {"status", "retry"}}),
          "The total number of retried " + name_ + " operations"
      ))
    , errorCounter_(PrometheusService::counterInt(
          "backend_operations_total_number",
          Labels({{"operation", name_}, {"status", "error"}}),
          "The total number of errored " + name_ + " operations"
      ))
    , pendingCounter_(
          PrometheusService::gaugeInt(
              "backend_operations_current_number",
              Labels({{"operation", name_}, {"status", "pending"}}),
              "The current number of pending " + name_ + " operations"
          )
      )
    , completedCounter_(
          PrometheusService::counterInt(
              "backend_operations_total_number",
              Labels({{"operation", name_}, {"status", "completed"}}),
              "The total number of completed " + name_ + " operations"
          )
      )
    , retryCounter_(
          PrometheusService::counterInt(
              "backend_operations_total_number",
              Labels({{"operation", name_}, {"status", "retry"}}),
              "The total number of retried " + name_ + " operations"
          )
      )
    , errorCounter_(
          PrometheusService::counterInt(
              "backend_operations_total_number",
              Labels({{"operation", name_}, {"status", "error"}}),
              "The total number of errored " + name_ + " operations"
          )
      )
{
}
@@ -234,8 +234,12 @@ public:
 * @return A vector of ripple::uint256 representing the account roots
 */
virtual std::vector<ripple::uint256>
fetchAccountRoots(std::uint32_t number, std::uint32_t pageSize, std::uint32_t seq, boost::asio::yield_context yield)
    const = 0;
fetchAccountRoots(
    std::uint32_t number,
    std::uint32_t pageSize,
    std::uint32_t seq,
    boost::asio::yield_context yield
) const = 0;

/**
 * @brief Updates the range of sequences that are stored in the DB.
@@ -459,8 +463,11 @@ public:
 * @return The sequence in uint32_t on success; nullopt otherwise
 */
virtual std::optional<std::uint32_t>
doFetchLedgerObjectSeq(ripple::uint256 const& key, std::uint32_t sequence, boost::asio::yield_context yield)
    const = 0;
doFetchLedgerObjectSeq(
    ripple::uint256 const& key,
    std::uint32_t sequence,
    boost::asio::yield_context yield
) const = 0;

/**
 * @brief The database-specific implementation for fetching ledger objects.
@@ -40,7 +40,7 @@
#include <boost/uuid/string_generator.hpp>
#include <boost/uuid/uuid.hpp>
#include <cassandra.h>
#include <fmt/core.h>
#include <fmt/format.h>
#include <xrpl/basics/Blob.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/basics/strHex.h>
@@ -361,8 +361,10 @@ public:
}

std::vector<ripple::uint256>
fetchAllTransactionHashesInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
    const override
fetchAllTransactionHashesInLedger(
    std::uint32_t const ledgerSequence,
    boost::asio::yield_context yield
) const override
{
    auto start = std::chrono::system_clock::now();
    auto const res = executor_.read(yield, schema_->selectAllTransactionHashesInLedger, ledgerSequence);
@@ -392,8 +394,11 @@ public:
}

std::optional<NFT>
fetchNFT(ripple::uint256 const& tokenID, std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
    const override
fetchNFT(
    ripple::uint256 const& tokenID,
    std::uint32_t const ledgerSequence,
    boost::asio::yield_context yield
) const override
{
    auto const res = executor_.read(yield, schema_->selectNFT, tokenID, ledgerSequence);
    if (not res)
@@ -554,10 +559,9 @@ public:
    selectNFTStatements.reserve(nftIDs.size());

    std::transform(
        std::cbegin(nftIDs),
        std::cend(nftIDs),
        std::back_inserter(selectNFTStatements),
        [&](auto const& nftID) { return schema_->selectNFT.bind(nftID, ledgerSequence); }
        std::cbegin(nftIDs), std::cend(nftIDs), std::back_inserter(selectNFTStatements), [&](auto const& nftID) {
            return schema_->selectNFT.bind(nftID, ledgerSequence);
        }
    );

    auto const nftInfos = executor_.readEach(yield, selectNFTStatements);
@@ -566,10 +570,9 @@ public:
    selectNFTURIStatements.reserve(nftIDs.size());

    std::transform(
        std::cbegin(nftIDs),
        std::cend(nftIDs),
        std::back_inserter(selectNFTURIStatements),
        [&](auto const& nftID) { return schema_->selectNFTURI.bind(nftID, ledgerSequence); }
        std::cbegin(nftIDs), std::cend(nftIDs), std::back_inserter(selectNFTURIStatements), [&](auto const& nftID) {
            return schema_->selectNFTURI.bind(nftID, ledgerSequence);
        }
    );

    auto const nftUris = executor_.readEach(yield, selectNFTURIStatements);
@@ -626,8 +629,11 @@ public:
}

std::optional<Blob>
doFetchLedgerObject(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context yield)
    const override
doFetchLedgerObject(
    ripple::uint256 const& key,
    std::uint32_t const sequence,
    boost::asio::yield_context yield
) const override
{
    LOG(log_.debug()) << "Fetching ledger object for seq " << sequence << ", key = " << ripple::to_string(key);
    if (auto const res = executor_.read(yield, schema_->selectObject, key, sequence); res) {
@@ -645,8 +651,11 @@ public:
}

std::optional<std::uint32_t>
doFetchLedgerObjectSeq(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context yield)
    const override
doFetchLedgerObjectSeq(
    ripple::uint256 const& key,
    std::uint32_t const sequence,
    boost::asio::yield_context yield
) const override
{
    LOG(log_.debug()) << "Fetching ledger object for seq " << sequence << ", key = " << ripple::to_string(key);
    if (auto const res = executor_.read(yield, schema_->selectObject, key, sequence); res) {
@@ -680,8 +689,11 @@ public:
}

std::optional<ripple::uint256>
doFetchSuccessorKey(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
    const override
doFetchSuccessorKey(
    ripple::uint256 key,
    std::uint32_t const ledgerSequence,
    boost::asio::yield_context yield
) const override
{
    if (auto const res = executor_.read(yield, schema_->selectSuccessor, key, ledgerSequence); res) {
        if (auto const result = res->template get<ripple::uint256>(); result) {
@@ -714,10 +726,9 @@ public:
    auto const timeDiff = util::timed([this, yield, &results, &hashes, &statements]() {
        // TODO: seems like a job for "hash IN (list of hashes)" instead?
        std::transform(
            std::cbegin(hashes),
            std::cend(hashes),
            std::back_inserter(statements),
            [this](auto const& hash) { return schema_->selectTransaction.bind(hash); }
            std::cbegin(hashes), std::cend(hashes), std::back_inserter(statements), [this](auto const& hash) {
                return schema_->selectTransaction.bind(hash);
            }
        );

        auto const entries = executor_.readEach(yield, statements);
@@ -761,18 +772,14 @@ public:

    // TODO: seems like a job for "key IN (list of keys)" instead?
    std::transform(
        std::cbegin(keys),
        std::cend(keys),
        std::back_inserter(statements),
        [this, &sequence](auto const& key) { return schema_->selectObject.bind(key, sequence); }
        std::cbegin(keys), std::cend(keys), std::back_inserter(statements), [this, &sequence](auto const& key) {
            return schema_->selectObject.bind(key, sequence);
        }
    );

    auto const entries = executor_.readEach(yield, statements);
    std::transform(
        std::cbegin(entries),
        std::cend(entries),
        std::back_inserter(results),
        [](auto const& res) -> Blob {
        std::cbegin(entries), std::cend(entries), std::back_inserter(results), [](auto const& res) -> Blob {
            if (auto const maybeValue = res.template get<Blob>(); maybeValue)
                return *maybeValue;

@@ -785,8 +792,12 @@ public:
}

std::vector<ripple::uint256>
fetchAccountRoots(std::uint32_t number, std::uint32_t pageSize, std::uint32_t seq, boost::asio::yield_context yield)
    const override
fetchAccountRoots(
    std::uint32_t number,
    std::uint32_t pageSize,
    std::uint32_t seq,
    boost::asio::yield_context yield
) const override
{
    std::vector<ripple::uint256> liveAccounts;
    std::optional<ripple::AccountID> lastItem;
@@ -198,39 +198,6 @@ struct MPTHolderData {
    ripple::AccountID holder;
};

/**
 * @brief Check whether the supplied object is an offer.
 *
 * @param object The object to check
 * @return true if the object is an offer; false otherwise
 */
template <typename T>
inline bool
isOffer(T const& object)
{
    static constexpr short kOFFER_OFFSET = 0x006f;
    static constexpr short kSHIFT = 8;

    short offerBytes = (object[1] << kSHIFT) | object[2];
    return offerBytes == kOFFER_OFFSET;
}

/**
 * @brief Check whether the supplied hex represents an offer object.
 *
 * @param object The object to check
 * @return true if the object is an offer; false otherwise
 */
template <typename T>
inline bool
isOfferHex(T const& object)
{
    auto blob = ripple::strUnHex(4, object.begin(), object.begin() + 4);
    if (blob)
        return isOffer(*blob);
    return false;
}

/**
 * @brief Check whether the supplied object is a dir node.
 *
@@ -241,6 +208,10 @@ template <typename T>
inline bool
isDirNode(T const& object)
{
    static constexpr auto kMIN_SIZE_REQUIRED = 3;
    if (std::size(object) < kMIN_SIZE_REQUIRED)
        return false;

    static constexpr short kDIR_NODE_SPACE_KEY = 0x0064;
    short const spaceKey = (object.data()[1] << 8) | object.data()[2];
    return spaceKey == kDIR_NODE_SPACE_KEY;
@@ -264,23 +235,6 @@ isBookDir(T const& key, R const& object)
    return !sle[~ripple::sfOwner].has_value();
}

/**
 * @brief Get the book out of an offer object.
 *
 * @param offer The offer to get the book for
 * @return Book as ripple::uint256
 */
template <typename T>
inline ripple::uint256
getBook(T const& offer)
{
    ripple::SerialIter it{offer.data(), offer.size()};
    ripple::SLE const sle{it, {}};
    ripple::uint256 book = sle.getFieldH256(ripple::sfBookDirectory);

    return book;
}

/**
 * @brief Get the book base.
 *
@@ -6,7 +6,7 @@ To support additional database types, you can create new classes that implement
|
||||
|
||||
## Data Model
|
||||
|
||||
The data model used by Clio to read and write ledger data is different from what `rippled` uses. `rippled` uses a novel data structure named [_SHAMap_](https://github.com/ripple/rippled/blob/master/src/ripple/shamap/README.md), which is a combination of a Merkle Tree and a Radix Trie. In a SHAMap, ledger objects are stored in the root vertices of the tree. Thus, looking up a record located at the leaf node of the SHAMap executes a tree search, where the path from the root node to the leaf node is the key of the record.
|
||||
The data model used by Clio to read and write ledger data is different from what `rippled` uses. `rippled` uses a novel data structure named [_SHAMap_](https://github.com/XRPLF/rippled/blob/develop/src/xrpld/shamap/README.md), which is a combination of a Merkle Tree and a Radix Trie. In a SHAMap, ledger objects are stored in the root vertices of the tree. Thus, looking up a record located at the leaf node of the SHAMap executes a tree search, where the path from the root node to the leaf node is the key of the record.
|
||||
|
||||
`rippled` nodes can also generate a proof-tree by forming a subtree with all the path nodes and their neighbors, which can then be used to prove the existence of the leaf node data to other `rippled` nodes. In short, the main purpose of the SHAMap data structure is to facilitate the fast validation of data integrity between different decentralized `rippled` nodes.
|
||||
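To make the tree-search vs. flat-lookup contrast concrete, here is a minimal sketch; the types and helper names are hypothetical illustrations, not rippled's or Clio's actual API:

```cpp
#include <array>
#include <cstdint>
#include <map>
#include <memory>
#include <optional>
#include <vector>

using Key = std::array<std::uint8_t, 32>;  // 256-bit object key
using Blob = std::vector<std::uint8_t>;

// SHAMap-style lookup: walk inner nodes nibble by nibble along the key path.
struct TreeNode {
    std::array<std::unique_ptr<TreeNode>, 16> children;  // radix-16 fanout
    std::optional<Blob> leaf;                            // set only on leaf nodes
};

std::optional<Blob>
treeLookup(TreeNode const& root, Key const& key)
{
    TreeNode const* node = &root;
    for (auto byte : key) {
        for (auto nibble : {byte >> 4, byte & 0x0F}) {
            if (node->leaf)
                return node->leaf;  // reached a leaf on the path
            if (!node->children[nibble])
                return std::nullopt;  // no record under this key
            node = node->children[nibble].get();
        }
    }
    return node->leaf;
}

// Clio-style lookup: the key addresses the record directly, no tree walk.
std::optional<Blob>
flatLookup(std::map<Key, Blob> const& table, Key const& key)
{
    if (auto it = table.find(key); it != table.end())
        return it->second;
    return std::nullopt;
}
```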

@@ -99,7 +99,7 @@ public:
    connect() const;

    /**
     * @brief Connect to the the specified keyspace asynchronously.
     * @brief Connect to the specified keyspace asynchronously.
     *
     * @param keyspace The keyspace to use
     * @return A future
@@ -137,7 +137,7 @@ public:
    disconnect() const;

    /**
     * @brief Reconnect to the the specified keyspace asynchronously.
     * @brief Reconnect to the specified keyspace asynchronously.
     *
     * @param keyspace The keyspace to use
     * @return A future
File diff suppressed because it is too large
@@ -25,7 +25,7 @@
#include "util/log/Logger.hpp"

#include <cassandra.h>
#include <fmt/core.h>
#include <fmt/format.h>

#include <stdexcept>
#include <string>
@@ -45,7 +45,8 @@ Cluster::Cluster(Settings const& settings) : ManagedObject{cass_cluster_new(), k

    cass_cluster_set_token_aware_routing(*this, cass_true);
    if (auto const rc = cass_cluster_set_protocol_version(*this, CASS_PROTOCOL_VERSION_V4); rc != CASS_OK) {
        throw std::runtime_error(fmt::format("Error setting cassandra protocol version to v4: {}", cass_error_desc(rc))
        throw std::runtime_error(
            fmt::format("Error setting cassandra protocol version to v4: {}", cass_error_desc(rc))
        );
    }

@@ -45,11 +45,13 @@ public:
     * @brief Create a new retry policy instance with the io_context provided
     */
    ExponentialBackoffRetryPolicy(boost::asio::io_context& ioc)
        : retry_(util::makeRetryExponentialBackoff(
              std::chrono::milliseconds(1),
              std::chrono::seconds(1),
              boost::asio::make_strand(ioc)
          ))
        : retry_(
              util::makeRetryExponentialBackoff(
                  std::chrono::milliseconds(1),
                  std::chrono::seconds(1),
                  boost::asio::make_strand(ioc)
              )
          )
    {
    }

@@ -28,7 +28,7 @@
#include <boost/uuid/uuid.hpp>
#include <boost/uuid/uuid_io.hpp>
#include <cassandra.h>
#include <fmt/core.h>
#include <fmt/format.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/protocol/AccountID.h>
#include <xrpl/protocol/STAccount.h>

@@ -22,7 +22,6 @@
#include "data/BackendInterface.hpp"
#include "etl/CacheLoader.hpp"
#include "etl/CorruptionDetector.hpp"
#include "etl/ETLState.hpp"
#include "etl/LoadBalancer.hpp"
#include "etl/NetworkValidatedLedgersInterface.hpp"
#include "etl/SystemState.hpp"
@@ -38,6 +37,7 @@
#include "etlng/LoadBalancer.hpp"
#include "etlng/LoadBalancerInterface.hpp"
#include "etlng/impl/LedgerPublisher.hpp"
#include "etlng/impl/MonitorProvider.hpp"
#include "etlng/impl/TaskManagerProvider.hpp"
#include "etlng/impl/ext/Cache.hpp"
#include "etlng/impl/ext/Core.hpp"
@@ -86,6 +86,7 @@ ETLService::makeETLService(
    );

    auto state = std::make_shared<etl::SystemState>();
    state->isStrictReadonly = config.get<bool>("read_only");

    auto fetcher = std::make_shared<etl::impl::LedgerFetcher>(backend, balancer);
    auto extractor = std::make_shared<etlng::impl::Extractor>(fetcher);
@@ -93,6 +94,9 @@ ETLService::makeETLService(
    auto cacheLoader = std::make_shared<etl::CacheLoader<>>(config, backend, backend->cache());
    auto cacheUpdater = std::make_shared<etlng::impl::CacheUpdater>(backend->cache());
    auto amendmentBlockHandler = std::make_shared<etlng::impl::AmendmentBlockHandler>(ctx, *state);
    auto monitorProvider = std::make_shared<etlng::impl::MonitorProvider>();

    backend->setCorruptionDetector(CorruptionDetector{*state, backend->cache()});

    auto loader = std::make_shared<etlng::impl::Loader>(
        backend,
@@ -104,7 +108,8 @@ ETLService::makeETLService(
            etlng::impl::NFTExt{backend},
            etlng::impl::MPTExt{backend}
        ),
        amendmentBlockHandler
        amendmentBlockHandler,
        state
    );

    auto taskManagerProvider = std::make_shared<etlng::impl::TaskManagerProvider>(*ledgers, extractor, loader);
@@ -122,6 +127,7 @@ ETLService::makeETLService(
        loader,  // loader itself
        loader,  // initial load observer
        taskManagerProvider,
        monitorProvider,
        state
    );
} else {
@@ -165,9 +171,11 @@ ETLService::runETLPipeline(uint32_t startSequence, uint32_t numExtractors)
    auto pipe = DataPipeType{numExtractors, startSequence};

    for (auto i = 0u; i < numExtractors; ++i) {
        extractors.push_back(std::make_unique<ExtractorType>(
            pipe, networkValidatedLedgers_, ledgerFetcher_, startSequence + i, finishSequence_, state_
        ));
        extractors.push_back(
            std::make_unique<ExtractorType>(
                pipe, networkValidatedLedgers_, ledgerFetcher_, startSequence + i, finishSequence_, state_
            )
        );
    }

    auto transformer =
@@ -346,7 +354,7 @@ ETLService::doWork()
    worker_ = std::thread([this]() {
        beast::setCurrentThreadName("ETLService worker");

        if (state_.isReadOnly) {
        if (state_.isStrictReadonly) {
            monitorReadOnly();
        } else {
            monitor();
@@ -373,7 +381,7 @@ ETLService::ETLService(
{
    startSequence_ = config.maybeValue<uint32_t>("start_sequence");
    finishSequence_ = config.maybeValue<uint32_t>("finish_sequence");
    state_.isReadOnly = config.get<bool>("read_only");
    state_.isStrictReadonly = config.get<bool>("read_only");
    extractorThreads_ = config.get<uint32_t>("extractor_threads");

    // This should probably be done in the backend factory but we don't have state available until here

@@ -239,7 +239,7 @@ public:

    result["etl_sources"] = loadBalancer_->toJson();
    result["is_writer"] = static_cast<int>(state_.isWriting);
    result["read_only"] = static_cast<int>(state_.isReadOnly);
    result["read_only"] = static_cast<int>(state_.isStrictReadonly);
    auto last = ledgerPublisher_.getLastPublish();
    if (last.time_since_epoch().count() != 0)
        result["last_publish_age_seconds"] = std::to_string(ledgerPublisher_.lastPublishAgeSeconds());

@@ -44,7 +44,7 @@
#include <boost/json/object.hpp>
#include <boost/json/value.hpp>
#include <boost/json/value_to.hpp>
#include <fmt/core.h>
#include <fmt/format.h>

#include <algorithm>
#include <chrono>
@@ -57,7 +57,6 @@
#include <string>
#include <thread>
#include <utility>
#include <variant>
#include <vector>

using namespace util::config;
@@ -184,11 +183,14 @@ LoadBalancer::LoadBalancer(
            LOG(log_.warn()) << "Failed to fetch ETL state from source = " << source->toString()
                             << " Please check the configuration and network";
        } else if (etlState_ && etlState_->networkID != stateOpt->networkID) {
            checkOnETLFailure(fmt::format(
                "ETL sources must be on the same network. Source network id = {} does not match others network id = {}",
                stateOpt->networkID,
                etlState_->networkID
            ));
            checkOnETLFailure(
                fmt::format(
                    "ETL sources must be on the same network. Source network id = {} does not match others network id "
                    "= {}",
                    stateOpt->networkID,
                    etlState_->networkID
                )
            );
        } else {
            etlState_ = stateOpt;
        }
@@ -275,42 +277,46 @@ LoadBalancer::forwardToRippled(
        return std::unexpected{rpc::ClioError::RpcCommandIsMissing};

    auto const cmd = boost::json::value_to<std::string>(request.at("command"));

    if (forwardingCache_ and forwardingCache_->shouldCache(cmd)) {
        bool servedFromCache = true;
        auto updater =
            [this, &request, &clientIp, &servedFromCache, isAdmin](boost::asio::yield_context yield
            ) -> std::expected<util::ResponseExpirationCache::EntryData, util::ResponseExpirationCache::Error> {
            servedFromCache = false;
            auto result = forwardToRippledImpl(request, clientIp, isAdmin, yield);
            if (result.has_value()) {
                return util::ResponseExpirationCache::EntryData{
                    .lastUpdated = std::chrono::steady_clock::now(), .response = std::move(result).value()
                };
            }
            return std::unexpected{
                util::ResponseExpirationCache::Error{.status = rpc::Status{result.error()}, .warnings = {}}
            };
        };

        auto result = forwardingCache_->getOrUpdate(
            yield,
            cmd,
            std::move(updater),
            [](util::ResponseExpirationCache::EntryData const& entry) { return not entry.response.contains("error"); }
        );
        if (servedFromCache) {
            ++forwardingCounters_.cacheHit.get();
    if (forwardingCache_) {
        if (auto cachedResponse = forwardingCache_->get(cmd); cachedResponse) {
            forwardingCounters_.cacheHit.get() += 1;
            return std::move(cachedResponse).value();
        }
        if (result.has_value()) {
            return std::move(result).value();
        }
    forwardingCounters_.cacheMiss.get() += 1;

    ASSERT(not sources_.empty(), "ETL sources must be configured to forward requests.");
    std::size_t sourceIdx = randomGenerator_->uniform(0ul, sources_.size() - 1);

    auto numAttempts = 0u;

    auto xUserValue = isAdmin ? kADMIN_FORWARDING_X_USER_VALUE : kUSER_FORWARDING_X_USER_VALUE;

    std::optional<boost::json::object> response;
    rpc::ClioError error = rpc::ClioError::EtlConnectionError;
    while (numAttempts < sources_.size()) {
        auto [res, duration] =
            util::timed([&]() { return sources_[sourceIdx]->forwardToRippled(request, clientIp, xUserValue, yield); });
        if (res) {
            forwardingCounters_.successDuration.get() += duration;
            response = std::move(res).value();
            break;
        }
        auto const combinedError = result.error().status.code;
        ASSERT(std::holds_alternative<rpc::ClioError>(combinedError), "There could be only ClioError here");
        return std::unexpected{std::get<rpc::ClioError>(combinedError)};
        forwardingCounters_.failDuration.get() += duration;
        ++forwardingCounters_.retries.get();
        error = std::max(error, res.error());  // Choose the best result between all sources

        sourceIdx = (sourceIdx + 1) % sources_.size();
        ++numAttempts;
    }

    return forwardToRippledImpl(request, clientIp, isAdmin, yield);
    if (response) {
        if (forwardingCache_ and not response->contains("error"))
            forwardingCache_->put(cmd, *response);
        return std::move(response).value();
    }

    return std::unexpected{error};
}

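For readers following the caching branch above: it is a read-through pattern — serve a fresh cached response when present, otherwise run an updater (which forwards the request to a source) and memoize the result when it carries no error. Below is a reduced, self-contained sketch of that control flow using simplified hypothetical types; it is not the real util::ResponseExpirationCache API:

```cpp
#include <chrono>
#include <functional>
#include <map>
#include <string>

struct Entry {
    std::chrono::steady_clock::time_point lastUpdated;
    std::string response;
};

class ReadThroughCache {
    std::map<std::string, Entry> entries_;
    std::chrono::steady_clock::duration ttl_;

public:
    explicit ReadThroughCache(std::chrono::steady_clock::duration ttl) : ttl_(ttl) {}

    // Returns the cached entry if it is still fresh; otherwise invokes updater
    // and caches the result only when shouldCache approves it (e.g. the
    // response contains no "error" field).
    std::string
    getOrUpdate(
        std::string const& key,
        std::function<Entry()> updater,
        std::function<bool(Entry const&)> shouldCache
    )
    {
        auto it = entries_.find(key);
        if (it != entries_.end() && std::chrono::steady_clock::now() - it->second.lastUpdated < ttl_)
            return it->second.response;  // cache hit

        auto entry = updater();  // cache miss: forward to a source
        if (shouldCache(entry))
            entries_[key] = entry;
        return entry.response;
    }
};
```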
boost::json::value
@@ -401,47 +407,4 @@ LoadBalancer::chooseForwardingSource()
    }
}

std::expected<boost::json::object, rpc::CombinedError>
LoadBalancer::forwardToRippledImpl(
    boost::json::object const& request,
    std::optional<std::string> const& clientIp,
    bool const isAdmin,
    boost::asio::yield_context yield
)
{
    ++forwardingCounters_.cacheMiss.get();

    ASSERT(not sources_.empty(), "ETL sources must be configured to forward requests.");
    std::size_t sourceIdx = randomGenerator_->uniform(0ul, sources_.size() - 1);

    auto numAttempts = 0u;

    auto xUserValue = isAdmin ? kADMIN_FORWARDING_X_USER_VALUE : kUSER_FORWARDING_X_USER_VALUE;

    std::optional<boost::json::object> response;
    rpc::ClioError error = rpc::ClioError::EtlConnectionError;
    while (numAttempts < sources_.size()) {
        auto [res, duration] =
            util::timed([&]() { return sources_[sourceIdx]->forwardToRippled(request, clientIp, xUserValue, yield); });

        if (res) {
            forwardingCounters_.successDuration.get() += duration;
            response = std::move(res).value();
            break;
        }
        forwardingCounters_.failDuration.get() += duration;
        ++forwardingCounters_.retries.get();
        error = std::max(error, res.error());  // Choose the best result between all sources

        sourceIdx = (sourceIdx + 1) % sources_.size();
        ++numAttempts;
    }

    if (response.has_value()) {
        return std::move(response).value();
    }

    return std::unexpected{error};
}

} // namespace etl

@@ -49,6 +49,7 @@
#include <concepts>
#include <cstdint>
#include <expected>
#include <functional>
#include <memory>
#include <optional>
#include <string>
@@ -172,19 +173,21 @@ public:
     * @return A std::vector<std::string> The ledger data
     */
    std::vector<std::string>
    loadInitialLedger(uint32_t sequence, std::chrono::steady_clock::duration retryAfter = std::chrono::seconds{2})
        override;
    loadInitialLedger(
        uint32_t sequence,
        std::chrono::steady_clock::duration retryAfter = std::chrono::seconds{2}
    ) override;

    /**
     * @brief Load the initial ledger, writing data to the queue.
     * @note This function will retry indefinitely until the ledger is downloaded.
     * @note This function will retry indefinitely until the ledger is downloaded or the download is cancelled.
     *
     * @param sequence Sequence of ledger to download
     * @param observer The observer to notify of progress
     * @param retryAfter Time to wait between retries (2 seconds by default)
     * @return A std::vector<std::string> The ledger data
     * @return A std::expected with ledger edge keys on success, or InitialLedgerLoadError on failure
     */
    std::vector<std::string>
    etlng::InitialLedgerLoadResult
    loadInitialLedger(
        [[maybe_unused]] uint32_t sequence,
        [[maybe_unused]] etlng::InitialLoadObserverInterface& observer,
@@ -279,14 +282,6 @@ private:
     */
    void
    chooseForwardingSource();

    std::expected<boost::json::object, rpc::CombinedError>
    forwardToRippledImpl(
        boost::json::object const& request,
        std::optional<std::string> const& clientIp,
        bool isAdmin,
        boost::asio::yield_context yield
    );
};

} // namespace etl

@@ -18,8 +18,9 @@
//==============================================================================

#include "data/DBHelpers.hpp"
#include "util/Assert.hpp"

#include <fmt/core.h>
#include <fmt/format.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/basics/strHex.h>
#include <xrpl/protocol/AccountID.h>
@@ -138,7 +139,8 @@ getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
    // There should always be a difference so the returned finalIDs
    // iterator should never be end(). But better safe than sorry.
    if (finalIDs.size() != prevIDs.size() + 1 || diff.first == finalIDs.end() || !owner) {
        throw std::runtime_error(fmt::format(" - unexpected NFTokenMint data in tx {}", strHex(sttx.getTransactionID()))
        throw std::runtime_error(
            fmt::format(" - unexpected NFTokenMint data in tx {}", strHex(sttx.getTransactionID()))
        );
    }

@@ -358,14 +360,18 @@ getNFTDataFromTx(ripple::TxMeta const& txMeta, ripple::STTx const& sttx)
std::vector<NFTsData>
getNFTDataFromObj(std::uint32_t const seq, std::string const& key, std::string const& blob)
{
    std::vector<NFTsData> nfts;
    ripple::STLedgerEntry const sle =
    // https://github.com/XRPLF/XRPL-Standards/tree/master/XLS-0020-non-fungible-tokens#tokenpage-id-format
    ASSERT(key.size() == ripple::uint256::size(), "The size of the key (token) is expected to fit uint256 exactly");

    auto const sle =
        ripple::STLedgerEntry(ripple::SerialIter{blob.data(), blob.size()}, ripple::uint256::fromVoid(key.data()));

    if (sle.getFieldU16(ripple::sfLedgerEntryType) != ripple::ltNFTOKEN_PAGE)
        return nfts;
        return {};

    auto const owner = ripple::AccountID::fromVoid(key.data());
    std::vector<NFTsData> nfts;

    for (ripple::STObject const& node : sle.getFieldArray(ripple::sfNFTokens))
        nfts.emplace_back(node.getFieldH256(ripple::sfNFTokenID), seq, owner, node.getFieldVL(ripple::sfURI));

@@ -37,7 +37,7 @@ struct SystemState {
     * In strict read-only mode, the process will never attempt to become the ETL writer, and will only publish ledgers
     * as they are written to the database.
     */
    util::prometheus::Bool isReadOnly = PrometheusService::boolMetric(
    util::prometheus::Bool isStrictReadonly = PrometheusService::boolMetric(
        "read_only",
        util::prometheus::Labels{},
        "Whether the process is in strict read-only mode"

@@ -94,8 +94,8 @@ private:
    double totalTime = 0.0;
    auto currentSequence = startSequence_;

    while (!shouldFinish(currentSequence) && networkValidatedLedgers_->waitUntilValidatedByNetwork(currentSequence)
    ) {
    while (!shouldFinish(currentSequence) &&
           networkValidatedLedgers_->waitUntilValidatedByNetwork(currentSequence)) {
        auto [fetchResponse, time] = ::util::timed<std::chrono::duration<double>>([this, currentSequence]() {
            return ledgerFetcher_.get().fetchDataAndDiff(currentSequence);
        });

@@ -28,7 +28,7 @@
#include <boost/json/object.hpp>
#include <boost/json/parse.hpp>
#include <boost/json/serialize.hpp>
#include <fmt/core.h>
#include <fmt/format.h>

#include <chrono>
#include <exception>

@@ -26,7 +26,7 @@

#include <boost/asio/io_context.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <fmt/core.h>
#include <fmt/format.h>
#include <grpcpp/client_context.h>
#include <grpcpp/security/credentials.h>
#include <grpcpp/support/channel_arguments.h>

@@ -209,47 +209,49 @@ public:
        size_t numWrites = 0;
        backend_->cache().setFull();

        auto seconds = ::util::timed<std::chrono::seconds>([this, keys = std::move(edgeKeys), sequence, &numWrites](
                                                           ) mutable {
            for (auto& key : keys) {
                LOG(log_.debug()) << "Writing edge key = " << ripple::strHex(key);
                auto succ = backend_->cache().getSuccessor(*ripple::uint256::fromVoidChecked(key), sequence);
                if (succ)
                    backend_->writeSuccessor(std::move(key), sequence, uint256ToString(succ->key));
            }

            ripple::uint256 prev = data::kFIRST_KEY;
            while (auto cur = backend_->cache().getSuccessor(prev, sequence)) {
                ASSERT(cur.has_value(), "Successor for key {} must exist", ripple::strHex(prev));
                if (prev == data::kFIRST_KEY)
                    backend_->writeSuccessor(uint256ToString(prev), sequence, uint256ToString(cur->key));

                if (isBookDir(cur->key, cur->blob)) {
                    auto base = getBookBase(cur->key);
                    // make sure the base is not an actual object
                    if (!backend_->cache().get(base, sequence)) {
                        auto succ = backend_->cache().getSuccessor(base, sequence);
                        ASSERT(succ.has_value(), "Book base {} must have a successor", ripple::strHex(base));
                        if (succ->key == cur->key) {
                            LOG(log_.debug()) << "Writing book successor = " << ripple::strHex(base) << " - "
                                              << ripple::strHex(cur->key);

                            backend_->writeSuccessor(uint256ToString(base), sequence, uint256ToString(cur->key));
                        }
                    }

                    ++numWrites;
        auto seconds =
            ::util::timed<std::chrono::seconds>([this, keys = std::move(edgeKeys), sequence, &numWrites]() mutable {
                for (auto& key : keys) {
                    LOG(log_.debug()) << "Writing edge key = " << ripple::strHex(key);
                    auto succ = backend_->cache().getSuccessor(*ripple::uint256::fromVoidChecked(key), sequence);
                    if (succ)
                        backend_->writeSuccessor(std::move(key), sequence, uint256ToString(succ->key));
                }

                prev = cur->key;
                static constexpr std::size_t kLOG_INTERVAL = 100000;
                if (numWrites % kLOG_INTERVAL == 0 && numWrites != 0)
                    LOG(log_.info()) << "Wrote " << numWrites << " book successors";
            }
                ripple::uint256 prev = data::kFIRST_KEY;
                while (auto cur = backend_->cache().getSuccessor(prev, sequence)) {
                    ASSERT(cur.has_value(), "Successor for key {} must exist", ripple::strHex(prev));
                    if (prev == data::kFIRST_KEY)
                        backend_->writeSuccessor(uint256ToString(prev), sequence, uint256ToString(cur->key));

            backend_->writeSuccessor(uint256ToString(prev), sequence, uint256ToString(data::kLAST_KEY));
            ++numWrites;
        });
                    if (isBookDir(cur->key, cur->blob)) {
                        auto base = getBookBase(cur->key);
                        // make sure the base is not an actual object
                        if (!backend_->cache().get(base, sequence)) {
                            auto succ = backend_->cache().getSuccessor(base, sequence);
                            ASSERT(succ.has_value(), "Book base {} must have a successor", ripple::strHex(base));
                            if (succ->key == cur->key) {
                                LOG(log_.debug()) << "Writing book successor = " << ripple::strHex(base) << " - "
                                                  << ripple::strHex(cur->key);

                                backend_->writeSuccessor(
                                    uint256ToString(base), sequence, uint256ToString(cur->key)
                                );
                            }
                        }

                        ++numWrites;
                    }

                    prev = cur->key;
                    static constexpr std::size_t kLOG_STRIDE = 100000;
                    if (numWrites % kLOG_STRIDE == 0 && numWrites != 0)
                        LOG(log_.info()) << "Wrote " << numWrites << " book successors";
                }

                backend_->writeSuccessor(uint256ToString(prev), sequence, uint256ToString(data::kLAST_KEY));
                ++numWrites;
            });

        LOG(log_.info()) << "Looping through cache and submitting all writes took " << seconds
                         << " seconds. numWrites = " << std::to_string(numWrites);

@@ -249,8 +249,9 @@ public:
    std::chrono::time_point<std::chrono::system_clock>
    getLastPublish() const override
    {
        return std::chrono::time_point<std::chrono::system_clock>{std::chrono::seconds{lastPublishSeconds_.get().value()
        }};
        return std::chrono::time_point<std::chrono::system_clock>{
            std::chrono::seconds{lastPublishSeconds_.get().value()}
        };
    }

    /**

@@ -40,7 +40,7 @@
#include <boost/json/parse.hpp>
#include <boost/json/serialize.hpp>
#include <boost/json/value_to.hpp>
#include <fmt/core.h>
#include <fmt/format.h>
#include <xrpl/protocol/jss.h>

#include <algorithm>
@@ -79,11 +79,13 @@ SubscriptionSource::SubscriptionSource(
    , onConnect_(std::move(onConnect))
    , onDisconnect_(std::move(onDisconnect))
    , onLedgerClosed_(std::move(onLedgerClosed))
    , lastMessageTimeSecondsSinceEpoch_(PrometheusService::gaugeInt(
          "subscription_source_last_message_time",
          util::prometheus::Labels({{"source", fmt::format("{}:{}", ip, wsPort)}}),
          "Seconds since epoch of the last message received from rippled subscription streams"
      ))
    , lastMessageTimeSecondsSinceEpoch_(
          PrometheusService::gaugeInt(
              "subscription_source_last_message_time",
              util::prometheus::Labels({{"source", fmt::format("{}:{}", ip, wsPort)}}),
              "Seconds since epoch of the last message received from rippled subscription streams"
          )
      )
{
    wsConnectionBuilder_.addHeader({boost::beast::http::field::user_agent, "clio-client"})
        .addHeader({"X-User", "clio-client"})
@@ -329,9 +331,13 @@ SubscriptionSource::setValidatedRange(std::string range)
            pairs.emplace_back(sequence, sequence);
        } else {
            if (minAndMax.size() != 2) {
                throw std::runtime_error(fmt::format(
                    "Error parsing range: {}.Min and max should be of size 2. Got size = {}", range, minAndMax.size()
                ));
                throw std::runtime_error(
                    fmt::format(
                        "Error parsing range: {}.Min and max should be of size 2. Got size = {}",
                        range,
                        minAndMax.size()
                    )
                );
            }
            uint32_t const min = std::stoll(minAndMax[0]);
            uint32_t const max = std::stoll(minAndMax[1]);

@@ -34,7 +34,7 @@
#include <boost/asio/spawn.hpp>
#include <boost/asio/strand.hpp>
#include <boost/beast/http/field.hpp>
#include <fmt/core.h>
#include <fmt/format.h>

#include <atomic>
#include <chrono>

@@ -35,13 +35,13 @@
#include "etlng/LoadBalancerInterface.hpp"
#include "etlng/LoaderInterface.hpp"
#include "etlng/MonitorInterface.hpp"
#include "etlng/MonitorProviderInterface.hpp"
#include "etlng/TaskManagerProviderInterface.hpp"
#include "etlng/impl/AmendmentBlockHandler.hpp"
#include "etlng/impl/CacheUpdater.hpp"
#include "etlng/impl/Extraction.hpp"
#include "etlng/impl/LedgerPublisher.hpp"
#include "etlng/impl/Loading.hpp"
#include "etlng/impl/Monitor.hpp"
#include "etlng/impl/Registry.hpp"
#include "etlng/impl/Scheduling.hpp"
#include "etlng/impl/TaskManager.hpp"
@@ -56,7 +56,7 @@

#include <boost/json/object.hpp>
#include <boost/signals2/connection.hpp>
#include <fmt/core.h>
#include <xrpl/protocol/LedgerHeader.h>

#include <chrono>
#include <cstddef>
@@ -82,6 +82,7 @@ ETLService::ETLService(
    std::shared_ptr<LoaderInterface> loader,
    std::shared_ptr<InitialLoadObserverInterface> initialLoadObserver,
    std::shared_ptr<etlng::TaskManagerProviderInterface> taskManagerProvider,
    std::shared_ptr<etlng::MonitorProviderInterface> monitorProvider,
    std::shared_ptr<etl::SystemState> state
)
    : ctx_(std::move(ctx))
@@ -96,9 +97,20 @@ ETLService::ETLService(
    , loader_(std::move(loader))
    , initialLoadObserver_(std::move(initialLoadObserver))
    , taskManagerProvider_(std::move(taskManagerProvider))
    , monitorProvider_(std::move(monitorProvider))
    , state_(std::move(state))
    , startSequence_(config.get().maybeValue<uint32_t>("start_sequence"))
    , finishSequence_(config.get().maybeValue<uint32_t>("finish_sequence"))
{
    LOG(log_.info()) << "Creating ETLng...";
    ASSERT(not state_->isWriting, "ETL should never start in writer mode");

    if (startSequence_.has_value())
        LOG(log_.info()) << "Start sequence: " << *startSequence_;

    if (finishSequence_.has_value())
        LOG(log_.info()) << "Finish sequence: " << *finishSequence_;

    LOG(log_.info()) << "Starting in " << (state_->isStrictReadonly ? "STRICT READONLY MODE" : "WRITE MODE");
}

ETLService::~ETLService()
@@ -112,12 +124,7 @@ ETLService::run()
{
    LOG(log_.info()) << "Running ETLng...";

    // TODO: write-enabled node should start in readonly and do the 10 second dance to become a writer
    mainLoop_.emplace(ctx_.execute([this] {
        state_->isWriting =
            not state_->isReadOnly;  // TODO: this is now needed because we don't have a mechanism for readonly or
                                     // ETL writer node. remove later in favor of real mechanism

        auto const rng = loadInitialLedgerIfNeeded();

        LOG(log_.info()) << "Waiting for next ledger to be validated by network...";
@@ -129,15 +136,18 @@ ETLService::run()
            return;
        }

        ASSERT(rng.has_value(), "Ledger range can't be null");
        if (not rng.has_value()) {
            LOG(log_.warn()) << "Initial ledger download got cancelled - stopping ETL service";
            return;
        }

        auto const nextSequence = rng->maxSequence + 1;

        LOG(log_.debug()) << "Database is populated. Starting monitor loop. sequence = " << nextSequence;
        startMonitor(nextSequence);

        // TODO: we only want to run the full ETL task man if we are POSSIBLY a write node
        // but definitely not in strict readonly
        if (not state_->isReadOnly)
        // If we are a writer as the result of loading the initial ledger - start loading
        if (state_->isWriting)
            startLoading(nextSequence);
    }));
}
@@ -147,6 +157,8 @@ ETLService::stop()
{
    LOG(log_.info()) << "Stop called";

    if (mainLoop_)
        mainLoop_->wait();
    if (taskMan_)
        taskMan_->stop();
    if (monitor_)
@@ -160,7 +172,7 @@ ETLService::getInfo() const

    result["etl_sources"] = balancer_->toJson();
    result["is_writer"] = static_cast<int>(state_->isWriting);
    result["read_only"] = static_cast<int>(state_->isReadOnly);
    result["read_only"] = static_cast<int>(state_->isStrictReadonly);
    auto last = publisher_->getLastPublish();
    if (last.time_since_epoch().count() != 0)
        result["last_publish_age_seconds"] = std::to_string(publisher_->lastPublishAgeSeconds());
@@ -196,21 +208,40 @@ ETLService::loadInitialLedgerIfNeeded()
{
    auto rng = backend_->hardFetchLedgerRangeNoThrow();
    if (not rng.has_value()) {
        LOG(log_.info()) << "Database is empty. Will download a ledger from the network.";
        ASSERT(
            not state_->isStrictReadonly,
            "Database is empty but this node is in strict readonly mode. Can't write initial ledger."
        );

        LOG(log_.info()) << "Waiting for next ledger to be validated by network...";
        if (auto const mostRecentValidated = ledgers_->getMostRecent(); mostRecentValidated.has_value()) {
            auto const seq = *mostRecentValidated;
            LOG(log_.info()) << "Ledger " << seq << " has been validated. Downloading... ";
        LOG(log_.info()) << "Database is empty. Will download a ledger from the network.";
        state_->isWriting = true;  // immediately become writer as the db is empty

        auto const getMostRecent = [this]() {
            LOG(log_.info()) << "Waiting for next ledger to be validated by network...";
            return ledgers_->getMostRecent();
        };

        if (auto const maybeSeq = startSequence_.or_else(getMostRecent); maybeSeq.has_value()) {
            auto const seq = *maybeSeq;
            LOG(log_.info()) << "Starting from sequence " << seq
                             << ". Initial ledger download and extraction can take a while...";

            auto [ledger, timeDiff] = ::util::timed<std::chrono::duration<double>>([this, seq]() {
                return extractor_->extractLedgerOnly(seq).and_then([this, seq](auto&& data) {
                    // TODO: loadInitialLedger in balancer should be called fetchEdgeKeys or similar
                    data.edgeKeys = balancer_->loadInitialLedger(seq, *initialLoadObserver_);
                return extractor_->extractLedgerOnly(seq).and_then(
                    [this, seq](auto&& data) -> std::optional<ripple::LedgerHeader> {
                        // TODO: loadInitialLedger in balancer should be called fetchEdgeKeys or similar
                        auto res = balancer_->loadInitialLedger(seq, *initialLoadObserver_);
                        if (not res.has_value() and res.error() == InitialLedgerLoadError::Cancelled) {
                            LOG(log_.debug()) << "Initial ledger load got cancelled";
                            return std::nullopt;
                        }

                    // TODO: this should be interruptible for graceful shutdown
                    return loader_->loadInitialLedger(data);
                });
                        ASSERT(res.has_value(), "Initial ledger retry logic failed");
                        data.edgeKeys = std::move(res).value();

                        return loader_->loadInitialLedger(data);
                    }
                );
            });

            if (not ledger.has_value()) {
@@ -238,28 +269,64 @@ ETLService::loadInitialLedgerIfNeeded()
void
ETLService::startMonitor(uint32_t seq)
{
    monitor_ = std::make_unique<impl::Monitor>(ctx_, backend_, ledgers_, seq);
    monitorSubscription_ = monitor_->subscribe([this](uint32_t seq) {
        log_.info() << "MONITOR got new seq from db: " << seq;
    monitor_ = monitorProvider_->make(ctx_, backend_, ledgers_, seq);

    monitorNewSeqSubscription_ = monitor_->subscribeToNewSequence([this](uint32_t seq) {
        LOG(log_.info()) << "ETLService (via Monitor) got new seq from db: " << seq;

        if (state_->writeConflict) {
            LOG(log_.info()) << "Got a write conflict; Giving up writer seat immediately";
            giveUpWriter();
        }

        // FIXME: is this the best way?
        if (not state_->isWriting) {
            auto const diff = data::synchronousAndRetryOnTimeout([this, seq](auto yield) {
                return backend_->fetchLedgerDiff(seq, yield);
            });

            cacheUpdater_->update(seq, diff);
            backend_->updateRange(seq);
        }

        publisher_->publish(seq, {});
    });

    monitorDbStalledSubscription_ = monitor_->subscribeToDbStalled([this]() {
        LOG(log_.warn()) << "ETLService received DbStalled signal from Monitor";
        if (not state_->isStrictReadonly and not state_->isWriting)
            attemptTakeoverWriter();
    });

    monitor_->run();
}

void
ETLService::startLoading(uint32_t seq)
{
    taskMan_ = taskManagerProvider_->make(ctx_, *monitor_, seq);
    ASSERT(not state_->isStrictReadonly, "This should only happen on writer nodes");
    taskMan_ = taskManagerProvider_->make(ctx_, *monitor_, seq, finishSequence_);
    taskMan_->run(config_.get().get<std::size_t>("extractor_threads"));
}

void
ETLService::attemptTakeoverWriter()
{
    ASSERT(not state_->isStrictReadonly, "This should only happen on writer nodes");
    auto rng = backend_->hardFetchLedgerRangeNoThrow();
    ASSERT(rng.has_value(), "Ledger range can't be null");

    state_->isWriting = true;  // switch to writer
    LOG(log_.info()) << "Taking over the ETL writer seat";
    startLoading(rng->maxSequence + 1);
}

void
ETLService::giveUpWriter()
{
    ASSERT(not state_->isStrictReadonly, "This should only happen on writer nodes");
    state_->isWriting = false;
    state_->writeConflict = false;
    taskMan_ = nullptr;
}

} // namespace etlng

@@ -35,6 +35,7 @@
#include "etlng/LoadBalancerInterface.hpp"
#include "etlng/LoaderInterface.hpp"
#include "etlng/MonitorInterface.hpp"
#include "etlng/MonitorProviderInterface.hpp"
#include "etlng/TaskManagerInterface.hpp"
#include "etlng/TaskManagerProviderInterface.hpp"
#include "etlng/impl/AmendmentBlockHandler.hpp"
@@ -42,7 +43,6 @@
#include "etlng/impl/Extraction.hpp"
#include "etlng/impl/LedgerPublisher.hpp"
#include "etlng/impl/Loading.hpp"
#include "etlng/impl/Monitor.hpp"
#include "etlng/impl/Registry.hpp"
#include "etlng/impl/Scheduling.hpp"
#include "etlng/impl/TaskManager.hpp"
@@ -58,7 +58,7 @@
#include <boost/asio/io_context.hpp>
#include <boost/json/object.hpp>
#include <boost/signals2/connection.hpp>
#include <fmt/core.h>
#include <fmt/format.h>
#include <xrpl/basics/Blob.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/basics/strHex.h>
@@ -106,12 +106,17 @@ class ETLService : public ETLServiceInterface {
    std::shared_ptr<LoaderInterface> loader_;
    std::shared_ptr<InitialLoadObserverInterface> initialLoadObserver_;
    std::shared_ptr<etlng::TaskManagerProviderInterface> taskManagerProvider_;
    std::shared_ptr<etlng::MonitorProviderInterface> monitorProvider_;
    std::shared_ptr<etl::SystemState> state_;

    std::optional<uint32_t> startSequence_;
    std::optional<uint32_t> finishSequence_;

    std::unique_ptr<MonitorInterface> monitor_;
    std::unique_ptr<TaskManagerInterface> taskMan_;

    boost::signals2::scoped_connection monitorSubscription_;
    boost::signals2::scoped_connection monitorNewSeqSubscription_;
    boost::signals2::scoped_connection monitorDbStalledSubscription_;

    std::optional<util::async::AnyOperation<void>> mainLoop_;

@@ -131,6 +136,7 @@ public:
     * @param loader Interface for loading data
     * @param initialLoadObserver The observer for initial data loading
     * @param taskManagerProvider The provider of the task manager instance
     * @param monitorProvider The provider of the monitor instance
     * @param state System state tracking object
     */
    ETLService(
@@ -146,6 +152,7 @@ public:
        std::shared_ptr<LoaderInterface> loader,
        std::shared_ptr<InitialLoadObserverInterface> initialLoadObserver,
        std::shared_ptr<etlng::TaskManagerProviderInterface> taskManagerProvider,
        std::shared_ptr<etlng::MonitorProviderInterface> monitorProvider,
        std::shared_ptr<etl::SystemState> state
    );

@@ -173,7 +180,6 @@ public:
    lastCloseAgeSeconds() const override;

private:
    // TODO: this better be std::expected
    std::optional<data::LedgerRange>
    loadInitialLedgerIfNeeded();

@@ -182,6 +188,12 @@ private:

    void
    startLoading(uint32_t seq);

    void
    attemptTakeoverWriter();

    void
    giveUpWriter();
};

} // namespace etlng

@@ -45,7 +45,7 @@
#include <boost/json/object.hpp>
#include <boost/json/value.hpp>
#include <boost/json/value_to.hpp>
#include <fmt/core.h>
#include <fmt/format.h>

#include <algorithm>
#include <chrono>
@@ -58,7 +58,6 @@
#include <string>
#include <thread>
#include <utility>
#include <variant>
#include <vector>

using namespace util::config;
@@ -184,11 +183,14 @@ LoadBalancer::LoadBalancer(
            LOG(log_.warn()) << "Failed to fetch ETL state from source = " << source->toString()
                             << " Please check the configuration and network";
        } else if (etlState_ && etlState_->networkID != stateOpt->networkID) {
            checkOnETLFailure(fmt::format(
                "ETL sources must be on the same network. Source network id = {} does not match others network id = {}",
                stateOpt->networkID,
                etlState_->networkID
            ));
            checkOnETLFailure(
                fmt::format(
                    "ETL sources must be on the same network. Source network id = {} does not match others network id "
                    "= {}",
                    stateOpt->networkID,
                    etlState_->networkID
                )
            );
        } else {
            etlState_ = stateOpt;
        }
@@ -210,30 +212,32 @@ LoadBalancer::LoadBalancer(
    }
}

std::vector<std::string>
InitialLedgerLoadResult
LoadBalancer::loadInitialLedger(
    uint32_t sequence,
    etlng::InitialLoadObserverInterface& loadObserver,
    std::chrono::steady_clock::duration retryAfter
)
{
    std::vector<std::string> response;
    InitialLedgerLoadResult response;

    execute(
        [this, &response, &sequence, &loadObserver](auto& source) {
            auto [data, res] = source->loadInitialLedger(sequence, downloadRanges_, loadObserver);
            auto res = source->loadInitialLedger(sequence, downloadRanges_, loadObserver);

            if (!res) {
            if (not res.has_value() and res.error() == InitialLedgerLoadError::Errored) {
                LOG(log_.error()) << "Failed to download initial ledger."
                                  << " Sequence = " << sequence << " source = " << source->toString();
            } else {
                response = std::move(data);
                return false;  // should retry on error
            }

            return res;
            response = std::move(res);  // cancelled or data received
            return true;
        },
        sequence,
        retryAfter
    );

    return response;
}

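The callback contract with `execute` here is worth spelling out: returning `false` asks the balancer to retry with another source, while `true` ends the loop whether the load succeeded or was cancelled. A hedged sketch of that contract (illustrative only; `execute` itself is not shown in this diff, so the waiting strategy below is an assumption):

```cpp
#include <chrono>
#include <cstddef>
#include <thread>
#include <vector>

// Illustrative retry driver: cycles through sources until the callback
// reports completion by returning true. Not the actual implementation.
template <typename Source, typename Func>
void
executeSketch(std::vector<Source>& sources, Func&& func, std::chrono::steady_clock::duration retryAfter)
{
    std::size_t idx = 0;
    while (!func(sources[idx])) {  // false => try the next source
        idx = (idx + 1) % sources.size();
        if (idx == 0)
            std::this_thread::sleep_for(retryAfter);  // back off after a full round
    }
}
```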
@@ -279,42 +283,46 @@ LoadBalancer::forwardToRippled(
        return std::unexpected{rpc::ClioError::RpcCommandIsMissing};

    auto const cmd = boost::json::value_to<std::string>(request.at("command"));

    if (forwardingCache_ and forwardingCache_->shouldCache(cmd)) {
        bool servedFromCache = true;
        auto updater =
            [this, &request, &clientIp, &servedFromCache, isAdmin](boost::asio::yield_context yield
            ) -> std::expected<util::ResponseExpirationCache::EntryData, util::ResponseExpirationCache::Error> {
            servedFromCache = false;
            auto result = forwardToRippledImpl(request, clientIp, isAdmin, yield);
            if (result.has_value()) {
                return util::ResponseExpirationCache::EntryData{
                    .lastUpdated = std::chrono::steady_clock::now(), .response = std::move(result).value()
                };
            }
            return std::unexpected{
                util::ResponseExpirationCache::Error{.status = rpc::Status{result.error()}, .warnings = {}}
            };
        };

        auto result = forwardingCache_->getOrUpdate(
            yield,
            cmd,
            std::move(updater),
            [](util::ResponseExpirationCache::EntryData const& entry) { return not entry.response.contains("error"); }
        );
        if (servedFromCache) {
            ++forwardingCounters_.cacheHit.get();
    if (forwardingCache_) {
        if (auto cachedResponse = forwardingCache_->get(cmd); cachedResponse) {
            forwardingCounters_.cacheHit.get() += 1;
            return std::move(cachedResponse).value();
        }
        if (result.has_value()) {
            return std::move(result).value();
        }
    forwardingCounters_.cacheMiss.get() += 1;

    ASSERT(not sources_.empty(), "ETL sources must be configured to forward requests.");
    std::size_t sourceIdx = randomGenerator_->uniform(0ul, sources_.size() - 1);

    auto numAttempts = 0u;

    auto xUserValue = isAdmin ? kADMIN_FORWARDING_X_USER_VALUE : kUSER_FORWARDING_X_USER_VALUE;

    std::optional<boost::json::object> response;
    rpc::ClioError error = rpc::ClioError::EtlConnectionError;
    while (numAttempts < sources_.size()) {
        auto [res, duration] =
            util::timed([&]() { return sources_[sourceIdx]->forwardToRippled(request, clientIp, xUserValue, yield); });
        if (res) {
            forwardingCounters_.successDuration.get() += duration;
            response = std::move(res).value();
            break;
        }
        auto const combinedError = result.error().status.code;
        ASSERT(std::holds_alternative<rpc::ClioError>(combinedError), "There could be only ClioError here");
        return std::unexpected{std::get<rpc::ClioError>(combinedError)};
        forwardingCounters_.failDuration.get() += duration;
        ++forwardingCounters_.retries.get();
        error = std::max(error, res.error());  // Choose the best result between all sources

        sourceIdx = (sourceIdx + 1) % sources_.size();
        ++numAttempts;
    }

    return forwardToRippledImpl(request, clientIp, isAdmin, yield);
    if (response) {
        if (forwardingCache_ and not response->contains("error"))
            forwardingCache_->put(cmd, *response);
        return std::move(response).value();
    }

    return std::unexpected{error};
}

boost::json::value
@@ -405,47 +413,4 @@ LoadBalancer::chooseForwardingSource()
    }
}

std::expected<boost::json::object, rpc::CombinedError>
LoadBalancer::forwardToRippledImpl(
    boost::json::object const& request,
    std::optional<std::string> const& clientIp,
    bool isAdmin,
    boost::asio::yield_context yield
)
{
    ++forwardingCounters_.cacheMiss.get();

    ASSERT(not sources_.empty(), "ETL sources must be configured to forward requests.");
    std::size_t sourceIdx = randomGenerator_->uniform(0ul, sources_.size() - 1);

    auto numAttempts = 0u;

    auto xUserValue = isAdmin ? kADMIN_FORWARDING_X_USER_VALUE : kUSER_FORWARDING_X_USER_VALUE;

    std::optional<boost::json::object> response;
    rpc::ClioError error = rpc::ClioError::EtlConnectionError;
    while (numAttempts < sources_.size()) {
        auto [res, duration] =
            util::timed([&]() { return sources_[sourceIdx]->forwardToRippled(request, clientIp, xUserValue, yield); });

        if (res) {
            forwardingCounters_.successDuration.get() += duration;
            response = std::move(res).value();
            break;
        }
        forwardingCounters_.failDuration.get() += duration;
        ++forwardingCounters_.retries.get();
        error = std::max(error, res.error());  // Choose the best result between all sources

        sourceIdx = (sourceIdx + 1) % sources_.size();
        ++numAttempts;
    }

    if (response.has_value()) {
        return std::move(response).value();
    }

    return std::unexpected{error};
}

} // namespace etlng

@@ -49,6 +49,7 @@
#include <concepts>
#include <cstdint>
#include <expected>
#include <functional>
#include <memory>
#include <optional>
#include <string>
@@ -183,14 +184,14 @@ public:

    /**
     * @brief Load the initial ledger, writing data to the queue.
     * @note This function will retry indefinitely until the ledger is downloaded.
     * @note This function will retry indefinitely until the ledger is downloaded or the download is cancelled.
     *
     * @param sequence Sequence of ledger to download
     * @param observer The observer to notify of progress
     * @param retryAfter Time to wait between retries (2 seconds by default)
     * @return A std::vector<std::string> The ledger data
     * @return A std::expected with ledger edge keys on success, or InitialLedgerLoadError on failure
     */
    std::vector<std::string>
    InitialLedgerLoadResult
    loadInitialLedger(
        uint32_t sequence,
        etlng::InitialLoadObserverInterface& observer,
@@ -281,14 +282,6 @@ private:
     */
    void
    chooseForwardingSource();

    std::expected<boost::json::object, rpc::CombinedError>
    forwardToRippledImpl(
        boost::json::object const& request,
        std::optional<std::string> const& clientIp,
        bool isAdmin,
        boost::asio::yield_context yield
    );
};

} // namespace etlng

@@ -39,6 +39,20 @@

namespace etlng {

/**
 * @brief Represents possible errors for initial ledger load
 */
enum class InitialLedgerLoadError {
    Cancelled, /*< Indicating the initial load got cancelled by user */
    Errored,   /*< Indicating some error happened during initial ledger load */
};

/**
 * @brief The result type of the initial ledger load
 * @note The successful value represents edge keys
 */
using InitialLedgerLoadResult = std::expected<std::vector<std::string>, InitialLedgerLoadError>;
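
A brief usage sketch of this result type, mirroring how ETLService consumes it elsewhere in this diff (the surrounding `balancer`, `observer`, and `seq` are assumed to be in scope):

```cpp
etlng::InitialLedgerLoadResult res = balancer.loadInitialLedger(seq, observer);
if (not res.has_value()) {
    if (res.error() == etlng::InitialLedgerLoadError::Cancelled)
        return;  // shutdown requested mid-download; stop quietly
    return;      // Errored: all sources failed; caller decides what to do next
}
std::vector<std::string> edgeKeys = std::move(res).value();  // success: edge keys
```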

/**
 * @brief An interface for LoadBalancer
 */
@@ -52,14 +66,14 @@ public:

    /**
     * @brief Load the initial ledger, writing data to the queue.
     * @note This function will retry indefinitely until the ledger is downloaded.
     * @note This function will retry indefinitely until the ledger is downloaded or the download is cancelled.
     *
     * @param sequence Sequence of ledger to download
     * @param loader InitialLoadObserverInterface implementation
     * @param retryAfter Time to wait between retries (2 seconds by default)
     * @return A std::vector<std::string> The ledger data
     * @return A std::expected with ledger edge keys on success, or InitialLedgerLoadError on failure
     */
    virtual std::vector<std::string>
    [[nodiscard]] virtual InitialLedgerLoadResult
    loadInitialLedger(
        uint32_t sequence,
        etlng::InitialLoadObserverInterface& loader,
@@ -74,7 +88,7 @@ public:
     * @param retryAfter Time to wait between retries (2 seconds by default)
     * @return A std::vector<std::string> The ledger data
     */
    virtual std::vector<std::string>
    [[nodiscard]] virtual std::vector<std::string>
    loadInitialLedger(uint32_t sequence, std::chrono::steady_clock::duration retryAfter = std::chrono::seconds{2}) = 0;

    /**
@@ -90,7 +104,7 @@ public:
     * @return The extracted data, if extraction was successful. If the ledger was found
     * in the database or the server is shutting down, the optional will be empty
     */
    virtual OptionalGetLedgerResponseType
    [[nodiscard]] virtual OptionalGetLedgerResponseType
    fetchLedger(
        uint32_t ledgerSequence,
        bool getObjects,
@@ -103,7 +117,7 @@ public:
     *
     * @return JSON representation of the state of this load balancer.
     */
    virtual boost::json::value
    [[nodiscard]] virtual boost::json::value
    toJson() const = 0;

    /**
@@ -115,7 +129,7 @@ public:
     * @param yield The coroutine context
     * @return Response received from rippled node as JSON object on success or error on failure
     */
    virtual std::expected<boost::json::object, rpc::CombinedError>
    [[nodiscard]] virtual std::expected<boost::json::object, rpc::CombinedError>
    forwardToRippled(
        boost::json::object const& request,
        std::optional<std::string> const& clientIp,
@@ -127,7 +141,7 @@ public:
     * @brief Return state of ETL nodes.
     * @return ETL state, nullopt if etl nodes not available
     */
    virtual std::optional<etl::ETLState>
    [[nodiscard]] virtual std::optional<etl::ETLState>
    getETLState() noexcept = 0;

    /**

@@ -23,10 +23,19 @@

#include <xrpl/protocol/LedgerHeader.h>

#include <expected>
#include <optional>

namespace etlng {

/**
 * @brief Enumeration of possible errors that can occur during loading operations
 */
enum class LoaderError {
    AmendmentBlocked, /*< Error indicating that an operation is blocked by an amendment */
    WriteConflict,    /*< Error indicating that a write operation resulted in a conflict */
};

/**
 * @brief An interface for a ETL Loader
 */
@@ -36,8 +45,9 @@ struct LoaderInterface {
    /**
     * @brief Load ledger data
     * @param data The data to load
     * @return Nothing or error as std::expected
     */
    virtual void
    [[nodiscard]] virtual std::expected<void, LoaderError>
    load(model::LedgerData const& data) = 0;

    /**

@@ -22,7 +22,7 @@
#include "util/Concepts.hpp"

#include <boost/json/object.hpp>
#include <fmt/core.h>
#include <fmt/format.h>
#include <xrpl/basics/Blob.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/proto/org/xrpl/rpc/v1/get_ledger.pb.h>

@@ -36,7 +36,8 @@ namespace etlng {
class MonitorInterface {
public:
    static constexpr auto kDEFAULT_REPEAT_INTERVAL = std::chrono::seconds{1};
    using SignalType = boost::signals2::signal<void(uint32_t)>;
    using NewSequenceSignalType = boost::signals2::signal<void(uint32_t)>;
    using DbStalledSignalType = boost::signals2::signal<void()>;

    virtual ~MonitorInterface() = default;

@@ -45,7 +46,14 @@ public:
     * @param seq The ledger sequence loaded
     */
    virtual void
    notifyLedgerLoaded(uint32_t seq) = 0;
    notifySequenceLoaded(uint32_t seq) = 0;

    /**
     * @brief Notifies the monitor of a write conflict
     * @param seq The sequence number of the ledger that encountered a write conflict
     */
    virtual void
    notifyWriteConflict(uint32_t seq) = 0;

    /**
     * @brief Allows clients to get notified when a new ledger becomes available in Clio's database
@@ -54,7 +62,16 @@ public:
     * @return A connection object that automatically disconnects the subscription once destroyed
     */
    [[nodiscard]] virtual boost::signals2::scoped_connection
    subscribe(SignalType::slot_type const& subscriber) = 0;
    subscribeToNewSequence(NewSequenceSignalType::slot_type const& subscriber) = 0;

    /**
     * @brief Allows clients to get notified when no database update is detected for a configured period.
     *
     * @param subscriber The slot to connect
     * @return A connection object that automatically disconnects the subscription once destroyed
     */
    [[nodiscard]] virtual boost::signals2::scoped_connection
    subscribeToDbStalled(DbStalledSignalType::slot_type const& subscriber) = 0;

    /**
     * @brief Run the monitor service

src/etlng/MonitorProviderInterface.hpp (new file, 64 lines)
@@ -0,0 +1,64 @@
|
||||
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2025, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include "data/BackendInterface.hpp"
#include "etl/NetworkValidatedLedgersInterface.hpp"
#include "etlng/MonitorInterface.hpp"
#include "util/async/AnyExecutionContext.hpp"

#include <chrono>
#include <cstdint>
#include <memory>

namespace etlng {

/**
 * @brief An interface for providing Monitor instances
 */
struct MonitorProviderInterface {
    /**
     * @brief The time Monitor should wait before reporting absence of updates to the database
     */
    static constexpr auto kDEFAULT_DB_STALLED_REPORT_DELAY = std::chrono::seconds{10};

    virtual ~MonitorProviderInterface() = default;

    /**
     * @brief Create a new Monitor instance
     *
     * @param ctx The execution context for asynchronous operations
     * @param backend Interface to the backend database
     * @param validatedLedgers Interface for accessing network validated ledgers
     * @param startSequence The sequence number to start monitoring from
     * @param dbStalledReportDelay The timeout duration after which to signal no database updates
     * @return A unique pointer to a Monitor implementation
     */
    [[nodiscard]] virtual std::unique_ptr<MonitorInterface>
    make(
        util::async::AnyExecutionContext ctx,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<etl::NetworkValidatedLedgersInterface> validatedLedgers,
        uint32_t startSequence,
        std::chrono::steady_clock::duration dbStalledReportDelay = kDEFAULT_DB_STALLED_REPORT_DELAY
    ) = 0;
};

} // namespace etlng
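A provider interface like this one exists mainly so tests can substitute a fake Monitor without touching the ETL wiring. Below is a hypothetical, self-contained sketch of that pattern; `MonitorLike`, `FakeMonitor`, and `runEtl` are illustrative stand-ins, not clio types:

```cpp
#include <chrono>
#include <cstdint>
#include <memory>

struct MonitorLike {
    virtual ~MonitorLike() = default;
    virtual void run(std::chrono::steady_clock::duration repeatInterval) = 0;
};

struct MonitorProviderLike {
    virtual ~MonitorProviderLike() = default;
    [[nodiscard]] virtual std::unique_ptr<MonitorLike> make(uint32_t startSequence) = 0;
};

struct FakeMonitor : MonitorLike {
    void run(std::chrono::steady_clock::duration) override { /* record calls for test assertions */ }
};

struct FakeProvider : MonitorProviderLike {
    std::unique_ptr<MonitorLike> make(uint32_t) override { return std::make_unique<FakeMonitor>(); }
};

// Production code depends only on the provider, so a unit test can swap in
// FakeProvider and the ETL logic never knows the difference.
void
runEtl(MonitorProviderLike& provider)
{
    auto monitor = provider.make(100u);
    monitor->run(std::chrono::seconds{1});
}

int
main()
{
    FakeProvider provider;
    runEtl(provider);
}
```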
@@ -20,8 +20,8 @@
#include "etlng/Source.hpp"

#include "etl/NetworkValidatedLedgersInterface.hpp"
#include "etl/impl/ForwardingSource.hpp"
#include "etl/impl/SubscriptionSource.hpp"
#include "etlng/impl/ForwardingSource.hpp"
#include "etlng/impl/GrpcSource.hpp"
#include "etlng/impl/SourceImpl.hpp"
#include "feed/SubscriptionManagerInterface.hpp"
@@ -52,7 +52,7 @@ makeSource(
    auto const wsPort = config.get<std::string>("ws_port");
    auto const grpcPort = config.get<std::string>("grpc_port");

    etl::impl::ForwardingSource forwardingSource{ip, wsPort, forwardingTimeout};
    etlng::impl::ForwardingSource forwardingSource{ip, wsPort, forwardingTimeout};
    impl::GrpcSource grpcSource{ip, grpcPort};
    auto subscriptionSource = std::make_unique<etl::impl::SubscriptionSource>(
        ioc,
@@ -19,9 +19,9 @@

#pragma once

#include "data/BackendInterface.hpp"
#include "etl/NetworkValidatedLedgersInterface.hpp"
#include "etlng/InitialLoadObserverInterface.hpp"
#include "etlng/LoadBalancerInterface.hpp"
#include "feed/SubscriptionManagerInterface.hpp"
#include "rpc/Errors.hpp"
#include "util/config/ObjectView.hpp"
@@ -131,7 +131,7 @@ public:
     * @param loader InitialLoadObserverInterface implementation
     * @return A std::pair of the data and a bool indicating whether the download was successful
     */
    virtual std::pair<std::vector<std::string>, bool>
    virtual InitialLedgerLoadResult
    loadInitialLedger(uint32_t sequence, std::uint32_t numMarkers, etlng::InitialLoadObserverInterface& loader) = 0;

    /**
@@ -27,6 +27,7 @@
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>

namespace etlng {

@@ -41,11 +42,17 @@ struct TaskManagerProviderInterface {
     *
     * @param ctx The async context to associate the task manager instance with
     * @param monitor The monitor to notify when ledger is loaded
     * @param seq The sequence to start at
     * @param startSeq The sequence to start at
     * @param finishSeq The sequence to stop at if specified
     * @return A unique pointer to a TaskManager implementation
     */
    virtual std::unique_ptr<TaskManagerInterface>
    make(util::async::AnyExecutionContext ctx, std::reference_wrapper<MonitorInterface> monitor, uint32_t seq) = 0;
    [[nodiscard]] virtual std::unique_ptr<TaskManagerInterface>
    make(
        util::async::AnyExecutionContext ctx,
        std::reference_wrapper<MonitorInterface> monitor,
        uint32_t startSeq,
        std::optional<uint32_t> finishSeq = std::nullopt
    ) = 0;
};

} // namespace etlng
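The defaulted `std::optional<uint32_t> finishSeq` gives the same factory two modes: an absent value means an open-ended run, a present one bounds the range. A minimal sketch of that calling convention, assuming the semantics suggested by the doc comments above (`runRange` and its loop are illustrative only):

```cpp
#include <cstdint>
#include <iostream>
#include <optional>

// Mirrors make(ctx, monitor, startSeq, finishSeq = std::nullopt):
// no finishSeq -> run until stopped; finishSeq -> bounded backfill.
void
runRange(uint32_t startSeq, std::optional<uint32_t> finishSeq = std::nullopt)
{
    for (uint32_t seq = startSeq;; ++seq) {
        if (finishSeq.has_value() && seq > *finishSeq)
            break;  // bounded run requested by the caller
        std::cout << "processing ledger " << seq << '\n';
        if (!finishSeq.has_value() && seq == startSeq + 2)
            break;  // demo only: cut the would-be-unbounded loop short
    }
}

int
main()
{
    runRange(100u, 102u);  // bounded: 100..102
    runRange(200u);        // unbounded in production; truncated here
}
```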
@@ -28,7 +28,7 @@
#include <boost/json/object.hpp>
#include <boost/json/parse.hpp>
#include <boost/json/serialize.hpp>
#include <fmt/core.h>
#include <fmt/format.h>

#include <chrono>
#include <exception>
@@ -20,12 +20,14 @@
#include "etlng/impl/GrpcSource.hpp"

#include "etlng/InitialLoadObserverInterface.hpp"
#include "etlng/LoadBalancerInterface.hpp"
#include "etlng/impl/AsyncGrpcCall.hpp"
#include "util/Assert.hpp"
#include "util/log/Logger.hpp"
#include "web/Resolver.hpp"

#include <fmt/core.h>
#include <boost/asio/spawn.hpp>
#include <fmt/format.h>
#include <grpcpp/client_context.h>
#include <grpcpp/security/credentials.h>
#include <grpcpp/support/channel_arguments.h>
@@ -33,9 +35,12 @@
#include <org/xrpl/rpc/v1/get_ledger.pb.h>
#include <org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <exception>
#include <expected>
#include <memory>
#include <stdexcept>
#include <string>
#include <utility>
@@ -60,6 +65,7 @@ namespace etlng::impl {

GrpcSource::GrpcSource(std::string const& ip, std::string const& grpcPort)
    : log_(fmt::format("ETL_Grpc[{}:{}]", ip, grpcPort))
    , initialLoadShouldStop_(std::make_unique<std::atomic_bool>(false))
{
    try {
        grpc::ChannelArguments chArgs;
@@ -103,15 +109,18 @@ GrpcSource::fetchLedger(uint32_t sequence, bool getObjects, bool getObjectNeighb
    return {status, std::move(response)};
}

std::pair<std::vector<std::string>, bool>
InitialLedgerLoadResult
GrpcSource::loadInitialLedger(
    uint32_t const sequence,
    uint32_t const numMarkers,
    etlng::InitialLoadObserverInterface& observer
)
{
    if (*initialLoadShouldStop_)
        return std::unexpected{InitialLedgerLoadError::Cancelled};

    if (!stub_)
        return {{}, false};
        return std::unexpected{InitialLedgerLoadError::Errored};

    std::vector<AsyncGrpcCall> calls = AsyncGrpcCall::makeAsyncCalls(sequence, numMarkers);

@@ -131,9 +140,9 @@ GrpcSource::loadInitialLedger(
        ASSERT(tag != nullptr, "Tag can't be null.");
        auto ptr = static_cast<AsyncGrpcCall*>(tag);

        if (!ok) {
            LOG(log_.error()) << "loadInitialLedger - ok is false";
            return {{}, false}; // cancelled
        if (not ok or *initialLoadShouldStop_) {
            LOG(log_.error()) << "loadInitialLedger cancelled";
            return std::unexpected{InitialLedgerLoadError::Cancelled};
        }

        LOG(log_.trace()) << "Marker prefix = " << ptr->getMarkerPrefix();
@@ -151,7 +160,16 @@ GrpcSource::loadInitialLedger(
        abort = true;
    }

    return {std::move(edgeKeys), !abort};
    if (abort)
        return std::unexpected{InitialLedgerLoadError::Errored};

    return edgeKeys;
}

void
GrpcSource::stop(boost::asio::yield_context)
{
    initialLoadShouldStop_->store(true);
}

} // namespace etlng::impl
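Judging by the returns above, `InitialLedgerLoadResult` appears to be a `std::expected` carrying the downloaded edge keys on success and `InitialLedgerLoadError::Cancelled`/`Errored` otherwise. A self-contained sketch of caller-side handling under that assumption (the alias and enum are re-declared locally here; requires C++23 for `<expected>`):

```cpp
#include <expected>
#include <iostream>
#include <string>
#include <vector>

enum class InitialLedgerLoadError { Cancelled, Errored };
using InitialLedgerLoadResult = std::expected<std::vector<std::string>, InitialLedgerLoadError>;

void
handle(InitialLedgerLoadResult result)
{
    if (result.has_value()) {
        std::cout << "loaded " << result->size() << " edge keys\n";
        return;
    }
    switch (result.error()) {
        case InitialLedgerLoadError::Cancelled:
            std::cout << "download cancelled (stop() was called)\n";
            break;
        case InitialLedgerLoadError::Errored:
            std::cout << "download failed; caller may retry\n";
            break;
    }
}

int
main()
{
    handle(std::vector<std::string>{"key1", "key2"});
    handle(std::unexpected{InitialLedgerLoadError::Cancelled});
}
```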
@@ -20,23 +20,26 @@
#pragma once

#include "etlng/InitialLoadObserverInterface.hpp"
#include "etlng/LoadBalancerInterface.hpp"
#include "util/log/Logger.hpp"

#include <boost/asio/spawn.hpp>
#include <grpcpp/support/status.h>
#include <org/xrpl/rpc/v1/get_ledger.pb.h>
#include <xrpl/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>

#include <atomic>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>

namespace etlng::impl {

class GrpcSource {
    util::Logger log_;
    std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub> stub_;
    std::unique_ptr<std::atomic_bool> initialLoadShouldStop_;

public:
    GrpcSource(std::string const& ip, std::string const& grpcPort);
@@ -61,10 +64,18 @@ public:
     * @param sequence Sequence of the ledger to download
     * @param numMarkers Number of markers to generate for async calls
     * @param observer InitialLoadObserverInterface implementation
     * @return A std::pair of the data and a bool indicating whether the download was successful
     * @return Downloaded data or an indication of error or cancellation
     */
    std::pair<std::vector<std::string>, bool>
    InitialLedgerLoadResult
    loadInitialLedger(uint32_t sequence, uint32_t numMarkers, etlng::InitialLoadObserverInterface& observer);

    /**
     * @brief Stop any ongoing operations
     * @note This is used to cancel any ongoing initial ledger downloads
     * @param yield The coroutine context
     */
    void
    stop(boost::asio::yield_context yield);
};

} // namespace etlng::impl
@@ -21,7 +21,6 @@

#include "data/BackendInterface.hpp"
#include "data/DBHelpers.hpp"
#include "data/Types.hpp"
#include "etl/SystemState.hpp"
#include "etlng/LedgerPublisherInterface.hpp"
#include "etlng/impl/Loading.hpp"
@@ -35,6 +34,7 @@
#include <boost/asio/io_context.hpp>
#include <boost/asio/post.hpp>
#include <boost/asio/strand.hpp>
#include <fmt/format.h>
#include <xrpl/basics/chrono.h>
#include <xrpl/protocol/Fees.h>
#include <xrpl/protocol/LedgerHeader.h>
@@ -164,10 +164,6 @@ public:
        boost::asio::post(publishStrand_, [this, lgrInfo = lgrInfo]() {
            LOG(log_.info()) << "Publishing ledger " << std::to_string(lgrInfo.seq);

            // TODO: This should probably not be part of publisher in the future
            if (not state_.get().isWriting)
                backend_->updateRange(lgrInfo.seq); // This can't be unit tested atm.

            setLastClose(lgrInfo.closeTime);
            auto age = lastCloseAgeSeconds();

@@ -231,8 +227,9 @@ public:
    std::chrono::time_point<std::chrono::system_clock>
    getLastPublish() const override
    {
        return std::chrono::time_point<std::chrono::system_clock>{std::chrono::seconds{lastPublishSeconds_.get().value()
        }};
        return std::chrono::time_point<std::chrono::system_clock>{
            std::chrono::seconds{lastPublishSeconds_.get().value()}
        };
    }

    /**
@@ -20,11 +20,14 @@
#include "etlng/impl/Loading.hpp"

#include "data/BackendInterface.hpp"
#include "etl/SystemState.hpp"
#include "etl/impl/LedgerLoader.hpp"
#include "etlng/AmendmentBlockHandlerInterface.hpp"
#include "etlng/LoaderInterface.hpp"
#include "etlng/Models.hpp"
#include "etlng/RegistryInterface.hpp"
#include "util/Assert.hpp"
#include "util/Constants.hpp"
#include "util/LedgerUtils.hpp"
#include "util/Profiler.hpp"
#include "util/log/Logger.hpp"
@@ -46,29 +49,45 @@ namespace etlng::impl {
Loader::Loader(
    std::shared_ptr<BackendInterface> backend,
    std::shared_ptr<RegistryInterface> registry,
    std::shared_ptr<AmendmentBlockHandlerInterface> amendmentBlockHandler
    std::shared_ptr<AmendmentBlockHandlerInterface> amendmentBlockHandler,
    std::shared_ptr<etl::SystemState> state
)
    : backend_(std::move(backend))
    , registry_(std::move(registry))
    , amendmentBlockHandler_(std::move(amendmentBlockHandler))
    , state_(std::move(state))
{
}

void
std::expected<void, LoaderError>
Loader::load(model::LedgerData const& data)
{
    try {
        // perform cache updates and all writes from extensions
        // Perform cache updates and all writes from extensions
        // TODO: maybe this readonly logic should be removed?
        registry_->dispatch(data);

        auto [success, duration] =
            ::util::timed<std::chrono::duration<double>>([&]() { return backend_->finishWrites(data.seq); });
        LOG(log_.info()) << "Finished writes to DB for " << data.seq << ": " << (success ? "YES" : "NO") << "; took "
                         << duration;
        // Only a writer should attempt to commit to DB
        // This is also where conflicts with other writer nodes will be detected
        if (state_->isWriting) {
            auto [success, duration] =
                ::util::timed<std::chrono::milliseconds>([&]() { return backend_->finishWrites(data.seq); });
            LOG(log_.info()) << "Finished writes to DB for " << data.seq << ": " << (success ? "YES" : "NO")
                             << "; took " << duration << "ms";

            if (not success) {
                state_->writeConflict = true;
                LOG(log_.warn()) << "Another node wrote a ledger into the DB - we have a write conflict";
                return std::unexpected(LoaderError::WriteConflict);
            }
        }
    } catch (std::runtime_error const& e) {
        LOG(log_.fatal()) << "Failed to load " << data.seq << ": " << e.what();
        amendmentBlockHandler_->notifyAmendmentBlocked();
        return std::unexpected(LoaderError::AmendmentBlocked);
    }

    return {};
};

void
@@ -78,13 +97,32 @@ Loader::onInitialLoadGotMoreObjects(
    std::optional<std::string> lastKey
)
{
    static constexpr std::size_t kLOG_STRIDE = 1000u;
    static auto kINITIAL_LOAD_START_TIME = std::chrono::steady_clock::now();

    try {
        LOG(log_.debug()) << "On initial load: got more objects for seq " << seq << ". size = " << data.size();
        LOG(log_.trace()) << "On initial load: got more objects for seq " << seq << ". size = " << data.size();
        registry_->dispatchInitialObjects(
            seq,
            data,
            std::move(lastKey).value_or(std::string{}) // TODO: perhaps use optional all the way to extensions?
        );

        initialLoadWrittenObjects_ += data.size();
        ++initialLoadWrites_;
        if (initialLoadWrites_ % kLOG_STRIDE == 0u && initialLoadWrites_ != 0u) {
            auto elapsedSinceStart = std::chrono::duration_cast<std::chrono::milliseconds>(
                std::chrono::steady_clock::now() - kINITIAL_LOAD_START_TIME
            );
            auto elapsedSeconds = elapsedSinceStart.count() / static_cast<double>(util::kMILLISECONDS_PER_SECOND);
            auto objectsPerSecond =
                elapsedSeconds > 0.0 ? static_cast<double>(initialLoadWrittenObjects_) / elapsedSeconds : 0.0;

            LOG(log_.info()) << "Wrote " << initialLoadWrittenObjects_
                             << " initial ledger objects so far with average rate of " << objectsPerSecond
                             << " objects per second";
        }

    } catch (std::runtime_error const& e) {
        LOG(log_.fatal()) << "Failed to load initial objects for " << seq << ": " << e.what();
        amendmentBlockHandler_->notifyAmendmentBlocked();
@@ -95,9 +133,7 @@ std::optional<ripple::LedgerHeader>
Loader::loadInitialLedger(model::LedgerData const& data)
{
    try {
        // check that database is actually empty
        auto rng = backend_->hardFetchLedgerRangeNoThrow();
        if (rng) {
        if (auto const rng = backend_->hardFetchLedgerRangeNoThrow(); rng.has_value()) {
            ASSERT(false, "Database is not empty");
            return std::nullopt;
        }
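The new `std::expected<void, LoaderError>` return type lets `load()` report *why* it failed without exceptions crossing the call boundary: a failed `finishWrites` means another writer committed this ledger first. A minimal sketch of that contract, with `commit()` as an illustrative stand-in for the commit step above (requires C++23):

```cpp
#include <expected>
#include <iostream>

enum class LoaderError { AmendmentBlocked, WriteConflict };

std::expected<void, LoaderError>
commit(bool finishWritesSucceeded)
{
    // Mirrors Loader::load: an unsuccessful finishWrites means another
    // writer node committed this ledger first, i.e. a write conflict.
    if (!finishWritesSucceeded)
        return std::unexpected(LoaderError::WriteConflict);
    return {};  // success: an empty expected<void, E>
}

int
main()
{
    if (auto res = commit(false); !res.has_value())
        std::cout << "conflict detected, stop writing\n";
}
```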
@@ -20,7 +20,7 @@
#pragma once

#include "data/BackendInterface.hpp"
#include "etl/LedgerFetcherInterface.hpp"
#include "etl/SystemState.hpp"
#include "etl/impl/LedgerLoader.hpp"
#include "etlng/AmendmentBlockHandlerInterface.hpp"
#include "etlng/InitialLoadObserverInterface.hpp"
@@ -39,6 +39,7 @@
#include <xrpl/protocol/Serializer.h>
#include <xrpl/protocol/TxMeta.h>

#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
@@ -51,7 +52,10 @@ class Loader : public LoaderInterface, public InitialLoadObserverInterface {
    std::shared_ptr<BackendInterface> backend_;
    std::shared_ptr<RegistryInterface> registry_;
    std::shared_ptr<AmendmentBlockHandlerInterface> amendmentBlockHandler_;
    std::shared_ptr<etl::SystemState> state_;

    std::size_t initialLoadWrittenObjects_{0u};
    std::size_t initialLoadWrites_{0u};
    util::Logger log_{"ETL"};

public:
@@ -62,7 +66,8 @@ public:
    Loader(
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<RegistryInterface> registry,
        std::shared_ptr<AmendmentBlockHandlerInterface> amendmentBlockHandler
        std::shared_ptr<AmendmentBlockHandlerInterface> amendmentBlockHandler,
        std::shared_ptr<etl::SystemState> state
    );

    Loader(Loader const&) = delete;
@@ -72,7 +77,7 @@ public:
    Loader&
    operator=(Loader&&) = delete;

    void
    std::expected<void, LoaderError>
    load(model::LedgerData const& data) override;

    void
@@ -23,11 +23,11 @@
#include "etl/NetworkValidatedLedgersInterface.hpp"
#include "util/Assert.hpp"
#include "util/async/AnyExecutionContext.hpp"
#include "util/async/AnyOperation.hpp"
#include "util/log/Logger.hpp"

#include <boost/signals2/connection.hpp>

#include <algorithm>
#include <chrono>
#include <cstddef>
#include <cstdint>
@@ -41,12 +41,18 @@ Monitor::Monitor(
    util::async::AnyExecutionContext ctx,
    std::shared_ptr<BackendInterface> backend,
    std::shared_ptr<etl::NetworkValidatedLedgersInterface> validatedLedgers,
    uint32_t startSequence
    uint32_t startSequence,
    std::chrono::steady_clock::duration dbStalledReportDelay
)
    : strand_(ctx.makeStrand())
    , backend_(std::move(backend))
    , validatedLedgers_(std::move(validatedLedgers))
    , nextSequence_(startSequence)
    , updateData_({
          .dbStalledReportDelay = dbStalledReportDelay,
          .lastDbCheckTime = std::chrono::steady_clock::now(),
          .lastSeenMaxSeqInDb = startSequence > 0 ? startSequence - 1 : 0,
      })
{
}

@@ -55,20 +61,37 @@ Monitor::~Monitor()
    stop();
}

// TODO: think about using signals perhaps? maybe combining with onNextSequence?
// also, how do we not double invoke or does it not matter
void
Monitor::notifyLedgerLoaded(uint32_t seq)
Monitor::notifySequenceLoaded(uint32_t seq)
{
    LOG(log_.debug()) << "Loader notified about newly committed ledger " << seq;
    repeatedTask_->invoke(); // force-invoke immediately
    LOG(log_.debug()) << "Loader notified Monitor about newly committed ledger " << seq;
    {
        auto lck = updateData_.lock();
        lck->lastSeenMaxSeqInDb = std::max(seq, lck->lastSeenMaxSeqInDb);
        lck->lastDbCheckTime = std::chrono::steady_clock::now();
    }
    repeatedTask_->invoke(); // force-invoke doWork immediately
};

void
Monitor::notifyWriteConflict(uint32_t seq)
{
    LOG(log_.warn()) << "Loader notified Monitor about write conflict at " << seq;
    nextSequence_ = seq + 1; // we already loaded the cache for seq just before we detected conflict
    LOG(log_.warn()) << "Resume monitoring from " << nextSequence_;
}

void
Monitor::run(std::chrono::steady_clock::duration repeatInterval)
{
    ASSERT(not repeatedTask_.has_value(), "Monitor attempted to run more than once");
    LOG(log_.debug()) << "Starting monitor";
    {
        auto lck = updateData_.lock();
        LOG(log_.debug()) << "Starting monitor with repeat interval: "
                          << std::chrono::duration_cast<std::chrono::seconds>(repeatInterval).count()
                          << "s and dbStalledReportDelay: "
                          << std::chrono::duration_cast<std::chrono::seconds>(lck->dbStalledReportDelay).count() << "s";
    }

    repeatedTask_ = strand_.executeRepeatedly(repeatInterval, std::bind_front(&Monitor::doWork, this));
    subscription_ = validatedLedgers_->subscribe(std::bind_front(&Monitor::onNextSequence, this));
@@ -80,28 +103,65 @@ Monitor::stop()
    if (repeatedTask_.has_value())
        repeatedTask_->abort();

    subscription_ = std::nullopt;
    repeatedTask_ = std::nullopt;
}

boost::signals2::scoped_connection
Monitor::subscribe(SignalType::slot_type const& subscriber)
Monitor::subscribeToNewSequence(NewSequenceSignalType::slot_type const& subscriber)
{
    return notificationChannel_.connect(subscriber);
}

boost::signals2::scoped_connection
Monitor::subscribeToDbStalled(DbStalledSignalType::slot_type const& subscriber)
{
    return dbStalledChannel_.connect(subscriber);
}

void
Monitor::onNextSequence(uint32_t seq)
{
    LOG(log_.debug()) << "rippled published sequence " << seq;
    ASSERT(repeatedTask_.has_value(), "Ledger subscription without repeated task is a logic error");
    LOG(log_.debug()) << "Notified about new sequence on the network: " << seq;
    repeatedTask_->invoke(); // force-invoke immediately
}

void
Monitor::doWork()
{
    if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); rng) {
        while (rng->maxSequence >= nextSequence_)
    auto rng = backend_->hardFetchLedgerRangeNoThrow();
    bool dbProgressedThisCycle = false;
    auto lck = updateData_.lock();

    if (rng.has_value()) {
        if (rng->maxSequence > lck->lastSeenMaxSeqInDb) {
            LOG(log_.trace()) << "DB progressed. Old max seq = " << lck->lastSeenMaxSeqInDb
                              << ", new max seq = " << rng->maxSequence;
            lck->lastSeenMaxSeqInDb = rng->maxSequence;
            dbProgressedThisCycle = true;
        }

        while (lck->lastSeenMaxSeqInDb >= nextSequence_) {
            LOG(log_.trace()) << "Publishing from Monitor::doWork. nextSequence_ = " << nextSequence_
                              << ", lastSeenMaxSeqInDb_ = " << lck->lastSeenMaxSeqInDb;
            notificationChannel_(nextSequence_++);
            dbProgressedThisCycle = true;
        }
    } else {
        LOG(log_.trace()) << "DB range is not available or empty. lastSeenMaxSeqInDb_ = " << lck->lastSeenMaxSeqInDb
                          << ", nextSequence_ = " << nextSequence_;
    }

    if (dbProgressedThisCycle) {
        lck->lastDbCheckTime = std::chrono::steady_clock::now();
    } else if (std::chrono::steady_clock::now() - lck->lastDbCheckTime > lck->dbStalledReportDelay) {
        LOG(log_.info()) << "No DB update detected for "
                         << std::chrono::duration_cast<std::chrono::seconds>(lck->dbStalledReportDelay).count()
                         << " seconds. Firing dbStalledChannel. Last seen max seq in DB: " << lck->lastSeenMaxSeqInDb
                         << ". Expecting next: " << nextSequence_;
        dbStalledChannel_();
        lck->lastDbCheckTime = std::chrono::steady_clock::now();
    }
}
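`doWork()` accesses all stall-tracking state through `updateData_.lock()`, which returns a guard whose `operator->` reaches the protected data. Below is a minimal stand-in for that pattern (not clio's actual `util/Mutex.hpp`, whose API may differ beyond the `.lock()` usage visible above):

```cpp
#include <chrono>
#include <cstdint>
#include <mutex>
#include <utility>

template <typename T>
class LockedHandle {
    std::unique_lock<std::mutex> lock_;
    T* data_;

public:
    LockedHandle(std::mutex& mtx, T& data) : lock_(mtx), data_(&data) {}
    T* operator->() { return data_; }  // guarded access while the lock is held
};

template <typename T>
class SimpleMutex {
    std::mutex mtx_;
    T data_;

public:
    explicit SimpleMutex(T data) : data_(std::move(data)) {}
    LockedHandle<T> lock() { return {mtx_, data_}; }
};

struct UpdateData {
    std::chrono::steady_clock::time_point lastDbCheckTime;
    uint32_t lastSeenMaxSeqInDb = 0u;
};

int
main()
{
    SimpleMutex<UpdateData> updateData{UpdateData{std::chrono::steady_clock::now(), 0u}};

    auto lck = updateData.lock();  // same shape as updateData_.lock() above
    lck->lastSeenMaxSeqInDb = 42u;
    lck->lastDbCheckTime = std::chrono::steady_clock::now();
}   // the handle (and the lock) are released here
```

Bundling the data with its mutex this way makes it hard to touch the shared state without holding the lock, which is presumably why the struct-plus-`util::Mutex` layout was chosen over a bare member mutex.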
@@ -22,6 +22,7 @@
#include "data/BackendInterface.hpp"
#include "etl/NetworkValidatedLedgersInterface.hpp"
#include "etlng/MonitorInterface.hpp"
#include "util/Mutex.hpp"
#include "util/async/AnyExecutionContext.hpp"
#include "util/async/AnyOperation.hpp"
#include "util/async/AnyStrand.hpp"
@@ -30,6 +31,7 @@
#include <boost/signals2/connection.hpp>
#include <xrpl/protocol/TxFormats.h>

#include <atomic>
#include <chrono>
#include <cstddef>
#include <cstdint>
@@ -43,11 +45,20 @@ class Monitor : public MonitorInterface {
    std::shared_ptr<BackendInterface> backend_;
    std::shared_ptr<etl::NetworkValidatedLedgersInterface> validatedLedgers_;

    uint32_t nextSequence_;
    std::atomic_uint32_t nextSequence_;
    std::optional<util::async::AnyOperation<void>> repeatedTask_;
    std::optional<boost::signals2::scoped_connection> subscription_; // network validated ledgers subscription

    SignalType notificationChannel_;
    NewSequenceSignalType notificationChannel_;
    DbStalledSignalType dbStalledChannel_;

    struct UpdateData {
        std::chrono::steady_clock::duration dbStalledReportDelay;
        std::chrono::steady_clock::time_point lastDbCheckTime;
        uint32_t lastSeenMaxSeqInDb = 0u;
    };

    util::Mutex<UpdateData> updateData_;

    util::Logger log_{"ETL"};

@@ -56,12 +67,16 @@ public:
        util::async::AnyExecutionContext ctx,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<etl::NetworkValidatedLedgersInterface> validatedLedgers,
        uint32_t startSequence
        uint32_t startSequence,
        std::chrono::steady_clock::duration dbStalledReportDelay
    );
    ~Monitor() override;

    void
    notifyLedgerLoaded(uint32_t seq) override;
    notifySequenceLoaded(uint32_t seq) override;

    void
    notifyWriteConflict(uint32_t seq) override;

    void
    run(std::chrono::steady_clock::duration repeatInterval) override;
@@ -70,7 +85,10 @@ public:
    stop() override;

    boost::signals2::scoped_connection
    subscribe(SignalType::slot_type const& subscriber) override;
    subscribeToNewSequence(NewSequenceSignalType::slot_type const& subscriber) override;

    boost::signals2::scoped_connection
    subscribeToDbStalled(DbStalledSignalType::slot_type const& subscriber) override;

private:
    void
53
src/etlng/impl/MonitorProvider.hpp
Normal file
@@ -0,0 +1,53 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2025, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include "data/BackendInterface.hpp"
#include "etl/NetworkValidatedLedgersInterface.hpp"
#include "etlng/MonitorInterface.hpp"
#include "etlng/MonitorProviderInterface.hpp"
#include "etlng/impl/Monitor.hpp"
#include "util/async/AnyExecutionContext.hpp"

#include <chrono>
#include <cstdint>
#include <memory>
#include <utility>

namespace etlng::impl {

class MonitorProvider : public MonitorProviderInterface {
public:
    std::unique_ptr<MonitorInterface>
    make(
        util::async::AnyExecutionContext ctx,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<etl::NetworkValidatedLedgersInterface> validatedLedgers,
        uint32_t startSequence,
        std::chrono::steady_clock::duration dbStalledReportDelay
    ) override
    {
        return std::make_unique<Monitor>(
            std::move(ctx), std::move(backend), std::move(validatedLedgers), startSequence, dbStalledReportDelay
        );
    }
};

} // namespace etlng::impl
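Note that the interface declares `dbStalledReportDelay` with a default of `kDEFAULT_DB_STALLED_REPORT_DELAY`, so most call sites can omit it while tests can shrink the stall timeout. A tiny self-contained sketch of that defaulted-duration calling convention (`makeMonitor` is an illustrative stand-in, not the real factory):

```cpp
#include <chrono>
#include <iostream>

static constexpr auto kDEFAULT_DB_STALLED_REPORT_DELAY = std::chrono::seconds{10};

void
makeMonitor(std::chrono::steady_clock::duration dbStalledReportDelay = kDEFAULT_DB_STALLED_REPORT_DELAY)
{
    std::cout << "monitor reports a stalled DB after "
              << std::chrono::duration_cast<std::chrono::milliseconds>(dbStalledReportDelay).count() << "ms\n";
}

int
main()
{
    makeMonitor();                               // production: 10s default
    makeMonitor(std::chrono::milliseconds{50});  // test: fail fast
}
```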
@@ -19,10 +19,11 @@

#pragma once

#include "etl/impl/ForwardingSource.hpp"
#include "etl/impl/SubscriptionSource.hpp"
#include "etlng/InitialLoadObserverInterface.hpp"
#include "etlng/LoadBalancerInterface.hpp"
#include "etlng/Source.hpp"
#include "etlng/impl/ForwardingSource.hpp"
#include "etlng/impl/GrpcSource.hpp"
#include "rpc/Errors.hpp"

@@ -53,7 +54,7 @@ namespace etlng::impl {
template <
    typename GrpcSourceType = GrpcSource,
    typename SubscriptionSourceTypePtr = std::unique_ptr<etl::impl::SubscriptionSource>,
    typename ForwardingSourceType = etl::impl::ForwardingSource>
    typename ForwardingSourceType = etlng::impl::ForwardingSource>
class SourceImpl : public SourceBase {
    std::string ip_;
    std::string wsPort_;
@@ -107,6 +108,7 @@ public:
    stop(boost::asio::yield_context yield) final
    {
        subscriptionSource_->stop(yield);
        grpcSource_.stop(yield);
    }

    /**
@@ -202,7 +204,7 @@ public:
     * @param loader InitialLoadObserverInterface implementation
     * @return A std::pair of the data and a bool indicating whether the download was successful
     */
    std::pair<std::vector<std::string>, bool>
    InitialLedgerLoadResult
    loadInitialLedger(uint32_t sequence, std::uint32_t numMarkers, etlng::InitialLoadObserverInterface& loader) final
    {
        return grpcSource_.loadInitialLedger(sequence, numMarkers, loader);
@@ -26,6 +26,7 @@
#include "etlng/SchedulerInterface.hpp"
#include "etlng/impl/Monitor.hpp"
#include "etlng/impl/TaskQueue.hpp"
#include "util/Constants.hpp"
#include "util/LedgerUtils.hpp"
#include "util/Profiler.hpp"
#include "util/async/AnyExecutionContext.hpp"
@@ -102,29 +103,49 @@ TaskManager::spawnExtractor(TaskQueue& queue)
                    if (stopRequested)
                        break;
                }
            } else {
                // TODO: how do we signal to the loaders that it's time to shutdown? some special task?
                break; // TODO: handle server shutdown or other node took over ETL
            }
        } else {
            // TODO (https://github.com/XRPLF/clio/issues/1852)
            std::this_thread::sleep_for(kDELAY_BETWEEN_ATTEMPTS);
        }
    }

    LOG(log_.info()) << "Extractor (one of) coroutine stopped";
    });
}

util::async::AnyOperation<void>
TaskManager::spawnLoader(TaskQueue& queue)
{
    static constexpr auto kNANO_TO_SECOND = 1.0e9;

    return ctx_.execute([this, &queue](auto stopRequested) {
        while (not stopRequested) {
            // TODO (https://github.com/XRPLF/clio/issues/66): does not tell the loader whether it's out of order or not
            if (auto data = queue.dequeue(); data.has_value()) {
                auto nanos = util::timed<std::chrono::nanoseconds>([this, data = *data] { loader_.get().load(data); });
                auto const seconds = nanos / kNANO_TO_SECOND;
                auto [expectedSuccess, nanos] =
                    util::timed<std::chrono::nanoseconds>([&] { return loader_.get().load(*data); });

                auto const shouldExitOnError = [&] {
                    if (expectedSuccess.has_value())
                        return false;

                    switch (expectedSuccess.error()) {
                        case LoaderError::WriteConflict:
                            LOG(log_.warn()) << "Immediately stopping loader on write conflict"
                                             << "; latest ledger cache loaded for " << data->seq;
                            monitor_.get().notifyWriteConflict(data->seq);
                            return true;
                        case LoaderError::AmendmentBlocked:
                            LOG(log_.warn()) << "Immediately stopping loader on amendment block";
                            return true;
                    }

                    std::unreachable();
                }();

                if (shouldExitOnError)
                    break;

                auto const seconds = nanos / util::kNANO_PER_SECOND;
                auto const txnCount = data->transactions.size();
                auto const objCount = data->objects.size();

@@ -133,9 +154,11 @@ TaskManager::spawnLoader(TaskQueue& queue)
                                 << " seconds;"
                                 << " tps[" << txnCount / seconds << "], ops[" << objCount / seconds << "]";

                monitor_.get().notifySequenceLoaded(data->seq);
                monitor_.get().notifyLedgerLoaded(data->seq);
            }
        }

        LOG(log_.info()) << "Loader coroutine stopped";
    });
}
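The loader now times `load()` via `util::timed`, which evidently returns the callable's result together with the elapsed time as a structured-bindable pair. A minimal sketch of a helper with that usage shape (not clio's actual `util/Profiler.hpp`, which may differ, e.g. by returning a raw count or also accepting void callables):

```cpp
#include <chrono>
#include <iostream>
#include <utility>

// Runs fn, returning {fn's result, elapsed time cast to DurationT}.
template <typename DurationT, typename F>
auto
timed(F&& fn) -> std::pair<decltype(fn()), DurationT>
{
    auto const start = std::chrono::steady_clock::now();
    auto result = fn();  // the work being measured
    auto const elapsed = std::chrono::duration_cast<DurationT>(std::chrono::steady_clock::now() - start);
    return {std::move(result), elapsed};
}

int
main()
{
    // Same shape as: auto [expectedSuccess, nanos] = util::timed<...>(...)
    auto [value, nanos] = timed<std::chrono::nanoseconds>([] { return 40 + 2; });
    std::cout << "computed " << value << " in " << nanos.count() << "ns\n";
}
```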