Mirror of https://github.com/XRPLF/clio.git (synced 2025-11-19 11:15:50 +00:00)

Compare commits: 2.6.0-rc2 ... 7c8152d76f (41 commits)
| SHA1 |
|---|
| 7c8152d76f |
| 0425d34b55 |
| 8c8a7ff3b8 |
| 16493abd0d |
| 3dd72d94e1 |
| 5e914abf29 |
| 9603968808 |
| 0124c06a53 |
| 1bfdd0dd89 |
| f41d574204 |
| d0ec60381b |
| 0b19a42a96 |
| 030f4f1b22 |
| 2de49b4d33 |
| 3de2bf2910 |
| 7538efb01e |
| 685f611434 |
| 2528dee6b6 |
| b2be4b51d1 |
| b4e40558c9 |
| b361e3a108 |
| a4b47da57a |
| 2ed1a45ef1 |
| dabaa5bf80 |
| b4fb3e42b8 |
| aa64bb7b6b |
| dc5f8b9c23 |
| 7300529484 |
| 33802f475f |
| 213752862c |
| a189eeb952 |
| 3c1811233a |
| 693ed2061c |
| 1e2f4b5ca2 |
| 1da8464d75 |
| d48fb168c6 |
| 92595f95a0 |
| fc9de87136 |
| 67f5ca445f |
| 897c255b8c |
| aa9eea0d99 |
31 .github/actions/build-clio/action.yml (vendored, new file)

@@ -0,0 +1,31 @@
+name: Build clio
+description: Build clio in build directory
+
+inputs:
+  targets:
+    description: Space-separated build target names
+    default: all
+  nproc_subtract:
+    description: The number of processors to subtract when calculating parallelism.
+    required: true
+    default: "0"
+
+runs:
+  using: composite
+  steps:
+    - name: Get number of processors
+      uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
+      id: nproc
+      with:
+        subtract: ${{ inputs.nproc_subtract }}
+
+    - name: Build targets
+      shell: bash
+      env:
+        CMAKE_TARGETS: ${{ inputs.targets }}
+      run: |
+        cd build
+        cmake \
+          --build . \
+          --parallel "${{ steps.nproc.outputs.nproc }}" \
+          --target ${CMAKE_TARGETS}
@@ -34,14 +34,14 @@ runs:
   steps:
     - name: Login to DockerHub
       if: ${{ inputs.push_image == 'true' && inputs.dockerhub_repo != '' }}
-      uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
+      uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
       with:
         username: ${{ env.DOCKERHUB_USER }}
         password: ${{ env.DOCKERHUB_PW }}

     - name: Login to GitHub Container Registry
       if: ${{ inputs.push_image == 'true' }}
-      uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
+      uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
       with:
         registry: ghcr.io
         username: ${{ github.repository_owner }}
29 .github/actions/build_clio/action.yml (vendored, deleted)

@@ -1,29 +0,0 @@
-name: Build clio
-description: Build clio in build directory
-
-inputs:
-  targets:
-    description: Space-separated build target names
-    default: all
-  subtract_threads:
-    description: An option for the action get_number_of_threads. See get_number_of_threads
-    required: true
-    default: "0"
-
-runs:
-  using: composite
-  steps:
-    - name: Get number of threads
-      uses: ./.github/actions/get_number_of_threads
-      id: number_of_threads
-      with:
-        subtract_threads: ${{ inputs.subtract_threads }}
-
-    - name: Build targets
-      shell: bash
-      run: |
-        cd build
-        cmake \
-          --build . \
-          --parallel "${{ steps.number_of_threads.outputs.threads_number }}" \
-          --target ${{ inputs.targets }}
@@ -24,7 +24,7 @@ runs:
         -j8 --exclude-throw-branches

     - name: Archive coverage report
-      uses: actions/upload-artifact@v4
+      uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
       with:
         name: coverage-report.xml
         path: build/coverage_report.xml
@@ -28,12 +28,17 @@ runs:
     - name: Create an issue
       id: create_issue
      shell: bash
+      env:
+        ISSUE_BODY: ${{ inputs.body }}
+        ISSUE_ASSIGNEES: ${{ inputs.assignees }}
+        ISSUE_LABELS: ${{ inputs.labels }}
+        ISSUE_TITLE: ${{ inputs.title }}
       run: |
-        echo -e '${{ inputs.body }}' > issue.md
+        echo -e "${ISSUE_BODY}" > issue.md
         gh issue create \
-          --assignee '${{ inputs.assignees }}' \
-          --label '${{ inputs.labels }}' \
-          --title '${{ inputs.title }}' \
+          --assignee "${ISSUE_ASSIGNEES}" \
+          --label "${ISSUE_LABELS}" \
+          --title "${ISSUE_TITLE}" \
           --body-file ./issue.md \
           > create_issue.log
         created_issue="$(sed 's|.*/||' create_issue.log)"
36 .github/actions/get_number_of_threads/action.yml (vendored, deleted)

@@ -1,36 +0,0 @@
-name: Get number of threads
-description: Determines number of threads to use on macOS and Linux
-
-inputs:
-  subtract_threads:
-    description: How many threads to subtract from the calculated number
-    required: true
-    default: "0"
-outputs:
-  threads_number:
-    description: Number of threads to use
-    value: ${{ steps.number_of_threads_export.outputs.num }}
-
-runs:
-  using: composite
-  steps:
-    - name: Get number of threads on mac
-      id: mac_threads
-      if: ${{ runner.os == 'macOS' }}
-      shell: bash
-      run: echo "num=$(($(sysctl -n hw.logicalcpu) - 2))" >> $GITHUB_OUTPUT
-
-    - name: Get number of threads on Linux
-      id: linux_threads
-      if: ${{ runner.os == 'Linux' }}
-      shell: bash
-      run: echo "num=$(($(nproc) - 2))" >> $GITHUB_OUTPUT
-
-    - name: Shift and export number of threads
-      id: number_of_threads_export
-      shell: bash
-      run: |
-        num_of_threads="${{ steps.mac_threads.outputs.num || steps.linux_threads.outputs.num }}"
-        shift_by="${{ inputs.subtract_threads }}"
-        shifted="$((num_of_threads - shift_by))"
-        echo "num=$(( shifted > 1 ? shifted : 1 ))" >> $GITHUB_OUTPUT
@@ -27,10 +27,10 @@ runs:
   steps:
     - name: Find common commit
       id: git_common_ancestor
-      uses: ./.github/actions/git_common_ancestor
+      uses: ./.github/actions/git-common-ancestor

     - name: Restore ccache cache
-      uses: actions/cache/restore@v4
+      uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
       id: ccache_cache
       if: ${{ env.CCACHE_DISABLE != '1' }}
       with:
@@ -28,11 +28,11 @@ runs:
   steps:
     - name: Find common commit
       id: git_common_ancestor
-      uses: ./.github/actions/git_common_ancestor
+      uses: ./.github/actions/git-common-ancestor

     - name: Save ccache cache
       if: ${{ inputs.ccache_cache_hit != 'true' || inputs.ccache_cache_miss_rate == '100.0' }}
-      uses: actions/cache/save@v4
+      uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
       with:
         path: ${{ inputs.ccache_dir }}
         key: clio-ccache-${{ runner.os }}-${{ inputs.build_type }}${{ inputs.code_coverage == 'true' && '-code_coverage' || '' }}-${{ inputs.conan_profile }}-develop-${{ steps.git_common_ancestor.outputs.commit }}
27 .github/dependabot.yml (vendored)

@@ -14,7 +14,7 @@ updates:
     target-branch: develop

   - package-ecosystem: github-actions
-    directory: .github/actions/build_clio/
+    directory: .github/actions/build-clio/
     schedule:
       interval: weekly
       day: monday
@@ -27,7 +27,7 @@ updates:
     target-branch: develop

   - package-ecosystem: github-actions
-    directory: .github/actions/build_docker_image/
+    directory: .github/actions/build-docker-image/
     schedule:
       interval: weekly
       day: monday
@@ -53,7 +53,7 @@ updates:
     target-branch: develop

   - package-ecosystem: github-actions
-    directory: .github/actions/code_coverage/
+    directory: .github/actions/code-coverage/
     schedule:
       interval: weekly
       day: monday
@@ -79,7 +79,7 @@ updates:
     target-branch: develop

   - package-ecosystem: github-actions
-    directory: .github/actions/create_issue/
+    directory: .github/actions/create-issue/
     schedule:
       interval: weekly
       day: monday
@@ -92,7 +92,7 @@ updates:
     target-branch: develop

   - package-ecosystem: github-actions
-    directory: .github/actions/get_number_of_threads/
+    directory: .github/actions/git-common-ancestor/
     schedule:
       interval: weekly
       day: monday
@@ -105,7 +105,7 @@ updates:
     target-branch: develop

   - package-ecosystem: github-actions
-    directory: .github/actions/git_common_ancestor/
+    directory: .github/actions/restore-cache/
     schedule:
       interval: weekly
       day: monday
@@ -118,20 +118,7 @@ updates:
     target-branch: develop

-  - package-ecosystem: github-actions
-    directory: .github/actions/restore_cache/
-    schedule:
-      interval: weekly
-      day: monday
-      time: "04:00"
-      timezone: Etc/GMT
-    reviewers:
-      - XRPLF/clio-dev-team
-    commit-message:
-      prefix: "ci: [DEPENDABOT] "
-    target-branch: develop
-
   - package-ecosystem: github-actions
-    directory: .github/actions/save_cache/
+    directory: .github/actions/save-cache/
     schedule:
       interval: weekly
       day: monday
4 .github/scripts/conan/generate_matrix.py (vendored)

@@ -3,7 +3,9 @@ import itertools
 import json

 LINUX_OS = ["heavy", "heavy-arm64"]
-LINUX_CONTAINERS = ['{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }']
+LINUX_CONTAINERS = [
+    '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
+]
 LINUX_COMPILERS = ["gcc", "clang"]

 MACOS_OS = ["macos15"]
@@ -31,15 +31,16 @@ TESTS=$($TEST_BINARY --gtest_list_tests | awk '/^ / {print suite $1} !/^ / {su
 OUTPUT_DIR="./.sanitizer-report"
 mkdir -p "$OUTPUT_DIR"

+export TSAN_OPTIONS="die_after_fork=0"
+export MallocNanoZone='0' # for MacOSX
+
 for TEST in $TESTS; do
-    OUTPUT_FILE="$OUTPUT_DIR/${TEST//\//_}"
-    export TSAN_OPTIONS="log_path=\"$OUTPUT_FILE\" die_after_fork=0"
-    export ASAN_OPTIONS="log_path=\"$OUTPUT_FILE\""
-    export UBSAN_OPTIONS="log_path=\"$OUTPUT_FILE\""
-    export MallocNanoZone='0' # for MacOSX
-    $TEST_BINARY --gtest_filter="$TEST" > /dev/null 2>&1
+    OUTPUT_FILE="$OUTPUT_DIR/${TEST//\//_}.log"
+    $TEST_BINARY --gtest_filter="$TEST" > "$OUTPUT_FILE" 2>&1

     if [ $? -ne 0 ]; then
         echo "'$TEST' failed a sanitizer check."
+    else
+        rm "$OUTPUT_FILE"
     fi
 done
@@ -44,11 +44,11 @@ jobs:
     runs-on: ubuntu-latest

     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

       - name: Download Clio binary from artifact
         if: ${{ inputs.artifact_name != null }}
-        uses: actions/download-artifact@v5
+        uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
         with:
           name: ${{ inputs.artifact_name }}
           path: ./docker/clio/artifact/
@@ -56,9 +56,12 @@ jobs:
       - name: Download Clio binary from url
         if: ${{ inputs.clio_server_binary_url != null }}
         shell: bash
+        env:
+          BINARY_URL: ${{ inputs.clio_server_binary_url }}
+          BINARY_SHA256: ${{ inputs.binary_sha256 }}
         run: |
-          wget "${{inputs.clio_server_binary_url}}" -P ./docker/clio/artifact/
-          if [ "$(sha256sum ./docker/clio/clio_server | awk '{print $1}')" != "${{inputs.binary_sha256}}" ]; then
+          wget "${BINARY_URL}" -P ./docker/clio/artifact/
+          if [ "$(sha256sum ./docker/clio/clio_server | awk '{print $1}')" != "${BINARY_SHA256}" ]; then
            echo "Binary sha256 sum doesn't match"
            exit 1
          fi
@@ -89,7 +92,7 @@ jobs:
           echo "GHCR_REPO=$(echo ghcr.io/${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> ${GITHUB_OUTPUT}

       - name: Build Docker image
-        uses: ./.github/actions/build_docker_image
+        uses: ./.github/actions/build-docker-image
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
30 .github/workflows/build.yml (vendored)

@@ -8,14 +8,14 @@ on:
     paths:
       - .github/workflows/build.yml

-      - .github/workflows/build_and_test.yml
-      - .github/workflows/build_impl.yml
-      - .github/workflows/test_impl.yml
-      - .github/workflows/upload_coverage_report.yml
+      - .github/workflows/reusable-build-test.yml
+      - .github/workflows/reusable-build.yml
+      - .github/workflows/reusable-test.yml
+      - .github/workflows/reusable-upload-coverage-report.yml

       - ".github/actions/**"
-      - "!.github/actions/build_docker_image/**"
-      - "!.github/actions/create_issue/**"
+      - "!.github/actions/build-docker-image/**"
+      - "!.github/actions/create-issue/**"

       - CMakeLists.txt
       - conanfile.py
@@ -45,7 +45,7 @@ jobs:
         build_type: [Release, Debug]
         container:
           [
-            '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }',
+            '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }',
           ]
         static: [true]

@@ -56,7 +56,7 @@ jobs:
           container: ""
           static: false

-    uses: ./.github/workflows/build_and_test.yml
+    uses: ./.github/workflows/reusable-build-test.yml
     with:
       runs_on: ${{ matrix.os }}
       container: ${{ matrix.container }}
@@ -72,10 +72,10 @@ jobs:
   code_coverage:
     name: Run Code Coverage

-    uses: ./.github/workflows/build_impl.yml
+    uses: ./.github/workflows/reusable-build.yml
     with:
       runs_on: heavy
-      container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
+      container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
       conan_profile: gcc
       build_type: Debug
       download_ccache: true
@@ -91,10 +91,10 @@ jobs:
   package:
     name: Build packages

-    uses: ./.github/workflows/build_impl.yml
+    uses: ./.github/workflows/reusable-build.yml
     with:
       runs_on: heavy
-      container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
+      container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
       conan_profile: gcc
       build_type: Release
       download_ccache: true
@@ -111,12 +111,12 @@ jobs:
     needs: build-and-test
     runs-on: heavy
     container:
-      image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
+      image: ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d

     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

-      - uses: actions/download-artifact@v5
+      - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
       with:
         name: clio_server_Linux_Release_gcc
@@ -17,10 +17,10 @@ jobs:
     name: Build Clio / `libXRPL ${{ github.event.client_payload.version }}`
     runs-on: heavy
     container:
-      image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
+      image: ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d

     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
       with:
         fetch-depth: 0

@@ -51,13 +51,13 @@ jobs:
       conan_profile: ${{ env.CONAN_PROFILE }}

     - name: Build Clio
-      uses: ./.github/actions/build_clio
+      uses: ./.github/actions/build-clio

     - name: Strip tests
       run: strip build/clio_tests

     - name: Upload clio_tests
-      uses: actions/upload-artifact@v4
+      uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
       with:
         name: clio_tests_check_libxrpl
         path: build/clio_tests
@@ -67,10 +67,10 @@ jobs:
     needs: build
     runs-on: heavy
     container:
-      image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
+      image: ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d

     steps:
-      - uses: actions/download-artifact@v5
+      - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
       with:
         name: clio_tests_check_libxrpl

@@ -90,10 +90,10 @@ jobs:
       issues: write

     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

       - name: Create an issue
-        uses: ./.github/actions/create_issue
+        uses: ./.github/actions/create-issue
         env:
           GH_TOKEN: ${{ github.token }}
         with:
@@ -10,8 +10,17 @@ jobs:
     runs-on: ubuntu-latest

     steps:
-      - uses: ytanikin/pr-conventional-commits@b72758283dcbee706975950e96bc4bf323a8d8c0 # v1.4.2
+      - uses: ytanikin/pr-conventional-commits@b72758283dcbee706975950e96bc4bf323a8d8c0 # 1.4.2
        with:
          task_types: '["build","feat","fix","docs","test","ci","style","refactor","perf","chore"]'
          add_label: false
          custom_labels: '{"build":"build", "feat":"enhancement", "fix":"bug", "docs":"documentation", "test":"testability", "ci":"ci", "style":"refactoring", "refactor":"refactoring", "perf":"performance", "chore":"tooling"}'

+      - name: Check if message starts with upper-case letter
+        env:
+          PR_TITLE: ${{ github.event.pull_request.title }}
+        run: |
+          if [[ ! "${PR_TITLE}" =~ ^[a-z]+:\ [\[A-Z] ]]; then
+            echo "Error: PR title must start with an upper-case letter."
+            exit 1
+          fi
16 .github/workflows/clang-tidy.yml (vendored)

@@ -27,7 +27,7 @@ jobs:
     if: github.event_name != 'push' || contains(github.event.head_commit.message, 'clang-tidy auto fixes')
     runs-on: heavy
     container:
-      image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
+      image: ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d

     permissions:
       contents: write
@@ -35,7 +35,7 @@ jobs:
       pull-requests: write

     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
       with:
         fetch-depth: 0

@@ -45,7 +45,7 @@ jobs:
         disable_ccache: true

     - name: Restore cache
-      uses: ./.github/actions/restore_cache
+      uses: ./.github/actions/restore-cache
       id: restore_cache
       with:
         conan_profile: ${{ env.CONAN_PROFILE }}
@@ -61,16 +61,16 @@ jobs:
       with:
         conan_profile: ${{ env.CONAN_PROFILE }}

-    - name: Get number of threads
-      uses: ./.github/actions/get_number_of_threads
-      id: number_of_threads
+    - name: Get number of processors
+      uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
+      id: nproc

     - name: Run clang-tidy
       continue-on-error: true
       shell: bash
       id: run_clang_tidy
       run: |
-        run-clang-tidy-${{ env.LLVM_TOOLS_VERSION }} -p build -j "${{ steps.number_of_threads.outputs.threads_number }}" -fix -quiet 1>output.txt
+        run-clang-tidy-${{ env.LLVM_TOOLS_VERSION }} -p build -j "${{ steps.nproc.outputs.nproc }}" -fix -quiet 1>output.txt

     - name: Fix local includes and clang-format style
       if: ${{ steps.run_clang_tidy.outcome != 'success' }}
@@ -90,7 +90,7 @@ jobs:
     - name: Create an issue
       if: ${{ steps.run_clang_tidy.outcome != 'success' && github.event_name != 'pull_request' }}
       id: create_issue
-      uses: ./.github/actions/create_issue
+      uses: ./.github/actions/create-issue
       env:
         GH_TOKEN: ${{ github.token }}
       with:
10 .github/workflows/docs.yml (vendored)

@@ -14,11 +14,11 @@ jobs:
   build:
     runs-on: ubuntu-latest
     container:
-      image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
+      image: ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d

     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
        with:
          lfs: true

@@ -39,10 +39,10 @@ jobs:
         run: cmake --build . --target docs

       - name: Setup Pages
-        uses: actions/configure-pages@v5
+        uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b # v5.0.0

       - name: Upload artifact
-        uses: actions/upload-pages-artifact@v4
+        uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b # v4.0.0
        with:
          path: build_docs/html
          name: docs-develop
@@ -62,6 +62,6 @@ jobs:
     steps:
       - name: Deploy to GitHub Pages
         id: deployment
-        uses: actions/deploy-pages@v4
+        uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e # v4.0.5
        with:
          artifact_name: docs-develop
32 .github/workflows/nightly.yml (vendored)

@@ -8,14 +8,14 @@ on:
     paths:
       - .github/workflows/nightly.yml

-      - .github/workflows/release_impl.yml
-      - .github/workflows/build_and_test.yml
-      - .github/workflows/build_impl.yml
-      - .github/workflows/test_impl.yml
-      - .github/workflows/build_clio_docker_image.yml
+      - .github/workflows/reusable-release.yml
+      - .github/workflows/reusable-build-test.yml
+      - .github/workflows/reusable-build.yml
+      - .github/workflows/reusable-test.yml
+      - .github/workflows/build-clio-docker-image.yml

       - ".github/actions/**"
-      - "!.github/actions/code_coverage/**"
+      - "!.github/actions/code-coverage/**"
       - .github/scripts/prepare-release-artifacts.sh

 concurrency:
@@ -39,19 +39,19 @@ jobs:
           conan_profile: gcc
           build_type: Release
           static: true
-          container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
+          container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
         - os: heavy
           conan_profile: gcc
           build_type: Debug
           static: true
-          container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
+          container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
         - os: heavy
           conan_profile: gcc.ubsan
           build_type: Release
           static: false
-          container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
+          container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'

-    uses: ./.github/workflows/build_and_test.yml
+    uses: ./.github/workflows/reusable-build-test.yml
     with:
       runs_on: ${{ matrix.os }}
       container: ${{ matrix.container }}
@@ -73,13 +73,13 @@ jobs:
       include:
         - os: heavy
           conan_profile: clang
-          container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
+          container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
           static: true
         - os: macos15
           conan_profile: apple-clang
           container: ""
           static: false
-    uses: ./.github/workflows/build_impl.yml
+    uses: ./.github/workflows/reusable-build.yml
     with:
       runs_on: ${{ matrix.os }}
       container: ${{ matrix.container }}
@@ -95,7 +95,7 @@ jobs:

   nightly_release:
     needs: build-and-test
-    uses: ./.github/workflows/release_impl.yml
+    uses: ./.github/workflows/reusable-release.yml
     with:
       overwrite_release: true
       prerelease: true
@@ -109,7 +109,7 @@ jobs:
       draft: false

   build_and_publish_docker_image:
-    uses: ./.github/workflows/build_clio_docker_image.yml
+    uses: ./.github/workflows/build-clio-docker-image.yml
     needs: build-and-test
     secrets: inherit
     with:
@@ -130,10 +130,10 @@ jobs:
       issues: write

     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

       - name: Create an issue
-        uses: ./.github/actions/create_issue
+        uses: ./.github/actions/create-issue
         env:
           GH_TOKEN: ${{ github.token }}
         with:
4 .github/workflows/pre-commit.yml (vendored)

@@ -8,7 +8,7 @@ on:

 jobs:
   run-hooks:
-    uses: XRPLF/actions/.github/workflows/pre-commit.yml@afbcbdafbe0ce5439492fb87eda6441371086386
+    uses: XRPLF/actions/.github/workflows/pre-commit.yml@34790936fae4c6c751f62ec8c06696f9c1a5753a
     with:
       runs_on: heavy
-      container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
+      container: '{ "image": "ghcr.io/xrplf/clio-pre-commit:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
10 .github/workflows/release.yml (vendored)

@@ -29,9 +29,9 @@ jobs:
           conan_profile: gcc
           build_type: Release
           static: true
-          container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
+          container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'

-    uses: ./.github/workflows/build_and_test.yml
+    uses: ./.github/workflows/reusable-build-test.yml
     with:
       runs_on: ${{ matrix.os }}
       container: ${{ matrix.container }}
@@ -47,13 +47,13 @@ jobs:

   release:
     needs: build-and-test
-    uses: ./.github/workflows/release_impl.yml
+    uses: ./.github/workflows/reusable-release.yml
     with:
       overwrite_release: false
       prerelease: ${{ contains(github.ref_name, '-') }}
-      title: "${{ github.ref_name}}"
+      title: "${{ github.ref_name }}"
       version: "${{ github.ref_name }}"
       header: >
         ${{ contains(github.ref_name, '-') && '> **Note:** Please remember that this is a release candidate and it is not recommended for production use.' || '' }}
       generate_changelog: ${{ !contains(github.ref_name, '-') }}
-      draft: true
+      draft: ${{ !contains(github.ref_name, '-') }}
@@ -77,7 +77,7 @@ on:

 jobs:
   build:
-    uses: ./.github/workflows/build_impl.yml
+    uses: ./.github/workflows/reusable-build.yml
     with:
       runs_on: ${{ inputs.runs_on }}
       container: ${{ inputs.container }}
@@ -95,7 +95,7 @@ jobs:

   test:
     needs: build
-    uses: ./.github/workflows/test_impl.yml
+    uses: ./.github/workflows/reusable-test.yml
     with:
       runs_on: ${{ inputs.runs_on }}
       container: ${{ inputs.container }}
@@ -86,7 +86,7 @@ jobs:
         if: ${{ runner.os == 'macOS' }}
         uses: XRPLF/actions/.github/actions/cleanup-workspace@ea9970b7c211b18f4c8bcdb28c29f5711752029f

-      - uses: actions/checkout@v4
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
       with:
         fetch-depth: 0
         # We need to fetch tags to have correct version in the release
@@ -106,7 +106,7 @@ jobs:

       - name: Restore cache
         if: ${{ inputs.download_ccache }}
-        uses: ./.github/actions/restore_cache
+        uses: ./.github/actions/restore-cache
        id: restore_cache
        with:
          conan_profile: ${{ inputs.conan_profile }}
@@ -131,7 +131,7 @@ jobs:
         package: ${{ inputs.package }}

       - name: Build Clio
-        uses: ./.github/actions/build_clio
+        uses: ./.github/actions/build-clio
        with:
          targets: ${{ inputs.targets }}

@@ -145,7 +145,7 @@ jobs:

       - name: Upload build time analyze report
         if: ${{ inputs.analyze_build_time }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
        with:
          name: build_time_report_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
          path: build_time_report.txt
@@ -170,35 +170,35 @@ jobs:

       - name: Upload clio_server
         if: ${{ inputs.upload_clio_server && !inputs.code_coverage && !inputs.analyze_build_time }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
        with:
          name: clio_server_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
          path: build/clio_server

       - name: Upload clio_tests
         if: ${{ !inputs.code_coverage && !inputs.analyze_build_time && !inputs.package }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
        with:
          name: clio_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
          path: build/clio_tests

       - name: Upload clio_integration_tests
         if: ${{ !inputs.code_coverage && !inputs.analyze_build_time && !inputs.package }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
        with:
          name: clio_integration_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
          path: build/clio_integration_tests

       - name: Upload Clio Linux package
         if: ${{ inputs.package }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
        with:
          name: clio_deb_package_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
          path: build/*.deb

       - name: Save cache
         if: ${{ inputs.upload_ccache && github.ref == 'refs/heads/develop' }}
-        uses: ./.github/actions/save_cache
+        uses: ./.github/actions/save-cache
        with:
          conan_profile: ${{ inputs.conan_profile }}
          ccache_dir: ${{ env.CCACHE_DIR }}
@@ -216,17 +216,19 @@ jobs:
       # It's all available in the build job, but not in the test job
       - name: Run code coverage
         if: ${{ inputs.code_coverage }}
-        uses: ./.github/actions/code_coverage
+        uses: ./.github/actions/code-coverage

       - name: Verify expected version
         if: ${{ inputs.expected_version != '' }}
         shell: bash
+        env:
+          INPUT_EXPECTED_VERSION: ${{ inputs.expected_version }}
        run: |
          set -e
-          EXPECTED_VERSION="clio-${{ inputs.expected_version }}"
+          EXPECTED_VERSION="clio-${INPUT_EXPECTED_VERSION}"
          actual_version=$(./build/clio_server --version)
          if [[ "$actual_version" != "$EXPECTED_VERSION" ]]; then
-            echo "Expected version '$EXPECTED_VERSION', but got '$actual_version'"
+            echo "Expected version '${EXPECTED_VERSION}', but got '${actual_version}'"
            exit 1
          fi

@@ -238,6 +240,6 @@ jobs:
       if: ${{ inputs.code_coverage }}
       name: Codecov
       needs: build
-      uses: ./.github/workflows/upload_coverage_report.yml
+      uses: ./.github/workflows/reusable-upload-coverage-report.yml
      secrets:
        CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
@@ -42,7 +42,7 @@ jobs:
   release:
     runs-on: heavy
     container:
-      image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
+      image: ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d
     env:
       GH_REPO: ${{ github.repository }}
       GH_TOKEN: ${{ github.token }}
@@ -51,7 +51,7 @@ jobs:
       contents: write

     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
       with:
         fetch-depth: 0

@@ -60,17 +60,19 @@ jobs:
       with:
         disable_ccache: true

-      - uses: actions/download-artifact@v5
+      - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
       with:
         path: release_artifacts
         pattern: clio_server_*

       - name: Create release notes
         shell: bash
+        env:
+          RELEASE_HEADER: ${{ inputs.header }}
        run: |
          echo "# Release notes" > "${RUNNER_TEMP}/release_notes.md"
          echo "" >> "${RUNNER_TEMP}/release_notes.md"
-          printf '%s\n' "${{ inputs.header }}" >> "${RUNNER_TEMP}/release_notes.md"
+          printf '%s\n' "${RELEASE_HEADER}" >> "${RUNNER_TEMP}/release_notes.md"

       - name: Generate changelog
         shell: bash
@@ -87,7 +89,7 @@ jobs:
         run: .github/scripts/prepare-release-artifacts.sh release_artifacts

       - name: Upload release notes
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
        with:
          name: release_notes_${{ inputs.version }}
          path: "${RUNNER_TEMP}/release_notes.md"
@@ -95,18 +97,25 @@ jobs:
       - name: Remove current release and tag
         if: ${{ github.event_name != 'pull_request' && inputs.overwrite_release }}
         shell: bash
+        env:
+          RELEASE_VERSION: ${{ inputs.version }}
        run: |
-          gh release delete ${{ inputs.version }} --yes || true
-          git push origin :${{ inputs.version }} || true
+          gh release delete "${RELEASE_VERSION}" --yes || true
+          git push origin :"${RELEASE_VERSION}" || true

       - name: Publish release
         if: ${{ github.event_name != 'pull_request' }}
         shell: bash
+        env:
+          RELEASE_VERSION: ${{ inputs.version }}
+          PRERELEASE_OPTION: ${{ inputs.prerelease && '--prerelease' || '' }}
+          RELEASE_TITLE: ${{ inputs.title }}
+          DRAFT_OPTION: ${{ inputs.draft && '--draft' || '' }}
        run: |
-          gh release create "${{ inputs.version }}" \
-            ${{ inputs.prerelease && '--prerelease' || '' }} \
-            --title "${{ inputs.title }}" \
+          gh release create "${RELEASE_VERSION}" \
+            ${PRERELEASE_OPTION} \
+            --title "${RELEASE_TITLE}" \
            --target "${GITHUB_SHA}" \
-            ${{ inputs.draft && '--draft' || '' }} \
+            ${DRAFT_OPTION} \
            --notes-file "${RUNNER_TEMP}/release_notes.md" \
            ./release_artifacts/clio_server*
@@ -43,18 +43,18 @@ jobs:

     env:
       # TODO: remove completely when we have fixed all currently existing issues with sanitizers
-      SANITIZER_IGNORE_ERRORS: ${{ endsWith(inputs.conan_profile, '.tsan') || inputs.conan_profile == 'clang.asan' || (inputs.conan_profile == 'gcc.asan' && inputs.build_type == 'Release') }}
+      SANITIZER_IGNORE_ERRORS: ${{ endsWith(inputs.conan_profile, '.tsan') || (inputs.conan_profile == 'gcc.asan' && inputs.build_type == 'Release') }}

     steps:
       - name: Cleanup workspace
         if: ${{ runner.os == 'macOS' }}
         uses: XRPLF/actions/.github/actions/cleanup-workspace@ea9970b7c211b18f4c8bcdb28c29f5711752029f

-      - uses: actions/checkout@v4
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
       with:
         fetch-depth: 0

-      - uses: actions/download-artifact@v5
+      - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
       with:
         name: clio_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}

@@ -68,7 +68,7 @@ jobs:

       - name: Run clio_tests (sanitizer errors ignored)
         if: ${{ env.SANITIZER_IGNORE_ERRORS == 'true' }}
-        run: ./.github/scripts/execute-tests-under-sanitizer ./clio_tests
+        run: ./.github/scripts/execute-tests-under-sanitizer.sh ./clio_tests

       - name: Check for sanitizer report
         if: ${{ env.SANITIZER_IGNORE_ERRORS == 'true' }}
@@ -83,7 +83,7 @@ jobs:

       - name: Upload sanitizer report
         if: ${{ env.SANITIZER_IGNORE_ERRORS == 'true' && steps.check_report.outputs.found_report == 'true' }}
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
        with:
          name: sanitizer_report_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
          path: .sanitizer-report/*
@@ -91,7 +91,7 @@ jobs:

       - name: Create an issue
         if: ${{ false && env.SANITIZER_IGNORE_ERRORS == 'true' && steps.check_report.outputs.found_report == 'true' }}
-        uses: ./.github/actions/create_issue
+        uses: ./.github/actions/create-issue
        env:
          GH_TOKEN: ${{ github.token }}
        with:
@@ -144,7 +144,7 @@ jobs:
           sleep 5
         done

-      - uses: actions/download-artifact@v5
+      - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
       with:
         name: clio_integration_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
@@ -1,7 +1,6 @@
 name: Upload report

 on:
-  workflow_dispatch:
   workflow_call:
     secrets:
       CODECOV_TOKEN:
@@ -13,12 +12,12 @@ jobs:
     runs-on: ubuntu-latest

     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
       with:
         fetch-depth: 0

       - name: Download report artifact
-        uses: actions/download-artifact@v5
+        uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
        with:
          name: coverage-report.xml
          path: build
16 .github/workflows/sanitizers.yml (vendored)

@@ -8,14 +8,14 @@ on:
     paths:
       - .github/workflows/sanitizers.yml

-      - .github/workflows/build_and_test.yml
-      - .github/workflows/build_impl.yml
-      - .github/workflows/test_impl.yml
+      - .github/workflows/reusable-build-test.yml
+      - .github/workflows/reusable-build.yml
+      - .github/workflows/reusable-test.yml

       - ".github/actions/**"
-      - "!.github/actions/build_docker_image/**"
-      - "!.github/actions/create_issue/**"
-      - .github/scripts/execute-tests-under-sanitizer
+      - "!.github/actions/build-docker-image/**"
+      - "!.github/actions/create-issue/**"
+      - .github/scripts/execute-tests-under-sanitizer.sh

       - CMakeLists.txt
       - conanfile.py
@@ -41,10 +41,10 @@ jobs:
         sanitizer_ext: [.asan, .tsan, .ubsan]
         build_type: [Release, Debug]

-    uses: ./.github/workflows/build_and_test.yml
+    uses: ./.github/workflows/reusable-build-test.yml
     with:
       runs_on: heavy
-      container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
+      container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
      download_ccache: false
      upload_ccache: false
      conan_profile: ${{ matrix.compiler }}${{ matrix.sanitizer_ext }}
@@ -3,23 +3,23 @@ name: Update CI docker image
 on:
   pull_request:
     paths:
-      - .github/workflows/update_docker_ci.yml
+      - .github/workflows/update-docker-ci.yml

-      - ".github/actions/build_docker_image/**"
+      - ".github/actions/build-docker-image/**"

-      - "docker/ci/**"
-      - "docker/compilers/**"
-      - "docker/tools/**"
+      - "docker/**"
+      - "!docker/clio/**"
+      - "!docker/develop/**"
   push:
     branches: [develop]
     paths:
-      - .github/workflows/update_docker_ci.yml
+      - .github/workflows/update-docker-ci.yml

-      - ".github/actions/build_docker_image/**"
+      - ".github/actions/build-docker-image/**"

-      - "docker/ci/**"
-      - "docker/compilers/**"
-      - "docker/tools/**"
+      - "docker/**"
+      - "!docker/clio/**"
+      - "!docker/develop/**"
   workflow_dispatch:

 concurrency:
@@ -52,7 +52,7 @@ jobs:
     needs: repo

     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

       - name: Get changed files
         id: changed-files
@@ -60,7 +60,7 @@ jobs:
       with:
         files: "docker/compilers/gcc/**"

-      - uses: ./.github/actions/build_docker_image
+      - uses: ./.github/actions/build-docker-image
        if: ${{ steps.changed-files.outputs.any_changed == 'true' }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -90,15 +90,15 @@ jobs:
     needs: repo

     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

       - name: Get changed files
         id: changed-files
-        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
+        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
        with:
          files: "docker/compilers/gcc/**"

-      - uses: ./.github/actions/build_docker_image
+      - uses: ./.github/actions/build-docker-image
        if: ${{ steps.changed-files.outputs.any_changed == 'true' }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -128,7 +128,7 @@ jobs:
     needs: [repo, gcc-amd64, gcc-arm64]

     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

       - name: Get changed files
         id: changed-files
@@ -137,11 +137,11 @@ jobs:
         files: "docker/compilers/gcc/**"

       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
+        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

       - name: Login to GitHub Container Registry
         if: ${{ github.event_name != 'pull_request' }}
-        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
+        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
@@ -149,7 +149,7 @@ jobs:

       - name: Login to DockerHub
         if: ${{ github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' }}
-        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
+        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          username: ${{ secrets.DOCKERHUB_USER }}
          password: ${{ secrets.DOCKERHUB_PW }}
@@ -179,7 +179,7 @@ jobs:
     needs: repo

     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

       - name: Get changed files
         id: changed-files
@@ -187,7 +187,7 @@ jobs:
       with:
         files: "docker/compilers/clang/**"

-      - uses: ./.github/actions/build_docker_image
+      - uses: ./.github/actions/build-docker-image
        if: ${{ steps.changed-files.outputs.any_changed == 'true' }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -215,7 +215,7 @@ jobs:
     needs: [repo, gcc-merge]

     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

       - name: Get changed files
         id: changed-files
@@ -223,7 +223,7 @@ jobs:
       with:
         files: "docker/tools/**"

-      - uses: ./.github/actions/build_docker_image
+      - uses: ./.github/actions/build-docker-image
        if: ${{ steps.changed-files.outputs.any_changed == 'true' }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -246,15 +246,15 @@ jobs:
     needs: [repo, gcc-merge]

     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

       - name: Get changed files
         id: changed-files
-        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
+        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
        with:
          files: "docker/tools/**"

-      - uses: ./.github/actions/build_docker_image
+      - uses: ./.github/actions/build-docker-image
        if: ${{ steps.changed-files.outputs.any_changed == 'true' }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -277,7 +277,7 @@ jobs:
     needs: [repo, tools-amd64, tools-arm64]

     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

       - name: Get changed files
         id: changed-files
@@ -286,11 +286,11 @@ jobs:
         files: "docker/tools/**"

       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
+        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

       - name: Login to GitHub Container Registry
         if: ${{ github.event_name != 'pull_request' }}
-        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
+        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
@@ -306,14 +306,36 @@ jobs:
           $image:arm64-latest \
           $image:amd64-latest

+  pre-commit:
+    name: Build and push pre-commit docker image
+    runs-on: heavy
+    needs: [repo, tools-merge]
+
+    steps:
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: ./.github/actions/build-docker-image
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          images: |
+            ${{ needs.repo.outputs.GHCR_REPO }}/clio-pre-commit
+          push_image: ${{ github.event_name != 'pull_request' }}
+          directory: docker/pre-commit
+          tags: |
+            type=raw,value=latest
+            type=raw,value=${{ github.sha }}
+          platforms: linux/amd64,linux/arm64
+          build_args: |
+            GHCR_REPO=${{ needs.repo.outputs.GHCR_REPO }}
+
   ci:
     name: Build and push CI docker image
     runs-on: heavy
     needs: [repo, gcc-merge, clang, tools-merge]

     steps:
-      - uses: actions/checkout@v4
-      - uses: ./.github/actions/build_docker_image
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: ./.github/actions/build-docker-image
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
@@ -18,7 +18,7 @@ on:
   pull_request:
     branches: [develop]
     paths:
-      - .github/workflows/upload_conan_deps.yml
+      - .github/workflows/upload-conan-deps.yml

       - .github/actions/conan/action.yml
       - ".github/scripts/conan/**"
@@ -28,7 +28,7 @@ on:
   push:
     branches: [develop]
     paths:
-      - .github/workflows/upload_conan_deps.yml
+      - .github/workflows/upload-conan-deps.yml

       - .github/actions/conan/action.yml
       - ".github/scripts/conan/**"
@@ -46,7 +46,7 @@ jobs:
     outputs:
       matrix: ${{ steps.set-matrix.outputs.matrix }}
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

       - name: Calculate conan matrix
         id: set-matrix
@@ -69,7 +69,7 @@ jobs:
       CONAN_PROFILE: ${{ matrix.compiler }}${{ matrix.sanitizer_ext }}

     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

       - name: Prepare runner
         uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
@@ -99,4 +99,6 @@ jobs:

       - name: Upload Conan packages
         if: ${{ github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' && github.event_name != 'schedule' }}
-        run: conan upload "*" -r=xrplf --confirm ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }}
+        env:
+          FORCE_OPTION: ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }}
+        run: conan upload "*" -r=xrplf --confirm ${FORCE_OPTION}
@@ -43,7 +43,7 @@ repos:
       # hadolint-docker is a special hook that runs hadolint in a Docker container
       # Docker is not installed in the environment where pre-commit is run
       stages: [manual]
-      entry: hadolint/hadolint:v2.14 hadolint
+      entry: hadolint/hadolint:v2.14.0 hadolint

   - repo: https://github.com/codespell-project/codespell
     rev: 63c8f8312b7559622c0d82815639671ae42132ac # frozen: v2.4.1
@@ -34,7 +34,6 @@ Below are some useful docs to learn more about Clio.

 - [How to configure Clio and rippled](./docs/configure-clio.md)
 - [How to run Clio](./docs/run-clio.md)
 - [Logging](./docs/logging.md)
 - [Troubleshooting guide](./docs/trouble_shooting.md)

 **General reference material:**
@@ -55,4 +55,4 @@
     ]
   },
   "config_requires": []
 }
 }
@@ -43,26 +43,20 @@ RUN apt-get update \
     && rm -rf /var/lib/apt/lists/*

 # Install Python tools
-ARG PYTHON_VERSION=3.13
-
-RUN add-apt-repository ppa:deadsnakes/ppa \
-    && apt-get update \
+RUN apt-get update \
     && apt-get install -y --no-install-recommends --no-install-suggests \
-    python${PYTHON_VERSION} \
-    python${PYTHON_VERSION}-venv \
+    python3 \
+    python3-pip \
     && apt-get clean \
-    && rm -rf /var/lib/apt/lists/* \
-    && curl -sS https://bootstrap.pypa.io/get-pip.py | python${PYTHON_VERSION}
-
-# Create a virtual environment for python tools
-RUN python${PYTHON_VERSION} -m venv /opt/venv
-ENV PATH="/opt/venv/bin:$PATH"
+    && rm -rf /var/lib/apt/lists/*

 RUN pip install -q --no-cache-dir \
     # TODO: Remove this once we switch to newer Ubuntu base image
     # lxml 6.0.0 is not compatible with our image
     'lxml<6.0.0' \
     cmake \
     conan==2.20.1 \
-    gcovr \
-    pre-commit
+    gcovr

 # Install LLVM tools
 ARG LLVM_TOOLS_VERSION=20
@@ -9,7 +9,7 @@ The image is based on Ubuntu 20.04 and contains:
 - Clang 19
 - ClangBuildAnalyzer 1.6.0
 - Conan 2.20.1
-- Doxygen 1.12
+- Doxygen 1.14
 - GCC 15.2.0
 - GDB 16.3
 - gh 2.74
@@ -1,6 +1,6 @@
 services:
   clio_develop:
-    image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
+    image: ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d
     volumes:
       - clio_develop_conan_data:/root/.conan2/p
       - clio_develop_ccache:/root/.ccache
38 docker/pre-commit/Dockerfile (new file)

@@ -0,0 +1,38 @@
+ARG GHCR_REPO=invalid
+FROM ${GHCR_REPO}/clio-tools:latest AS clio-tools
+
+# We're using Ubuntu 24.04 to have a more recent version of Python
+FROM ubuntu:24.04
+
+ARG DEBIAN_FRONTEND=noninteractive
+
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+
+# hadolint ignore=DL3002
+USER root
+WORKDIR /root
+
+# Install common tools and dependencies
+RUN apt-get update \
+    && apt-get install -y --no-install-recommends --no-install-suggests \
+    curl \
+    git \
+    libatomic1 \
+    software-properties-common \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install Python tools
+RUN apt-get update \
+    && apt-get install -y --no-install-recommends --no-install-suggests \
+    python3 \
+    python3-pip \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/*
+
+RUN pip install -q --no-cache-dir --break-system-packages \
+    pre-commit
+
+COPY --from=clio-tools \
+    /usr/local/bin/doxygen \
+    /usr/local/bin/
@@ -51,7 +51,7 @@ RUN apt-get update \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*

-ARG DOXYGEN_VERSION=1.12.0
+ARG DOXYGEN_VERSION=1.14.0
 RUN wget --progress=dot:giga "https://github.com/doxygen/doxygen/releases/download/Release_${DOXYGEN_VERSION//./_}/doxygen-${DOXYGEN_VERSION}.src.tar.gz" \
     && tar xf "doxygen-${DOXYGEN_VERSION}.src.tar.gz" \
     && cd "doxygen-${DOXYGEN_VERSION}" \
@@ -15,6 +15,7 @@ EXTRACT_ANON_NSPACES = NO
 SORT_MEMBERS_CTORS_1ST = YES

 INPUT = ${SOURCE}/src
+USE_MDFILE_AS_MAINPAGE = ${SOURCE}/src/README.md
 EXCLUDE_SYMBOLS = ${EXCLUDES}
 RECURSIVE = YES
 HAVE_DOT = ${USE_DOT}
@@ -177,7 +177,7 @@ There are several CMake options you can use to customize the build:

 ### Generating API docs for Clio

-The API documentation for Clio is generated by [Doxygen](https://www.doxygen.nl/index.html). If you want to generate the API documentation when building Clio, make sure to install Doxygen 1.12.0 on your system.
+The API documentation for Clio is generated by [Doxygen](https://www.doxygen.nl/index.html). If you want to generate the API documentation when building Clio, make sure to install Doxygen 1.14.0 on your system.

 To generate the API docs, please use CMake option `-Ddocs=ON` as described above and build the `docs` target.
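A minimal sketch of that flow, assuming the `build_docs` build directory that the docs workflow above uses (any build directory works):

```sh
# Configure with the docs target enabled; requires Doxygen 1.14.0.
cmake -B build_docs -Ddocs=ON
# Build only the documentation; the generated HTML ends up in build_docs/html.
cmake --build build_docs --target docs
```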
@@ -191,7 +191,7 @@ Open the `index.html` file in your browser to see the documentation pages.
 It is also possible to build Clio using [Docker](https://www.docker.com/) if you don't want to install all the dependencies on your machine.

 ```sh
-docker run -it ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
+docker run -it ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d
 git clone https://github.com/XRPLF/clio
 cd clio
 ```
@@ -951,7 +951,7 @@ span.arrowhead {
   border-color: var(--primary-color);
 }

-#nav-tree ul li:first-child > div > a {
+#nav-tree-contents > ul > li:first-child > div > a {
   opacity: 0;
   pointer-events: none;
 }
@@ -77,7 +77,7 @@ It's possible to configure `minimum`, `maximum` and `default` version like so:

 All of the above are optional.

-Clio will fallback to hardcoded defaults when these values are not specified in the config file, or if the configured values are outside of the minimum and maximum supported versions hardcoded in [src/rpc/common/APIVersion.h](../src/rpc/common/APIVersion.hpp).
+Clio will fallback to hardcoded defaults when these values are not specified in the config file, or if the configured values are outside of the minimum and maximum supported versions hardcoded in [src/rpc/common/APIVersion.hpp](../src/rpc/common/APIVersion.hpp).

 > [!TIP]
 > See the [example-config.json](../docs/examples/config/example-config.json) for more details.
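The config snippet the hunk's context line refers to ("like so:") is not included in this diff. A sketch of what such a block could look like; the key names here are illustrative assumptions, not confirmed entries of the Clio config schema:

```json
{
  "api_version": {
    "min": 1,
    "max": 2,
    "default": 1
  }
}
```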
@@ -36,19 +36,19 @@ EOF
     exit 0
 fi

-# Check version of doxygen is at least 1.12
+# Check version of doxygen is at least 1.14
 version=$($DOXYGEN --version | grep -o '[0-9\.]*')

-if [[ "1.12.0" > "$version" ]]; then
+if [[ "1.14.0" > "$version" ]]; then
     # No hard error if doxygen version is not the one we want - let CI deal with it
     cat <<EOF

 ERROR
 -----------------------------------------------------------------------------
-    A minimum of version 1.12 of `which doxygen` is required.
-    Your version is $version. Please upgrade it for next time.
+    A minimum of version 1.14 of `which doxygen` is required.
+    Your version is $version. Please upgrade it.

-    Your changes may fail to pass CI once pushed.
+    Your changes may fail CI checks.
 -----------------------------------------------------------------------------

 EOF
20
src/README.md
Normal file
@@ -0,0 +1,20 @@
# Clio API server

## Introduction

Clio is an XRP Ledger API server optimized for RPC calls over WebSocket or JSON-RPC.

It stores validated historical ledger and transaction data in a more space efficient format, and uses up to 4 times
less space than [rippled](https://github.com/XRPLF/rippled).

Clio can be configured to store data in [Apache Cassandra](https://cassandra.apache.org/_/index.html) or
[ScyllaDB](https://www.scylladb.com/), enabling scalable read throughput. Multiple Clio nodes can share
access to the same dataset, which allows for a highly available cluster of Clio nodes without the need for redundant
data storage or computation.

## Develop

As you prepare to develop code for Clio, please be sure you are aware of our current
[Contribution guidelines](https://github.com/XRPLF/clio/blob/develop/CONTRIBUTING.md).

Read about @ref "rpc" carefully to know more about writing your own handlers for Clio.
@@ -46,6 +46,7 @@ namespace data {
inline std::shared_ptr<BackendInterface>
makeBackend(util::config::ClioConfigDefinition const& config, data::LedgerCacheInterface& cache)
{
using namespace cassandra::impl;
static util::Logger const log{"Backend"}; // NOLINT(readability-identifier-naming)
LOG(log.info()) << "Constructing BackendInterface";

@@ -56,7 +57,7 @@ makeBackend(util::config::ClioConfigDefinition const& config, data::LedgerCacheI

if (boost::iequals(type, "cassandra")) {
auto const cfg = config.getObject("database." + type);
if (cfg.getValueView("provider").asString() == toString(cassandra::impl::Provider::Keyspace)) {
if (providerFromString(cfg.getValueView("provider").asString()) == Provider::Keyspace) {
backend = std::make_shared<data::cassandra::KeyspaceBackend>(
data::cassandra::SettingsProvider{cfg}, cache, readOnly
);
@@ -189,10 +189,11 @@ public:
auto const nftUris = executor_.readEach(yield, selectNFTURIStatements);

for (auto i = 0u; i < nftIDs.size(); i++) {
if (auto const maybeRow = nftInfos[i].template get<uint32_t, ripple::AccountID, bool>(); maybeRow) {
if (auto const maybeRow = nftInfos[i].template get<uint32_t, ripple::AccountID, bool>();
maybeRow.has_value()) {
auto [seq, owner, isBurned] = *maybeRow;
NFT nft(nftIDs[i], seq, owner, isBurned);
if (auto const maybeUri = nftUris[i].template get<ripple::Blob>(); maybeUri)
if (auto const maybeUri = nftUris[i].template get<ripple::Blob>(); maybeUri.has_value())
nft.uri = *maybeUri;
ret.nfts.push_back(nft);
}
@@ -57,9 +57,9 @@ namespace data::cassandra {
/**
* @brief Implements @ref CassandraBackendFamily for Keyspace
*
* @tparam SettingsProviderType The settings provider type to use
* @tparam ExecutionStrategyType The execution strategy type to use
* @tparam FetchLedgerCacheType The ledger header cache type to use
* @tparam SettingsProviderType The settings provider type
* @tparam ExecutionStrategyType The execution strategy type
* @tparam FetchLedgerCacheType The ledger header cache type
*/
template <
SomeSettingsProvider SettingsProviderType,
@@ -101,9 +101,9 @@ public:
// !range_.has_value() means the table 'ledger_range' is not populated;
// This would be the first write to the table.
// In this case, insert both min_sequence/max_sequence range into the table.
if (not(range_.has_value())) {
executor_.writeSync(schema_->insertLedgerRange, false, ledgerSequence_);
executor_.writeSync(schema_->insertLedgerRange, true, ledgerSequence_);
if (not range_.has_value()) {
executor_.writeSync(schema_->insertLedgerRange, /* isLatestLedger =*/false, ledgerSequence_);
executor_.writeSync(schema_->insertLedgerRange, /* isLatestLedger =*/true, ledgerSequence_);
}

if (not this->executeSyncUpdate(schema_->updateLedgerRange.bind(ledgerSequence_, true, ledgerSequence_ - 1))) {
@@ -130,30 +130,30 @@ public:
// Keyspace and ScyllaDB uses the same logic for taxon-filtered queries
nftIDs = fetchNFTIDsByTaxon(issuer, *taxon, limit, cursorIn, yield);
} else {
// --- Amazon Keyspaces Workflow for non-taxon queries ---
// Amazon Keyspaces Workflow for non-taxon queries
auto const startTaxon = cursorIn.has_value() ? ripple::nft::toUInt32(ripple::nft::getTaxon(*cursorIn)) : 0;
auto const startTokenID = cursorIn.value_or(ripple::uint256(0));

Statement firstQuery = schema_->selectNFTIDsByIssuerTaxon.bind(issuer);
Statement const firstQuery = schema_->selectNFTIDsByIssuerTaxon.bind(issuer);
firstQuery.bindAt(1, startTaxon);
firstQuery.bindAt(2, startTokenID);
firstQuery.bindAt(3, Limit{limit});

auto const firstRes = executor_.read(yield, firstQuery);
if (firstRes) {
for (auto const [nftID] : extract<ripple::uint256>(firstRes.value()))
if (firstRes.has_value()) {
for (auto const [nftID] : extract<ripple::uint256>(*firstRes))
nftIDs.push_back(nftID);
}

if (nftIDs.size() < limit) {
auto const remainingLimit = limit - nftIDs.size();
Statement secondQuery = schema_->selectNFTsAfterTaxonKeyspaces.bind(issuer);
Statement const secondQuery = schema_->selectNFTsAfterTaxonKeyspaces.bind(issuer);
secondQuery.bindAt(1, startTaxon);
secondQuery.bindAt(2, Limit{remainingLimit});

auto const secondRes = executor_.read(yield, secondQuery);
if (secondRes) {
for (auto const [nftID] : extract<ripple::uint256>(secondRes.value()))
if (secondRes.has_value()) {
for (auto const [nftID] : extract<ripple::uint256>(*secondRes))
nftIDs.push_back(nftID);
}
}
@@ -163,7 +163,7 @@ public:

/**
* @brief (Unsupported in Keyspaces) Fetches account root object indexes by page.
* * @note Loading the cache by enumerating all accounts is currently unsupported by the AWS Keyspaces backend.
* @note Loading the cache by enumerating all accounts is currently unsupported by the AWS Keyspaces backend.
* This function's logic relies on "PER PARTITION LIMIT 1", which Keyspaces does not support, and there is
* no efficient alternative. This is acceptable as the cache is primarily loaded via diffs. Calling this
* function will throw an exception.
@@ -197,14 +197,14 @@ private:
) const
{
std::vector<ripple::uint256> nftIDs;
Statement statement = schema_->selectNFTIDsByIssuerTaxon.bind(issuer);
Statement const statement = schema_->selectNFTIDsByIssuerTaxon.bind(issuer);
statement.bindAt(1, taxon);
statement.bindAt(2, cursorIn.value_or(ripple::uint256(0)));
statement.bindAt(3, Limit{limit});

auto const res = executor_.read(yield, statement);
if (res && res.value().hasRows()) {
for (auto const [nftID] : extract<ripple::uint256>(res.value()))
if (res.has_value() && res->hasRows()) {
for (auto const [nftID] : extract<ripple::uint256>(*res))
nftIDs.push_back(nftID);
}
return nftIDs;
@@ -229,8 +229,8 @@ private:
firstQuery.bindAt(3, Limit{limit});

auto const firstRes = executor_.read(yield, firstQuery);
if (firstRes) {
for (auto const [nftID] : extract<ripple::uint256>(firstRes.value()))
if (firstRes.has_value()) {
for (auto const [nftID] : extract<ripple::uint256>(*firstRes))
nftIDs.push_back(nftID);
}

@@ -241,8 +241,8 @@ private:
secondQuery.bindAt(2, Limit{remainingLimit});

auto const secondRes = executor_.read(yield, secondQuery);
if (secondRes) {
for (auto const [nftID] : extract<ripple::uint256>(secondRes.value()))
if (secondRes.has_value()) {
for (auto const [nftID] : extract<ripple::uint256>(*secondRes))
nftIDs.push_back(nftID);
}
}
@@ -291,10 +291,11 @@ private:

// Combine the results into final NFT objects.
for (auto i = 0u; i < nftIDs.size(); ++i) {
if (auto const maybeRow = nftInfos[i].template get<uint32_t, ripple::AccountID, bool>(); maybeRow) {
if (auto const maybeRow = nftInfos[i].template get<uint32_t, ripple::AccountID, bool>();
maybeRow.has_value()) {
auto [seq, owner, isBurned] = *maybeRow;
NFT nft(nftIDs[i], seq, owner, isBurned);
if (auto const maybeUri = nftUris[i].template get<ripple::Blob>(); maybeUri)
if (auto const maybeUri = nftUris[i].template get<ripple::Blob>(); maybeUri.has_value())
nft.uri = *maybeUri;
ret.nfts.push_back(nft);
}
@@ -1,8 +1,10 @@
# Backend
# Backend

@page "backend" Backend

The backend of Clio is responsible for handling the proper reading and writing of past ledger data from and to a given database. Currently, Cassandra and ScyllaDB are the only supported databases that are production-ready.

To support additional database types, you can create new classes that implement the virtual methods in [BackendInterface.h](https://github.com/XRPLF/clio/blob/develop/src/data/BackendInterface.hpp). Then, leveraging the Factory Object Design Pattern, modify [BackendFactory.h](https://github.com/XRPLF/clio/blob/develop/src/data/BackendFactory.hpp) with logic that returns the new database interface if the relevant `type` is provided in Clio's configuration file.
To support additional database types, you can create new classes that implement the virtual methods in [BackendInterface.hpp](https://github.com/XRPLF/clio/blob/develop/src/data/BackendInterface.hpp). Then, leveraging the Factory Object Design Pattern, modify [BackendFactory.hpp](https://github.com/XRPLF/clio/blob/develop/src/data/BackendFactory.hpp) with logic that returns the new database interface if the relevant `type` is provided in Clio's configuration file.
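
To make that factory step concrete, the branch added inside `makeBackend()` for a new backend might look roughly like this (a sketch only — the `postgres` type and `PostgresBackend` class do not exist in Clio and are named here purely for illustration):

```cpp
if (boost::iequals(type, "postgres")) {
    auto const cfg = config.getObject("database." + type);
    // PostgresBackend would implement the virtual methods of BackendInterface
    backend = std::make_shared<data::postgres::PostgresBackend>(
        data::postgres::SettingsProvider{cfg}, cache, readOnly
    );
}
```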

## Data Model

@@ -70,10 +70,10 @@ namespace data::cassandra {
*
* Note: This is a safer and more correct rewrite of the original implementation of the backend.
*
* @tparam SettingsProviderType The settings provider type to use
* @tparam ExecutionStrategyType The execution strategy type to use
* @tparam SchemaType The Schema type to use
* @tparam FetchLedgerCacheType The ledger header cache type to use
* @tparam SettingsProviderType The settings provider type
* @tparam ExecutionStrategyType The execution strategy type
* @tparam SchemaType The Schema type
* @tparam FetchLedgerCacheType The ledger header cache type
*/
template <
SomeSettingsProvider SettingsProviderType,
@@ -100,8 +100,8 @@ public:
/**
* @brief Create a new cassandra/scylla backend instance.
*
* @param settingsProvider The settings provider to use
* @param cache The ledger cache to use
* @param settingsProvider The settings provider
* @param cache The ledger cache
* @param readOnly Whether the database should be in readonly mode
*/
CassandraBackendFamily(SettingsProviderType settingsProvider, data::LedgerCacheInterface& cache, bool readOnly)
@@ -111,18 +111,18 @@ public:
, handle_{settingsProvider_.getSettings()}
, executor_{settingsProvider_.getSettings(), handle_}
{
if (auto const res = handle_.connect(); not res)
if (auto const res = handle_.connect(); not res.has_value())
throw std::runtime_error("Could not connect to database: " + res.error());

if (not readOnly) {
if (auto const res = handle_.execute(schema_.createKeyspace); not res) {
if (auto const res = handle_.execute(schema_.createKeyspace); not res.has_value()) {
// on datastax, creation of keyspaces can be configured to only be done thru the admin
// interface. this does not mean that the keyspace does not already exist tho.
if (res.error().code() != CASS_ERROR_SERVER_UNAUTHORIZED)
throw std::runtime_error("Could not create keyspace: " + res.error());
}

if (auto const res = handle_.executeEach(schema_.createSchema); not res)
if (auto const res = handle_.executeEach(schema_.createSchema); not res.has_value())
throw std::runtime_error("Could not create schema: " + res.error());
}
@@ -146,9 +146,6 @@ public:
*/
CassandraBackendFamily(CassandraBackendFamily&&) = delete;

/**
* @copydoc BackendInterface::fetchAccountTransactions
*/
TransactionsAndCursor
fetchAccountTransactions(
ripple::AccountID const& account,
@@ -217,18 +214,12 @@ public:
return {txns, {}};
}

/**
* @copydoc BackendInterface::waitForWritesToFinish
*/
void
waitForWritesToFinish() override
{
executor_.sync();
}

/**
* @copydoc BackendInterface::writeLedger
*/
void
writeLedger(ripple::LedgerHeader const& ledgerHeader, std::string&& blob) override
{
@@ -239,16 +230,13 @@ public:
ledgerSequence_ = ledgerHeader.seq;
}

/**
* @copydoc BackendInterface::fetchLatestLedgerSequence
*/
std::optional<std::uint32_t>
fetchLatestLedgerSequence(boost::asio::yield_context yield) const override
{
if (auto const res = executor_.read(yield, schema_->selectLatestLedger); res) {
if (auto const& result = res.value(); result) {
if (auto const maybeValue = result.template get<uint32_t>(); maybeValue)
return maybeValue;
if (auto const res = executor_.read(yield, schema_->selectLatestLedger); res.has_value()) {
if (auto const& rows = *res; rows) {
if (auto const maybeRow = rows.template get<uint32_t>(); maybeRow.has_value())
return maybeRow;

LOG(log_.error()) << "Could not fetch latest ledger - no rows";
return std::nullopt;
@@ -262,9 +250,6 @@ public:
return std::nullopt;
}

/**
* @copydoc BackendInterface::fetchLedgerBySequence
*/
std::optional<ripple::LedgerHeader>
fetchLedgerBySequence(std::uint32_t const sequence, boost::asio::yield_context yield) const override
{
@@ -292,9 +277,6 @@ public:
return std::nullopt;
}

/**
* @copydoc BackendInterface::fetchLedgerByHash
*/
std::optional<ripple::LedgerHeader>
fetchLedgerByHash(ripple::uint256 const& hash, boost::asio::yield_context yield) const override
{
@@ -315,9 +297,6 @@ public:
return std::nullopt;
}

/**
* @copydoc BackendInterface::hardFetchLedgerRange(boost::asio::yield_context) const
*/
std::optional<LedgerRange>
hardFetchLedgerRange(boost::asio::yield_context yield) const override
{
@@ -356,9 +335,6 @@ public:
return std::nullopt;
}

/**
* @copydoc BackendInterface::fetchAllTransactionsInLedger
*/
std::vector<TransactionAndMetadata>
fetchAllTransactionsInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
{
@@ -366,9 +342,6 @@ public:
return fetchTransactions(hashes, yield);
}

/**
* @copydoc BackendInterface::fetchAllTransactionHashesInLedger
*/
std::vector<ripple::uint256>
fetchAllTransactionHashesInLedger(
std::uint32_t const ledgerSequence,
@@ -402,9 +375,6 @@ public:
return hashes;
}

/**
* @copydoc BackendInterface::fetchNFT
*/
std::optional<NFT>
fetchNFT(
ripple::uint256 const& tokenID,
@@ -444,9 +414,6 @@ public:
return std::nullopt;
}

/**
* @copydoc BackendInterface::fetchNFTTransactions
*/
TransactionsAndCursor
fetchNFTTransactions(
ripple::uint256 const& tokenID,
@@ -518,9 +485,6 @@ public:
return {txns, {}};
}

/**
* @copydoc BackendInterface::fetchMPTHolders
*/
MPTHoldersAndCursor
fetchMPTHolders(
ripple::uint192 const& mptID,
@@ -560,9 +524,6 @@ public:
return {mptObjects, {}};
}

/**
* @copydoc BackendInterface::doFetchLedgerObject
*/
std::optional<Blob>
doFetchLedgerObject(
ripple::uint256 const& key,
@@ -585,9 +546,6 @@ public:
return std::nullopt;
}

/**
* @copydoc BackendInterface::doFetchLedgerObjectSeq
*/
std::optional<std::uint32_t>
doFetchLedgerObjectSeq(
ripple::uint256 const& key,
@@ -609,9 +567,6 @@ public:
return std::nullopt;
}

/**
* @copydoc BackendInterface::fetchTransaction
*/
std::optional<TransactionAndMetadata>
fetchTransaction(ripple::uint256 const& hash, boost::asio::yield_context yield) const override
{
@@ -629,9 +584,6 @@ public:
return std::nullopt;
}

/**
* @copydoc BackendInterface::doFetchSuccessorKey
*/
std::optional<ripple::uint256>
doFetchSuccessorKey(
ripple::uint256 key,
@@ -654,9 +606,6 @@ public:
return std::nullopt;
}

/**
* @copydoc BackendInterface::fetchTransactions
*/
std::vector<TransactionAndMetadata>
fetchTransactions(std::vector<ripple::uint256> const& hashes, boost::asio::yield_context yield) const override
{
@@ -698,9 +647,6 @@ public:
return results;
}

/**
* @copydoc BackendInterface::doFetchLedgerObjects
*/
std::vector<Blob>
doFetchLedgerObjects(
std::vector<ripple::uint256> const& keys,
@@ -741,9 +687,6 @@ public:
return results;
}

/**
* @copydoc BackendInterface::fetchLedgerDiff
*/
std::vector<LedgerObject>
fetchLedgerDiff(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
{
@@ -789,9 +732,6 @@ public:
return results;
}

/**
* @copydoc BackendInterface::fetchMigratorStatus
*/
std::optional<std::string>
fetchMigratorStatus(std::string const& migratorName, boost::asio::yield_context yield) const override
{
@@ -812,9 +752,6 @@ public:
return {};
}

/**
* @copydoc BackendInterface::fetchClioNodesData
*/
std::expected<std::vector<std::pair<boost::uuids::uuid, std::string>>, std::string>
fetchClioNodesData(boost::asio::yield_context yield) const override
{
@@ -831,9 +768,6 @@ public:
return result;
}

/**
* @copydoc BackendInterface::doWriteLedgerObject
*/
void
doWriteLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob) override
{
@@ -845,9 +779,6 @@ public:
executor_.write(schema_->insertObject, std::move(key), seq, std::move(blob));
}

/**
* @copydoc BackendInterface::writeSuccessor
*/
void
writeSuccessor(std::string&& key, std::uint32_t const seq, std::string&& successor) override
{
@@ -859,9 +790,6 @@ public:
executor_.write(schema_->insertSuccessor, std::move(key), seq, std::move(successor));
}

/**
* @copydoc BackendInterface::writeAccountTransactions
*/
void
writeAccountTransactions(std::vector<AccountTransactionsData> data) override
{
@@ -881,9 +809,6 @@ public:
executor_.write(std::move(statements));
}

/**
* @copydoc BackendInterface::writeAccountTransaction
*/
void
writeAccountTransaction(AccountTransactionsData record) override
{
@@ -901,9 +826,6 @@ public:
executor_.write(std::move(statements));
}

/**
* @copydoc BackendInterface::writeNFTTransactions
*/
void
writeNFTTransactions(std::vector<NFTTransactionsData> const& data) override
{
@@ -919,9 +841,6 @@ public:
executor_.write(std::move(statements));
}

/**
* @copydoc BackendInterface::writeTransaction
*/
void
writeTransaction(
std::string&& hash,
@@ -939,9 +858,6 @@ public:
);
}

/**
* @copydoc BackendInterface::writeNFTs
*/
void
writeNFTs(std::vector<NFTsData> const& data) override
{
@@ -980,9 +896,6 @@ public:
executor_.writeEach(std::move(statements));
}

/**
* @copydoc BackendInterface::writeNFTs
*/
void
writeMPTHolders(std::vector<MPTHolderData> const& data) override
{
@@ -994,9 +907,6 @@ public:
executor_.write(std::move(statements));
}

/**
* @copydoc BackendInterface::startWrites
*/
void
startWrites() const override
{
@@ -1004,9 +914,6 @@ public:
// probably was used in PG to start a transaction or smth.
}

/**
* @copydoc BackendInterface::writeMigratorStatus
*/
void
writeMigratorStatus(std::string const& migratorName, std::string const& status) override
{
@@ -1015,27 +922,18 @@ public:
);
}

/**
* @copydoc BackendInterface::writeNodeMessage
*/
void
writeNodeMessage(boost::uuids::uuid const& uuid, std::string message) override
{
executor_.writeSync(schema_->updateClioNodeMessage, data::cassandra::Text{std::move(message)}, uuid);
}

/**
* @copydoc BackendInterface::isTooBusy
*/
bool
isTooBusy() const override
{
return executor_.isTooBusy();
}

/**
* @copydoc BackendInterface::stats
*/
boost::json::object
stats() const override
{

@@ -97,7 +97,7 @@ SettingsProvider::parseSettings() const
settings.coreConnectionsPerHost = config_.get<uint32_t>("core_connections_per_host");
settings.queueSizeIO = config_.maybeValue<uint32_t>("queue_size_io");
settings.writeBatchSize = config_.get<std::size_t>("write_batch_size");
settings.provider = config_.get<std::string>("provider");
settings.provider = impl::providerFromString(config_.get<std::string>("provider"));

if (config_.getValueView("connect_timeout").hasValue()) {
auto const connectTimeoutSecond = config_.get<uint32_t>("connect_timeout");
@@ -61,7 +61,7 @@ Cluster::Cluster(Settings const& settings) : ManagedObject{cass_cluster_new(), k
cass_cluster_set_request_timeout(*this, settings.requestTimeout.count());

// TODO: AWS keyspace reads should be local_one to save cost
if (settings.provider == toString(cassandra::impl::Provider::Keyspace)) {
if (settings.provider == cassandra::impl::Provider::Keyspace) {
if (auto const rc = cass_cluster_set_consistency(*this, CASS_CONSISTENCY_LOCAL_QUORUM); rc != CASS_OK) {
throw std::runtime_error(fmt::format("Error setting keyspace consistency: {}", cass_error_desc(rc)));
}
@@ -20,6 +20,7 @@
#pragma once

#include "data/cassandra/impl/ManagedObject.hpp"
#include "util/Assert.hpp"
#include "util/log/Logger.hpp"

#include <cassandra.h>
@@ -31,29 +32,22 @@
#include <string>
#include <string_view>
#include <thread>
#include <utility>
#include <variant>

namespace data::cassandra::impl {

namespace {

enum class Provider { Cassandra, Keyspace };

inline std::string
toString(Provider provider)
inline Provider
providerFromString(std::string const& provider)
{
switch (provider) {
case Provider::Cassandra:
return "cassandra";
case Provider::Keyspace:
return "aws_keyspace";
}
std::unreachable();
ASSERT(
provider == "cassandra" || provider == "aws_keyspace",
"Provider type must be one of 'cassandra' or 'aws_keyspace'"
);
return provider == "cassandra" ? Provider::Cassandra : Provider::Keyspace;
}

} // namespace

// TODO: move Settings to public interface, not impl

/**
@@ -109,7 +103,7 @@ struct Settings {
std::size_t writeBatchSize = kDEFAULT_BATCH_SIZE;

/** @brief Provider to know if we are using scylladb or keyspace */
std::string provider = toString(kDEFAULT_PROVIDER);
Provider provider = kDEFAULT_PROVIDER;

/** @brief Size of the IO queue */
std::optional<uint32_t> queueSizeIO = std::nullopt; // NOLINT(readability-redundant-member-init)

@@ -1,5 +1,7 @@
# ETL subsystem

@page "etl" ETL subsystem

A single Clio node has one or more ETL sources specified in the config file. Clio subscribes to the `ledgers` stream of each of the ETL sources. The stream sends a message whenever a new ledger is validated.

Upon receiving a message on the stream, Clio fetches the data associated with the newly validated ledger from one of the ETL sources. The fetch is performed via a gRPC request called `GetLedger`. This request returns the ledger header, transactions and metadata blobs, and every ledger object added/modified/deleted as part of this ledger. The ETL subsystem then writes all of this data to the databases, and moves on to the next ledger.
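
In code terms, that fetch is roughly the following (condensed from the GrpcSource implementation elsewhere in this changeset; error handling and response processing omitted):

```cpp
// Build a GetLedger request for one newly validated ledger sequence
org::xrpl::rpc::v1::GetLedgerRequest request;
request.mutable_ledger()->set_sequence(sequence);  // which ledger to fetch
request.set_transactions(true);                    // include transaction blobs
request.set_expand(true);                          // expand them with metadata

grpc::ClientContext context;
org::xrpl::rpc::v1::GetLedgerResponse response;
grpc::Status const status = stub_->GetLedger(&context, request, &response);
// On success, response carries the header, transaction/metadata blobs and
// ledger objects, which the ETL subsystem then writes to the database.
```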
@@ -27,6 +27,7 @@
#include <boost/asio/io_context.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <fmt/format.h>
#include <grpc/grpc.h>
#include <grpcpp/client_context.h>
#include <grpcpp/security/credentials.h>
#include <grpcpp/support/channel_arguments.h>
@@ -34,6 +35,7 @@
#include <org/xrpl/rpc/v1/get_ledger.pb.h>
#include <org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>

#include <chrono>
#include <cstddef>
#include <cstdint>
#include <exception>
@@ -52,17 +54,25 @@ GrpcSource::GrpcSource(std::string const& ip, std::string const& grpcPort, std::
try {
boost::asio::io_context ctx;
boost::asio::ip::tcp::resolver resolver{ctx};

auto const resolverResult = resolver.resolve(ip, grpcPort);
if (resolverResult.empty()) {
if (resolverResult.empty())
throw std::runtime_error("Failed to resolve " + ip + ":" + grpcPort);
}

std::stringstream ss;
ss << resolverResult.begin()->endpoint();

grpc::ChannelArguments chArgs;
chArgs.SetMaxReceiveMessageSize(-1);
chArgs.SetInt(GRPC_ARG_KEEPALIVE_TIME_MS, kKEEPALIVE_PING_INTERVAL_MS);
chArgs.SetInt(GRPC_ARG_KEEPALIVE_TIMEOUT_MS, kKEEPALIVE_TIMEOUT_MS);
chArgs.SetInt(GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS, static_cast<int>(kKEEPALIVE_PERMIT_WITHOUT_CALLS));
chArgs.SetInt(GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA, kMAX_PINGS_WITHOUT_DATA);

stub_ = org::xrpl::rpc::v1::XRPLedgerAPIService::NewStub(
grpc::CreateCustomChannel(ss.str(), grpc::InsecureChannelCredentials(), chArgs)
);

LOG(log_.debug()) << "Made stub for remote.";
} catch (std::exception const& e) {
LOG(log_.warn()) << "Exception while creating stub: " << e.what() << ".";
@@ -76,10 +86,11 @@ GrpcSource::fetchLedger(uint32_t sequence, bool getObjects, bool getObjectNeighb
if (!stub_)
return {{grpc::StatusCode::INTERNAL, "No Stub"}, response};

// Ledger header with txns and metadata
org::xrpl::rpc::v1::GetLedgerRequest request;
grpc::ClientContext context;

context.set_deadline(std::chrono::system_clock::now() + kDEADLINE); // Prevent indefinite blocking

request.mutable_ledger()->set_sequence(sequence);
request.set_transactions(true);
request.set_expand(true);

@@ -26,6 +26,7 @@
#include <org/xrpl/rpc/v1/get_ledger.pb.h>
#include <xrpl/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>

#include <chrono>
#include <cstdint>
#include <memory>
#include <string>
@@ -38,6 +39,12 @@ class GrpcSource {
std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub> stub_;
std::shared_ptr<BackendInterface> backend_;

static constexpr auto kKEEPALIVE_PING_INTERVAL_MS = 10000;
static constexpr auto kKEEPALIVE_TIMEOUT_MS = 5000;
static constexpr auto kKEEPALIVE_PERMIT_WITHOUT_CALLS = true; // Allow keepalive pings when no calls
static constexpr auto kMAX_PINGS_WITHOUT_DATA = 0; // No limit
static constexpr auto kDEADLINE = std::chrono::seconds(30);

public:
GrpcSource(std::string const& ip, std::string const& grpcPort, std::shared_ptr<BackendInterface> backend);

@@ -32,6 +32,12 @@ struct AmendmentBlockHandlerInterface {
*/
virtual void
notifyAmendmentBlocked() = 0;

/**
* @brief Stop the block handler from repeatedly executing
*/
virtual void
stop() = 0;
};

} // namespace etlng

@@ -25,6 +25,7 @@

#include <chrono>
#include <functional>
#include <optional>
#include <utility>

namespace etlng::impl {
@@ -45,6 +46,11 @@ AmendmentBlockHandler::AmendmentBlockHandler(
{
}

AmendmentBlockHandler::~AmendmentBlockHandler()
{
stop();
}

void
AmendmentBlockHandler::notifyAmendmentBlocked()
{
@@ -53,4 +59,13 @@ AmendmentBlockHandler::notifyAmendmentBlocked()
operation_.emplace(ctx_.executeRepeatedly(interval_, action_));
}

void
AmendmentBlockHandler::stop()
{
if (operation_.has_value()) {
operation_->abort();
operation_.reset();
}
}

} // namespace etlng::impl

@@ -56,11 +56,10 @@ public:
ActionType action = kDEFAULT_AMENDMENT_BLOCK_ACTION
);

~AmendmentBlockHandler() override
{
if (operation_.has_value())
operation_.value().abort();
}
~AmendmentBlockHandler() override;

void
stop() override;

void
notifyAmendmentBlocked() override;

@@ -28,6 +28,7 @@

#include <boost/asio/spawn.hpp>
#include <fmt/format.h>
#include <grpc/grpc.h>
#include <grpcpp/client_context.h>
#include <grpcpp/security/credentials.h>
#include <grpcpp/support/channel_arguments.h>
@@ -36,6 +37,7 @@
#include <org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>

#include <atomic>
#include <chrono>
#include <cstddef>
#include <cstdint>
#include <exception>
@@ -63,13 +65,18 @@ resolve(std::string const& ip, std::string const& port)

namespace etlng::impl {

GrpcSource::GrpcSource(std::string const& ip, std::string const& grpcPort)
GrpcSource::GrpcSource(std::string const& ip, std::string const& grpcPort, std::chrono::system_clock::duration deadline)
: log_(fmt::format("ETL_Grpc[{}:{}]", ip, grpcPort))
, initialLoadShouldStop_(std::make_unique<std::atomic_bool>(false))
, deadline_{deadline}
{
try {
grpc::ChannelArguments chArgs;
chArgs.SetMaxReceiveMessageSize(-1);
chArgs.SetInt(GRPC_ARG_KEEPALIVE_TIME_MS, kKEEPALIVE_PING_INTERVAL_MS);
chArgs.SetInt(GRPC_ARG_KEEPALIVE_TIMEOUT_MS, kKEEPALIVE_TIMEOUT_MS);
chArgs.SetInt(GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS, static_cast<int>(kKEEPALIVE_PERMIT_WITHOUT_CALLS));
chArgs.SetInt(GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA, kMAX_PINGS_WITHOUT_DATA);

stub_ = org::xrpl::rpc::v1::XRPLedgerAPIService::NewStub(
grpc::CreateCustomChannel(resolve(ip, grpcPort), grpc::InsecureChannelCredentials(), chArgs)
@@ -88,10 +95,11 @@ GrpcSource::fetchLedger(uint32_t sequence, bool getObjects, bool getObjectNeighb
if (!stub_)
return {{grpc::StatusCode::INTERNAL, "No Stub"}, response};

// Ledger header with txns and metadata
org::xrpl::rpc::v1::GetLedgerRequest request;
grpc::ClientContext context;

context.set_deadline(std::chrono::system_clock::now() + deadline_); // Prevent indefinite blocking

request.mutable_ledger()->set_sequence(sequence);
request.set_transactions(true);
request.set_expand(true);

@@ -29,6 +29,7 @@
#include <xrpl/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>

#include <atomic>
#include <chrono>
#include <cstdint>
#include <memory>
#include <string>
@@ -40,9 +41,20 @@ class GrpcSource {
util::Logger log_;
std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub> stub_;
std::unique_ptr<std::atomic_bool> initialLoadShouldStop_;
std::chrono::system_clock::duration deadline_;

static constexpr auto kKEEPALIVE_PING_INTERVAL_MS = 10000;
static constexpr auto kKEEPALIVE_TIMEOUT_MS = 5000;
static constexpr auto kKEEPALIVE_PERMIT_WITHOUT_CALLS = true; // Allow keepalive pings when no calls
static constexpr auto kMAX_PINGS_WITHOUT_DATA = 0; // No limit
static constexpr auto kDEADLINE = std::chrono::seconds(30);

public:
GrpcSource(std::string const& ip, std::string const& grpcPort);
GrpcSource(
std::string const& ip,
std::string const& grpcPort,
std::chrono::system_clock::duration deadline = kDEADLINE
);

/**
* @brief Fetch data for a specific ledger.

@@ -209,8 +209,9 @@ TransactionFeed::pub(
rpc::insertDeliveredAmount(pubObj[JS(meta)].as_object(), tx, meta, txMeta.date);

auto& txnPubobj = pubObj[txKey].as_object();
auto& metaPubobj = pubObj[JS(meta)].as_object();
rpc::insertDeliverMaxAlias(txnPubobj, version);
rpc::insertMPTIssuanceID(txnPubobj, meta);
rpc::insertMPTIssuanceID(txnPubobj, tx, metaPubobj, meta);

Json::Value nftJson;
ripple::RPC::insertNFTSyntheticInJson(nftJson, tx, *meta);

@@ -1,42 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

/**
* @mainpage Clio API server
*
* @section intro Introduction
*
* Clio is an XRP Ledger API server optimized for RPC calls over WebSocket or JSON-RPC.
*
* It stores validated historical ledger and transaction data in a more space efficient format, and uses up to 4 times
* less space than <A HREF="https://github.com/XRPLF/rippled">rippled</A>.
*
* Clio can be configured to store data in <A HREF="https://cassandra.apache.org/_/index.html">Apache Cassandra</A> or
* <A HREF="https://www.scylladb.com/">ScyllaDB</A>, enabling scalable read throughput. Multiple Clio nodes can share
* access to the same dataset, which allows for a highly available cluster of Clio nodes without the need for redundant
* data storage or computation.
*
* @section Develop
*
* As you prepare to develop code for Clio, please be sure you are aware of our current
* <A HREF="https://github.com/XRPLF/clio/blob/develop/CONTRIBUTING.md">Contribution guidelines</A>.
*
* Read [rpc/README.md](../rpc/README.md) carefully to know more about writing your own handlers for
* Clio.
*/
@@ -1,5 +1,7 @@
# Clio Migration

@page "migration" Clio Migration

Clio maintains the off-chain data of the XRPL and multiple index tables to power complex queries. To simplify the creation of index tables, this migration framework handles database schema changes and facilitates seamless migration of historical data.

## Command Line Usage
@@ -1,4 +1,6 @@
# RPC subsystem
# RPC subsystem

@page "rpc" RPC subsystem

The RPC subsystem is where the common framework for handling incoming JSON requests is implemented.

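As a rough sketch of the handler pattern this framework supports (types and names here are hypothetical approximations — see the actual handler classes under src/rpc for the real contracts):

```cpp
#include <string>
#include <utility>

// A hypothetical minimal handler: Input is parsed from the request JSON,
// Output is serialized back, and process() performs the actual work.
struct EchoHandler {
    struct Input {
        std::string message;
    };
    struct Output {
        std::string message;
    };

    // In Clio proper the return type also carries error information
    Output
    process(Input input) const
    {
        return Output{std::move(input.message)};  // a real handler would query the backend
    }
};
```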
@@ -34,7 +34,6 @@
#include "web/Context.hpp"

#include <boost/algorithm/string/case_conv.hpp>
#include <boost/algorithm/string/predicate.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/format/format_fwd.hpp>
#include <boost/format/free_funcs.hpp>
@@ -258,7 +257,7 @@ toExpandedJson(
auto metaJson = toJson(*meta);
insertDeliveredAmount(metaJson, txn, meta, blobs.date);
insertDeliverMaxAlias(txnJson, apiVersion);
insertMPTIssuanceID(txnJson, meta);
insertMPTIssuanceID(txnJson, txn, metaJson, meta);

if (nftEnabled == NFTokenjson::ENABLE) {
Json::Value nftJson;
@@ -343,36 +342,41 @@ getMPTIssuanceID(std::shared_ptr<ripple::TxMeta const> const& meta)
/**
* @brief Check if transaction has a new MPToken created
*
* @param txnJson The transaction Json
* @param meta The metadata
* @param txn The transaction object
* @param meta The metadata object
* @return true if the transaction can have a mpt_issuance_id
*/
static bool
canHaveMPTIssuanceID(boost::json::object const& txnJson, std::shared_ptr<ripple::TxMeta const> const& meta)
canHaveMPTIssuanceID(std::shared_ptr<ripple::STTx const> const& txn, std::shared_ptr<ripple::TxMeta const> const& meta)
{
if (txnJson.at(JS(TransactionType)).is_string() and
not boost::iequals(txnJson.at(JS(TransactionType)).as_string(), JS(MPTokenIssuanceCreate)))
if (txn->getTxnType() != ripple::ttMPTOKEN_ISSUANCE_CREATE)
return false;

if (meta->getResultTER() != ripple::tesSUCCESS)
return false;

return true;
return (meta->getResultTER() == ripple::tesSUCCESS);
}

bool
insertMPTIssuanceID(boost::json::object& txnJson, std::shared_ptr<ripple::TxMeta const> const& meta)
insertMPTIssuanceID(
boost::json::object& txnJson,
std::shared_ptr<ripple::STTx const> const& txn,
boost::json::object& metaJson,
std::shared_ptr<ripple::TxMeta const> const& meta
)
{
if (!canHaveMPTIssuanceID(txnJson, meta))
return false;

if (txnJson.contains(JS(TransactionType)) && txnJson.at(JS(TransactionType)).is_string() and
txnJson.at(JS(TransactionType)).as_string() == JS(MPTokenIssuanceCreate))
if (!canHaveMPTIssuanceID(txn, meta))
return false;

auto const id = getMPTIssuanceID(meta);
ASSERT(id.has_value(), "MPTIssuanceID must have value");
txnJson[JS(mpt_issuance_id)] = ripple::to_string(*id);

// For mpttokenissuance create, add mpt_issuance_id to metajson
// Otherwise, add it to txn json
if (txnJson.contains(JS(TransactionType)) && txnJson.at(JS(TransactionType)).is_string() and
txnJson.at(JS(TransactionType)).as_string() == JS(MPTokenIssuanceCreate)) {
metaJson[JS(mpt_issuance_id)] = ripple::to_string(*id);
} else {
txnJson[JS(mpt_issuance_id)] = ripple::to_string(*id);
}

return true;
}

@@ -201,15 +201,23 @@ insertDeliveredAmount(

/**
* @brief Add "mpt_issuance_id" into various MPTToken transaction json.
* @note We exclude "mpt_issuance_id" for MPTokenIssuanceCreate only. The reason is because the mpt_issuance_id
* is generated only after one submits MPTokenIssuanceCreate, so there’s no way to know what the id is. (rippled)
* @note We add "mpt_issuance_id" into the meta part of MPTokenIssuanceCreate only. The reason is because the
* mpt_issuance_id is generated only after one submits MPTokenIssuanceCreate, so there’s no way to know what the id is.
* (rippled)
*
* @param txnJson The transaction Json object
* @param txn The txn object
* @param metaJson The metadata Json object
* @param meta The metadata object
* @return true if the "mpt_issuance_id" is added to the txnJson JSON object
* @return true if the "mpt_issuance_id" is added to either txnJson or metaJson object
*/
bool
insertMPTIssuanceID(boost::json::object& txnJson, std::shared_ptr<ripple::TxMeta const> const& meta);
insertMPTIssuanceID(
boost::json::object& txnJson,
std::shared_ptr<ripple::STTx const> const& txn,
boost::json::object& metaJson,
std::shared_ptr<ripple::TxMeta const> const& meta
);

/**
* @brief Convert STBase object to JSON

@@ -333,7 +333,13 @@ tag_invoke(boost::json::value_to_tag<LedgerEntryHandler::Input>, boost::json::va
{JS(mptoken), ripple::ltMPTOKEN},
{JS(permissioned_domain), ripple::ltPERMISSIONED_DOMAIN},
{JS(vault), ripple::ltVAULT},
{JS(delegate), ripple::ltDELEGATE}
{JS(delegate), ripple::ltDELEGATE},
{JS(amendments), ripple::ltAMENDMENTS},
{JS(fee), ripple::ltFEE_SETTINGS},
{JS(hashes), ripple::ltLEDGER_HASHES},
{JS(nft_offer), ripple::ltNFTOKEN_OFFER},
{JS(nunl), ripple::ltNEGATIVE_UNL},
{JS(signer_list), ripple::ltSIGNER_LIST}
};

auto const parseBridgeFromJson = [](boost::json::value const& bridgeJson) {

@@ -428,6 +428,12 @@ public:
validation::CustomValidators::accountBase58Validator, Status(ClioError::RpcMalformedAddress)
}}
}}},
{JS(amendments), kMALFORMED_REQUEST_HEX_STRING_VALIDATOR},
{JS(fee), kMALFORMED_REQUEST_HEX_STRING_VALIDATOR},
{JS(hashes), kMALFORMED_REQUEST_HEX_STRING_VALIDATOR},
{JS(nft_offer), kMALFORMED_REQUEST_HEX_STRING_VALIDATOR},
{JS(nunl), kMALFORMED_REQUEST_HEX_STRING_VALIDATOR},
{JS(signer_list), kMALFORMED_REQUEST_HEX_STRING_VALIDATOR},
{JS(ledger), check::Deprecated{}},
{"include_deleted", validation::Type<bool>{}},
};

@@ -1,5 +1,7 @@
# Async framework

@page "async" Async framework

## Introduction

Clio uses threads intensively. Multiple parts of Clio were/are implemented by running a `std::thread` with some sort of loop inside. Each time, this pattern is reimplemented in a slightly different way, with state managed using asynchronous queues, atomic flags, mutexes and other low-level primitives.
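The framework replaces that boilerplate with scheduled operations on an execution context; a rough usage sketch, based on the `executeRepeatedly`/`abort` calls visible in AmendmentBlockHandler elsewhere in this changeset (context type and setup assumed):

```cpp
// Schedule a task to run repeatedly on an execution context and keep the
// returned operation handle so the task can be cancelled later.
auto operation = ctx.executeRepeatedly(std::chrono::seconds{1}, [] {
    // periodic work goes here
});

// ... later, stop the repeating task
operation.abort();
```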
@@ -45,7 +45,7 @@ class ConfigValue;
/**
* @brief specific values that are accepted for logger levels in config.
*/
static constexpr std::array<char const*, 6> kLOG_LEVELS = {
static constexpr std::array<std::string_view, 6> kLOG_LEVELS = {
"trace",
"debug",
"info",
@@ -57,7 +57,7 @@ static constexpr std::array<char const*, 6> kLOG_LEVELS = {
/**
* @brief specific values that are accepted for logger tag style in config.
*/
static constexpr std::array<char const*, 5> kLOG_TAGS = {
static constexpr std::array<std::string_view, 5> kLOG_TAGS = {
"int",
"uint",
"null",
@@ -68,7 +68,7 @@ static constexpr std::array<char const*, 5> kLOG_TAGS = {
/**
* @brief specific values that are accepted for cache loading in config.
*/
static constexpr std::array<char const*, 3> kLOAD_CACHE_MODE = {
static constexpr std::array<std::string_view, 3> kLOAD_CACHE_MODE = {
"sync",
"async",
"none",
@@ -77,17 +77,17 @@ static constexpr std::array<char const*, 3> kLOAD_CACHE_MODE = {
/**
* @brief specific values that are accepted for database type in config.
*/
static constexpr std::array<char const*, 1> kDATABASE_TYPE = {"cassandra"};
static constexpr std::array<std::string_view, 1> kDATABASE_TYPE = {"cassandra"};

/**
* @brief specific values that are accepted for server's processing_policy in config.
*/
static constexpr std::array<char const*, 2> kPROCESSING_POLICY = {"parallel", "sequent"};
static constexpr std::array<std::string_view, 2> kPROCESSING_POLICY = {"parallel", "sequent"};

/**
* @brief specific values that are accepted for database provider in config.
*/
static constexpr std::array<char const*, 2> kPROVIDER = {"cassandra", "aws_keyspace"};
static constexpr std::array<std::string_view, 2> kPROVIDER = {"cassandra", "aws_keyspace"};

/**
* @brief An interface to enforce constraints on certain values within ClioConfigDefinition.
@@ -123,7 +123,7 @@ protected:
*/
template <std::size_t ArrSize>
constexpr std::string
makeErrorMsg(std::string_view key, Value const& value, std::array<char const*, ArrSize> arr) const
makeErrorMsg(std::string_view key, Value const& value, std::array<std::string_view, ArrSize> arr) const
{
// Extract the value from the variant
auto const valueStr = std::visit([](auto const& v) { return fmt::format("{}", v); }, value);
@@ -271,7 +271,7 @@ public:
* @param key The key of the ConfigValue that has this constraint
* @param arr The value that has this constraint must be of the values in arr
*/
constexpr OneOf(std::string_view key, std::array<char const*, ArrSize> arr) : key_{key}, arr_{arr}
constexpr OneOf(std::string_view key, std::array<std::string_view, ArrSize> arr) : key_{key}, arr_{arr}
{
}

@@ -318,7 +318,7 @@ private:
print(std::ostream& stream) const override
{
std::string valuesStream;
std::ranges::for_each(arr_, [&valuesStream](std::string const& elem) {
std::ranges::for_each(arr_, [&valuesStream](std::string_view elem) {
valuesStream += fmt::format(" `{}`,", elem);
});
// replace the last "," with "."
@@ -327,7 +327,7 @@ private:
}

std::string_view key_;
std::array<char const*, ArrSize> arr_;
std::array<std::string_view, ArrSize> arr_;
};

/**

@@ -220,10 +220,10 @@ LogService::createFileSink(FileLoggingParams const& params, std::string const& f
* @param defaultSeverity The default severity level to use if not overridden.
* @return A map of channel names to their minimum severity levels, or an error message if parsing fails.
*/
static std::expected<std::unordered_map<std::string, Severity>, std::string>
static std::expected<std::unordered_map<std::string_view, Severity>, std::string>
getMinSeverity(config::ClioConfigDefinition const& config, Severity defaultSeverity)
{
std::unordered_map<std::string, Severity> minSeverity;
std::unordered_map<std::string_view, Severity> minSeverity;
for (auto const& channel : Logger::kCHANNELS)
minSeverity[channel] = defaultSeverity;

@@ -284,13 +284,15 @@ LogServiceState::reset()
}

std::shared_ptr<spdlog::logger>
LogServiceState::registerLogger(std::string const& channel, std::optional<Severity> severity)
LogServiceState::registerLogger(std::string_view channel, std::optional<Severity> severity)
{
if (not initialized_) {
throw std::logic_error("LogService is not initialized");
}

std::shared_ptr<spdlog::logger> existingLogger = spdlog::get(channel);
std::string const channelStr{channel};

std::shared_ptr<spdlog::logger> existingLogger = spdlog::get(channelStr);
if (existingLogger != nullptr) {
if (severity.has_value())
existingLogger->set_level(toSpdlogLevel(*severity));
@@ -300,10 +302,10 @@ LogServiceState::registerLogger(std::string const& channel, std::optional<Severi
std::shared_ptr<spdlog::logger> logger;
if (isAsync_) {
logger = std::make_shared<spdlog::async_logger>(
channel, sinks_.begin(), sinks_.end(), spdlog::thread_pool(), spdlog::async_overflow_policy::block
channelStr, sinks_.begin(), sinks_.end(), spdlog::thread_pool(), spdlog::async_overflow_policy::block
);
} else {
logger = std::make_shared<spdlog::logger>(channel, sinks_.begin(), sinks_.end());
logger = std::make_shared<spdlog::logger>(channelStr, sinks_.begin(), sinks_.end());
}

logger->set_level(toSpdlogLevel(severity.value_or(defaultSeverity_)));
@@ -427,10 +429,25 @@ LogServiceState::replaceSinks(std::vector<std::shared_ptr<spdlog::sinks::sink>>
spdlog::apply_all([](std::shared_ptr<spdlog::logger> logger) { logger->sinks() = sinks_; });
}

Logger::Logger(std::string channel) : logger_(LogServiceState::registerLogger(channel))
Logger::Logger(std::string_view const channel) : logger_(LogServiceState::registerLogger(channel))
{
}

Logger::~Logger()
{
// One reference is held by logger_ and the other by spdlog registry
static constexpr size_t kLAST_LOGGER_REF_COUNT = 2;

if (logger_ == nullptr) {
return;
}

bool const isDynamic = !std::ranges::contains(kCHANNELS, logger_->name());
if (isDynamic && logger_.use_count() == kLAST_LOGGER_REF_COUNT) {
spdlog::drop(logger_->name());
}
}

Logger::Pump::Pump(std::shared_ptr<spdlog::logger> logger, Severity sev, SourceLocationType const& loc)
: logger_(std::move(logger))
, severity_(sev)

@@ -29,6 +29,7 @@
#include <optional>
#include <sstream>
#include <string>
#include <string_view>
#include <vector>

// We forward declare spdlog::logger and spdlog::sinks::sink
@@ -91,7 +92,7 @@ enum class Severity {
* otherwise. See @ref LogService::init() for setup of the logging core and
* severity levels for each channel.
*/
class Logger final {
class Logger {
std::shared_ptr<spdlog::logger> logger_;

friend class LogService; // to expose the Pump interface
@@ -145,7 +146,7 @@ class Logger {
};

public:
static constexpr std::array<char const*, 8> kCHANNELS = {
static constexpr std::array<std::string_view, 8> kCHANNELS = {
"General",
"WebServer",
"Backend",
@@ -165,10 +166,10 @@ public:
*
* @param channel The channel this logger will report into.
*/
Logger(std::string channel);
Logger(std::string_view const channel);

Logger(Logger const&) = default;
~Logger() = default;
~Logger();

Logger(Logger&&) = default;
Logger&
@@ -291,7 +292,7 @@ protected:
* @return Shared pointer to the registered spdlog logger
*/
static std::shared_ptr<spdlog::logger>
registerLogger(std::string const& channel, std::optional<Severity> severity = std::nullopt);
registerLogger(std::string_view channel, std::optional<Severity> severity = std::nullopt);

protected:
static bool isAsync_; // NOLINT(readability-identifier-naming)

@@ -1,5 +1,7 @@
# Web server subsystem

@page "web" Web server subsystem

This folder contains all of the classes for running the web server.

The web server subsystem:

@@ -29,13 +29,14 @@

#include <algorithm>
#include <memory>
#include <string_view>

void
LoggerFixture::init()
{
util::LogServiceState::init(false, util::Severity::FTL, {});

std::ranges::for_each(util::Logger::kCHANNELS, [](char const* channel) {
std::ranges::for_each(util::Logger::kCHANNELS, [](std::string_view const channel) {
util::LogService::registerLogger(channel);
});

@@ -25,4 +25,5 @@

struct MockAmendmentBlockHandler : etlng::AmendmentBlockHandlerInterface {
MOCK_METHOD(void, notifyAmendmentBlocked, (), (override));
MOCK_METHOD(void, stop, (), (override));
};

@@ -32,7 +32,9 @@
#include <org/xrpl/rpc/v1/get_ledger_entry.pb.h>
#include <xrpl/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>

#include <chrono>
#include <memory>
#include <optional>
#include <string>
#include <thread>

@@ -90,8 +92,7 @@ struct WithMockXrpLedgerAPIService : virtual ::testing::Test {

~WithMockXrpLedgerAPIService() override
{
server_->Shutdown();
serverThread_.join();
shutdown();
}

int
@@ -99,6 +100,19 @@ struct WithMockXrpLedgerAPIService : virtual ::testing::Test {
{
return port_;
}

void
shutdown(std::optional<std::chrono::system_clock::duration> deadline = std::nullopt)
{
if (deadline.has_value()) {
server_->Shutdown(std::chrono::system_clock::now() + *deadline);
} else {
server_->Shutdown();
}
if (serverThread_.joinable())
serverThread_.join();
}

MockXrpLedgerAPIService mockXrpLedgerAPIService;

private:

@@ -85,15 +85,17 @@ using namespace data::cassandra;

class BackendCassandraTestBase : public SyncAsioContextTest, public WithPrometheus {
protected:
static constexpr auto kCASSANDRA = "cassandra";

ClioConfigDefinition cfg_{
{"database.type", ConfigValue{ConfigType::String}.defaultValue("cassandra")},
{"database.type", ConfigValue{ConfigType::String}.defaultValue(kCASSANDRA)},
{"database.cassandra.contact_points",
ConfigValue{ConfigType::String}.defaultValue(TestGlobals::instance().backendHost)},
{"database.cassandra.secure_connect_bundle", ConfigValue{ConfigType::String}.optional()},
{"database.cassandra.port", ConfigValue{ConfigType::Integer}.optional()},
{"database.cassandra.keyspace",
ConfigValue{ConfigType::String}.defaultValue(TestGlobals::instance().backendKeyspace)},
{"database.cassandra.provider", ConfigValue{ConfigType::String}.defaultValue("cassandra")},
{"database.cassandra.provider", ConfigValue{ConfigType::String}.defaultValue(kCASSANDRA)},
{"database.cassandra.replication_factor", ConfigValue{ConfigType::Integer}.defaultValue(1)},
{"database.cassandra.table_prefix", ConfigValue{ConfigType::String}.optional()},
{"database.cassandra.max_write_requests_outstanding", ConfigValue{ConfigType::Integer}.defaultValue(10'000)},

@@ -95,14 +95,15 @@ class MigrationCassandraSimpleTest : public WithPrometheus {
     }
 
 protected:
-    ClioConfigDefinition cfg_{
-        {{"database.type", ConfigValue{ConfigType::String}.defaultValue("cassandra")},
+    static constexpr auto kCASSANDRA = "cassandra";
+
+    ClioConfigDefinition cfg_{
+        {{"database.type", ConfigValue{ConfigType::String}.defaultValue(kCASSANDRA)},
          {"database.cassandra.contact_points",
           ConfigValue{ConfigType::String}.defaultValue(TestGlobals::instance().backendHost)},
          {"database.cassandra.keyspace",
           ConfigValue{ConfigType::String}.defaultValue(TestGlobals::instance().backendKeyspace)},
-         {"database.cassandra.provider", ConfigValue{ConfigType::String}.defaultValue("cassandra")},
+         {"database.cassandra.provider", ConfigValue{ConfigType::String}.defaultValue(kCASSANDRA)},
          {"database.cassandra.replication_factor", ConfigValue{ConfigType::Integer}.defaultValue(1)},
          {"database.cassandra.connect_timeout", ConfigValue{ConfigType::Integer}.defaultValue(2)},

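Both Cassandra fixtures hoist the repeated "cassandra" literal into a named constant so the `database.type` and `database.cassandra.provider` defaults cannot silently drift apart. The pattern in isolation, as a standalone sketch:

#include <string_view>

// One source of truth for a literal shared by several config defaults.
static constexpr auto kCASSANDRA = "cassandra";

// Any rename now updates every use at once; a compile-time check keeps it honest.
static_assert(std::string_view{kCASSANDRA} == "cassandra");
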
@@ -36,7 +36,10 @@ struct AmendmentBlockHandlerTest : util::prometheus::WithPrometheus, SyncAsioCon
     etl::SystemState state;
 };
 
-TEST_F(AmendmentBlockHandlerTest, CallTonotifyAmendmentBlockedSetsStateAndRepeatedlyCallsAction)
+// Note: This test can be flaky due to the way it was written (it depends on timing).
+// Since the old ETL is going to be replaced by ETLng, all tests including this one will be deleted anyway, so the fix
+// for flakiness is to increase the context runtime to 50ms until then (to not waste time).
+TEST_F(AmendmentBlockHandlerTest, CallToNotifyAmendmentBlockedSetsStateAndRepeatedlyCallsAction)
 {
     AmendmentBlockHandler handler{ctx_, state, std::chrono::nanoseconds{1}, actionMock.AsStdFunction()};
 
@@ -45,12 +48,7 @@ TEST_F(AmendmentBlockHandlerTest, CallTonotifyAmendmentBlockedSetsStateAndRepeat
     handler.notifyAmendmentBlocked();
     EXPECT_TRUE(state.isAmendmentBlocked);
 
-    // Code runs significantly slower when assertions are enabled
-#ifdef _GLIBCXX_ASSERTIONS
-    runContextFor(std::chrono::milliseconds{10});
-#else
-    runContextFor(std::chrono::milliseconds{1});
-#endif
+    runContextFor(std::chrono::milliseconds{50});
 }
 
 struct DefaultAmendmentBlockActionTest : LoggerFixture {};

@@ -40,7 +40,7 @@ protected:
     util::async::CoroExecutionContext ctx_;
 };
 
-TEST_F(AmendmentBlockHandlerNgTests, CallTonotifyAmendmentBlockedSetsStateAndRepeatedlyCallsAction)
+TEST_F(AmendmentBlockHandlerNgTests, CallToNotifyAmendmentBlockedSetsStateAndRepeatedlyCallsAction)
 {
     static constexpr auto kMAX_ITERATIONS = 10uz;
     etlng::impl::AmendmentBlockHandler handler{ctx_, state_, std::chrono::nanoseconds{1}, actionMock_.AsStdFunction()};

@@ -55,6 +55,7 @@ TEST_F(AmendmentBlockHandlerNgTests, CallTonotifyAmendmentBlockedSetsStateAndRep
 
     handler.notifyAmendmentBlocked();
     stop.acquire();  // wait for the counter to reach over kMAX_ITERATIONS
+    handler.stop();
 
     EXPECT_TRUE(state_.isAmendmentBlocked);
 }

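The new `handler.stop()` call is sequenced after `stop.acquire()`: the test only asks the handler to stop once the action has demonstrably run enough times. The semaphore's setup lies outside this hunk, so here is a self-contained sketch of the handshake it relies on (the worker loop is a stand-in for the handler's repeated action, not the test's actual mock):

#include <atomic>
#include <cstddef>
#include <semaphore>
#include <stop_token>
#include <thread>

int
main()
{
    static constexpr std::size_t kMAX_ITERATIONS = 10;

    std::binary_semaphore stop{0};
    std::atomic<std::size_t> counter{0};

    // Stand-in for the repeatedly invoked action: release the semaphore
    // once enough iterations have been observed.
    std::jthread worker{[&](std::stop_token token) {
        while (!token.stop_requested()) {
            if (++counter == kMAX_ITERATIONS)
                stop.release();
        }
    }};

    stop.acquire();         // wait for the counter to reach kMAX_ITERATIONS
    worker.request_stop();  // only now stop the worker, mirroring handler.stop()
    return 0;
}
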
@@ -41,15 +41,18 @@
 #include <xrpl/basics/strHex.h>
 
 #include <atomic>
 #include <chrono>
 #include <condition_variable>
 #include <cstddef>
 #include <cstdint>
 #include <functional>
 #include <future>
 #include <map>
 #include <memory>
 #include <mutex>
 #include <optional>
 #include <queue>
+#include <semaphore>
 #include <string>
 #include <vector>

@@ -357,3 +360,36 @@ TEST_F(GrpcSourceStopTests, LoadInitialLedgerStopsWhenRequested)
     ASSERT_FALSE(res.has_value());
     EXPECT_EQ(res.error(), etlng::InitialLedgerLoadError::Cancelled);
 }
+
+TEST_F(GrpcSourceNgTests, DeadlineIsHandledCorrectly)
+{
+    static constexpr auto kDEADLINE = std::chrono::milliseconds{5};
+
+    uint32_t const sequence = 123u;
+    bool const getObjects = true;
+    bool const getObjectNeighbors = false;
+
+    std::binary_semaphore sem(0);
+
+    auto grpcSource =
+        std::make_unique<etlng::impl::GrpcSource>("localhost", std::to_string(getXRPLMockPort()), kDEADLINE);
+
+    // Note: this may not be called at all if gRPC cancels before it gets a chance to call the stub
+    EXPECT_CALL(mockXrpLedgerAPIService, GetLedger)
+        .Times(testing::AtMost(1))
+        .WillRepeatedly([&](grpc::ServerContext*,
+                            org::xrpl::rpc::v1::GetLedgerRequest const*,
+                            org::xrpl::rpc::v1::GetLedgerResponse*) {
+            // wait for the main thread to discard us; fail the test if that does not happen in the expected timeframe
+            [&] { ASSERT_TRUE(sem.try_acquire_for(std::chrono::milliseconds{50})); }();
+            return grpc::Status{};
+        });
+
+    auto const [status, response] = grpcSource->fetchLedger(sequence, getObjects, getObjectNeighbors);
+    ASSERT_FALSE(status.ok());  // timed out after kDEADLINE
+
+    sem.release();  // we don't need to hold the GetLedger thread any longer
+    grpcSource.reset();
+
+    shutdown(std::chrono::milliseconds{10});
+}

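The test expects `fetchLedger` to fail locally once the 5ms deadline passes, even though the mocked server is still holding the call. gRPC supports this through per-call deadlines; presumably GrpcSource applies its configured deadline to each `grpc::ClientContext`, along these lines (the helper is illustrative, not clio's actual code):

#include <grpcpp/client_context.h>

#include <chrono>

// Hedged sketch: bound a unary RPC with an absolute deadline. Once it passes,
// the call completes with grpc::StatusCode::DEADLINE_EXCEEDED regardless of
// whether the server ever responds.
void
applyDeadline(grpc::ClientContext& context, std::chrono::milliseconds deadline)
{
    context.set_deadline(std::chrono::system_clock::now() + deadline);
}
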
@@ -1282,7 +1282,8 @@ TEST_F(FeedTransactionTest, PublishesMPTokenIssuanceCreateTx)
             }
         ],
         "TransactionIndex": 0,
-        "TransactionResult": "tesSUCCESS"
+        "TransactionResult": "tesSUCCESS",
+        "mpt_issuance_id": "000000014B4E9C06F24296074F7BC48F92A97916C6DC5EA9"
     },
     "ctid": "C000002100000000",
     "type": "transaction",

@@ -1625,7 +1625,8 @@ TEST_F(RPCAccountTxHandlerTest, MPTTxs_API_v2)
             }}
         ],
         "TransactionIndex": 0,
-        "TransactionResult": "tesSUCCESS"
+        "TransactionResult": "tesSUCCESS",
+        "mpt_issuance_id": "000000014B4E9C06F24296074F7BC48F92A97916C6DC5EA9"
     }},
     "hash": "A52221F4003C281D3C83F501F418B55A1F9DC1C6A129EF13E1A8F0E5C008DAE3",
     "ledger_index": 11,

@@ -2311,11 +2311,23 @@ struct IndexTest : public HandlerBaseTest, public WithParamInterface<std::string
     };
 };
 
-// content of index, payment_channel, nft_page and check fields is ledger index.
+// content of index, amendments, check, fee, hashes, nft_offer, nunl, nft_page, payment_channel, signer_list fields is
+// ledger index.
 INSTANTIATE_TEST_CASE_P(
     RPCLedgerEntryGroup3,
     IndexTest,
-    Values("index", "nft_page", "payment_channel", "check"),
+    Values(
+        "index",
+        "amendments",
+        "check",
+        "fee",
+        "hashes",
+        "nft_offer",
+        "nunl",
+        "nft_page",
+        "payment_channel",
+        "signer_list"
+    ),
     IndexTest::NameGenerator{}
 );

@@ -31,6 +31,7 @@
 #include <optional>
 #include <ostream>
 #include <string>
+#include <string_view>
 
 using namespace util::config;

@@ -164,7 +165,7 @@ TEST_F(ConstraintTest, SetValuesOnPortConstraint)
 
 TEST_F(ConstraintTest, OneOfConstraintOneValue)
 {
-    std::array<char const*, 1> const arr = {"tracer"};
+    std::array<std::string_view, 1> const arr = {"tracer"};
     auto const databaseConstraint{OneOf{"database.type", arr}};
     EXPECT_FALSE(databaseConstraint.checkConstraint("tracer").has_value());

@@ -180,7 +181,7 @@ TEST_F(ConstraintTest, OneOfConstraintOneValue)
 
 TEST_F(ConstraintTest, OneOfConstraint)
 {
-    std::array<char const*, 3> const arr = {"123", "trace", "haha"};
+    std::array<std::string_view, 3> const arr = {"123", "trace", "haha"};
    auto const oneOfCons{OneOf{"log.level", arr}};
 
     EXPECT_FALSE(oneOfCons.checkConstraint("trace").has_value());

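Switching the OneOf test arrays from `char const*` to `std::string_view` mirrors the kCHANNELS change earlier in this diff and avoids a classic C++ trap: `==` on two `char const*` values compares addresses, not characters. A standalone illustration, independent of clio's OneOf implementation:

#include <cassert>
#include <string>
#include <string_view>

int
main()
{
    char const* raw = "trace";
    std::string heap{"trace"};  // same text, distinct storage

    // Pointer comparison looks at addresses: unequal here despite equal text.
    assert(raw != heap.c_str());

    // std::string_view compares the characters themselves: equal.
    assert(std::string_view{raw} == std::string_view{heap});
    return 0;
}
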
@@ -101,7 +101,7 @@ TEST_F(LogServiceInitTests, DefaultLogLevel)
     EXPECT_TRUE(LogService::init(config_));
 
     std::string const logString = "some log";
-    for (auto const& channel : Logger::kCHANNELS) {
+    for (std::string_view const channel : Logger::kCHANNELS) {
         Logger const log{channel};
         log.trace() << logString;
         auto loggerStr = getLoggerString();

@@ -21,11 +21,24 @@
 #include "util/log/Logger.hpp"
 
 #include <gtest/gtest.h>
+#include <spdlog/logger.h>
+#include <spdlog/spdlog.h>
 
+#include <cstddef>
 #include <memory>
 #include <string>
 
 using namespace util;
 
+namespace {
+size_t
+loggersNum()
+{
+    size_t counter = 0;
+    spdlog::apply_all([&counter](std::shared_ptr<spdlog::logger>) { ++counter; });
+    return counter;
+}
+}  // namespace
+
 // Used as a fixture for tests with enabled logging
 class LoggerTest : public LoggerFixture {};

@@ -71,3 +84,24 @@ TEST_F(LoggerTest, LOGMacro)
     EXPECT_TRUE(computeCalled);
 }
 #endif
+
+TEST_F(LoggerTest, ManyDynamicLoggers)
+{
+    static constexpr size_t kNUM_LOGGERS = 10'000;
+
+    auto initialLoggers = loggersNum();
+
+    for (size_t i = 0; i < kNUM_LOGGERS; ++i) {
+        std::string const loggerName = "DynamicLogger" + std::to_string(i);
+
+        Logger const log{loggerName};
+        log.info() << "Logger number " << i;
+        ASSERT_EQ(getLoggerString(), "inf:" + loggerName + " - Logger number " + std::to_string(i) + "\n");
+
+        Logger const copy = log;
+        copy.info() << "Copy of logger number " << i;
+        ASSERT_EQ(getLoggerString(), "inf:" + loggerName + " - Copy of logger number " + std::to_string(i) + "\n");
+    }
+
+    ASSERT_EQ(loggersNum(), initialLoggers);
+}

@@ -44,8 +44,10 @@
 #include <gtest/gtest.h>
 
 #include <chrono>
+#include <condition_variable>
 #include <cstddef>
 #include <memory>
+#include <mutex>
 #include <ranges>
 #include <string>
 #include <thread>

@@ -114,24 +116,33 @@ TEST_F(WebWsConnectionTests, DisconnectClientOnInactivity)
     auto work = boost::asio::make_work_guard(clientCtx);
     std::thread clientThread{[&clientCtx]() { clientCtx.run(); }};
 
-    util::spawn(clientCtx, [&work, this](boost::asio::yield_context yield) {
+    std::mutex mutex;
+    std::condition_variable cv;
+    bool finished{false};
+
+    util::spawn(clientCtx, [&](boost::asio::yield_context yield) {
         auto expectedSuccess =
             wsClient_.connect("localhost", httpServer_.port(), yield, std::chrono::milliseconds{100});
         [&]() { ASSERT_TRUE(expectedSuccess.has_value()) << expectedSuccess.error().message(); }();
-        boost::asio::steady_timer timer{yield.get_executor(), std::chrono::milliseconds{5}};
-        timer.async_wait(yield);
+        std::unique_lock lock{mutex};
+        // Wait for 2 seconds to not block the test infinitely in case of failure
+        auto const gotNotified = cv.wait_for(lock, std::chrono::seconds{2}, [&finished]() { return finished; });
+        [&]() { EXPECT_TRUE(gotNotified); }();
         work.reset();
     });
 
-    runSpawn([this](boost::asio::yield_context yield) {
+    runSpawn([&, this](boost::asio::yield_context yield) {
         auto wsConnection = acceptConnection(yield);
         wsConnection->setTimeout(std::chrono::milliseconds{1});
-        // Client will not respond to pings because there is no reading operation scheduled for it.
 
         auto const start = std::chrono::steady_clock::now();
+        // Client will not respond to pings because there is no reading operation scheduled for it.
         auto const receivedMessage = wsConnection->receive(yield);
         auto const end = std::chrono::steady_clock::now();
         EXPECT_LT(end - start, std::chrono::milliseconds{4});  // Should be 2 ms, double it in case of slow CI.
 
+        {
+            std::unique_lock lock{mutex};
+            finished = true;
+            cv.notify_one();
+        }
+
         EXPECT_FALSE(receivedMessage.has_value());
         EXPECT_EQ(receivedMessage.error().value(), boost::asio::error::no_permission);

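The rewritten test replaces a fixed 5ms timer with a condition_variable handshake: the client coroutine now waits until the server side has finished its assertions, bounded at 2 seconds so a failure cannot hang the test forever. The pattern in isolation, with plain threads standing in for the coroutines:

#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

int
main()
{
    std::mutex mutex;
    std::condition_variable cv;
    bool finished = false;

    // "Server" side: do the work, then publish completion under the lock.
    std::thread server{[&] {
        {
            std::unique_lock lock{mutex};
            finished = true;
        }
        cv.notify_one();
    }};

    // "Client" side: wait for completion, but never longer than 2 seconds,
    // so a broken counterpart fails the wait instead of hanging forever.
    std::unique_lock lock{mutex};
    bool const gotNotified = cv.wait_for(lock, std::chrono::seconds{2}, [&] { return finished; });
    std::cout << (gotNotified ? "notified" : "timed out") << '\n';

    server.join();
    return 0;
}
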