mirror of https://github.com/XRPLF/clio.git, synced 2025-12-05 16:58:00 +00:00

Compare commits: 104ef6a9dc...update/pre (1 commit)

| Author | SHA1 | Date |
|---|---|---|
|  | 429753b9bb |  |
.github/actions/build-clio/action.yml (vendored, deleted, 31 lines)

@@ -1,31 +0,0 @@
-name: Build clio
-description: Build clio in build directory
-
-inputs:
-  targets:
-    description: Space-separated build target names
-    default: all
-  nproc_subtract:
-    description: The number of processors to subtract when calculating parallelism.
-    required: true
-    default: "0"
-
-runs:
-  using: composite
-  steps:
-    - name: Get number of processors
-      uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
-      id: nproc
-      with:
-        subtract: ${{ inputs.nproc_subtract }}
-
-    - name: Build targets
-      shell: bash
-      env:
-        CMAKE_TARGETS: ${{ inputs.targets }}
-      run: |
-        cd build
-        cmake \
-          --build . \
-          --parallel "${{ steps.nproc.outputs.nproc }}" \
-          --target ${CMAKE_TARGETS}
.github/actions/build_clio/action.yml (vendored, new file, 29 lines)

@@ -0,0 +1,29 @@
+name: Build clio
+description: Build clio in build directory
+
+inputs:
+  targets:
+    description: Space-separated build target names
+    default: all
+  subtract_threads:
+    description: An option for the action get_number_of_threads. See get_number_of_threads
+    required: true
+    default: "0"
+
+runs:
+  using: composite
+  steps:
+    - name: Get number of threads
+      uses: ./.github/actions/get_number_of_threads
+      id: number_of_threads
+      with:
+        subtract_threads: ${{ inputs.subtract_threads }}
+
+    - name: Build targets
+      shell: bash
+      run: |
+        cd build
+        cmake \
+          --build . \
+          --parallel "${{ steps.number_of_threads.outputs.threads_number }}" \
+          --target ${{ inputs.targets }}
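For reference, the build step expands to an ordinary CMake invocation; a minimal sketch of what actually runs (the target list and thread count here are illustrative, not taken from the diff):

```sh
# What the composite action's last step executes, inputs substituted.
# targets="clio_server clio_tests" and 8 threads are assumed values.
cd build
cmake --build . --parallel 8 --target clio_server clio_tests
```

Note that the targets expression is deliberately left unquoted after `--target`, so each space-separated name is passed to CMake as a separate target.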
@@ -34,14 +34,14 @@ runs:
   steps:
     - name: Login to DockerHub
      if: ${{ inputs.push_image == 'true' && inputs.dockerhub_repo != '' }}
-      uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
+      uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
      with:
        username: ${{ env.DOCKERHUB_USER }}
        password: ${{ env.DOCKERHUB_PW }}

    - name: Login to GitHub Container Registry
      if: ${{ inputs.push_image == 'true' }}
-      uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
+      uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
      with:
        registry: ghcr.io
        username: ${{ github.repository_owner }}
@@ -24,7 +24,7 @@ runs:
          -j8 --exclude-throw-branches

    - name: Archive coverage report
-      uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+      uses: actions/upload-artifact@v4
      with:
        name: coverage-report.xml
        path: build/coverage_report.xml
@@ -28,17 +28,12 @@ runs:
    - name: Create an issue
      id: create_issue
      shell: bash
-      env:
-        ISSUE_BODY: ${{ inputs.body }}
-        ISSUE_ASSIGNEES: ${{ inputs.assignees }}
-        ISSUE_LABELS: ${{ inputs.labels }}
-        ISSUE_TITLE: ${{ inputs.title }}
      run: |
-        echo -e "${ISSUE_BODY}" > issue.md
+        echo -e '${{ inputs.body }}' > issue.md
        gh issue create \
-          --assignee "${ISSUE_ASSIGNEES}" \
-          --label "${ISSUE_LABELS}" \
-          --title "${ISSUE_TITLE}" \
+          --assignee '${{ inputs.assignees }}' \
+          --label '${{ inputs.labels }}' \
+          --title '${{ inputs.title }}' \
          --body-file ./issue.md \
          > create_issue.log
        created_issue="$(sed 's|.*/||' create_issue.log)"
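The two sides of this hunk differ in how workflow inputs reach the shell. The removed lines pass them through `env:` so bash only ever expands them as data; the added lines splice `${{ }}` expressions directly into the script text before bash parses it. A minimal sketch of the distinction (the input value is hypothetical):

```sh
# Env indirection (the removed side): the value arrives via the
# environment and is only ever expanded as data by bash.
ISSUE_BODY="a title with 'quotes' and \$(subshells)"  # runner sets this from inputs.body
printf '%s\n' "${ISSUE_BODY}" > issue.md              # written out verbatim

# The added side instead substitutes the raw input into the script
# itself, so quote characters in the input become shell syntax.
```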
.github/actions/get_number_of_threads/action.yml (vendored, new file, 36 lines)

@@ -0,0 +1,36 @@
+name: Get number of threads
+description: Determines number of threads to use on macOS and Linux
+
+inputs:
+  subtract_threads:
+    description: How many threads to subtract from the calculated number
+    required: true
+    default: "0"
+outputs:
+  threads_number:
+    description: Number of threads to use
+    value: ${{ steps.number_of_threads_export.outputs.num }}
+
+runs:
+  using: composite
+  steps:
+    - name: Get number of threads on mac
+      id: mac_threads
+      if: ${{ runner.os == 'macOS' }}
+      shell: bash
+      run: echo "num=$(($(sysctl -n hw.logicalcpu) - 2))" >> $GITHUB_OUTPUT
+
+    - name: Get number of threads on Linux
+      id: linux_threads
+      if: ${{ runner.os == 'Linux' }}
+      shell: bash
+      run: echo "num=$(($(nproc) - 2))" >> $GITHUB_OUTPUT
+
+    - name: Shift and export number of threads
+      id: number_of_threads_export
+      shell: bash
+      run: |
+        num_of_threads="${{ steps.mac_threads.outputs.num || steps.linux_threads.outputs.num }}"
+        shift_by="${{ inputs.subtract_threads }}"
+        shifted="$((num_of_threads - shift_by))"
+        echo "num=$(( shifted > 1 ? shifted : 1 ))" >> $GITHUB_OUTPUT
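The export step clamps the result so at least one thread is always reported. The same arithmetic as a standalone sketch (the subtract value is illustrative):

```sh
# Cores minus 2 (as in the per-OS steps), minus the requested offset,
# clamped to a minimum of 1 (as in the export step).
cores=$(nproc)          # macOS equivalent: sysctl -n hw.logicalcpu
subtract_threads=4      # illustrative input
shifted=$(( cores - 2 - subtract_threads ))
echo "num=$(( shifted > 1 ? shifted : 1 ))"
```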
@@ -27,10 +27,10 @@ runs:
  steps:
    - name: Find common commit
      id: git_common_ancestor
-      uses: ./.github/actions/git-common-ancestor
+      uses: ./.github/actions/git_common_ancestor

    - name: Restore ccache cache
-      uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
+      uses: actions/cache/restore@v4
      id: ccache_cache
      if: ${{ env.CCACHE_DISABLE != '1' }}
      with:
@@ -28,11 +28,11 @@ runs:
  steps:
    - name: Find common commit
      id: git_common_ancestor
-      uses: ./.github/actions/git-common-ancestor
+      uses: ./.github/actions/git_common_ancestor

    - name: Save ccache cache
      if: ${{ inputs.ccache_cache_hit != 'true' || inputs.ccache_cache_miss_rate == '100.0' }}
-      uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
+      uses: actions/cache/save@v4
      with:
        path: ${{ inputs.ccache_dir }}
        key: clio-ccache-${{ runner.os }}-${{ inputs.build_type }}${{ inputs.code_coverage == 'true' && '-code_coverage' || '' }}-${{ inputs.conan_profile }}-develop-${{ steps.git_common_ancestor.outputs.commit }}
.github/dependabot.yml (vendored, 27 lines changed)

@@ -14,7 +14,7 @@ updates:
    target-branch: develop

  - package-ecosystem: github-actions
-    directory: .github/actions/build-clio/
+    directory: .github/actions/build_clio/
    schedule:
      interval: weekly
      day: monday
@@ -27,7 +27,7 @@ updates:
    target-branch: develop

  - package-ecosystem: github-actions
-    directory: .github/actions/build-docker-image/
+    directory: .github/actions/build_docker_image/
    schedule:
      interval: weekly
      day: monday
@@ -53,7 +53,7 @@ updates:
    target-branch: develop

  - package-ecosystem: github-actions
-    directory: .github/actions/code-coverage/
+    directory: .github/actions/code_coverage/
    schedule:
      interval: weekly
      day: monday
@@ -79,7 +79,7 @@ updates:
    target-branch: develop

  - package-ecosystem: github-actions
-    directory: .github/actions/create-issue/
+    directory: .github/actions/create_issue/
    schedule:
      interval: weekly
      day: monday
@@ -92,7 +92,7 @@ updates:
    target-branch: develop

  - package-ecosystem: github-actions
-    directory: .github/actions/git-common-ancestor/
+    directory: .github/actions/get_number_of_threads/
    schedule:
      interval: weekly
      day: monday
@@ -105,7 +105,7 @@ updates:
    target-branch: develop

  - package-ecosystem: github-actions
-    directory: .github/actions/restore-cache/
+    directory: .github/actions/git_common_ancestor/
    schedule:
      interval: weekly
      day: monday
@@ -118,7 +118,20 @@ updates:
    target-branch: develop

  - package-ecosystem: github-actions
-    directory: .github/actions/save-cache/
+    directory: .github/actions/restore_cache/
    schedule:
      interval: weekly
      day: monday
+      time: "04:00"
+      timezone: Etc/GMT
+    reviewers:
+      - XRPLF/clio-dev-team
+    commit-message:
+      prefix: "ci: [DEPENDABOT] "
+    target-branch: develop
+
+  - package-ecosystem: github-actions
+    directory: .github/actions/save_cache/
+    schedule:
+      interval: weekly
+      day: monday
.github/scripts/conan/generate_matrix.py (vendored, 4 lines changed)

@@ -3,9 +3,7 @@ import itertools
 import json

 LINUX_OS = ["heavy", "heavy-arm64"]
-LINUX_CONTAINERS = [
-    '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
-]
+LINUX_CONTAINERS = ['{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }']
 LINUX_COMPILERS = ["gcc", "clang"]

 MACOS_OS = ["macos15"]
@@ -31,16 +31,15 @@ TESTS=$($TEST_BINARY --gtest_list_tests | awk '/^  / {print suite $1} !/^  / {suite=$1}')
 OUTPUT_DIR="./.sanitizer-report"
 mkdir -p "$OUTPUT_DIR"

-export TSAN_OPTIONS="die_after_fork=0"
-export MallocNanoZone='0' # for MacOSX
-
 for TEST in $TESTS; do
-    OUTPUT_FILE="$OUTPUT_DIR/${TEST//\//_}.log"
-    $TEST_BINARY --gtest_filter="$TEST" > "$OUTPUT_FILE" 2>&1
+    OUTPUT_FILE="$OUTPUT_DIR/${TEST//\//_}"
+    export TSAN_OPTIONS="log_path=\"$OUTPUT_FILE\" die_after_fork=0"
+    export ASAN_OPTIONS="log_path=\"$OUTPUT_FILE\""
+    export UBSAN_OPTIONS="log_path=\"$OUTPUT_FILE\""
+    export MallocNanoZone='0' # for MacOSX
+    $TEST_BINARY --gtest_filter="$TEST" > /dev/null 2>&1

     if [ $? -ne 0 ]; then
         echo "'$TEST' failed a sanitizer check."
     else
         rm "$OUTPUT_FILE"
     fi
 done
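With `log_path` set, the ASan/TSan/UBSan runtimes write their reports to `<log_path>.<pid>` instead of stderr, which is why the new version can discard the test's output entirely. A sketch of running one case this way (binary and test names are illustrative):

```sh
# Divert sanitizer findings for a single googletest case to files.
mkdir -p .sanitizer-report
export ASAN_OPTIONS='log_path=".sanitizer-report/MySuite.MyCase"'
./clio_tests --gtest_filter="MySuite.MyCase" > /dev/null 2>&1 \
    || echo "'MySuite.MyCase' failed a sanitizer check."
# Findings, if any, appear as .sanitizer-report/MySuite.MyCase.<pid>
```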
.github/workflows/build.yml (vendored, 30 lines changed)

@@ -8,14 +8,14 @@ on:
    paths:
      - .github/workflows/build.yml

-      - .github/workflows/reusable-build-test.yml
-      - .github/workflows/reusable-build.yml
-      - .github/workflows/reusable-test.yml
-      - .github/workflows/reusable-upload-coverage-report.yml
+      - .github/workflows/build_and_test.yml
+      - .github/workflows/build_impl.yml
+      - .github/workflows/test_impl.yml
+      - .github/workflows/upload_coverage_report.yml

      - ".github/actions/**"
-      - "!.github/actions/build-docker-image/**"
-      - "!.github/actions/create-issue/**"
+      - "!.github/actions/build_docker_image/**"
+      - "!.github/actions/create_issue/**"

      - CMakeLists.txt
      - conanfile.py
@@ -45,7 +45,7 @@ jobs:
        build_type: [Release, Debug]
        container:
          [
-            '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }',
+            '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }',
          ]
        static: [true]

@@ -56,7 +56,7 @@ jobs:
          container: ""
          static: false

-    uses: ./.github/workflows/reusable-build-test.yml
+    uses: ./.github/workflows/build_and_test.yml
    with:
      runs_on: ${{ matrix.os }}
      container: ${{ matrix.container }}
@@ -72,10 +72,10 @@ jobs:
  code_coverage:
    name: Run Code Coverage

-    uses: ./.github/workflows/reusable-build.yml
+    uses: ./.github/workflows/build_impl.yml
    with:
      runs_on: heavy
-      container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
+      container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
      conan_profile: gcc
      build_type: Debug
      download_ccache: true
@@ -91,10 +91,10 @@ jobs:
  package:
    name: Build packages

-    uses: ./.github/workflows/reusable-build.yml
+    uses: ./.github/workflows/build_impl.yml
    with:
      runs_on: heavy
-      container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
+      container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
      conan_profile: gcc
      build_type: Release
      download_ccache: true
@@ -111,12 +111,12 @@ jobs:
    needs: build-and-test
    runs-on: heavy
    container:
-      image: ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d
+      image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d

    steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/checkout@v4

-      - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
+      - uses: actions/download-artifact@v5
        with:
          name: clio_server_Linux_Release_gcc
@@ -77,7 +77,7 @@ on:

 jobs:
  build:
-    uses: ./.github/workflows/reusable-build.yml
+    uses: ./.github/workflows/build_impl.yml
    with:
      runs_on: ${{ inputs.runs_on }}
      container: ${{ inputs.container }}
@@ -95,7 +95,7 @@ jobs:

  test:
    needs: build
-    uses: ./.github/workflows/reusable-test.yml
+    uses: ./.github/workflows/test_impl.yml
    with:
      runs_on: ${{ inputs.runs_on }}
      container: ${{ inputs.container }}
@@ -44,11 +44,11 @@ jobs:
    runs-on: ubuntu-latest

    steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/checkout@v4

      - name: Download Clio binary from artifact
        if: ${{ inputs.artifact_name != null }}
-        uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
+        uses: actions/download-artifact@v5
        with:
          name: ${{ inputs.artifact_name }}
          path: ./docker/clio/artifact/
@@ -56,12 +56,9 @@ jobs:
      - name: Download Clio binary from url
        if: ${{ inputs.clio_server_binary_url != null }}
        shell: bash
-        env:
-          BINARY_URL: ${{ inputs.clio_server_binary_url }}
-          BINARY_SHA256: ${{ inputs.binary_sha256 }}
        run: |
-          wget "${BINARY_URL}" -P ./docker/clio/artifact/
-          if [ "$(sha256sum ./docker/clio/clio_server | awk '{print $1}')" != "${BINARY_SHA256}" ]; then
+          wget "${{inputs.clio_server_binary_url}}" -P ./docker/clio/artifact/
+          if [ "$(sha256sum ./docker/clio/clio_server | awk '{print $1}')" != "${{inputs.binary_sha256}}" ]; then
            echo "Binary sha256 sum doesn't match"
            exit 1
          fi
@@ -92,7 +89,7 @@ jobs:
          echo "GHCR_REPO=$(echo ghcr.io/${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> ${GITHUB_OUTPUT}

      - name: Build Docker image
-        uses: ./.github/actions/build-docker-image
+        uses: ./.github/actions/build_docker_image
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
@@ -86,7 +86,7 @@ jobs:
        if: ${{ runner.os == 'macOS' }}
        uses: XRPLF/actions/.github/actions/cleanup-workspace@ea9970b7c211b18f4c8bcdb28c29f5711752029f

-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
          # We need to fetch tags to have correct version in the release
@@ -106,7 +106,7 @@ jobs:

      - name: Restore cache
        if: ${{ inputs.download_ccache }}
-        uses: ./.github/actions/restore-cache
+        uses: ./.github/actions/restore_cache
        id: restore_cache
        with:
          conan_profile: ${{ inputs.conan_profile }}
@@ -131,7 +131,7 @@ jobs:
          package: ${{ inputs.package }}

      - name: Build Clio
-        uses: ./.github/actions/build-clio
+        uses: ./.github/actions/build_clio
        with:
          targets: ${{ inputs.targets }}
@@ -145,7 +145,7 @@ jobs:

      - name: Upload build time analyze report
        if: ${{ inputs.analyze_build_time }}
-        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+        uses: actions/upload-artifact@v4
        with:
          name: build_time_report_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
          path: build_time_report.txt
@@ -170,35 +170,35 @@ jobs:

      - name: Upload clio_server
        if: ${{ inputs.upload_clio_server && !inputs.code_coverage && !inputs.analyze_build_time }}
-        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+        uses: actions/upload-artifact@v4
        with:
          name: clio_server_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
          path: build/clio_server

      - name: Upload clio_tests
        if: ${{ !inputs.code_coverage && !inputs.analyze_build_time && !inputs.package }}
-        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+        uses: actions/upload-artifact@v4
        with:
          name: clio_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
          path: build/clio_tests

      - name: Upload clio_integration_tests
        if: ${{ !inputs.code_coverage && !inputs.analyze_build_time && !inputs.package }}
-        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+        uses: actions/upload-artifact@v4
        with:
          name: clio_integration_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
          path: build/clio_integration_tests

      - name: Upload Clio Linux package
        if: ${{ inputs.package }}
-        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+        uses: actions/upload-artifact@v4
        with:
          name: clio_deb_package_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
          path: build/*.deb

      - name: Save cache
        if: ${{ inputs.upload_ccache && github.ref == 'refs/heads/develop' }}
-        uses: ./.github/actions/save-cache
+        uses: ./.github/actions/save_cache
        with:
          conan_profile: ${{ inputs.conan_profile }}
          ccache_dir: ${{ env.CCACHE_DIR }}
@@ -216,19 +216,17 @@ jobs:
          # It's all available in the build job, but not in the test job
      - name: Run code coverage
        if: ${{ inputs.code_coverage }}
-        uses: ./.github/actions/code-coverage
+        uses: ./.github/actions/code_coverage

      - name: Verify expected version
        if: ${{ inputs.expected_version != '' }}
        shell: bash
-        env:
-          INPUT_EXPECTED_VERSION: ${{ inputs.expected_version }}
        run: |
          set -e
-          EXPECTED_VERSION="clio-${INPUT_EXPECTED_VERSION}"
+          EXPECTED_VERSION="clio-${{ inputs.expected_version }}"
          actual_version=$(./build/clio_server --version)
          if [[ "$actual_version" != "$EXPECTED_VERSION" ]]; then
-            echo "Expected version '${EXPECTED_VERSION}', but got '${actual_version}'"
+            echo "Expected version '$EXPECTED_VERSION', but got '$actual_version'"
            exit 1
          fi
@@ -240,6 +238,6 @@ jobs:
    if: ${{ inputs.code_coverage }}
    name: Codecov
    needs: build
-    uses: ./.github/workflows/reusable-upload-coverage-report.yml
+    uses: ./.github/workflows/upload_coverage_report.yml
    secrets:
      CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
@@ -17,10 +17,10 @@ jobs:
    name: Build Clio / `libXRPL ${{ github.event.client_payload.version }}`
    runs-on: heavy
    container:
-      image: ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d
+      image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d

    steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

@@ -51,13 +51,13 @@ jobs:
          conan_profile: ${{ env.CONAN_PROFILE }}

      - name: Build Clio
-        uses: ./.github/actions/build-clio
+        uses: ./.github/actions/build_clio

      - name: Strip tests
        run: strip build/clio_tests

      - name: Upload clio_tests
-        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+        uses: actions/upload-artifact@v4
        with:
          name: clio_tests_check_libxrpl
          path: build/clio_tests
@@ -67,10 +67,10 @@ jobs:
    needs: build
    runs-on: heavy
    container:
-      image: ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d
+      image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d

    steps:
-      - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
+      - uses: actions/download-artifact@v5
        with:
          name: clio_tests_check_libxrpl

@@ -90,10 +90,10 @@ jobs:
      issues: write

    steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/checkout@v4

      - name: Create an issue
-        uses: ./.github/actions/create-issue
+        uses: ./.github/actions/create_issue
        env:
          GH_TOKEN: ${{ github.token }}
        with:
@@ -10,17 +10,8 @@ jobs:
    runs-on: ubuntu-latest

    steps:
-      - uses: ytanikin/pr-conventional-commits@b72758283dcbee706975950e96bc4bf323a8d8c0 # 1.4.2
+      - uses: ytanikin/pr-conventional-commits@b72758283dcbee706975950e96bc4bf323a8d8c0 # v1.4.2
        with:
          task_types: '["build","feat","fix","docs","test","ci","style","refactor","perf","chore"]'
          add_label: false
          custom_labels: '{"build":"build", "feat":"enhancement", "fix":"bug", "docs":"documentation", "test":"testability", "ci":"ci", "style":"refactoring", "refactor":"refactoring", "perf":"performance", "chore":"tooling"}'
-
-      - name: Check if message starts with upper-case letter
-        env:
-          PR_TITLE: ${{ github.event.pull_request.title }}
-        run: |
-          if [[ ! "${PR_TITLE}" =~ ^[a-z]+:\ [\[A-Z] ]]; then
-            echo "Error: PR title must start with an upper-case letter."
-            exit 1
-          fi
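The removed check accepts a lowercase conventional-commit type, then a colon and space, then either an opening bracket or a capital letter. The same pattern can be exercised locally (sample titles are made up):

```sh
re='^[a-z]+:\ [\[A-Z]'
[[ "fix: Correct the cache key" =~ $re ]] && echo pass   # capital letter
[[ "ci: [DEPENDABOT] bump deps" =~ $re ]] && echo pass   # '[' also allowed
[[ "fix: lowercase title" =~ $re ]] || echo fail         # rejected
```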
.github/workflows/clang-tidy.yml (vendored, 16 lines changed)

@@ -27,7 +27,7 @@ jobs:
    if: github.event_name != 'push' || contains(github.event.head_commit.message, 'clang-tidy auto fixes')
    runs-on: heavy
    container:
-      image: ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d
+      image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d

    permissions:
      contents: write
@@ -35,7 +35,7 @@ jobs:
      pull-requests: write

    steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

@@ -45,7 +45,7 @@ jobs:
          disable_ccache: true

      - name: Restore cache
-        uses: ./.github/actions/restore-cache
+        uses: ./.github/actions/restore_cache
        id: restore_cache
        with:
          conan_profile: ${{ env.CONAN_PROFILE }}
@@ -61,16 +61,16 @@ jobs:
        with:
          conan_profile: ${{ env.CONAN_PROFILE }}

-      - name: Get number of processors
-        uses: XRPLF/actions/.github/actions/get-nproc@046b1620f6bfd6cd0985dc82c3df02786801fe0a
-        id: nproc
+      - name: Get number of threads
+        uses: ./.github/actions/get_number_of_threads
+        id: number_of_threads

      - name: Run clang-tidy
        continue-on-error: true
        shell: bash
        id: run_clang_tidy
        run: |
-          run-clang-tidy-${{ env.LLVM_TOOLS_VERSION }} -p build -j "${{ steps.nproc.outputs.nproc }}" -fix -quiet 1>output.txt
+          run-clang-tidy-${{ env.LLVM_TOOLS_VERSION }} -p build -j "${{ steps.number_of_threads.outputs.threads_number }}" -fix -quiet 1>output.txt

      - name: Fix local includes and clang-format style
        if: ${{ steps.run_clang_tidy.outcome != 'success' }}
@@ -90,7 +90,7 @@ jobs:
      - name: Create an issue
        if: ${{ steps.run_clang_tidy.outcome != 'success' && github.event_name != 'pull_request' }}
        id: create_issue
-        uses: ./.github/actions/create-issue
+        uses: ./.github/actions/create_issue
        env:
          GH_TOKEN: ${{ github.token }}
        with:
.github/workflows/docs.yml (vendored, 10 lines changed)

@@ -14,11 +14,11 @@ jobs:
  build:
    runs-on: ubuntu-latest
    container:
-      image: ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d
+      image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d

    steps:
      - name: Checkout
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+        uses: actions/checkout@v4
        with:
          lfs: true

@@ -39,10 +39,10 @@ jobs:
        run: cmake --build . --target docs

      - name: Setup Pages
-        uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b # v5.0.0
+        uses: actions/configure-pages@v5

      - name: Upload artifact
-        uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b # v4.0.0
+        uses: actions/upload-pages-artifact@v4
        with:
          path: build_docs/html
          name: docs-develop
@@ -62,6 +62,6 @@ jobs:
    steps:
      - name: Deploy to GitHub Pages
        id: deployment
-        uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e # v4.0.5
+        uses: actions/deploy-pages@v4
        with:
          artifact_name: docs-develop
.github/workflows/nightly.yml (vendored, 49 lines changed)

@@ -8,14 +8,14 @@ on:
    paths:
      - .github/workflows/nightly.yml

-      - .github/workflows/reusable-release.yml
-      - .github/workflows/reusable-build-test.yml
-      - .github/workflows/reusable-build.yml
-      - .github/workflows/reusable-test.yml
-      - .github/workflows/build-clio-docker-image.yml
+      - .github/workflows/release_impl.yml
+      - .github/workflows/build_and_test.yml
+      - .github/workflows/build_impl.yml
+      - .github/workflows/test_impl.yml
+      - .github/workflows/build_clio_docker_image.yml

      - ".github/actions/**"
-      - "!.github/actions/code-coverage/**"
+      - "!.github/actions/code_coverage/**"
      - .github/scripts/prepare-release-artifacts.sh

 concurrency:
@@ -39,19 +39,19 @@ jobs:
          conan_profile: gcc
          build_type: Release
          static: true
-          container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
+          container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
        - os: heavy
          conan_profile: gcc
          build_type: Debug
          static: true
-          container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
+          container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
        - os: heavy
          conan_profile: gcc.ubsan
          build_type: Release
          static: false
-          container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
+          container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'

-    uses: ./.github/workflows/reusable-build-test.yml
+    uses: ./.github/workflows/build_and_test.yml
    with:
      runs_on: ${{ matrix.os }}
      container: ${{ matrix.container }}
@@ -73,13 +73,13 @@ jobs:
      include:
        - os: heavy
          conan_profile: clang
-          container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
+          container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
          static: true
        - os: macos15
          conan_profile: apple-clang
          container: ""
          static: false
-    uses: ./.github/workflows/reusable-build.yml
+    uses: ./.github/workflows/build_impl.yml
    with:
      runs_on: ${{ matrix.os }}
      container: ${{ matrix.container }}
@@ -93,25 +93,14 @@ jobs:
      targets: all
      analyze_build_time: true

-  get_date:
-    name: Get Date
-    runs-on: ubuntu-latest
-    outputs:
-      date: ${{ steps.get_date.outputs.date }}
-    steps:
-      - name: Get current date
-        id: get_date
-        run: |
-          echo "date=$(date +'%Y%m%d')" >> $GITHUB_OUTPUT
-
  nightly_release:
-    needs: [build-and-test, get_date]
-    uses: ./.github/workflows/reusable-release.yml
+    needs: build-and-test
+    uses: ./.github/workflows/release_impl.yml
    with:
-      delete_pattern: "nightly-*"
+      overwrite_release: true
      prerelease: true
      title: "Clio development (nightly) build"
-      version: nightly-${{ needs.get_date.outputs.date }}
+      version: nightly
      header: >
        > **Note:** Please remember that this is a development release and it is not recommended for production use.

@@ -120,7 +109,7 @@ jobs:
      draft: false

  build_and_publish_docker_image:
-    uses: ./.github/workflows/build-clio-docker-image.yml
+    uses: ./.github/workflows/build_clio_docker_image.yml
    needs: build-and-test
    secrets: inherit
    with:
@@ -141,10 +130,10 @@ jobs:
      issues: write

    steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/checkout@v4

      - name: Create an issue
-        uses: ./.github/actions/create-issue
+        uses: ./.github/actions/create_issue
        env:
          GH_TOKEN: ${{ github.token }}
        with:
.github/workflows/pre-commit.yml (vendored, 4 lines changed)

@@ -8,7 +8,7 @@ on:

 jobs:
  run-hooks:
-    uses: XRPLF/actions/.github/workflows/pre-commit.yml@34790936fae4c6c751f62ec8c06696f9c1a5753a
+    uses: XRPLF/actions/.github/workflows/pre-commit.yml@afbcbdafbe0ce5439492fb87eda6441371086386
    with:
      runs_on: heavy
-      container: '{ "image": "ghcr.io/xrplf/clio-pre-commit:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
+      container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
.github/workflows/release.yml (vendored, 12 lines changed)

@@ -29,9 +29,9 @@ jobs:
          conan_profile: gcc
          build_type: Release
          static: true
-          container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
+          container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'

-    uses: ./.github/workflows/reusable-build-test.yml
+    uses: ./.github/workflows/build_and_test.yml
    with:
      runs_on: ${{ matrix.os }}
      container: ${{ matrix.container }}
@@ -47,13 +47,13 @@ jobs:

  release:
    needs: build-and-test
-    uses: ./.github/workflows/reusable-release.yml
+    uses: ./.github/workflows/release_impl.yml
    with:
-      delete_pattern: ""
+      overwrite_release: false
      prerelease: ${{ contains(github.ref_name, '-') }}
-      title: "${{ github.ref_name }}"
+      title: "${{ github.ref_name}}"
      version: "${{ github.ref_name }}"
      header: >
        ${{ contains(github.ref_name, '-') && '> **Note:** Please remember that this is a release candidate and it is not recommended for production use.' || '' }}
      generate_changelog: ${{ !contains(github.ref_name, '-') }}
-      draft: ${{ !contains(github.ref_name, '-') }}
+      draft: true
@@ -3,10 +3,10 @@ name: Make release
 on:
  workflow_call:
    inputs:
-      delete_pattern:
-        description: "Pattern to delete previous releases"
+      overwrite_release:
+        description: "Overwrite the current release and tag"
        required: true
-        type: string
+        type: boolean

      prerelease:
        description: "Create a prerelease"
@@ -42,7 +42,7 @@ jobs:
  release:
    runs-on: heavy
    container:
-      image: ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d
+      image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
    env:
      GH_REPO: ${{ github.repository }}
      GH_TOKEN: ${{ github.token }}
@@ -51,7 +51,7 @@ jobs:
      contents: write

    steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
@@ -60,19 +60,17 @@ jobs:
        with:
          disable_ccache: true

-      - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
+      - uses: actions/download-artifact@v5
        with:
          path: release_artifacts
          pattern: clio_server_*

      - name: Create release notes
        shell: bash
-        env:
-          RELEASE_HEADER: ${{ inputs.header }}
        run: |
          echo "# Release notes" > "${RUNNER_TEMP}/release_notes.md"
          echo "" >> "${RUNNER_TEMP}/release_notes.md"
-          printf '%s\n' "${RELEASE_HEADER}" >> "${RUNNER_TEMP}/release_notes.md"
+          printf '%s\n' "${{ inputs.header }}" >> "${RUNNER_TEMP}/release_notes.md"

      - name: Generate changelog
        shell: bash
@@ -89,38 +87,26 @@ jobs:
        run: .github/scripts/prepare-release-artifacts.sh release_artifacts

      - name: Upload release notes
-        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+        uses: actions/upload-artifact@v4
        with:
          name: release_notes_${{ inputs.version }}
          path: "${RUNNER_TEMP}/release_notes.md"

-      - name: Remove previous release with a pattern
-        if: ${{ github.event_name != 'pull_request' && inputs.delete_pattern != '' }}
+      - name: Remove current release and tag
+        if: ${{ github.event_name != 'pull_request' && inputs.overwrite_release }}
        shell: bash
-        env:
-          DELETE_PATTERN: ${{ inputs.delete_pattern }}
        run: |
-          RELEASES_TO_DELETE=$(gh release list --limit 50 --repo "${GH_REPO}" | grep -E "${DELETE_PATTERN}" | awk -F'\t' '{print $3}' || true)
-          if [ -n "$RELEASES_TO_DELETE" ]; then
-            for RELEASE in $RELEASES_TO_DELETE; do
-              echo "Deleting release: $RELEASE"
-              gh release delete "$RELEASE" --repo "${GH_REPO}" --yes --cleanup-tag
-            done
-          fi
+          gh release delete ${{ inputs.version }} --yes || true
+          git push origin :${{ inputs.version }} || true

      - name: Publish release
        if: ${{ github.event_name != 'pull_request' }}
        shell: bash
-        env:
-          RELEASE_VERSION: ${{ inputs.version }}
-          PRERELEASE_OPTION: ${{ inputs.prerelease && '--prerelease' || '' }}
-          RELEASE_TITLE: ${{ inputs.title }}
-          DRAFT_OPTION: ${{ inputs.draft && '--draft' || '' }}
        run: |
-          gh release create "${RELEASE_VERSION}" \
-            ${PRERELEASE_OPTION} \
-            --title "${RELEASE_TITLE}" \
+          gh release create "${{ inputs.version }}" \
+            ${{ inputs.prerelease && '--prerelease' || '' }} \
+            --title "${{ inputs.title }}" \
            --target "${GITHUB_SHA}" \
-            ${DRAFT_OPTION} \
+            ${{ inputs.draft && '--draft' || '' }} \
            --notes-file "${RUNNER_TEMP}/release_notes.md" \
            ./release_artifacts/clio_server*
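One subtlety in the removed cleanup step: `delete_pattern` is fed to `grep -E`, so it is matched as an extended regular expression, not a shell glob. With the nightly workflow's `"nightly-*"`, the `-*` means "zero or more hyphens", so any release name containing `nightly` matches:

```sh
# Matches every line containing "nightly", with or without the hyphen.
printf '%s\n' "nightly-20250101" "v2.5.0" "old-nightly" | grep -E "nightly-*"
# -> nightly-20250101
# -> old-nightly
```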
.github/workflows/sanitizers.yml (vendored, 16 lines changed)

@@ -8,14 +8,14 @@ on:
    paths:
      - .github/workflows/sanitizers.yml

-      - .github/workflows/reusable-build-test.yml
-      - .github/workflows/reusable-build.yml
-      - .github/workflows/reusable-test.yml
+      - .github/workflows/build_and_test.yml
+      - .github/workflows/build_impl.yml
+      - .github/workflows/test_impl.yml

      - ".github/actions/**"
-      - "!.github/actions/build-docker-image/**"
-      - "!.github/actions/create-issue/**"
-      - .github/scripts/execute-tests-under-sanitizer.sh
+      - "!.github/actions/build_docker_image/**"
+      - "!.github/actions/create_issue/**"
+      - .github/scripts/execute-tests-under-sanitizer

      - CMakeLists.txt
      - conanfile.py
@@ -41,10 +41,10 @@ jobs:
        sanitizer_ext: [.asan, .tsan, .ubsan]
        build_type: [Release, Debug]

-    uses: ./.github/workflows/reusable-build-test.yml
+    uses: ./.github/workflows/build_and_test.yml
    with:
      runs_on: heavy
-      container: '{ "image": "ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d" }'
+      container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'
      download_ccache: false
      upload_ccache: false
      conan_profile: ${{ matrix.compiler }}${{ matrix.sanitizer_ext }}
@@ -43,18 +43,18 @@ jobs:

    env:
      # TODO: remove completely when we have fixed all currently existing issues with sanitizers
-      SANITIZER_IGNORE_ERRORS: ${{ endsWith(inputs.conan_profile, '.tsan') || (inputs.conan_profile == 'gcc.asan' && inputs.build_type == 'Release') }}
+      SANITIZER_IGNORE_ERRORS: ${{ endsWith(inputs.conan_profile, '.tsan') || inputs.conan_profile == 'clang.asan' || (inputs.conan_profile == 'gcc.asan' && inputs.build_type == 'Release') }}

    steps:
      - name: Cleanup workspace
        if: ${{ runner.os == 'macOS' }}
        uses: XRPLF/actions/.github/actions/cleanup-workspace@ea9970b7c211b18f4c8bcdb28c29f5711752029f

-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

-      - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
+      - uses: actions/download-artifact@v5
        with:
          name: clio_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}

@@ -68,7 +68,7 @@ jobs:
      - name: Run clio_tests (sanitizer errors ignored)
        if: ${{ env.SANITIZER_IGNORE_ERRORS == 'true' }}
-        run: ./.github/scripts/execute-tests-under-sanitizer.sh ./clio_tests
+        run: ./.github/scripts/execute-tests-under-sanitizer ./clio_tests

      - name: Check for sanitizer report
        if: ${{ env.SANITIZER_IGNORE_ERRORS == 'true' }}
@@ -83,7 +83,7 @@ jobs:
      - name: Upload sanitizer report
        if: ${{ env.SANITIZER_IGNORE_ERRORS == 'true' && steps.check_report.outputs.found_report == 'true' }}
-        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+        uses: actions/upload-artifact@v4
        with:
          name: sanitizer_report_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
          path: .sanitizer-report/*
@@ -91,7 +91,7 @@ jobs:
      - name: Create an issue
        if: ${{ false && env.SANITIZER_IGNORE_ERRORS == 'true' && steps.check_report.outputs.found_report == 'true' }}
-        uses: ./.github/actions/create-issue
+        uses: ./.github/actions/create_issue
        env:
          GH_TOKEN: ${{ github.token }}
        with:
@@ -144,7 +144,7 @@ jobs:
          sleep 5
        done

-      - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
+      - uses: actions/download-artifact@v5
        with:
          name: clio_integration_tests_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
@@ -3,23 +3,23 @@ name: Update CI docker image
 on:
  pull_request:
    paths:
-      - .github/workflows/update-docker-ci.yml
+      - .github/workflows/update_docker_ci.yml

-      - ".github/actions/build-docker-image/**"
+      - ".github/actions/build_docker_image/**"

-      - "docker/**"
-      - "!docker/clio/**"
-      - "!docker/develop/**"
+      - "docker/ci/**"
+      - "docker/compilers/**"
+      - "docker/tools/**"
  push:
    branches: [develop]
    paths:
-      - .github/workflows/update-docker-ci.yml
+      - .github/workflows/update_docker_ci.yml

-      - ".github/actions/build-docker-image/**"
+      - ".github/actions/build_docker_image/**"

-      - "docker/**"
-      - "!docker/clio/**"
-      - "!docker/develop/**"
+      - "docker/ci/**"
+      - "docker/compilers/**"
+      - "docker/tools/**"
  workflow_dispatch:

 concurrency:
@@ -52,7 +52,7 @@ jobs:
    needs: repo

    steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/checkout@v4

      - name: Get changed files
        id: changed-files
@@ -60,7 +60,7 @@ jobs:
        with:
          files: "docker/compilers/gcc/**"

-      - uses: ./.github/actions/build-docker-image
+      - uses: ./.github/actions/build_docker_image
        if: ${{ steps.changed-files.outputs.any_changed == 'true' }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -90,15 +90,15 @@ jobs:
    needs: repo

    steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/checkout@v4

      - name: Get changed files
        id: changed-files
-        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files: "docker/compilers/gcc/**"

-      - uses: ./.github/actions/build-docker-image
+      - uses: ./.github/actions/build_docker_image
        if: ${{ steps.changed-files.outputs.any_changed == 'true' }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -128,7 +128,7 @@ jobs:
    needs: [repo, gcc-amd64, gcc-arm64]

    steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/checkout@v4

      - name: Get changed files
        id: changed-files
@@ -137,11 +137,11 @@ jobs:
          files: "docker/compilers/gcc/**"

      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
+        uses: docker/setup-buildx-action@v3

      - name: Login to GitHub Container Registry
        if: ${{ github.event_name != 'pull_request' }}
-        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
+        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
@@ -149,7 +149,7 @@ jobs:

      - name: Login to DockerHub
        if: ${{ github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' }}
-        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
+        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
        with:
          username: ${{ secrets.DOCKERHUB_USER }}
          password: ${{ secrets.DOCKERHUB_PW }}
@@ -179,7 +179,7 @@ jobs:
    needs: repo

    steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/checkout@v4

      - name: Get changed files
        id: changed-files
@@ -187,7 +187,7 @@ jobs:
        with:
          files: "docker/compilers/clang/**"

-      - uses: ./.github/actions/build-docker-image
+      - uses: ./.github/actions/build_docker_image
        if: ${{ steps.changed-files.outputs.any_changed == 'true' }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -215,7 +215,7 @@ jobs:
    needs: [repo, gcc-merge]

    steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/checkout@v4

      - name: Get changed files
        id: changed-files
@@ -223,7 +223,7 @@ jobs:
        with:
          files: "docker/tools/**"

-      - uses: ./.github/actions/build-docker-image
+      - uses: ./.github/actions/build_docker_image
        if: ${{ steps.changed-files.outputs.any_changed == 'true' }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -246,15 +246,15 @@ jobs:
    needs: [repo, gcc-merge]

    steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/checkout@v4

      - name: Get changed files
        id: changed-files
-        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files: "docker/tools/**"

-      - uses: ./.github/actions/build-docker-image
+      - uses: ./.github/actions/build_docker_image
        if: ${{ steps.changed-files.outputs.any_changed == 'true' }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -277,7 +277,7 @@ jobs:
    needs: [repo, tools-amd64, tools-arm64]

    steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/checkout@v4

      - name: Get changed files
        id: changed-files
@@ -286,11 +286,11 @@ jobs:
          files: "docker/tools/**"

      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
+        uses: docker/setup-buildx-action@v3

      - name: Login to GitHub Container Registry
        if: ${{ github.event_name != 'pull_request' }}
-        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
+        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
@@ -306,36 +306,14 @@ jobs:
          $image:arm64-latest \
          $image:amd64-latest

-  pre-commit:
-    name: Build and push pre-commit docker image
-    runs-on: heavy
-    needs: [repo, tools-merge]
-
-    steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-      - uses: ./.github/actions/build-docker-image
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          images: |
-            ${{ needs.repo.outputs.GHCR_REPO }}/clio-pre-commit
-          push_image: ${{ github.event_name != 'pull_request' }}
-          directory: docker/pre-commit
-          tags: |
-            type=raw,value=latest
-            type=raw,value=${{ github.sha }}
-          platforms: linux/amd64,linux/arm64
-          build_args: |
-            GHCR_REPO=${{ needs.repo.outputs.GHCR_REPO }}
-
  ci:
    name: Build and push CI docker image
    runs-on: heavy
    needs: [repo, gcc-merge, clang, tools-merge]

    steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-      - uses: ./.github/actions/build-docker-image
+      - uses: actions/checkout@v4
+      - uses: ./.github/actions/build_docker_image
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
@@ -18,7 +18,7 @@ on:
  pull_request:
    branches: [develop]
    paths:
-      - .github/workflows/upload-conan-deps.yml
+      - .github/workflows/upload_conan_deps.yml

      - .github/actions/conan/action.yml
      - ".github/scripts/conan/**"
@@ -28,7 +28,7 @@ on:
  push:
    branches: [develop]
    paths:
-      - .github/workflows/upload-conan-deps.yml
+      - .github/workflows/upload_conan_deps.yml

      - .github/actions/conan/action.yml
      - ".github/scripts/conan/**"
@@ -46,7 +46,7 @@ jobs:
    outputs:
      matrix: ${{ steps.set-matrix.outputs.matrix }}
    steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/checkout@v4

      - name: Calculate conan matrix
        id: set-matrix
@@ -69,7 +69,7 @@ jobs:
      CONAN_PROFILE: ${{ matrix.compiler }}${{ matrix.sanitizer_ext }}

    steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/checkout@v4

      - name: Prepare runner
        uses: XRPLF/actions/.github/actions/prepare-runner@7951b682e5a2973b28b0719a72f01fc4b0d0c34f
@@ -99,6 +99,4 @@ jobs:

      - name: Upload Conan packages
        if: ${{ github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' && github.event_name != 'schedule' }}
-        env:
-          FORCE_OPTION: ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }}
-        run: conan upload "*" -r=xrplf --confirm ${FORCE_OPTION}
+        run: conan upload "*" -r=xrplf --confirm ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }}
@@ -1,6 +1,7 @@
 name: Upload report

 on:
+  workflow_dispatch:
  workflow_call:
    secrets:
      CODECOV_TOKEN:
@@ -12,12 +13,12 @@ jobs:
    runs-on: ubuntu-latest

    steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Download report artifact
-        uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
+        uses: actions/download-artifact@v5
        with:
          name: coverage-report.xml
          path: build
@@ -43,7 +43,7 @@ repos:
      # hadolint-docker is a special hook that runs hadolint in a Docker container
      # Docker is not installed in the environment where pre-commit is run
      stages: [manual]
-      entry: hadolint/hadolint:v2.14.0 hadolint
+      entry: hadolint/hadolint:v2.12.1-beta hadolint

  - repo: https://github.com/codespell-project/codespell
    rev: 63c8f8312b7559622c0d82815639671ae42132ac # frozen: v2.4.1
@@ -34,6 +34,7 @@ Below are some useful docs to learn more about Clio.

 - [How to configure Clio and rippled](./docs/configure-clio.md)
 - [How to run Clio](./docs/run-clio.md)
+- [Logging](./docs/logging.md)
 - [Troubleshooting guide](./docs/trouble_shooting.md)

 **General reference material:**
@@ -3,7 +3,7 @@
    "requires": [
        "zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1756234269.497",
        "xxhash/0.8.3#681d36a0a6111fc56e5e45ea182c19cc%1756234289.683",
-        "xrpl/2.6.1#973af2bf9631f239941dd9f5a100bb84%1759275059.342",
+        "xrpl/2.6.1-rc2#c14c6a4092fb2b97d3a93906dcee87b7%1759161400.392",
        "sqlite3/3.49.1#8631739a4c9b93bd3d6b753bac548a63%1756234266.869",
        "spdlog/1.15.3#3ca0e9e6b83af4d0151e26541d140c86%1754401846.61",
        "soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1756234262.318",
@@ -18,7 +18,7 @@ class ClioConan(ConanFile):
        'protobuf/3.21.12',
        'grpc/1.50.1',
        'openssl/1.1.1w',
-        'xrpl/2.6.1',
+        'xrpl/2.6.1-rc2',
        'zlib/1.3.1',
        'libbacktrace/cci.20210118',
        'spdlog/1.15.3',
@@ -43,20 +43,26 @@ RUN apt-get update \
    && rm -rf /var/lib/apt/lists/*

 # Install Python tools
-RUN apt-get update \
+ARG PYTHON_VERSION=3.13
+
+RUN add-apt-repository ppa:deadsnakes/ppa \
+    && apt-get update \
    && apt-get install -y --no-install-recommends --no-install-suggests \
-    python3 \
-    python3-pip \
+    python${PYTHON_VERSION} \
+    python${PYTHON_VERSION}-venv \
    && apt-get clean \
-    && rm -rf /var/lib/apt/lists/*
+    && rm -rf /var/lib/apt/lists/* \
+    && curl -sS https://bootstrap.pypa.io/get-pip.py | python${PYTHON_VERSION}
+
+# Create a virtual environment for python tools
+RUN python${PYTHON_VERSION} -m venv /opt/venv
+ENV PATH="/opt/venv/bin:$PATH"

 RUN pip install -q --no-cache-dir \
    # TODO: Remove this once we switch to newer Ubuntu base image
    # lxml 6.0.0 is not compatible with our image
    'lxml<6.0.0' \
    cmake \
    conan==2.20.1 \
-    gcovr
+    gcovr \
+    pre-commit

 # Install LLVM tools
 ARG LLVM_TOOLS_VERSION=20
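The added venv lines rely on `PATH` ordering: once `/opt/venv/bin` is prepended, every later `pip` or tool invocation in the image resolves into the virtual environment. A minimal sketch of the same setup outside Docker (the interpreter version is carried over from the `ARG` and assumed to be installed):

```sh
python3.13 -m venv /opt/venv
export PATH="/opt/venv/bin:$PATH"
command -v pip   # -> /opt/venv/bin/pip, so installs land in the venv
pip install -q gcovr pre-commit
```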
@@ -9,7 +9,7 @@ The image is based on Ubuntu 20.04 and contains:
 - Clang 19
 - ClangBuildAnalyzer 1.6.0
 - Conan 2.20.1
-- Doxygen 1.14
+- Doxygen 1.12
 - GCC 15.2.0
 - GDB 16.3
 - gh 2.74
@@ -1,6 +1,6 @@
 services:
  clio_develop:
-    image: ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d
+    image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
    volumes:
      - clio_develop_conan_data:/root/.conan2/p
      - clio_develop_ccache:/root/.ccache
@@ -1,38 +0,0 @@
-ARG GHCR_REPO=invalid
-FROM ${GHCR_REPO}/clio-tools:latest AS clio-tools
-
-# We're using Ubuntu 24.04 to have a more recent version of Python
-FROM ubuntu:24.04
-
-ARG DEBIAN_FRONTEND=noninteractive
-
-SHELL ["/bin/bash", "-o", "pipefail", "-c"]
-
-# hadolint ignore=DL3002
-USER root
-WORKDIR /root
-
-# Install common tools and dependencies
-RUN apt-get update \
-    && apt-get install -y --no-install-recommends --no-install-suggests \
-    curl \
-    git \
-    libatomic1 \
-    software-properties-common \
-    && apt-get clean \
-    && rm -rf /var/lib/apt/lists/*
-
-# Install Python tools
-RUN apt-get update \
-    && apt-get install -y --no-install-recommends --no-install-suggests \
-    python3 \
-    python3-pip \
-    && apt-get clean \
-    && rm -rf /var/lib/apt/lists/*
-
-RUN pip install -q --no-cache-dir --break-system-packages \
-    pre-commit
-
-COPY --from=clio-tools \
-    /usr/local/bin/doxygen \
-    /usr/local/bin/
@@ -51,7 +51,7 @@ RUN apt-get update \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

-ARG DOXYGEN_VERSION=1.14.0
+ARG DOXYGEN_VERSION=1.12.0
 RUN wget --progress=dot:giga "https://github.com/doxygen/doxygen/releases/download/Release_${DOXYGEN_VERSION//./_}/doxygen-${DOXYGEN_VERSION}.src.tar.gz" \
    && tar xf "doxygen-${DOXYGEN_VERSION}.src.tar.gz" \
    && cd "doxygen-${DOXYGEN_VERSION}" \
@@ -15,7 +15,6 @@ EXTRACT_ANON_NSPACES = NO
 SORT_MEMBERS_CTORS_1ST = YES

 INPUT = ${SOURCE}/src
-USE_MDFILE_AS_MAINPAGE = ${SOURCE}/src/README.md
 EXCLUDE_SYMBOLS = ${EXCLUDES}
 RECURSIVE = YES
 HAVE_DOT = ${USE_DOT}
@@ -177,7 +177,7 @@ There are several CMake options you can use to customize the build:

 ### Generating API docs for Clio

-The API documentation for Clio is generated by [Doxygen](https://www.doxygen.nl/index.html). If you want to generate the API documentation when building Clio, make sure to install Doxygen 1.14.0 on your system.
+The API documentation for Clio is generated by [Doxygen](https://www.doxygen.nl/index.html). If you want to generate the API documentation when building Clio, make sure to install Doxygen 1.12.0 on your system.

 To generate the API docs, please use CMake option `-Ddocs=ON` as described above and build the `docs` target.
@@ -191,7 +191,7 @@ Open the `index.html` file in your browser to see the documentation pages.
 It is also possible to build Clio using [Docker](https://www.docker.com/) if you don't want to install all the dependencies on your machine.

 ```sh
-docker run -it ghcr.io/xrplf/clio-ci:b2be4b51d1d81548ca48e2f2b8f67356b880c96d
+docker run -it ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d
 git clone https://github.com/XRPLF/clio
 cd clio
 ```
@@ -89,14 +89,6 @@ This document provides a list of all available Clio configuration properties in
 - **Constraints**: The minimum value is `1`. The maximum value is `4294967295`.
 - **Description**: Represents the number of threads that will be used for database operations.

-### database.cassandra.provider
-
-- **Required**: True
-- **Type**: string
-- **Default value**: `cassandra`
-- **Constraints**: The value must be one of the following: `cassandra`, `aws_keyspace`.
-- **Description**: The specific database backend provider we are using.
-
 ### database.cassandra.core_connections_per_host

 - **Required**: True
@@ -951,7 +951,7 @@ span.arrowhead {
|
||||
border-color: var(--primary-color);
|
||||
}
|
||||
|
||||
#nav-tree-contents > ul > li:first-child > div > a {
|
||||
#nav-tree ul li:first-child > div > a {
|
||||
opacity: 0;
|
||||
pointer-events: none;
|
||||
}
|
||||
|
||||
@@ -77,7 +77,7 @@ It's possible to configure `minimum`, `maximum` and `default` version like so:
|
||||
|
||||
All of the above are optional.
|
||||
|
||||
Clio will fallback to hardcoded defaults when these values are not specified in the config file, or if the configured values are outside of the minimum and maximum supported versions hardcoded in [src/rpc/common/APIVersion.hpp](../src/rpc/common/APIVersion.hpp).
|
||||
Clio will fallback to hardcoded defaults when these values are not specified in the config file, or if the configured values are outside of the minimum and maximum supported versions hardcoded in [src/rpc/common/APIVersion.h](../src/rpc/common/APIVersion.hpp).
|
||||
|
||||
> [!TIP]
|
||||
> See the [example-config.json](../docs/examples/config/example-config.json) for more details.
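For reference, a hedged sketch of what such a configuration section could look like (the key names follow the prose above and are illustrative only; consult example-config.json for the authoritative spelling):

```json
{
  "api_version": {
    "minimum": 1,
    "maximum": 3,
    "default": 1
  }
}
```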

@@ -36,19 +36,19 @@ EOF
exit 0
fi

# Check version of doxygen is at least 1.14
# Check version of doxygen is at least 1.12
version=$($DOXYGEN --version | grep -o '[0-9\.]*')

if [[ "1.14.0" > "$version" ]]; then
if [[ "1.12.0" > "$version" ]]; then
    # No hard error if doxygen version is not the one we want - let CI deal with it
    cat <<EOF

ERROR
-----------------------------------------------------------------------------
A minimum of version 1.14 of `which doxygen` is required.
Your version is $version. Please upgrade it.
A minimum of version 1.12 of `which doxygen` is required.
Your version is $version. Please upgrade it for next time.

Your changes may fail CI checks.
Your changes may fail to pass CI once pushed.
-----------------------------------------------------------------------------

EOF

@@ -1,20 +0,0 @@
# Clio API server

## Introduction

Clio is an XRP Ledger API server optimized for RPC calls over WebSocket or JSON-RPC.

It stores validated historical ledger and transaction data in a more space efficient format, and uses up to 4 times
less space than [rippled](https://github.com/XRPLF/rippled).

Clio can be configured to store data in [Apache Cassandra](https://cassandra.apache.org/_/index.html) or
[ScyllaDB](https://www.scylladb.com/), enabling scalable read throughput. Multiple Clio nodes can share
access to the same dataset, which allows for a highly available cluster of Clio nodes without the need for redundant
data storage or computation.

## Develop

As you prepare to develop code for Clio, please be sure you are aware of our current
[Contribution guidelines](https://github.com/XRPLF/clio/blob/develop/CONTRIBUTING.md).

Read about @ref "rpc" carefully to know more about writing your own handlers for Clio.
@@ -21,7 +21,6 @@

#include "data/BackendInterface.hpp"
#include "data/CassandraBackend.hpp"
#include "data/KeyspaceBackend.hpp"
#include "data/LedgerCacheInterface.hpp"
#include "data/cassandra/SettingsProvider.hpp"
#include "util/config/ConfigDefinition.hpp"
@@ -46,7 +45,6 @@ namespace data {
inline std::shared_ptr<BackendInterface>
makeBackend(util::config::ClioConfigDefinition const& config, data::LedgerCacheInterface& cache)
{
    using namespace cassandra::impl;
    static util::Logger const log{"Backend"}; // NOLINT(readability-identifier-naming)
    LOG(log.info()) << "Constructing BackendInterface";

@@ -57,15 +55,9 @@ makeBackend(util::config::ClioConfigDefinition const& config, data::LedgerCacheI

    if (boost::iequals(type, "cassandra")) {
        auto const cfg = config.getObject("database." + type);
        if (providerFromString(cfg.getValueView("provider").asString()) == Provider::Keyspace) {
            backend = std::make_shared<data::cassandra::KeyspaceBackend>(
                data::cassandra::SettingsProvider{cfg}, cache, readOnly
            );
        } else {
            backend = std::make_shared<data::cassandra::CassandraBackend>(
                data::cassandra::SettingsProvider{cfg}, cache, readOnly
            );
        }
        backend = std::make_shared<data::cassandra::CassandraBackend>(
            data::cassandra::SettingsProvider{cfg}, cache, readOnly
        );
    }

    if (!backend)

@@ -295,7 +295,7 @@ public:
     * @param account The account to fetch transactions for
     * @param limit The maximum number of transactions per result page
     * @param forward Whether to fetch the page forwards or backwards from the given cursor
     * @param txnCursor The cursor to resume fetching from
     * @param cursor The cursor to resume fetching from
     * @param yield The coroutine context
     * @return Results and a cursor to resume from
     */
@@ -304,7 +304,7 @@ public:
        ripple::AccountID const& account,
        std::uint32_t limit,
        bool forward,
        std::optional<TransactionsCursor> const& txnCursor,
        std::optional<TransactionsCursor> const& cursor,
        boost::asio::yield_context yield
    ) const = 0;
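    // Illustrative usage, not part of this header: callers typically drain pages by
    // feeding the returned cursor back into the next call, e.g.
    //
    //     std::optional<TransactionsCursor> cursor;
    //     do {
    //         auto const page = backend.fetchAccountTransactions(account, 200u, /* forward = */ false, cursor, yield);
    //         // ... consume page.txns ...
    //         cursor = page.cursor;
    //     } while (cursor.has_value());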

File diff suppressed because it is too large
@@ -1,309 +0,0 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2025, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include "data/LedgerHeaderCache.hpp"
#include "data/Types.hpp"
#include "data/cassandra/CassandraBackendFamily.hpp"
#include "data/cassandra/Concepts.hpp"
#include "data/cassandra/KeyspaceSchema.hpp"
#include "data/cassandra/SettingsProvider.hpp"
#include "data/cassandra/Types.hpp"
#include "data/cassandra/impl/ExecutionStrategy.hpp"
#include "util/Assert.hpp"
#include "util/log/Logger.hpp"

#include <boost/asio/spawn.hpp>
#include <boost/json/object.hpp>
#include <boost/uuid/string_generator.hpp>
#include <boost/uuid/uuid.hpp>
#include <cassandra.h>
#include <fmt/format.h>
#include <xrpl/basics/Blob.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/basics/strHex.h>
#include <xrpl/protocol/AccountID.h>
#include <xrpl/protocol/Indexes.h>
#include <xrpl/protocol/LedgerHeader.h>
#include <xrpl/protocol/nft.h>

#include <cstddef>
#include <cstdint>
#include <iterator>
#include <optional>
#include <stdexcept>
#include <utility>
#include <vector>

namespace data::cassandra {

/**
 * @brief Implements @ref CassandraBackendFamily for Keyspace
 *
 * @tparam SettingsProviderType The settings provider type
 * @tparam ExecutionStrategyType The execution strategy type
 * @tparam FetchLedgerCacheType The ledger header cache type
 */
template <
    SomeSettingsProvider SettingsProviderType,
    SomeExecutionStrategy ExecutionStrategyType,
    typename FetchLedgerCacheType = FetchLedgerCache>
class BasicKeyspaceBackend : public CassandraBackendFamily<
                                 SettingsProviderType,
                                 ExecutionStrategyType,
                                 KeyspaceSchema<SettingsProviderType>,
                                 FetchLedgerCacheType> {
    using DefaultCassandraFamily = CassandraBackendFamily<
        SettingsProviderType,
        ExecutionStrategyType,
        KeyspaceSchema<SettingsProviderType>,
        FetchLedgerCacheType>;

    using DefaultCassandraFamily::executor_;
    using DefaultCassandraFamily::ledgerSequence_;
    using DefaultCassandraFamily::log_;
    using DefaultCassandraFamily::range_;
    using DefaultCassandraFamily::schema_;

public:
    /**
     * @brief Inherit the constructors of the base class.
     */
    using DefaultCassandraFamily::DefaultCassandraFamily;

    /**
     * @brief Move constructor is deleted because handle_ is shared by reference with executor
     */
    BasicKeyspaceBackend(BasicKeyspaceBackend&&) = delete;

    bool
    doFinishWrites() override
    {
        this->waitForWritesToFinish();

        // !range_.has_value() means the table 'ledger_range' is not populated;
        // This would be the first write to the table.
        // In this case, insert both min_sequence/max_sequence range into the table.
        if (not range_.has_value()) {
            executor_.writeSync(schema_->insertLedgerRange, /* isLatestLedger =*/false, ledgerSequence_);
            executor_.writeSync(schema_->insertLedgerRange, /* isLatestLedger =*/true, ledgerSequence_);
        }

        if (not this->executeSyncUpdate(schema_->updateLedgerRange.bind(ledgerSequence_, true, ledgerSequence_ - 1))) {
            log_.warn() << "Update failed for ledger " << ledgerSequence_;
            return false;
        }

        log_.info() << "Committed ledger " << ledgerSequence_;
        return true;
    }

    NFTsAndCursor
    fetchNFTsByIssuer(
        ripple::AccountID const& issuer,
        std::optional<std::uint32_t> const& taxon,
        std::uint32_t const ledgerSequence,
        std::uint32_t const limit,
        std::optional<ripple::uint256> const& cursorIn,
        boost::asio::yield_context yield
    ) const override
    {
        std::vector<ripple::uint256> nftIDs;
        if (taxon.has_value()) {
            // Keyspace and ScyllaDB use the same logic for taxon-filtered queries
            nftIDs = fetchNFTIDsByTaxon(issuer, *taxon, limit, cursorIn, yield);
        } else {
            // Amazon Keyspaces workflow for non-taxon queries
            auto const startTaxon = cursorIn.has_value() ? ripple::nft::toUInt32(ripple::nft::getTaxon(*cursorIn)) : 0;
            auto const startTokenID = cursorIn.value_or(ripple::uint256(0));

            Statement const firstQuery = schema_->selectNFTIDsByIssuerTaxon.bind(issuer);
            firstQuery.bindAt(1, startTaxon);
            firstQuery.bindAt(2, startTokenID);
            firstQuery.bindAt(3, Limit{limit});

            auto const firstRes = executor_.read(yield, firstQuery);
            if (firstRes.has_value()) {
                for (auto const [nftID] : extract<ripple::uint256>(*firstRes))
                    nftIDs.push_back(nftID);
            }

            if (nftIDs.size() < limit) {
                auto const remainingLimit = limit - nftIDs.size();
                Statement const secondQuery = schema_->selectNFTsAfterTaxonKeyspaces.bind(issuer);
                secondQuery.bindAt(1, startTaxon);
                secondQuery.bindAt(2, Limit{remainingLimit});

                auto const secondRes = executor_.read(yield, secondQuery);
                if (secondRes.has_value()) {
                    for (auto const [nftID] : extract<ripple::uint256>(*secondRes))
                        nftIDs.push_back(nftID);
                }
            }
        }
        return populateNFTsAndCreateCursor(nftIDs, ledgerSequence, limit, yield);
    }

    /**
     * @brief (Unsupported in Keyspaces) Fetches account root object indexes by page.
     * @note Loading the cache by enumerating all accounts is currently unsupported by the AWS Keyspaces backend.
     * This function's logic relies on "PER PARTITION LIMIT 1", which Keyspaces does not support, and there is
     * no efficient alternative. This is acceptable as the cache is primarily loaded via diffs. Calling this
     * function will throw an exception.
     *
     * @param number The total number of accounts to fetch.
     * @param pageSize The maximum number of accounts per page.
     * @param seq The accounts need to exist at this ledger sequence.
     * @param yield The coroutine context.
     * @return A vector of ripple::uint256 representing the account root hashes.
     */
    std::vector<ripple::uint256>
    fetchAccountRoots(
        [[maybe_unused]] std::uint32_t number,
        [[maybe_unused]] std::uint32_t pageSize,
        [[maybe_unused]] std::uint32_t seq,
        [[maybe_unused]] boost::asio::yield_context yield
    ) const override
    {
        ASSERT(false, "Fetching account roots is not supported by the Keyspaces backend.");
        std::unreachable();
    }

private:
    std::vector<ripple::uint256>
    fetchNFTIDsByTaxon(
        ripple::AccountID const& issuer,
        std::uint32_t const taxon,
        std::uint32_t const limit,
        std::optional<ripple::uint256> const& cursorIn,
        boost::asio::yield_context yield
    ) const
    {
        std::vector<ripple::uint256> nftIDs;
        Statement const statement = schema_->selectNFTIDsByIssuerTaxon.bind(issuer);
        statement.bindAt(1, taxon);
        statement.bindAt(2, cursorIn.value_or(ripple::uint256(0)));
        statement.bindAt(3, Limit{limit});

        auto const res = executor_.read(yield, statement);
        if (res.has_value() && res->hasRows()) {
            for (auto const [nftID] : extract<ripple::uint256>(*res))
                nftIDs.push_back(nftID);
        }
        return nftIDs;
    }

    std::vector<ripple::uint256>
    fetchNFTIDsWithoutTaxon(
        ripple::AccountID const& issuer,
        std::uint32_t const limit,
        std::optional<ripple::uint256> const& cursorIn,
        boost::asio::yield_context yield
    ) const
    {
        std::vector<ripple::uint256> nftIDs;

        auto const startTaxon = cursorIn.has_value() ? ripple::nft::toUInt32(ripple::nft::getTaxon(*cursorIn)) : 0;
        auto const startTokenID = cursorIn.value_or(ripple::uint256(0));

        Statement firstQuery = schema_->selectNFTIDsByIssuerTaxon.bind(issuer);
        firstQuery.bindAt(1, startTaxon);
        firstQuery.bindAt(2, startTokenID);
        firstQuery.bindAt(3, Limit{limit});

        auto const firstRes = executor_.read(yield, firstQuery);
        if (firstRes.has_value()) {
            for (auto const [nftID] : extract<ripple::uint256>(*firstRes))
                nftIDs.push_back(nftID);
        }

        if (nftIDs.size() < limit) {
            auto const remainingLimit = limit - nftIDs.size();
            Statement secondQuery = schema_->selectNFTsAfterTaxonKeyspaces.bind(issuer);
            secondQuery.bindAt(1, startTaxon);
            secondQuery.bindAt(2, Limit{remainingLimit});

            auto const secondRes = executor_.read(yield, secondQuery);
            if (secondRes.has_value()) {
                for (auto const [nftID] : extract<ripple::uint256>(*secondRes))
                    nftIDs.push_back(nftID);
            }
        }
        return nftIDs;
    }

    /**
     * @brief Takes a list of NFT IDs, fetches their full data, and assembles the final result with a cursor.
     */
    NFTsAndCursor
    populateNFTsAndCreateCursor(
        std::vector<ripple::uint256> const& nftIDs,
        std::uint32_t const ledgerSequence,
        std::uint32_t const limit,
        boost::asio::yield_context yield
    ) const
    {
        if (nftIDs.empty()) {
            LOG(log_.debug()) << "No rows returned";
            return {};
        }

        NFTsAndCursor ret;
        if (nftIDs.size() == limit)
            ret.cursor = nftIDs.back();

        // Prepare and execute queries to fetch NFT info and URIs in parallel.
        std::vector<Statement> selectNFTStatements;
        selectNFTStatements.reserve(nftIDs.size());
        std::transform(
            std::cbegin(nftIDs), std::cend(nftIDs), std::back_inserter(selectNFTStatements), [&](auto const& nftID) {
                return schema_->selectNFT.bind(nftID, ledgerSequence);
            }
        );

        std::vector<Statement> selectNFTURIStatements;
        selectNFTURIStatements.reserve(nftIDs.size());
        std::transform(
            std::cbegin(nftIDs), std::cend(nftIDs), std::back_inserter(selectNFTURIStatements), [&](auto const& nftID) {
                return schema_->selectNFTURI.bind(nftID, ledgerSequence);
            }
        );

        auto const nftInfos = executor_.readEach(yield, selectNFTStatements);
        auto const nftUris = executor_.readEach(yield, selectNFTURIStatements);

        // Combine the results into final NFT objects.
        for (auto i = 0u; i < nftIDs.size(); ++i) {
            if (auto const maybeRow = nftInfos[i].template get<uint32_t, ripple::AccountID, bool>();
                maybeRow.has_value()) {
                auto [seq, owner, isBurned] = *maybeRow;
                NFT nft(nftIDs[i], seq, owner, isBurned);
                if (auto const maybeUri = nftUris[i].template get<ripple::Blob>(); maybeUri.has_value())
                    nft.uri = *maybeUri;
                ret.nfts.push_back(nft);
            }
        }
        return ret;
    }
};

using KeyspaceBackend = BasicKeyspaceBackend<SettingsProvider, impl::DefaultExecutionStrategy<>>;

} // namespace data::cassandra
@@ -1,10 +1,8 @@

# Backend

@page "backend" Backend
# Backend

The backend of Clio is responsible for handling the proper reading and writing of past ledger data from and to a given database. Currently, Cassandra and ScyllaDB are the only supported databases that are production-ready.

To support additional database types, you can create new classes that implement the virtual methods in [BackendInterface.hpp](https://github.com/XRPLF/clio/blob/develop/src/data/BackendInterface.hpp). Then, leveraging the Factory Object Design Pattern, modify [BackendFactory.hpp](https://github.com/XRPLF/clio/blob/develop/src/data/BackendFactory.hpp) with logic that returns the new database interface if the relevant `type` is provided in Clio's configuration file.
To support additional database types, you can create new classes that implement the virtual methods in [BackendInterface.h](https://github.com/XRPLF/clio/blob/develop/src/data/BackendInterface.hpp). Then, leveraging the Factory Object Design Pattern, modify [BackendFactory.h](https://github.com/XRPLF/clio/blob/develop/src/data/BackendFactory.hpp) with logic that returns the new database interface if the relevant `type` is provided in Clio's configuration file.
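As a rough sketch of that pattern (the `"examplebackend"` type string and the `ExampleBackend`/`ExampleSettingsProvider` names are invented here purely for illustration; the real dispatch lives in BackendFactory.hpp):

```cpp
// Hypothetical extra branch inside makeBackend(), mirroring the existing "cassandra" one.
// None of the Example* names exist in the codebase; they stand in for your new classes.
if (boost::iequals(type, "examplebackend")) {
    auto const cfg = config.getObject("database." + type);
    backend = std::make_shared<data::ExampleBackend>(ExampleSettingsProvider{cfg}, cache, readOnly);
}
```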

## Data Model

@@ -1,975 +0,0 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2025, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include "data/BackendInterface.hpp"
#include "data/DBHelpers.hpp"
#include "data/LedgerCacheInterface.hpp"
#include "data/LedgerHeaderCache.hpp"
#include "data/Types.hpp"
#include "data/cassandra/Concepts.hpp"
#include "data/cassandra/Handle.hpp"
#include "data/cassandra/Types.hpp"
#include "data/cassandra/impl/ExecutionStrategy.hpp"
#include "util/Assert.hpp"
#include "util/LedgerUtils.hpp"
#include "util/Profiler.hpp"
#include "util/log/Logger.hpp"

#include <boost/asio/spawn.hpp>
#include <boost/json/object.hpp>
#include <boost/uuid/string_generator.hpp>
#include <boost/uuid/uuid.hpp>
#include <cassandra.h>
#include <fmt/format.h>
#include <xrpl/basics/Blob.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/basics/strHex.h>
#include <xrpl/protocol/AccountID.h>
#include <xrpl/protocol/Indexes.h>
#include <xrpl/protocol/LedgerHeader.h>
#include <xrpl/protocol/nft.h>

#include <algorithm>
#include <atomic>
#include <chrono>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <limits>
#include <optional>
#include <stdexcept>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

class CacheBackendCassandraTest;

namespace data::cassandra {

/**
 * @brief Implements @ref BackendInterface for Cassandra/ScyllaDB/Keyspace.
 *
 * Note: This is a safer and more correct rewrite of the original implementation of the backend.
 *
 * @tparam SettingsProviderType The settings provider type
 * @tparam ExecutionStrategyType The execution strategy type
 * @tparam SchemaType The Schema type
 * @tparam FetchLedgerCacheType The ledger header cache type
 */
template <
    SomeSettingsProvider SettingsProviderType,
    SomeExecutionStrategy ExecutionStrategyType,
    typename SchemaType,
    typename FetchLedgerCacheType = FetchLedgerCache>
class CassandraBackendFamily : public BackendInterface {
protected:
    util::Logger log_{"Backend"};

    SettingsProviderType settingsProvider_;
    SchemaType schema_;
    std::atomic_uint32_t ledgerSequence_ = 0u;
    friend class ::CacheBackendCassandraTest;

    Handle handle_;

    // have to be mutable because BackendInterface constness :(
    mutable ExecutionStrategyType executor_;
    // TODO: move to interface level
    mutable FetchLedgerCacheType ledgerCache_{};

public:
    /**
     * @brief Create a new cassandra/scylla backend instance.
     *
     * @param settingsProvider The settings provider
     * @param cache The ledger cache
     * @param readOnly Whether the database should be in readonly mode
     */
    CassandraBackendFamily(SettingsProviderType settingsProvider, data::LedgerCacheInterface& cache, bool readOnly)
        : BackendInterface(cache)
        , settingsProvider_{std::move(settingsProvider)}
        , schema_{settingsProvider_}
        , handle_{settingsProvider_.getSettings()}
        , executor_{settingsProvider_.getSettings(), handle_}
    {
        if (auto const res = handle_.connect(); not res.has_value())
            throw std::runtime_error("Could not connect to database: " + res.error());

        if (not readOnly) {
            if (auto const res = handle_.execute(schema_.createKeyspace); not res.has_value()) {
                // On DataStax, creation of keyspaces can be configured to be done only through
                // the admin interface; that does not mean the keyspace does not already exist, though.
                if (res.error().code() != CASS_ERROR_SERVER_UNAUTHORIZED)
                    throw std::runtime_error("Could not create keyspace: " + res.error());
            }

            if (auto const res = handle_.executeEach(schema_.createSchema); not res.has_value())
                throw std::runtime_error("Could not create schema: " + res.error());
        }

        try {
            schema_.prepareStatements(handle_);
        } catch (std::runtime_error const& ex) {
            auto const error = fmt::format(
                "Failed to prepare the statements: {}; readOnly: {}. ReadOnly should be turned off or another Clio "
                "node with write access to DB should be started first.",
                ex.what(),
                readOnly
            );
            LOG(log_.error()) << error;
            throw std::runtime_error(error);
        }
        LOG(log_.info()) << "Created (revamped) CassandraBackend";
    }

    /*
     * @brief Move constructor is deleted because handle_ is shared by reference with executor
     */
    CassandraBackendFamily(CassandraBackendFamily&&) = delete;

    TransactionsAndCursor
    fetchAccountTransactions(
        ripple::AccountID const& account,
        std::uint32_t const limit,
        bool forward,
        std::optional<TransactionsCursor> const& txnCursor,
        boost::asio::yield_context yield
    ) const override
    {
        auto rng = fetchLedgerRange();
        if (!rng)
            return {.txns = {}, .cursor = {}};

        Statement const statement = [this, forward, &account]() {
            if (forward)
                return schema_->selectAccountTxForward.bind(account);

            return schema_->selectAccountTx.bind(account);
        }();

        auto cursor = txnCursor;
        if (cursor) {
            statement.bindAt(1, cursor->asTuple());
            LOG(log_.debug()) << "account = " << ripple::strHex(account) << " tuple = " << cursor->ledgerSequence
                              << cursor->transactionIndex;
        } else {
            auto const seq = forward ? rng->minSequence : rng->maxSequence;
            auto const placeHolder = forward ? 0u : std::numeric_limits<std::uint32_t>::max();

            statement.bindAt(1, std::make_tuple(placeHolder, placeHolder));
            LOG(log_.debug()) << "account = " << ripple::strHex(account) << " idx = " << seq
                              << " tuple = " << placeHolder;
        }

        // FIXME: Limit is a hack to support uint32_t properly for the time
        // being. Should be removed later and schema updated to use proper
        // types.
        statement.bindAt(2, Limit{limit});
        auto const res = executor_.read(yield, statement);
        auto const& results = res.value();
        if (not results.hasRows()) {
            LOG(log_.debug()) << "No rows returned";
            return {};
        }

        std::vector<ripple::uint256> hashes = {};
        auto numRows = results.numRows();
        LOG(log_.info()) << "num_rows = " << numRows;

        for (auto [hash, data] : extract<ripple::uint256, std::tuple<uint32_t, uint32_t>>(results)) {
            hashes.push_back(hash);
            if (--numRows == 0) {
                LOG(log_.debug()) << "Setting cursor";
                cursor = data;
            }
        }

        auto const txns = fetchTransactions(hashes, yield);
        LOG(log_.debug()) << "Txns = " << txns.size();

        if (txns.size() == limit) {
            LOG(log_.debug()) << "Returning cursor";
            return {txns, cursor};
        }

        return {txns, {}};
    }

    void
    waitForWritesToFinish() override
    {
        executor_.sync();
    }

    void
    writeLedger(ripple::LedgerHeader const& ledgerHeader, std::string&& blob) override
    {
        executor_.write(schema_->insertLedgerHeader, ledgerHeader.seq, std::move(blob));

        executor_.write(schema_->insertLedgerHash, ledgerHeader.hash, ledgerHeader.seq);

        ledgerSequence_ = ledgerHeader.seq;
    }

    std::optional<std::uint32_t>
    fetchLatestLedgerSequence(boost::asio::yield_context yield) const override
    {
        if (auto const res = executor_.read(yield, schema_->selectLatestLedger); res.has_value()) {
            if (auto const& rows = *res; rows) {
                if (auto const maybeRow = rows.template get<uint32_t>(); maybeRow.has_value())
                    return maybeRow;

                LOG(log_.error()) << "Could not fetch latest ledger - no rows";
                return std::nullopt;
            }

            LOG(log_.error()) << "Could not fetch latest ledger - no result";
        } else {
            LOG(log_.error()) << "Could not fetch latest ledger: " << res.error();
        }

        return std::nullopt;
    }

    std::optional<ripple::LedgerHeader>
    fetchLedgerBySequence(std::uint32_t const sequence, boost::asio::yield_context yield) const override
    {
        if (auto const lock = ledgerCache_.get(); lock.has_value() && lock->seq == sequence)
            return lock->ledger;

        auto const res = executor_.read(yield, schema_->selectLedgerBySeq, sequence);
        if (res) {
            if (auto const& result = res.value(); result) {
                if (auto const maybeValue = result.template get<std::vector<unsigned char>>(); maybeValue) {
                    auto const header = util::deserializeHeader(ripple::makeSlice(*maybeValue));
                    ledgerCache_.put(FetchLedgerCache::CacheEntry{header, sequence});
                    return header;
                }

                LOG(log_.error()) << "Could not fetch ledger by sequence - no rows";
                return std::nullopt;
            }

            LOG(log_.error()) << "Could not fetch ledger by sequence - no result";
        } else {
            LOG(log_.error()) << "Could not fetch ledger by sequence: " << res.error();
        }

        return std::nullopt;
    }

    std::optional<ripple::LedgerHeader>
    fetchLedgerByHash(ripple::uint256 const& hash, boost::asio::yield_context yield) const override
    {
        if (auto const res = executor_.read(yield, schema_->selectLedgerByHash, hash); res) {
            if (auto const& result = res.value(); result) {
                if (auto const maybeValue = result.template get<uint32_t>(); maybeValue)
                    return fetchLedgerBySequence(*maybeValue, yield);

                LOG(log_.error()) << "Could not fetch ledger by hash - no rows";
                return std::nullopt;
            }

            LOG(log_.error()) << "Could not fetch ledger by hash - no result";
        } else {
            LOG(log_.error()) << "Could not fetch ledger by hash: " << res.error();
        }

        return std::nullopt;
    }

    std::optional<LedgerRange>
    hardFetchLedgerRange(boost::asio::yield_context yield) const override
    {
        auto const res = executor_.read(yield, schema_->selectLedgerRange);
        if (res) {
            auto const& results = res.value();
            if (not results.hasRows()) {
                LOG(log_.debug()) << "Could not fetch ledger range - no rows";
                return std::nullopt;
            }

            // TODO: this is probably a good place to use user type in
            // cassandra instead of having two rows with bool flag. or maybe at
            // least use tuple<int, int>?
            LedgerRange range;
            std::size_t idx = 0;
            for (auto [seq] : extract<uint32_t>(results)) {
                if (idx == 0) {
                    range.maxSequence = range.minSequence = seq;
                } else if (idx == 1) {
                    range.maxSequence = seq;
                }

                ++idx;
            }

            if (range.minSequence > range.maxSequence)
                std::swap(range.minSequence, range.maxSequence);

            LOG(log_.debug()) << "After hardFetchLedgerRange range is " << range.minSequence << ":"
                              << range.maxSequence;
            return range;
        }
        LOG(log_.error()) << "Could not fetch ledger range: " << res.error();

        return std::nullopt;
    }

    std::vector<TransactionAndMetadata>
    fetchAllTransactionsInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
    {
        auto hashes = fetchAllTransactionHashesInLedger(ledgerSequence, yield);
        return fetchTransactions(hashes, yield);
    }

    std::vector<ripple::uint256>
    fetchAllTransactionHashesInLedger(
        std::uint32_t const ledgerSequence,
        boost::asio::yield_context yield
    ) const override
    {
        auto start = std::chrono::system_clock::now();
        auto const res = executor_.read(yield, schema_->selectAllTransactionHashesInLedger, ledgerSequence);

        if (not res) {
            LOG(log_.error()) << "Could not fetch all transaction hashes: " << res.error();
            return {};
        }

        auto const& result = res.value();
        if (not result.hasRows()) {
            LOG(log_.warn()) << "Could not fetch all transaction hashes - no rows; ledger = "
                             << std::to_string(ledgerSequence);
            return {};
        }

        std::vector<ripple::uint256> hashes;
        for (auto [hash] : extract<ripple::uint256>(result))
            hashes.push_back(std::move(hash));

        auto end = std::chrono::system_clock::now();
        LOG(log_.debug()) << "Fetched " << hashes.size() << " transaction hashes from database in "
                          << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
                          << " milliseconds";

        return hashes;
    }

    std::optional<NFT>
    fetchNFT(
        ripple::uint256 const& tokenID,
        std::uint32_t const ledgerSequence,
        boost::asio::yield_context yield
    ) const override
    {
        auto const res = executor_.read(yield, schema_->selectNFT, tokenID, ledgerSequence);
        if (not res)
            return std::nullopt;

        if (auto const maybeRow = res->template get<uint32_t, ripple::AccountID, bool>(); maybeRow) {
            auto [seq, owner, isBurned] = *maybeRow;
            auto result = std::make_optional<NFT>(tokenID, seq, owner, isBurned);

            // now fetch URI. Usually we will have the URI even for burned NFTs,
            // but if the first ledger on this clio included NFTokenBurn
            // transactions we will not have the URIs for any of those tokens.
            // In any other case not having the URI indicates something went
            // wrong with our data.
            //
            // TODO - in the future would be great for any handlers that use
            // this could inject a warning in this case (the case of not having
            // a URI because it was burned in the first ledger) to indicate that
            // even though we are returning a blank URI, the NFT might have had
            // one.
            auto uriRes = executor_.read(yield, schema_->selectNFTURI, tokenID, ledgerSequence);
            if (uriRes) {
                if (auto const maybeUri = uriRes->template get<ripple::Blob>(); maybeUri)
                    result->uri = *maybeUri;
            }

            return result;
        }

        LOG(log_.error()) << "Could not fetch NFT - no rows";
        return std::nullopt;
    }

    TransactionsAndCursor
    fetchNFTTransactions(
        ripple::uint256 const& tokenID,
        std::uint32_t const limit,
        bool const forward,
        std::optional<TransactionsCursor> const& cursorIn,
        boost::asio::yield_context yield
    ) const override
    {
        auto rng = fetchLedgerRange();
        if (!rng)
            return {.txns = {}, .cursor = {}};

        Statement const statement = [this, forward, &tokenID]() {
            if (forward)
                return schema_->selectNFTTxForward.bind(tokenID);

            return schema_->selectNFTTx.bind(tokenID);
        }();

        auto cursor = cursorIn;
        if (cursor) {
            statement.bindAt(1, cursor->asTuple());
            LOG(log_.debug()) << "token_id = " << ripple::strHex(tokenID) << " tuple = " << cursor->ledgerSequence
                              << cursor->transactionIndex;
        } else {
            auto const seq = forward ? rng->minSequence : rng->maxSequence;
            auto const placeHolder = forward ? 0 : std::numeric_limits<std::uint32_t>::max();

            statement.bindAt(1, std::make_tuple(placeHolder, placeHolder));
            LOG(log_.debug()) << "token_id = " << ripple::strHex(tokenID) << " idx = " << seq
                              << " tuple = " << placeHolder;
        }

        statement.bindAt(2, Limit{limit});

        auto const res = executor_.read(yield, statement);
        auto const& results = res.value();
        if (not results.hasRows()) {
            LOG(log_.debug()) << "No rows returned";
            return {};
        }

        std::vector<ripple::uint256> hashes = {};
        auto numRows = results.numRows();
        LOG(log_.info()) << "num_rows = " << numRows;

        for (auto [hash, data] : extract<ripple::uint256, std::tuple<uint32_t, uint32_t>>(results)) {
            hashes.push_back(hash);
            if (--numRows == 0) {
                LOG(log_.debug()) << "Setting cursor";
                cursor = data;

                // forward queries by ledger/tx sequence `>=`
                // so we have to advance the index by one
                if (forward)
                    ++cursor->transactionIndex;
            }
        }

        auto const txns = fetchTransactions(hashes, yield);
        LOG(log_.debug()) << "NFT Txns = " << txns.size();

        if (txns.size() == limit) {
            LOG(log_.debug()) << "Returning cursor";
            return {txns, cursor};
        }

        return {txns, {}};
    }

    MPTHoldersAndCursor
    fetchMPTHolders(
        ripple::uint192 const& mptID,
        std::uint32_t const limit,
        std::optional<ripple::AccountID> const& cursorIn,
        std::uint32_t const ledgerSequence,
        boost::asio::yield_context yield
    ) const override
    {
        auto const holderEntries = executor_.read(
            yield, schema_->selectMPTHolders, mptID, cursorIn.value_or(ripple::AccountID(0)), Limit{limit}
        );

        auto const& holderResults = holderEntries.value();
        if (not holderResults.hasRows()) {
            LOG(log_.debug()) << "No rows returned";
            return {};
        }

        std::vector<ripple::uint256> mptKeys;
        std::optional<ripple::AccountID> cursor;
        for (auto const [holder] : extract<ripple::AccountID>(holderResults)) {
            mptKeys.push_back(ripple::keylet::mptoken(mptID, holder).key);
            cursor = holder;
        }

        auto mptObjects = doFetchLedgerObjects(mptKeys, ledgerSequence, yield);

        auto it = std::remove_if(mptObjects.begin(), mptObjects.end(), [](Blob const& mpt) { return mpt.empty(); });

        mptObjects.erase(it, mptObjects.end());

        ASSERT(mptKeys.size() <= limit, "Number of keys can't exceed the limit");
        if (mptKeys.size() == limit)
            return {mptObjects, cursor};

        return {mptObjects, {}};
    }

    std::optional<Blob>
    doFetchLedgerObject(
        ripple::uint256 const& key,
        std::uint32_t const sequence,
        boost::asio::yield_context yield
    ) const override
    {
        LOG(log_.debug()) << "Fetching ledger object for seq " << sequence << ", key = " << ripple::to_string(key);
        if (auto const res = executor_.read(yield, schema_->selectObject, key, sequence); res) {
            if (auto const result = res->template get<Blob>(); result) {
                if (result->size())
                    return result;
            } else {
                LOG(log_.debug()) << "Could not fetch ledger object - no rows";
            }
        } else {
            LOG(log_.error()) << "Could not fetch ledger object: " << res.error();
        }

        return std::nullopt;
    }

    std::optional<std::uint32_t>
    doFetchLedgerObjectSeq(
        ripple::uint256 const& key,
        std::uint32_t const sequence,
        boost::asio::yield_context yield
    ) const override
    {
        LOG(log_.debug()) << "Fetching ledger object for seq " << sequence << ", key = " << ripple::to_string(key);
        if (auto const res = executor_.read(yield, schema_->selectObject, key, sequence); res) {
            if (auto const result = res->template get<Blob, std::uint32_t>(); result) {
                auto [_, seq] = result.value();
                return seq;
            }
            LOG(log_.debug()) << "Could not fetch ledger object sequence - no rows";
        } else {
            LOG(log_.error()) << "Could not fetch ledger object sequence: " << res.error();
        }

        return std::nullopt;
    }

    std::optional<TransactionAndMetadata>
    fetchTransaction(ripple::uint256 const& hash, boost::asio::yield_context yield) const override
    {
        if (auto const res = executor_.read(yield, schema_->selectTransaction, hash); res) {
            if (auto const maybeValue = res->template get<Blob, Blob, uint32_t, uint32_t>(); maybeValue) {
                auto [transaction, meta, seq, date] = *maybeValue;
                return std::make_optional<TransactionAndMetadata>(transaction, meta, seq, date);
            }

            LOG(log_.debug()) << "Could not fetch transaction - no rows";
        } else {
            LOG(log_.error()) << "Could not fetch transaction: " << res.error();
        }

        return std::nullopt;
    }

    std::optional<ripple::uint256>
    doFetchSuccessorKey(
        ripple::uint256 key,
        std::uint32_t const ledgerSequence,
        boost::asio::yield_context yield
    ) const override
    {
        if (auto const res = executor_.read(yield, schema_->selectSuccessor, key, ledgerSequence); res) {
            if (auto const result = res->template get<ripple::uint256>(); result) {
                if (*result == kLAST_KEY)
                    return std::nullopt;
                return result;
            }

            LOG(log_.debug()) << "Could not fetch successor - no rows";
        } else {
            LOG(log_.error()) << "Could not fetch successor: " << res.error();
        }

        return std::nullopt;
    }

    std::vector<TransactionAndMetadata>
    fetchTransactions(std::vector<ripple::uint256> const& hashes, boost::asio::yield_context yield) const override
    {
        if (hashes.empty())
            return {};

        auto const numHashes = hashes.size();
        std::vector<TransactionAndMetadata> results;
        results.reserve(numHashes);

        std::vector<Statement> statements;
        statements.reserve(numHashes);

        auto const timeDiff = util::timed([this, yield, &results, &hashes, &statements]() {
            // TODO: seems like a job for "hash IN (list of hashes)" instead?
            std::transform(
                std::cbegin(hashes), std::cend(hashes), std::back_inserter(statements), [this](auto const& hash) {
                    return schema_->selectTransaction.bind(hash);
                }
            );

            auto const entries = executor_.readEach(yield, statements);
            std::transform(
                std::cbegin(entries),
                std::cend(entries),
                std::back_inserter(results),
                [](auto const& res) -> TransactionAndMetadata {
                    if (auto const maybeRow = res.template get<Blob, Blob, uint32_t, uint32_t>(); maybeRow)
                        return *maybeRow;

                    return {};
                }
            );
        });

        ASSERT(numHashes == results.size(), "Number of hashes and results must match");
        LOG(log_.debug()) << "Fetched " << numHashes << " transactions from database in " << timeDiff
                          << " milliseconds";
        return results;
    }

    std::vector<Blob>
    doFetchLedgerObjects(
        std::vector<ripple::uint256> const& keys,
        std::uint32_t const sequence,
        boost::asio::yield_context yield
    ) const override
    {
        if (keys.empty())
            return {};

        auto const numKeys = keys.size();
        LOG(log_.trace()) << "Fetching " << numKeys << " objects";

        std::vector<Blob> results;
        results.reserve(numKeys);

        std::vector<Statement> statements;
        statements.reserve(numKeys);

        // TODO: seems like a job for "key IN (list of keys)" instead?
        std::transform(
            std::cbegin(keys), std::cend(keys), std::back_inserter(statements), [this, &sequence](auto const& key) {
                return schema_->selectObject.bind(key, sequence);
            }
        );

        auto const entries = executor_.readEach(yield, statements);
        std::transform(
            std::cbegin(entries), std::cend(entries), std::back_inserter(results), [](auto const& res) -> Blob {
                if (auto const maybeValue = res.template get<Blob>(); maybeValue)
                    return *maybeValue;

                return {};
            }
        );

        LOG(log_.trace()) << "Fetched " << numKeys << " objects";
        return results;
    }

    std::vector<LedgerObject>
    fetchLedgerDiff(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
    {
        auto const [keys, timeDiff] = util::timed([this, &ledgerSequence, yield]() -> std::vector<ripple::uint256> {
            auto const res = executor_.read(yield, schema_->selectDiff, ledgerSequence);
            if (not res) {
                LOG(log_.error()) << "Could not fetch ledger diff: " << res.error() << "; ledger = " << ledgerSequence;
                return {};
            }

            auto const& results = res.value();
            if (not results) {
                LOG(log_.error()) << "Could not fetch ledger diff - no rows; ledger = " << ledgerSequence;
                return {};
            }

            std::vector<ripple::uint256> resultKeys;
            for (auto [key] : extract<ripple::uint256>(results))
                resultKeys.push_back(key);

            return resultKeys;
        });

        // one of the above errors must have happened
        if (keys.empty())
            return {};

        LOG(log_.debug()) << "Fetched " << keys.size() << " diff hashes from database in " << timeDiff
                          << " milliseconds";

        auto const objs = fetchLedgerObjects(keys, ledgerSequence, yield);
        std::vector<LedgerObject> results;
        results.reserve(keys.size());

        std::transform(
            std::cbegin(keys),
            std::cend(keys),
            std::cbegin(objs),
            std::back_inserter(results),
            [](auto const& key, auto const& obj) { return LedgerObject{key, obj}; }
        );

        return results;
    }

    std::optional<std::string>
    fetchMigratorStatus(std::string const& migratorName, boost::asio::yield_context yield) const override
    {
        auto const res = executor_.read(yield, schema_->selectMigratorStatus, Text(migratorName));
        if (not res) {
            LOG(log_.error()) << "Could not fetch migrator status: " << res.error();
            return {};
        }

        auto const& results = res.value();
        if (not results) {
            return {};
        }

        for (auto [statusString] : extract<std::string>(results))
            return statusString;

        return {};
    }

    std::expected<std::vector<std::pair<boost::uuids::uuid, std::string>>, std::string>
    fetchClioNodesData(boost::asio::yield_context yield) const override
    {
        auto const readResult = executor_.read(yield, schema_->selectClioNodesData);
        if (not readResult)
            return std::unexpected{readResult.error().message()};

        std::vector<std::pair<boost::uuids::uuid, std::string>> result;

        for (auto [uuid, message] : extract<boost::uuids::uuid, std::string>(*readResult)) {
            result.emplace_back(uuid, std::move(message));
        }

        return result;
    }

    void
    doWriteLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob) override
    {
        LOG(log_.trace()) << " Writing ledger object " << key.size() << ":" << seq << " [" << blob.size() << " bytes]";

        if (range_)
            executor_.write(schema_->insertDiff, seq, key);

        executor_.write(schema_->insertObject, std::move(key), seq, std::move(blob));
    }

    void
    writeSuccessor(std::string&& key, std::uint32_t const seq, std::string&& successor) override
    {
        LOG(log_.trace()) << "Writing successor. key = " << key.size() << " bytes. "
                          << " seq = " << std::to_string(seq) << " successor = " << successor.size() << " bytes.";
        ASSERT(!key.empty(), "Key must not be empty");
        ASSERT(!successor.empty(), "Successor must not be empty");

        executor_.write(schema_->insertSuccessor, std::move(key), seq, std::move(successor));
    }

    void
    writeAccountTransactions(std::vector<AccountTransactionsData> data) override
    {
        std::vector<Statement> statements;
        statements.reserve(data.size() * 10); // assume 10 transactions avg

        for (auto& record : data) {
            std::ranges::transform(record.accounts, std::back_inserter(statements), [this, &record](auto&& account) {
                return schema_->insertAccountTx.bind(
                    std::forward<decltype(account)>(account),
                    std::make_tuple(record.ledgerSequence, record.transactionIndex),
                    record.txHash
                );
            });
        }

        executor_.write(std::move(statements));
    }

    void
    writeAccountTransaction(AccountTransactionsData record) override
    {
        std::vector<Statement> statements;
        statements.reserve(record.accounts.size());

        std::ranges::transform(record.accounts, std::back_inserter(statements), [this, &record](auto&& account) {
            return schema_->insertAccountTx.bind(
                std::forward<decltype(account)>(account),
                std::make_tuple(record.ledgerSequence, record.transactionIndex),
                record.txHash
            );
        });

        executor_.write(std::move(statements));
    }

    void
    writeNFTTransactions(std::vector<NFTTransactionsData> const& data) override
    {
        std::vector<Statement> statements;
        statements.reserve(data.size());

        std::ranges::transform(data, std::back_inserter(statements), [this](auto const& record) {
            return schema_->insertNFTTx.bind(
                record.tokenID, std::make_tuple(record.ledgerSequence, record.transactionIndex), record.txHash
            );
        });

        executor_.write(std::move(statements));
    }

    void
    writeTransaction(
        std::string&& hash,
        std::uint32_t const seq,
        std::uint32_t const date,
        std::string&& transaction,
        std::string&& metadata
    ) override
    {
        LOG(log_.trace()) << "Writing txn to database";

        executor_.write(schema_->insertLedgerTransaction, seq, hash);
        executor_.write(
            schema_->insertTransaction, std::move(hash), seq, date, std::move(transaction), std::move(metadata)
        );
    }

    void
    writeNFTs(std::vector<NFTsData> const& data) override
    {
        std::vector<Statement> statements;
        statements.reserve(data.size() * 3);

        for (NFTsData const& record : data) {
            if (!record.onlyUriChanged) {
                statements.push_back(
                    schema_->insertNFT.bind(record.tokenID, record.ledgerSequence, record.owner, record.isBurned)
                );

                // If `uri` is set (and it can be set to an empty uri), we know this
                // is a net-new NFT. That is, this NFT has not been seen before by
                // us _OR_ it is in the extreme edge case of a re-minted NFT ID with
                // the same NFT ID as an already-burned token. In this case, we need
                // to record the URI and link to the issuer_nf_tokens table.
                if (record.uri) {
                    statements.push_back(schema_->insertIssuerNFT.bind(
                        ripple::nft::getIssuer(record.tokenID),
                        static_cast<uint32_t>(ripple::nft::getTaxon(record.tokenID)),
                        record.tokenID
                    ));
                    statements.push_back(
                        schema_->insertNFTURI.bind(record.tokenID, record.ledgerSequence, record.uri.value())
                    );
                }
            } else {
                // only uri changed, we update the uri table only
                statements.push_back(
                    schema_->insertNFTURI.bind(record.tokenID, record.ledgerSequence, record.uri.value())
                );
            }
        }

        executor_.writeEach(std::move(statements));
    }

    void
    writeMPTHolders(std::vector<MPTHolderData> const& data) override
    {
        std::vector<Statement> statements;
        statements.reserve(data.size());
        for (auto [mptId, holder] : data)
            statements.push_back(schema_->insertMPTHolder.bind(mptId, holder));

        executor_.write(std::move(statements));
    }

    void
    startWrites() const override
    {
        // Note: no-op in original implementation too.
        // probably was used in PG to start a transaction or smth.
    }

    void
    writeMigratorStatus(std::string const& migratorName, std::string const& status) override
    {
        executor_.writeSync(
            schema_->insertMigratorStatus, data::cassandra::Text{migratorName}, data::cassandra::Text(status)
        );
    }

    void
    writeNodeMessage(boost::uuids::uuid const& uuid, std::string message) override
    {
        executor_.writeSync(schema_->updateClioNodeMessage, data::cassandra::Text{std::move(message)}, uuid);
    }

    bool
    isTooBusy() const override
    {
        return executor_.isTooBusy();
    }

    boost::json::object
    stats() const override
    {
        return executor_.stats();
    }

protected:
    /**
     * @brief Executes statements and tries to write to DB
     *
     * @param statement statement to execute
     * @return true if successful, false if it fails
     */
    bool
    executeSyncUpdate(Statement statement)
    {
        auto const res = executor_.writeSync(statement);
        auto maybeSuccess = res->template get<bool>();
        if (not maybeSuccess) {
            LOG(log_.error()) << "executeSyncUpdate - error getting result - no row";
            return false;
        }

        if (not maybeSuccess.value()) {
            LOG(log_.warn()) << "Update failed. Checking if DB state is what we expect";

            // error may indicate that another writer wrote something.
            // in this case let's just compare the current state of things
            // against what we were trying to write in the first place and
            // use that as the source of truth for the result.
            auto rng = hardFetchLedgerRangeNoThrow();
            return rng && rng->maxSequence == ledgerSequence_;
        }

        return true;
    }
};

} // namespace data::cassandra
@@ -1,178 +0,0 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2025, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include "data/cassandra/Concepts.hpp"
#include "data/cassandra/Handle.hpp"
#include "data/cassandra/Schema.hpp"
#include "data/cassandra/SettingsProvider.hpp"
#include "data/cassandra/Types.hpp"
#include "util/log/Logger.hpp"

#include <boost/json/string.hpp>
#include <fmt/compile.h>

#include <functional>
#include <memory>

namespace data::cassandra {

/**
 * @brief Manages the DB schema and provides access to prepared statements.
 */
template <SomeSettingsProvider SettingsProviderType>
class CassandraSchema : public Schema<SettingsProvider> {
    using Schema::Schema;

public:
    /**
     * @brief Construct a new Cassandra Schema object
     *
     * @param settingsProvider The settings provider
     */
    struct CassandraStatements : public Schema<SettingsProvider>::Statements {
        using Schema<SettingsProvider>::Statements::Statements;

        //
        // Update (and "delete") queries
        //
|
||||
PreparedStatement updateLedgerRange = [this]() {
|
||||
return handle_.get().prepare(
|
||||
fmt::format(
|
||||
R"(
|
||||
UPDATE {}
|
||||
SET sequence = ?
|
||||
WHERE is_latest = ?
|
||||
IF sequence IN (?, null)
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "ledger_range")
|
||||
)
|
||||
);
|
||||
}();
|
||||
|
||||
//
|
||||
// Select queries
|
||||
//
|
||||
|
||||
PreparedStatement selectNFTIDsByIssuer = [this]() {
|
||||
return handle_.get().prepare(
|
||||
fmt::format(
|
||||
R"(
|
||||
SELECT token_id
|
||||
FROM {}
|
||||
WHERE issuer = ?
|
||||
AND (taxon, token_id) > ?
|
||||
ORDER BY taxon ASC, token_id ASC
|
||||
LIMIT ?
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")
|
||||
)
|
||||
);
|
||||
}();
|
||||
|
||||
PreparedStatement selectAccountFromBeginning = [this]() {
|
||||
return handle_.get().prepare(
|
||||
fmt::format(
|
||||
R"(
|
||||
SELECT account
|
||||
FROM {}
|
||||
WHERE token(account) > 0
|
||||
PER PARTITION LIMIT 1
|
||||
LIMIT ?
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "account_tx")
|
||||
)
|
||||
);
|
||||
}();
|
||||
|
||||
PreparedStatement selectAccountFromToken = [this]() {
|
||||
return handle_.get().prepare(
|
||||
fmt::format(
|
||||
R"(
|
||||
SELECT account
|
||||
FROM {}
|
||||
WHERE token(account) > token(?)
|
||||
PER PARTITION LIMIT 1
|
||||
LIMIT ?
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "account_tx")
|
||||
)
|
||||
);
|
||||
}();
|
||||
|
||||
PreparedStatement selectLedgerPageKeys = [this]() {
|
||||
return handle_.get().prepare(
|
||||
fmt::format(
|
||||
R"(
|
||||
SELECT key
|
||||
FROM {}
|
||||
WHERE TOKEN(key) >= ?
|
||||
AND sequence <= ?
|
||||
PER PARTITION LIMIT 1
|
||||
LIMIT ?
|
||||
ALLOW FILTERING
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "objects")
|
||||
)
|
||||
);
|
||||
}();
|
||||
|
||||
PreparedStatement selectLedgerPage = [this]() {
|
||||
return handle_.get().prepare(
|
||||
fmt::format(
|
||||
R"(
|
||||
SELECT object, key
|
||||
FROM {}
|
||||
WHERE TOKEN(key) >= ?
|
||||
AND sequence <= ?
|
||||
PER PARTITION LIMIT 1
|
||||
LIMIT ?
|
||||
ALLOW FILTERING
|
||||
)",
|
||||
qualifiedTableName(settingsProvider_.get(), "objects")
|
||||
)
|
||||
);
|
||||
}();
|
||||
};
|
||||
|
||||
void
|
||||
prepareStatements(Handle const& handle) override
|
||||
{
|
||||
LOG(log_.info()) << "Preparing cassandra statements";
|
||||
statements_ = std::make_unique<CassandraStatements>(settingsProvider_, handle);
|
||||
LOG(log_.info()) << "Finished preparing statements";
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Provides access to statements.
|
||||
*
|
||||
* @return The statements
|
||||
*/
|
||||
std::unique_ptr<CassandraStatements> const&
|
||||
operator->() const
|
||||
{
|
||||
return statements_;
|
||||
}
|
||||
|
||||
private:
|
||||
std::unique_ptr<CassandraStatements> statements_{nullptr};
|
||||
};
|
||||
|
||||
} // namespace data::cassandra
|
||||
@@ -1,140 +0,0 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2025, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include "data/cassandra/Concepts.hpp"
#include "data/cassandra/Handle.hpp"
#include "data/cassandra/Schema.hpp"
#include "data/cassandra/SettingsProvider.hpp"
#include "data/cassandra/Types.hpp"
#include "util/log/Logger.hpp"

#include <boost/json/string.hpp>
#include <fmt/compile.h>

#include <functional>
#include <memory>

namespace data::cassandra {

/**
 * @brief Manages the DB schema and provides access to prepared statements.
 */
template <SomeSettingsProvider SettingsProviderType>
class KeyspaceSchema : public Schema<SettingsProvider> {
public:
    using Schema::Schema;

    /**
     * @brief Prepared statements specific to AWS Keyspaces.
     */
    struct KeyspaceStatements : public Schema<SettingsProvider>::Statements {
        using Schema<SettingsProvider>::Statements::Statements;

        //
        // Insert queries
        //
        PreparedStatement insertLedgerRange = [this]() {
            return handle_.get().prepare(
                fmt::format(
                    R"(
                    INSERT INTO {} (is_latest, sequence) VALUES (?, ?) IF NOT EXISTS
                    )",
                    qualifiedTableName(settingsProvider_.get(), "ledger_range")
                )
            );
        }();

        //
        // Update (and "delete") queries
        //
        PreparedStatement updateLedgerRange = [this]() {
            return handle_.get().prepare(
                fmt::format(
                    R"(
                    UPDATE {}
                    SET sequence = ?
                    WHERE is_latest = ?
                    IF sequence = ?
                    )",
                    qualifiedTableName(settingsProvider_.get(), "ledger_range")
                )
            );
        }();

        PreparedStatement selectLedgerRange = [this]() {
            return handle_.get().prepare(
                fmt::format(
                    R"(
                    SELECT sequence
                    FROM {}
                    WHERE is_latest in (True, False)
                    )",
                    qualifiedTableName(settingsProvider_.get(), "ledger_range")
                )
            );
        }();

        //
        // Select queries
        //
        PreparedStatement selectNFTsAfterTaxonKeyspaces = [this]() {
            return handle_.get().prepare(
                fmt::format(
                    R"(
                    SELECT token_id
                    FROM {}
                    WHERE issuer = ?
                    AND taxon > ?
                    ORDER BY taxon ASC, token_id ASC
                    LIMIT ?
                    )",
                    qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")
                )
            );
        }();
    };

    void
    prepareStatements(Handle const& handle) override
    {
        LOG(log_.info()) << "Preparing aws keyspace statements";
        statements_ = std::make_unique<KeyspaceStatements>(settingsProvider_, handle);
        LOG(log_.info()) << "Finished preparing statements";
    }

    /**
     * @brief Provides access to statements.
     *
     * @return The statements
     */
    std::unique_ptr<KeyspaceStatements> const&
    operator->() const
    {
        return statements_;
    }

private:
    std::unique_ptr<KeyspaceStatements> statements_{nullptr};
};

} // namespace data::cassandra
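A side note on the two schema variants above: the Cassandra schema guards the `ledger_range` update with a single conditional write (`IF sequence IN (?, null)`), while the Keyspace schema splits the same compare-and-set into `INSERT ... IF NOT EXISTS` plus `UPDATE ... IF sequence = ?` (presumably because AWS Keyspaces does not accept the `IN (?, null)` form; the diff itself does not state the reason). A minimal sketch of how a writer could drive the Keyspace pair, assuming the `executeSyncUpdate` helper shown earlier and a `bind` helper on prepared statements; the function name `advanceLedgerRange` is illustrative, not from this change:

```cpp
// Sketch only: compare-and-set the latest ledger sequence on AWS Keyspaces.
// Assumes schema_->insertLedgerRange / schema_->updateLedgerRange as prepared
// above, and that executeSyncUpdate returns the LWT [applied] flag.
bool
advanceLedgerRange(uint32_t oldSeq, uint32_t newSeq)
{
    // First write ever: seed the row. IF NOT EXISTS makes this fail
    // cleanly (applied == false) when the row already exists.
    if (executeSyncUpdate(schema_->insertLedgerRange.bind(true, newSeq)))
        return true;

    // Normal case: only advance if the stored sequence is the one we expect.
    return executeSyncUpdate(schema_->updateLedgerRange.bind(newSeq, true, oldSeq));
}
```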
@@ -24,10 +24,10 @@
#include "data/cassandra/Types.hpp"
#include "util/log/Logger.hpp"

#include <boost/json/string.hpp>
#include <fmt/compile.h>

#include <functional>
#include <memory>
#include <string>
#include <string_view>
#include <vector>
@@ -53,15 +53,12 @@ template <SomeSettingsProvider SettingsProviderType>
 */
template <SomeSettingsProvider SettingsProviderType>
class Schema {
protected:
    util::Logger log_{"Backend"};
    std::reference_wrapper<SettingsProviderType const> settingsProvider_;

public:
    virtual ~Schema() = default;

    /**
     * @brief Shared Schemas between all Schema classes (Cassandra and Keyspace)
     * @brief Construct a new Schema object
     *
     * @param settingsProvider The settings provider
     */
@@ -337,7 +334,6 @@ public:
     * @brief Prepared statements holder.
     */
    class Statements {
    protected:
        std::reference_wrapper<SettingsProviderType const> settingsProvider_;
        std::reference_wrapper<Handle const> handle_;

@@ -530,6 +526,20 @@ public:
        // Update (and "delete") queries
        //

        PreparedStatement updateLedgerRange = [this]() {
            return handle_.get().prepare(
                fmt::format(
                    R"(
                    UPDATE {}
                    SET sequence = ?
                    WHERE is_latest = ?
                    IF sequence IN (?, null)
                    )",
                    qualifiedTableName(settingsProvider_.get(), "ledger_range")
                )
            );
        }();

        PreparedStatement deleteLedgerRange = [this]() {
            return handle_.get().prepare(
                fmt::format(
@@ -644,6 +654,40 @@ public:
            );
        }();

        PreparedStatement selectLedgerPageKeys = [this]() {
            return handle_.get().prepare(
                fmt::format(
                    R"(
                    SELECT key
                    FROM {}
                    WHERE TOKEN(key) >= ?
                    AND sequence <= ?
                    PER PARTITION LIMIT 1
                    LIMIT ?
                    ALLOW FILTERING
                    )",
                    qualifiedTableName(settingsProvider_.get(), "objects")
                )
            );
        }();

        PreparedStatement selectLedgerPage = [this]() {
            return handle_.get().prepare(
                fmt::format(
                    R"(
                    SELECT object, key
                    FROM {}
                    WHERE TOKEN(key) >= ?
                    AND sequence <= ?
                    PER PARTITION LIMIT 1
                    LIMIT ?
                    ALLOW FILTERING
                    )",
                    qualifiedTableName(settingsProvider_.get(), "objects")
                )
            );
        }();

        PreparedStatement getToken = [this]() {
            return handle_.get().prepare(
                fmt::format(
@@ -673,6 +717,36 @@ public:
            );
        }();

        PreparedStatement selectAccountFromBeginning = [this]() {
            return handle_.get().prepare(
                fmt::format(
                    R"(
                    SELECT account
                    FROM {}
                    WHERE token(account) > 0
                    PER PARTITION LIMIT 1
                    LIMIT ?
                    )",
                    qualifiedTableName(settingsProvider_.get(), "account_tx")
                )
            );
        }();

        PreparedStatement selectAccountFromToken = [this]() {
            return handle_.get().prepare(
                fmt::format(
                    R"(
                    SELECT account
                    FROM {}
                    WHERE token(account) > token(?)
                    PER PARTITION LIMIT 1
                    LIMIT ?
                    )",
                    qualifiedTableName(settingsProvider_.get(), "account_tx")
                )
            );
        }();

        PreparedStatement selectAccountTxForward = [this]() {
            return handle_.get().prepare(
                fmt::format(
@@ -753,6 +827,22 @@ public:
            );
        }();

        PreparedStatement selectNFTIDsByIssuer = [this]() {
            return handle_.get().prepare(
                fmt::format(
                    R"(
                    SELECT token_id
                    FROM {}
                    WHERE issuer = ?
                    AND (taxon, token_id) > ?
                    ORDER BY taxon ASC, token_id ASC
                    LIMIT ?
                    )",
                    qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")
                )
            );
        }();

        PreparedStatement selectNFTIDsByIssuerTaxon = [this]() {
            return handle_.get().prepare(
                fmt::format(
@@ -870,8 +960,27 @@ public:
     *
     * @param handle The handle to the DB
     */
    virtual void
    prepareStatements(Handle const& handle) = 0;
    void
    prepareStatements(Handle const& handle)
    {
        LOG(log_.info()) << "Preparing cassandra statements";
        statements_ = std::make_unique<Statements>(settingsProvider_, handle);
        LOG(log_.info()) << "Finished preparing statements";
    }

    /**
     * @brief Provides access to statements.
     *
     * @return The statements
     */
    std::unique_ptr<Statements> const&
    operator->() const
    {
        return statements_;
    }

private:
    std::unique_ptr<Statements> statements_{nullptr};
};

} // namespace data::cassandra
@@ -97,7 +97,6 @@ SettingsProvider::parseSettings() const
    settings.coreConnectionsPerHost = config_.get<uint32_t>("core_connections_per_host");
    settings.queueSizeIO = config_.maybeValue<uint32_t>("queue_size_io");
    settings.writeBatchSize = config_.get<std::size_t>("write_batch_size");
    settings.provider = impl::providerFromString(config_.get<std::string>("provider"));

    if (config_.getValueView("connect_timeout").hasValue()) {
        auto const connectTimeoutSecond = config_.get<uint32_t>("connect_timeout");
@@ -36,18 +36,9 @@ constexpr auto kBATCH_DELETER = [](CassBatch* ptr) { cass_batch_free(ptr); };

namespace data::cassandra::impl {

/*
 * There are two main batch types for Cassandra statements:
 * LOGGED: Ensures all updates in the batch succeed together, or none do.
 *         Use this for critical, related changes (e.g., for the same user), but it is slower.
 *
 * UNLOGGED: For performance. Sends many separate updates in one network trip to be fast.
 *           Use this for bulk-loading unrelated data, but note there is NO all-or-nothing guarantee.
 *
 * More info here: https://docs.datastax.com/en/developer/cpp-driver-dse/1.10/features/basics/batches/index.html
 */
// TODO: Use an appropriate value instead of CASS_BATCH_TYPE_LOGGED for different use cases
Batch::Batch(std::vector<Statement> const& statements)
    : ManagedObject{cass_batch_new(CASS_BATCH_TYPE_UNLOGGED), kBATCH_DELETER}
    : ManagedObject{cass_batch_new(CASS_BATCH_TYPE_LOGGED), kBATCH_DELETER}
{
    cass_batch_set_is_idempotent(*this, cass_true);

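To make the LOGGED/UNLOGGED trade-off concrete, here is a minimal sketch using the DataStax C++ driver directly. It assumes an already-connected `CassSession*` and two bound `CassStatement*` handles; it is illustrative only and not taken from this change:

```cpp
#include <cassandra.h>

// Sketch: execute the same two statements as either batch type.
// type is CASS_BATCH_TYPE_LOGGED (atomic, slower) or
// CASS_BATCH_TYPE_UNLOGGED (fast, no all-or-nothing guarantee).
CassError
executeAsBatch(CassSession* session, CassStatement* stmt1, CassStatement* stmt2, CassBatchType type)
{
    CassBatch* batch = cass_batch_new(type);
    cass_batch_add_statement(batch, stmt1);
    cass_batch_add_statement(batch, stmt2);

    CassFuture* future = cass_session_execute_batch(session, batch);
    CassError rc = cass_future_error_code(future);  // waits for completion

    cass_future_free(future);
    cass_batch_free(batch);
    return rc;
}
```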
@@ -60,17 +60,6 @@ Cluster::Cluster(Settings const& settings) : ManagedObject{cass_cluster_new(), k
    cass_cluster_set_connect_timeout(*this, settings.connectionTimeout.count());
    cass_cluster_set_request_timeout(*this, settings.requestTimeout.count());

    // TODO: AWS keyspace reads should be local_one to save cost
    if (settings.provider == cassandra::impl::Provider::Keyspace) {
        if (auto const rc = cass_cluster_set_consistency(*this, CASS_CONSISTENCY_LOCAL_QUORUM); rc != CASS_OK) {
            throw std::runtime_error(fmt::format("Error setting keyspace consistency: {}", cass_error_desc(rc)));
        }
    } else {
        if (auto const rc = cass_cluster_set_consistency(*this, CASS_CONSISTENCY_QUORUM); rc != CASS_OK) {
            throw std::runtime_error(fmt::format("Error setting cassandra consistency: {}", cass_error_desc(rc)));
        }
    }

    if (auto const rc = cass_cluster_set_core_connections_per_host(*this, settings.coreConnectionsPerHost);
        rc != CASS_OK) {
        throw std::runtime_error(fmt::format("Could not set core connections per host: {}", cass_error_desc(rc)));

@@ -20,7 +20,6 @@
#pragma once

#include "data/cassandra/impl/ManagedObject.hpp"
#include "util/Assert.hpp"
#include "util/log/Logger.hpp"

#include <cassandra.h>
@@ -36,18 +35,6 @@

namespace data::cassandra::impl {

enum class Provider { Cassandra, Keyspace };

inline Provider
providerFromString(std::string const& provider)
{
    ASSERT(
        provider == "cassandra" || provider == "aws_keyspace",
        "Provider type must be one of 'cassandra' or 'aws_keyspace'"
    );
    return provider == "cassandra" ? Provider::Cassandra : Provider::Keyspace;
}

// TODO: move Settings to public interface, not impl

/**
@@ -58,7 +45,6 @@ struct Settings {
    static constexpr uint32_t kDEFAULT_MAX_WRITE_REQUESTS_OUTSTANDING = 10'000;
    static constexpr uint32_t kDEFAULT_MAX_READ_REQUESTS_OUTSTANDING = 100'000;
    static constexpr std::size_t kDEFAULT_BATCH_SIZE = 20;
    static constexpr Provider kDEFAULT_PROVIDER = Provider::Cassandra;

    /**
     * @brief Represents the configuration of contact points for cassandra.
@@ -97,14 +83,11 @@ struct Settings {
    uint32_t maxReadRequestsOutstanding = kDEFAULT_MAX_READ_REQUESTS_OUTSTANDING;

    /** @brief The number of connections per host to always have active */
    uint32_t coreConnectionsPerHost = 3u;
    uint32_t coreConnectionsPerHost = 1u;

    /** @brief Size of batches when writing */
    std::size_t writeBatchSize = kDEFAULT_BATCH_SIZE;

    /** @brief Provider indicating whether we are using ScyllaDB/Cassandra or AWS Keyspaces */
    Provider provider = kDEFAULT_PROVIDER;

    /** @brief Size of the IO queue */
    std::optional<uint32_t> queueSizeIO = std::nullopt; // NOLINT(readability-redundant-member-init)


@@ -58,16 +58,14 @@ public:
    explicit Statement(std::string_view query, Args&&... args)
        : ManagedObject{cass_statement_new_n(query.data(), query.size(), sizeof...(args)), kDELETER}
    {
        // TODO: figure out how to set consistency level in config
        // NOTE: Keyspace doesn't support QUORUM at write level
        // cass_statement_set_consistency(*this, CASS_CONSISTENCY_LOCAL_QUORUM);
        cass_statement_set_consistency(*this, CASS_CONSISTENCY_QUORUM);
        cass_statement_set_is_idempotent(*this, cass_true);
        bind<Args...>(std::forward<Args>(args)...);
    }

    /* implicit */ Statement(CassStatement* ptr) : ManagedObject{ptr, kDELETER}
    {
        // cass_statement_set_consistency(*this, CASS_CONSISTENCY_LOCAL_QUORUM);
        cass_statement_set_consistency(*this, CASS_CONSISTENCY_QUORUM);
        cass_statement_set_is_idempotent(*this, cass_true);
    }


@@ -1,7 +1,5 @@
# ETL subsystem

@page "etl" ETL subsystem

A single Clio node has one or more ETL sources specified in the config file. Clio subscribes to the `ledgers` stream of each of the ETL sources. The stream sends a message whenever a new ledger is validated.

Upon receiving a message on the stream, Clio fetches the data associated with the newly validated ledger from one of the ETL sources. The fetch is performed via a gRPC request called `GetLedger`. This request returns the ledger header, transactions and metadata blobs, and every ledger object added/modified/deleted as part of this ledger. The ETL subsystem then writes all of this data to the databases, and moves on to the next ledger.
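The shape of that `GetLedger` request can be seen in the `GrpcSource` changes later in this diff; the following condensed sketch of the call assumes an already-created `XRPLedgerAPIService::Stub` and omits the keepalive/channel setup shown below:

```cpp
org::xrpl::rpc::v1::GetLedgerRequest request;
org::xrpl::rpc::v1::GetLedgerResponse response;
grpc::ClientContext context;

// Bound the call so a stalled ETL source cannot block ingestion forever.
context.set_deadline(std::chrono::system_clock::now() + std::chrono::seconds(30));

request.mutable_ledger()->set_sequence(sequence);  // which validated ledger to fetch
request.set_transactions(true);                    // include transactions and metadata
request.set_expand(true);                          // expand blobs instead of returning hashes

grpc::Status const status = stub->GetLedger(&context, request, &response);
```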
@@ -27,7 +27,6 @@
#include <boost/asio/io_context.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <fmt/format.h>
#include <grpc/grpc.h>
#include <grpcpp/client_context.h>
#include <grpcpp/security/credentials.h>
#include <grpcpp/support/channel_arguments.h>
@@ -35,7 +34,6 @@
#include <org/xrpl/rpc/v1/get_ledger.pb.h>
#include <org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>

#include <chrono>
#include <cstddef>
#include <cstdint>
#include <exception>
@@ -54,25 +52,17 @@ GrpcSource::GrpcSource(std::string const& ip, std::string const& grpcPort, std::
    try {
        boost::asio::io_context ctx;
        boost::asio::ip::tcp::resolver resolver{ctx};

        auto const resolverResult = resolver.resolve(ip, grpcPort);
        if (resolverResult.empty())
        if (resolverResult.empty()) {
            throw std::runtime_error("Failed to resolve " + ip + ":" + grpcPort);

        }
        std::stringstream ss;
        ss << resolverResult.begin()->endpoint();

        grpc::ChannelArguments chArgs;
        chArgs.SetMaxReceiveMessageSize(-1);
        chArgs.SetInt(GRPC_ARG_KEEPALIVE_TIME_MS, kKEEPALIVE_PING_INTERVAL_MS);
        chArgs.SetInt(GRPC_ARG_KEEPALIVE_TIMEOUT_MS, kKEEPALIVE_TIMEOUT_MS);
        chArgs.SetInt(GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS, static_cast<int>(kKEEPALIVE_PERMIT_WITHOUT_CALLS));
        chArgs.SetInt(GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA, kMAX_PINGS_WITHOUT_DATA);

        stub_ = org::xrpl::rpc::v1::XRPLedgerAPIService::NewStub(
            grpc::CreateCustomChannel(ss.str(), grpc::InsecureChannelCredentials(), chArgs)
        );

        LOG(log_.debug()) << "Made stub for remote.";
    } catch (std::exception const& e) {
        LOG(log_.warn()) << "Exception while creating stub: " << e.what() << ".";
@@ -86,11 +76,10 @@ GrpcSource::fetchLedger(uint32_t sequence, bool getObjects, bool getObjectNeighb
    if (!stub_)
        return {{grpc::StatusCode::INTERNAL, "No Stub"}, response};

    // Ledger header with txns and metadata
    org::xrpl::rpc::v1::GetLedgerRequest request;
    grpc::ClientContext context;

    context.set_deadline(std::chrono::system_clock::now() + kDEADLINE); // Prevent indefinite blocking

    request.mutable_ledger()->set_sequence(sequence);
    request.set_transactions(true);
    request.set_expand(true);

@@ -26,7 +26,6 @@
#include <org/xrpl/rpc/v1/get_ledger.pb.h>
#include <xrpl/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>

#include <chrono>
#include <cstdint>
#include <memory>
#include <string>
@@ -39,12 +38,6 @@ class GrpcSource {
    std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub> stub_;
    std::shared_ptr<BackendInterface> backend_;

    static constexpr auto kKEEPALIVE_PING_INTERVAL_MS = 10000;
    static constexpr auto kKEEPALIVE_TIMEOUT_MS = 5000;
    static constexpr auto kKEEPALIVE_PERMIT_WITHOUT_CALLS = true; // Allow keepalive pings when no calls
    static constexpr auto kMAX_PINGS_WITHOUT_DATA = 0; // No limit
    static constexpr auto kDEADLINE = std::chrono::seconds(30);

public:
    GrpcSource(std::string const& ip, std::string const& grpcPort, std::shared_ptr<BackendInterface> backend);


@@ -32,12 +32,6 @@ struct AmendmentBlockHandlerInterface {
     */
    virtual void
    notifyAmendmentBlocked() = 0;

    /**
     * @brief Stop the block handler from repeatedly executing
     */
    virtual void
    stop() = 0;
};

} // namespace etlng
@@ -25,7 +25,6 @@

#include <chrono>
#include <functional>
#include <optional>
#include <utility>

namespace etlng::impl {
@@ -46,11 +45,6 @@ AmendmentBlockHandler::AmendmentBlockHandler(
{
}

AmendmentBlockHandler::~AmendmentBlockHandler()
{
    stop();
}

void
AmendmentBlockHandler::notifyAmendmentBlocked()
{
@@ -59,13 +53,4 @@ AmendmentBlockHandler::notifyAmendmentBlocked()
    operation_.emplace(ctx_.executeRepeatedly(interval_, action_));
}

void
AmendmentBlockHandler::stop()
{
    if (operation_.has_value()) {
        operation_->abort();
        operation_.reset();
    }
}

} // namespace etlng::impl
@@ -56,10 +56,11 @@ public:
        ActionType action = kDEFAULT_AMENDMENT_BLOCK_ACTION
    );

    ~AmendmentBlockHandler() override;

    void
    stop() override;
    ~AmendmentBlockHandler() override
    {
        if (operation_.has_value())
            operation_.value().abort();
    }

    void
    notifyAmendmentBlocked() override;
@@ -28,7 +28,6 @@

#include <boost/asio/spawn.hpp>
#include <fmt/format.h>
#include <grpc/grpc.h>
#include <grpcpp/client_context.h>
#include <grpcpp/security/credentials.h>
#include <grpcpp/support/channel_arguments.h>
@@ -37,7 +36,6 @@
#include <org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>

#include <atomic>
#include <chrono>
#include <cstddef>
#include <cstdint>
#include <exception>
@@ -65,18 +63,13 @@ resolve(std::string const& ip, std::string const& port)

namespace etlng::impl {

GrpcSource::GrpcSource(std::string const& ip, std::string const& grpcPort, std::chrono::system_clock::duration deadline)
GrpcSource::GrpcSource(std::string const& ip, std::string const& grpcPort)
    : log_(fmt::format("ETL_Grpc[{}:{}]", ip, grpcPort))
    , initialLoadShouldStop_(std::make_unique<std::atomic_bool>(false))
    , deadline_{deadline}
{
    try {
        grpc::ChannelArguments chArgs;
        chArgs.SetMaxReceiveMessageSize(-1);
        chArgs.SetInt(GRPC_ARG_KEEPALIVE_TIME_MS, kKEEPALIVE_PING_INTERVAL_MS);
        chArgs.SetInt(GRPC_ARG_KEEPALIVE_TIMEOUT_MS, kKEEPALIVE_TIMEOUT_MS);
        chArgs.SetInt(GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS, static_cast<int>(kKEEPALIVE_PERMIT_WITHOUT_CALLS));
        chArgs.SetInt(GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA, kMAX_PINGS_WITHOUT_DATA);

        stub_ = org::xrpl::rpc::v1::XRPLedgerAPIService::NewStub(
            grpc::CreateCustomChannel(resolve(ip, grpcPort), grpc::InsecureChannelCredentials(), chArgs)
@@ -95,11 +88,10 @@ GrpcSource::fetchLedger(uint32_t sequence, bool getObjects, bool getObjectNeighb
    if (!stub_)
        return {{grpc::StatusCode::INTERNAL, "No Stub"}, response};

    // Ledger header with txns and metadata
    org::xrpl::rpc::v1::GetLedgerRequest request;
    grpc::ClientContext context;

    context.set_deadline(std::chrono::system_clock::now() + deadline_); // Prevent indefinite blocking

    request.mutable_ledger()->set_sequence(sequence);
    request.set_transactions(true);
    request.set_expand(true);

@@ -29,7 +29,6 @@
#include <xrpl/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>

#include <atomic>
#include <chrono>
#include <cstdint>
#include <memory>
#include <string>
@@ -41,20 +40,9 @@ class GrpcSource {
    util::Logger log_;
    std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub> stub_;
    std::unique_ptr<std::atomic_bool> initialLoadShouldStop_;
    std::chrono::system_clock::duration deadline_;

    static constexpr auto kKEEPALIVE_PING_INTERVAL_MS = 10000;
    static constexpr auto kKEEPALIVE_TIMEOUT_MS = 5000;
    static constexpr auto kKEEPALIVE_PERMIT_WITHOUT_CALLS = true; // Allow keepalive pings when no calls
    static constexpr auto kMAX_PINGS_WITHOUT_DATA = 0; // No limit
    static constexpr auto kDEADLINE = std::chrono::seconds(30);

public:
    GrpcSource(
        std::string const& ip,
        std::string const& grpcPort,
        std::chrono::system_clock::duration deadline = kDEADLINE
    );
    GrpcSource(std::string const& ip, std::string const& grpcPort);

    /**
     * @brief Fetch data for a specific ledger.

@@ -209,9 +209,8 @@ TransactionFeed::pub(
    rpc::insertDeliveredAmount(pubObj[JS(meta)].as_object(), tx, meta, txMeta.date);

    auto& txnPubobj = pubObj[txKey].as_object();
    auto& metaPubobj = pubObj[JS(meta)].as_object();
    rpc::insertDeliverMaxAlias(txnPubobj, version);
    rpc::insertMPTIssuanceID(txnPubobj, tx, metaPubobj, meta);
    rpc::insertMPTIssuanceID(txnPubobj, meta);

    Json::Value nftJson;
    ripple::RPC::insertNFTSyntheticInJson(nftJson, tx, *meta);

42
src/main/Mainpage.hpp
Normal file
@@ -0,0 +1,42 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

/**
 * @mainpage Clio API server
 *
 * @section intro Introduction
 *
 * Clio is an XRP Ledger API server optimized for RPC calls over WebSocket or JSON-RPC.
 *
 * It stores validated historical ledger and transaction data in a more space efficient format, and uses up to 4 times
 * less space than <A HREF="https://github.com/XRPLF/rippled">rippled</A>.
 *
 * Clio can be configured to store data in <A HREF="https://cassandra.apache.org/_/index.html">Apache Cassandra</A> or
 * <A HREF="https://www.scylladb.com/">ScyllaDB</A>, enabling scalable read throughput. Multiple Clio nodes can share
 * access to the same dataset, which allows for a highly available cluster of Clio nodes without the need for redundant
 * data storage or computation.
 *
 * @section Develop
 *
 * As you prepare to develop code for Clio, please be sure you are aware of our current
 * <A HREF="https://github.com/XRPLF/clio/blob/develop/CONTRIBUTING.md">Contribution guidelines</A>.
 *
 * Read [rpc/README.md](../rpc/README.md) carefully to know more about writing your own handlers for
 * Clio.
 */
@@ -1,7 +1,5 @@
# Clio Migration

@page "migration" Clio Migration

Clio maintains the off-chain data of the XRPL and multiple index tables to power complex queries. To simplify the creation of index tables, this migration framework handles database schema changes and facilitates the migration of historical data seamlessly.

## Command Line Usage
@@ -24,8 +24,6 @@ target_sources(
    handlers/AccountCurrencies.cpp
    handlers/AccountInfo.cpp
    handlers/AccountLines.cpp
    handlers/AccountMPTokenIssuances.cpp
    handlers/AccountMPTokens.cpp
    handlers/AccountNFTs.cpp
    handlers/AccountObjects.cpp
    handlers/AccountOffers.cpp
@@ -1,6 +1,4 @@
# RPC subsystem

@page "rpc" RPC subsystem
# RPC subsystem

The RPC subsystem is where the common framework for handling incoming JSON requests is implemented.

@@ -35,8 +35,6 @@ handledRpcs()
    "account_currencies",
    "account_info",
    "account_lines",
    "account_mptoken_issuances",
    "account_mptokens",
    "account_nfts",
    "account_objects",
    "account_offers",
@@ -34,6 +34,7 @@
#include "web/Context.hpp"

#include <boost/algorithm/string/case_conv.hpp>
#include <boost/algorithm/string/predicate.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/format/format_fwd.hpp>
#include <boost/format/free_funcs.hpp>
@@ -257,7 +258,7 @@ toExpandedJson(
    auto metaJson = toJson(*meta);
    insertDeliveredAmount(metaJson, txn, meta, blobs.date);
    insertDeliverMaxAlias(txnJson, apiVersion);
    insertMPTIssuanceID(txnJson, txn, metaJson, meta);
    insertMPTIssuanceID(txnJson, meta);

    if (nftEnabled == NFTokenjson::ENABLE) {
        Json::Value nftJson;
@@ -342,41 +343,36 @@ getMPTIssuanceID(std::shared_ptr<ripple::TxMeta const> const& meta)
/**
 * @brief Check if transaction has a new MPToken created
 *
 * @param txn The transaction object
 * @param meta The metadata object
 * @param txnJson The transaction Json
 * @param meta The metadata
 * @return true if the transaction can have a mpt_issuance_id
 */
static bool
canHaveMPTIssuanceID(std::shared_ptr<ripple::STTx const> const& txn, std::shared_ptr<ripple::TxMeta const> const& meta)
canHaveMPTIssuanceID(boost::json::object const& txnJson, std::shared_ptr<ripple::TxMeta const> const& meta)
{
    if (txn->getTxnType() != ripple::ttMPTOKEN_ISSUANCE_CREATE)
    if (txnJson.at(JS(TransactionType)).is_string() and
        not boost::iequals(txnJson.at(JS(TransactionType)).as_string(), JS(MPTokenIssuanceCreate)))
        return false;

    return (meta->getResultTER() == ripple::tesSUCCESS);
    if (meta->getResultTER() != ripple::tesSUCCESS)
        return false;

    return true;
}

bool
insertMPTIssuanceID(
    boost::json::object& txnJson,
    std::shared_ptr<ripple::STTx const> const& txn,
    boost::json::object& metaJson,
    std::shared_ptr<ripple::TxMeta const> const& meta
)
insertMPTIssuanceID(boost::json::object& txnJson, std::shared_ptr<ripple::TxMeta const> const& meta)
{
    if (!canHaveMPTIssuanceID(txn, meta))
    if (!canHaveMPTIssuanceID(txnJson, meta))
        return false;

    if (txnJson.contains(JS(TransactionType)) && txnJson.at(JS(TransactionType)).is_string() and
        txnJson.at(JS(TransactionType)).as_string() == JS(MPTokenIssuanceCreate))
        return false;

    auto const id = getMPTIssuanceID(meta);
    ASSERT(id.has_value(), "MPTIssuanceID must have value");

    // For MPTokenIssuanceCreate, add mpt_issuance_id to the meta JSON;
    // otherwise, add it to the txn JSON.
    if (txnJson.contains(JS(TransactionType)) && txnJson.at(JS(TransactionType)).is_string() and
        txnJson.at(JS(TransactionType)).as_string() == JS(MPTokenIssuanceCreate)) {
        metaJson[JS(mpt_issuance_id)] = ripple::to_string(*id);
    } else {
        txnJson[JS(mpt_issuance_id)] = ripple::to_string(*id);
    }
    txnJson[JS(mpt_issuance_id)] = ripple::to_string(*id);

    return true;
}

@@ -201,23 +201,15 @@ insertDeliveredAmount(

/**
 * @brief Add "mpt_issuance_id" into various MPTToken transaction json.
 * @note We add "mpt_issuance_id" into the meta part of MPTokenIssuanceCreate only. This is because the
 * mpt_issuance_id is generated only after one submits MPTokenIssuanceCreate, so there’s no way to know what the id is.
 * (rippled)
 * @note We exclude "mpt_issuance_id" for MPTokenIssuanceCreate only. This is because the mpt_issuance_id
 * is generated only after one submits MPTokenIssuanceCreate, so there’s no way to know what the id is. (rippled)
 *
 * @param txnJson The transaction Json object
 * @param txn The txn object
 * @param metaJson The metadata Json object
 * @param meta The metadata object
 * @return true if the "mpt_issuance_id" is added to either txnJson or metaJson object
 * @return true if the "mpt_issuance_id" is added to the txnJson JSON object
 */
bool
insertMPTIssuanceID(
    boost::json::object& txnJson,
    std::shared_ptr<ripple::STTx const> const& txn,
    boost::json::object& metaJson,
    std::shared_ptr<ripple::TxMeta const> const& meta
);
insertMPTIssuanceID(boost::json::object& txnJson, std::shared_ptr<ripple::TxMeta const> const& meta);

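A small usage sketch of the new single-object signature, assuming a transaction JSON already produced as in `toExpandedJson` above (variable names here are illustrative, not from this change):

```cpp
// Sketch: decorate a serialized transaction with its MPT issuance ID.
// txnJson: boost::json::object holding the transaction;
// meta:    std::shared_ptr<ripple::TxMeta const> for the same transaction.
bool const added = insertMPTIssuanceID(txnJson, meta);

if (added) {
    // txnJson now contains "mpt_issuance_id" for successful MPT transactions
    // other than MPTokenIssuanceCreate, which is excluded (see note above).
}
```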
/**
 * @brief Convert STBase object to JSON
@@ -31,8 +31,6 @@
#include "rpc/handlers/AccountCurrencies.hpp"
#include "rpc/handlers/AccountInfo.hpp"
#include "rpc/handlers/AccountLines.hpp"
#include "rpc/handlers/AccountMPTokenIssuances.hpp"
#include "rpc/handlers/AccountMPTokens.hpp"
#include "rpc/handlers/AccountNFTs.hpp"
#include "rpc/handlers/AccountObjects.hpp"
#include "rpc/handlers/AccountOffers.hpp"
@@ -87,9 +85,6 @@ ProductionHandlerProvider::ProductionHandlerProvider(
    {"account_currencies", {.handler = AccountCurrenciesHandler{backend}}},
    {"account_info", {.handler = AccountInfoHandler{backend, amendmentCenter}}},
    {"account_lines", {.handler = AccountLinesHandler{backend}}},
    {"account_mptoken_issuances",
     {.handler = AccountMPTokenIssuancesHandler{backend}, .isClioOnly = true}}, // clio only
    {"account_mptokens", {.handler = AccountMPTokensHandler{backend}, .isClioOnly = true}}, // clio only
    {"account_nfts", {.handler = AccountNFTsHandler{backend}}},
    {"account_objects", {.handler = AccountObjectsHandler{backend}}},
    {"account_offers", {.handler = AccountOffersHandler{backend}}},
@@ -1,237 +0,0 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2025, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include "rpc/handlers/AccountMPTokenIssuances.hpp"

#include "rpc/Errors.hpp"
#include "rpc/JS.hpp"
#include "rpc/RPCHelpers.hpp"
#include "rpc/common/Types.hpp"
#include "util/Assert.hpp"
#include "util/JsonUtils.hpp"

#include <boost/json/conversion.hpp>
#include <boost/json/object.hpp>
#include <boost/json/value.hpp>
#include <boost/json/value_to.hpp>
#include <xrpl/basics/strHex.h>
#include <xrpl/protocol/AccountID.h>
#include <xrpl/protocol/Indexes.h>
#include <xrpl/protocol/LedgerFormats.h>
#include <xrpl/protocol/LedgerHeader.h>
#include <xrpl/protocol/SField.h>
#include <xrpl/protocol/STAmount.h>
#include <xrpl/protocol/STLedgerEntry.h>
#include <xrpl/protocol/UintTypes.h>
#include <xrpl/protocol/jss.h>

#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <vector>

namespace rpc {

void
AccountMPTokenIssuancesHandler::addMPTokenIssuance(
    std::vector<MPTokenIssuanceResponse>& issuances,
    ripple::SLE const& sle,
    ripple::AccountID const& account
)
{
    MPTokenIssuanceResponse issuance;

    issuance.issuer = ripple::to_string(account);
    issuance.sequence = sle.getFieldU32(ripple::sfSequence);
    auto const flags = sle.getFieldU32(ripple::sfFlags);

    auto const setFlag = [&](std::optional<bool>& field, std::uint32_t mask) {
        if ((flags & mask) != 0u)
            field = true;
    };

    setFlag(issuance.mptLocked, ripple::lsfMPTLocked);
    setFlag(issuance.mptCanLock, ripple::lsfMPTCanLock);
    setFlag(issuance.mptRequireAuth, ripple::lsfMPTRequireAuth);
    setFlag(issuance.mptCanEscrow, ripple::lsfMPTCanEscrow);
    setFlag(issuance.mptCanTrade, ripple::lsfMPTCanTrade);
    setFlag(issuance.mptCanTransfer, ripple::lsfMPTCanTransfer);
    setFlag(issuance.mptCanClawback, ripple::lsfMPTCanClawback);

    if (sle.isFieldPresent(ripple::sfTransferFee))
        issuance.transferFee = sle.getFieldU16(ripple::sfTransferFee);

    if (sle.isFieldPresent(ripple::sfAssetScale))
        issuance.assetScale = sle.getFieldU8(ripple::sfAssetScale);

    if (sle.isFieldPresent(ripple::sfMaximumAmount))
        issuance.maximumAmount = sle.getFieldU64(ripple::sfMaximumAmount);

    if (sle.isFieldPresent(ripple::sfOutstandingAmount))
        issuance.outstandingAmount = sle.getFieldU64(ripple::sfOutstandingAmount);

    if (sle.isFieldPresent(ripple::sfLockedAmount))
        issuance.lockedAmount = sle.getFieldU64(ripple::sfLockedAmount);

    if (sle.isFieldPresent(ripple::sfMPTokenMetadata))
        issuance.mptokenMetadata = ripple::strHex(sle.getFieldVL(ripple::sfMPTokenMetadata));

    if (sle.isFieldPresent(ripple::sfDomainID))
        issuance.domainID = ripple::strHex(sle.getFieldH256(ripple::sfDomainID));

    issuances.push_back(issuance);
}

AccountMPTokenIssuancesHandler::Result
AccountMPTokenIssuancesHandler::process(AccountMPTokenIssuancesHandler::Input const& input, Context const& ctx) const
{
    auto const range = sharedPtrBackend_->fetchLedgerRange();
    ASSERT(range.has_value(), "AccountMPTokenIssuances' ledger range must be available");
    auto const expectedLgrInfo = getLedgerHeaderFromHashOrSeq(
        *sharedPtrBackend_, ctx.yield, input.ledgerHash, input.ledgerIndex, range->maxSequence
    );

    if (!expectedLgrInfo.has_value())
        return Error{expectedLgrInfo.error()};

    auto const& lgrInfo = expectedLgrInfo.value();
    auto const accountID = accountFromStringStrict(input.account);
    auto const accountLedgerObject =
        sharedPtrBackend_->fetchLedgerObject(ripple::keylet::account(*accountID).key, lgrInfo.seq, ctx.yield);

    if (not accountLedgerObject.has_value())
        return Error{Status{RippledError::rpcACT_NOT_FOUND}};

    Output response;
    response.issuances.reserve(input.limit);

    auto const addToResponse = [&](ripple::SLE const& sle) {
        if (sle.getType() == ripple::ltMPTOKEN_ISSUANCE) {
            addMPTokenIssuance(response.issuances, sle, *accountID);
        }
    };

    auto const expectedNext = traverseOwnedNodes(
        *sharedPtrBackend_, *accountID, lgrInfo.seq, input.limit, input.marker, ctx.yield, addToResponse
    );

    if (!expectedNext.has_value())
        return Error{expectedNext.error()};

    auto const nextMarker = expectedNext.value();

    response.account = input.account;
    response.limit = input.limit;

    response.ledgerHash = ripple::strHex(lgrInfo.hash);
    response.ledgerIndex = lgrInfo.seq;

    if (nextMarker.isNonZero())
        response.marker = nextMarker.toString();

    return response;
}

AccountMPTokenIssuancesHandler::Input
tag_invoke(boost::json::value_to_tag<AccountMPTokenIssuancesHandler::Input>, boost::json::value const& jv)
{
    auto input = AccountMPTokenIssuancesHandler::Input{};
    auto const& jsonObject = jv.as_object();

    input.account = boost::json::value_to<std::string>(jv.at(JS(account)));

    if (jsonObject.contains(JS(limit)))
        input.limit = util::integralValueAs<uint32_t>(jv.at(JS(limit)));

    if (jsonObject.contains(JS(marker)))
        input.marker = boost::json::value_to<std::string>(jv.at(JS(marker)));

    if (jsonObject.contains(JS(ledger_hash)))
        input.ledgerHash = boost::json::value_to<std::string>(jv.at(JS(ledger_hash)));

    if (jsonObject.contains(JS(ledger_index))) {
        if (!jsonObject.at(JS(ledger_index)).is_string()) {
            input.ledgerIndex = util::integralValueAs<uint32_t>(jv.at(JS(ledger_index)));
        } else if (jsonObject.at(JS(ledger_index)).as_string() != "validated") {
            input.ledgerIndex = std::stoi(boost::json::value_to<std::string>(jv.at(JS(ledger_index))));
        }
    }

    return input;
}

void
tag_invoke(boost::json::value_from_tag, boost::json::value& jv, AccountMPTokenIssuancesHandler::Output const& output)
{
    using boost::json::value_from;

    auto obj = boost::json::object{
        {JS(account), output.account},
        {JS(ledger_hash), output.ledgerHash},
        {JS(ledger_index), output.ledgerIndex},
        {JS(validated), output.validated},
        {JS(limit), output.limit},
        {"mpt_issuances", value_from(output.issuances)},
    };

    if (output.marker.has_value())
        obj[JS(marker)] = *output.marker;

    jv = std::move(obj);
}

void
tag_invoke(
    boost::json::value_from_tag,
    boost::json::value& jv,
    AccountMPTokenIssuancesHandler::MPTokenIssuanceResponse const& issuance
)
{
    auto obj = boost::json::object{
        {JS(issuer), issuance.issuer},
        {JS(sequence), issuance.sequence},
    };

    auto const setIfPresent = [&](boost::json::string_view field, auto const& value) {
        if (value.has_value()) {
            obj[field] = *value;
        }
    };

    setIfPresent("transfer_fee", issuance.transferFee);
    setIfPresent("asset_scale", issuance.assetScale);
    setIfPresent("maximum_amount", issuance.maximumAmount);
    setIfPresent("outstanding_amount", issuance.outstandingAmount);
    setIfPresent("locked_amount", issuance.lockedAmount);
    setIfPresent("mptoken_metadata", issuance.mptokenMetadata);
    setIfPresent("domain_id", issuance.domainID);

    setIfPresent("mpt_locked", issuance.mptLocked);
    setIfPresent("mpt_can_lock", issuance.mptCanLock);
    setIfPresent("mpt_require_auth", issuance.mptRequireAuth);
    setIfPresent("mpt_can_escrow", issuance.mptCanEscrow);
    setIfPresent("mpt_can_trade", issuance.mptCanTrade);
    setIfPresent("mpt_can_transfer", issuance.mptCanTransfer);
    setIfPresent("mpt_can_clawback", issuance.mptCanClawback);

    jv = std::move(obj);
}

} // namespace rpc
@@ -1,196 +0,0 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2025, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include "data/BackendInterface.hpp"
#include "rpc/Errors.hpp"
#include "rpc/JS.hpp"
#include "rpc/common/Checkers.hpp"
#include "rpc/common/MetaProcessors.hpp"
#include "rpc/common/Modifiers.hpp"
#include "rpc/common/Specs.hpp"
#include "rpc/common/Types.hpp"
#include "rpc/common/Validators.hpp"

#include <boost/json/conversion.hpp>
#include <boost/json/value.hpp>
#include <xrpl/protocol/AccountID.h>
#include <xrpl/protocol/ErrorCodes.h>
#include <xrpl/protocol/STLedgerEntry.h>
#include <xrpl/protocol/jss.h>

#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <vector>

namespace rpc {

/**
 * @brief The account_mptoken_issuances method returns information about all MPTokenIssuance objects the account has
 * created.
 */
class AccountMPTokenIssuancesHandler {
    // dependencies
    std::shared_ptr<BackendInterface> sharedPtrBackend_;

public:
    static constexpr auto kLIMIT_MIN = 10;
    static constexpr auto kLIMIT_MAX = 400;
    static constexpr auto kLIMIT_DEFAULT = 200;

    /**
     * @brief A struct to hold data for one MPTokenIssuance response.
     */
    struct MPTokenIssuanceResponse {
        std::string issuer;
        uint32_t sequence{};

        std::optional<uint16_t> transferFee{};
        std::optional<uint8_t> assetScale{};

        std::optional<std::uint64_t> maximumAmount;
        std::optional<std::uint64_t> outstandingAmount;
        std::optional<std::uint64_t> lockedAmount;
        std::optional<std::string> mptokenMetadata;
        std::optional<std::string> domainID;

        std::optional<bool> mptLocked;
        std::optional<bool> mptCanLock;
        std::optional<bool> mptRequireAuth;
        std::optional<bool> mptCanEscrow;
        std::optional<bool> mptCanTrade;
        std::optional<bool> mptCanTransfer;
        std::optional<bool> mptCanClawback;
    };

    /**
     * @brief A struct to hold the output data of the command.
     */
    struct Output {
        std::string account;
        std::vector<MPTokenIssuanceResponse> issuances;
        std::string ledgerHash;
        uint32_t ledgerIndex{};
        bool validated = true;
        std::optional<std::string> marker;
        uint32_t limit{};
    };

    /**
     * @brief A struct to hold the input data for the command.
     */
    struct Input {
        std::string account;
        std::optional<std::string> ledgerHash;
        std::optional<uint32_t> ledgerIndex;
        uint32_t limit = kLIMIT_DEFAULT;
        std::optional<std::string> marker;
    };

    using Result = HandlerReturnType<Output>;

    /**
     * @brief Construct a new AccountMPTokenIssuancesHandler object.
     *
     * @param sharedPtrBackend The backend to use.
     */
    AccountMPTokenIssuancesHandler(std::shared_ptr<BackendInterface> sharedPtrBackend)
        : sharedPtrBackend_(std::move(sharedPtrBackend))
    {
    }

    /**
     * @brief Returns the API specification for the command.
     *
     * @param apiVersion The API version to return the spec for.
     * @return The spec for the given API version.
     */
    static RpcSpecConstRef
    spec([[maybe_unused]] uint32_t apiVersion)
    {
        static auto const kRPC_SPEC = RpcSpec{
            {JS(account),
             validation::Required{},
             meta::WithCustomError{
                 validation::CustomValidators::accountValidator, Status(RippledError::rpcACT_MALFORMED)
             }},
            {JS(ledger_hash), validation::CustomValidators::uint256HexStringValidator},
            {JS(limit),
             validation::Type<uint32_t>{},
             validation::Min(1u),
             modifiers::Clamp<int32_t>{kLIMIT_MIN, kLIMIT_MAX}},
            {JS(ledger_index), validation::CustomValidators::ledgerIndexValidator},
            {JS(marker), validation::CustomValidators::accountMarkerValidator},
            {JS(ledger), check::Deprecated{}},
        };

        return kRPC_SPEC;
    }

    /**
     * @brief Process the AccountMPTokenIssuances command.
     *
     * @param input The input data for the command.
     * @param ctx The context of the request.
     * @return The result of the operation.
     */
    Result
    process(Input const& input, Context const& ctx) const;

private:
    static void
    addMPTokenIssuance(
        std::vector<MPTokenIssuanceResponse>& issuances,
        ripple::SLE const& sle,
        ripple::AccountID const& account
    );

private:
    /**
     * @brief Convert the Output to a JSON object
     *
     * @param [out] jv The JSON object to convert to
     * @param output The output to convert
     */
    friend void
    tag_invoke(boost::json::value_from_tag, boost::json::value& jv, Output const& output);

    /**
     * @brief Convert a JSON object to Input type
     *
     * @param jv The JSON object to convert
     * @return Input parsed from the JSON object
     */
    friend Input
    tag_invoke(boost::json::value_to_tag<Input>, boost::json::value const& jv);

    /**
     * @brief Convert the MPTokenIssuanceResponse to a JSON object
     *
     * @param [out] jv The JSON object to convert to
     * @param issuance The MPTokenIssuance response to convert
     */
    friend void
    tag_invoke(boost::json::value_from_tag, boost::json::value& jv, MPTokenIssuanceResponse const& issuance);
};

} // namespace rpc
@@ -1,191 +0,0 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2025, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include "rpc/handlers/AccountMPTokens.hpp"

#include "rpc/Errors.hpp"
#include "rpc/JS.hpp"
#include "rpc/RPCHelpers.hpp"
#include "rpc/common/Types.hpp"
#include "util/Assert.hpp"
#include "util/JsonUtils.hpp"

#include <boost/json/conversion.hpp>
#include <boost/json/object.hpp>
#include <boost/json/value.hpp>
#include <boost/json/value_to.hpp>
#include <xrpl/basics/strHex.h>
#include <xrpl/protocol/AccountID.h>
#include <xrpl/protocol/Indexes.h>
#include <xrpl/protocol/LedgerFormats.h>
#include <xrpl/protocol/LedgerHeader.h>
#include <xrpl/protocol/SField.h>
#include <xrpl/protocol/STAmount.h>
#include <xrpl/protocol/STLedgerEntry.h>
#include <xrpl/protocol/UintTypes.h>
#include <xrpl/protocol/jss.h>

#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <vector>

namespace rpc {

void
AccountMPTokensHandler::addMPToken(std::vector<MPTokenResponse>& mpts, ripple::SLE const& sle)
{
    MPTokenResponse token{};
    auto const flags = sle.getFieldU32(ripple::sfFlags);

    token.account = ripple::to_string(sle.getAccountID(ripple::sfAccount));
    token.MPTokenIssuanceID = ripple::strHex(sle.getFieldH192(ripple::sfMPTokenIssuanceID));
    token.MPTAmount = sle.getFieldU64(ripple::sfMPTAmount);

    if (sle.isFieldPresent(ripple::sfLockedAmount))
        token.lockedAmount = sle.getFieldU64(ripple::sfLockedAmount);

    auto const setFlag = [&](std::optional<bool>& field, std::uint32_t mask) {
        if ((flags & mask) != 0u)
            field = true;
    };

    setFlag(token.mptLocked, ripple::lsfMPTLocked);
    setFlag(token.mptAuthorized, ripple::lsfMPTAuthorized);

    mpts.push_back(token);
}

AccountMPTokensHandler::Result
AccountMPTokensHandler::process(AccountMPTokensHandler::Input const& input, Context const& ctx) const
{
    auto const range = sharedPtrBackend_->fetchLedgerRange();
    ASSERT(range.has_value(), "AccountMPTokens' ledger range must be available");
    auto const expectedLgrInfo = getLedgerHeaderFromHashOrSeq(
        *sharedPtrBackend_, ctx.yield, input.ledgerHash, input.ledgerIndex, range->maxSequence
    );

    if (!expectedLgrInfo.has_value())
        return Error{expectedLgrInfo.error()};

    auto const& lgrInfo = expectedLgrInfo.value();
    auto const accountID = accountFromStringStrict(input.account);
    auto const accountLedgerObject =
        sharedPtrBackend_->fetchLedgerObject(ripple::keylet::account(*accountID).key, lgrInfo.seq, ctx.yield);

    if (not accountLedgerObject.has_value())
        return Error{Status{RippledError::rpcACT_NOT_FOUND}};

    Output response;
    response.mpts.reserve(input.limit);

    auto const addToResponse = [&](ripple::SLE const& sle) {
        if (sle.getType() == ripple::ltMPTOKEN) {
            addMPToken(response.mpts, sle);
        }
    };

    auto const expectedNext = traverseOwnedNodes(
        *sharedPtrBackend_, *accountID, lgrInfo.seq, input.limit, input.marker, ctx.yield, addToResponse
    );

    if (!expectedNext.has_value())
        return Error{expectedNext.error()};

    auto const& nextMarker = expectedNext.value();

    response.account = input.account;
    response.limit = input.limit;

    response.ledgerHash = ripple::strHex(lgrInfo.hash);
    response.ledgerIndex = lgrInfo.seq;

    if (nextMarker.isNonZero())
        response.marker = nextMarker.toString();

    return response;
}

AccountMPTokensHandler::Input
tag_invoke(boost::json::value_to_tag<AccountMPTokensHandler::Input>, boost::json::value const& jv)
{
    AccountMPTokensHandler::Input input{};
    auto const& jsonObject = jv.as_object();

    input.account = boost::json::value_to<std::string>(jv.at(JS(account)));

    if (jsonObject.contains(JS(limit)))
        input.limit = util::integralValueAs<uint32_t>(jv.at(JS(limit)));
    if (jsonObject.contains(JS(marker)))
        input.marker = boost::json::value_to<std::string>(jv.at(JS(marker)));
    if (jsonObject.contains(JS(ledger_hash)))
        input.ledgerHash = boost::json::value_to<std::string>(jv.at(JS(ledger_hash)));
    if (jsonObject.contains(JS(ledger_index))) {
        if (!jv.at(JS(ledger_index)).is_string()) {
            input.ledgerIndex = util::integralValueAs<uint32_t>(jv.at(JS(ledger_index)));
        } else if (boost::json::value_to<std::string>(jv.at(JS(ledger_index))) != "validated") {
            input.ledgerIndex = std::stoi(boost::json::value_to<std::string>(jv.at(JS(ledger_index))));
        }
    }

    return input;
}

void
tag_invoke(boost::json::value_from_tag, boost::json::value& jv, AccountMPTokensHandler::Output const& output)
{
    auto obj = boost::json::object{
        {JS(account), output.account},
        {JS(ledger_hash), output.ledgerHash},
        {JS(ledger_index), output.ledgerIndex},
        {JS(validated), output.validated},
        {JS(limit), output.limit},
        {"mptokens", boost::json::value_from(output.mpts)},
    };

    if (output.marker.has_value())
        obj[JS(marker)] = *output.marker;

    jv = std::move(obj);
}

void
tag_invoke(boost::json::value_from_tag, boost::json::value& jv, AccountMPTokensHandler::MPTokenResponse const& mptoken)
{
    auto obj = boost::json::object{
        {JS(account), mptoken.account},
        {JS(mpt_issuance_id), mptoken.MPTokenIssuanceID},
        {JS(mpt_amount), mptoken.MPTAmount},
    };

    auto const setIfPresent = [&](boost::json::string_view field, auto const& value) {
        if (value.has_value()) {
            obj[field] = *value;
        }
    };

    setIfPresent("locked_amount", mptoken.lockedAmount);
    setIfPresent("mpt_locked", mptoken.mptLocked);
    setIfPresent("mpt_authorized", mptoken.mptAuthorized);

    jv = std::move(obj);
}

} // namespace rpc
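For orientation, the marker-based pagination that process() implements above works like Clio's other account_* handlers: a page of up to `limit` MPToken objects is returned, and a `marker` is included whenever the traversal stopped early. A minimal sketch of the request shape, assuming Boost.JSON (the account and marker strings are placeholders, not real ledger data):

#include <boost/json.hpp>
#include <iostream>

int main()
{
    // First page: only "account" is required; "limit" is clamped by the
    // handler's RpcSpec, and "ledger_index" may be a sequence or "validated".
    boost::json::value request = {
        {"account", "rExamplePlaceholderAccount"},  // placeholder address
        {"limit", 25},
        {"ledger_index", "validated"},
    };

    // To fetch the next page, echo back the "marker" from the previous
    // response unchanged so traverseOwnedNodes resumes where it stopped.
    request.as_object()["marker"] = "placeholder-marker-from-response";

    std::cout << boost::json::serialize(request) << "\n";
}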
@@ -1,178 +0,0 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2025, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include "data/BackendInterface.hpp"
#include "rpc/Errors.hpp"
#include "rpc/JS.hpp"
#include "rpc/common/Checkers.hpp"
#include "rpc/common/MetaProcessors.hpp"
#include "rpc/common/Modifiers.hpp"
#include "rpc/common/Specs.hpp"
#include "rpc/common/Types.hpp"
#include "rpc/common/Validators.hpp"

#include <boost/json/conversion.hpp>
#include <boost/json/value.hpp>
#include <xrpl/protocol/AccountID.h>
#include <xrpl/protocol/ErrorCodes.h>
#include <xrpl/protocol/STLedgerEntry.h>
#include <xrpl/protocol/jss.h>

#include <cstdint>
#include <optional>
#include <string>
#include <vector>

namespace rpc {

/**
 * @brief The account_mptokens method returns information about the MPTokens the account currently holds.
 */
class AccountMPTokensHandler {
    // dependencies
    std::shared_ptr<BackendInterface> sharedPtrBackend_;

public:
    static constexpr auto kLIMIT_MIN = 10;
    static constexpr auto kLIMIT_MAX = 400;
    static constexpr auto kLIMIT_DEFAULT = 200;

    /**
     * @brief A struct to hold data for one MPToken response.
     */
    struct MPTokenResponse {
        std::string account;
        std::string MPTokenIssuanceID;
        uint64_t MPTAmount{};
        std::optional<uint64_t> lockedAmount;

        std::optional<bool> mptLocked;
        std::optional<bool> mptAuthorized;
    };

    /**
     * @brief A struct to hold the output data of the command.
     */
    struct Output {
        std::string account;
        std::vector<MPTokenResponse> mpts;
        std::string ledgerHash;
        uint32_t ledgerIndex{};
        bool validated = true;
        std::optional<std::string> marker;
        uint32_t limit{};
    };

    /**
     * @brief A struct to hold the input data for the command.
     */
    struct Input {
        std::string account;
        std::optional<std::string> ledgerHash;
        std::optional<uint32_t> ledgerIndex;
        uint32_t limit = kLIMIT_DEFAULT;
        std::optional<std::string> marker;
    };

    using Result = HandlerReturnType<Output>;

    /**
     * @brief Construct a new AccountMPTokensHandler object.
     *
     * @param sharedPtrBackend The backend to use.
     */
    AccountMPTokensHandler(std::shared_ptr<BackendInterface> sharedPtrBackend)
        : sharedPtrBackend_(std::move(sharedPtrBackend))
    {
    }

    /**
     * @brief Returns the API specification for the command.
     *
     * @param apiVersion The API version to return the spec for.
     * @return The spec for the given API version.
     */
    static RpcSpecConstRef
    spec([[maybe_unused]] uint32_t apiVersion)
    {
        static auto const kRPC_SPEC = RpcSpec{
            {JS(account),
             validation::Required{},
             meta::WithCustomError{
                 validation::CustomValidators::accountValidator, Status(RippledError::rpcACT_MALFORMED)
             }},
            {JS(ledger_hash), validation::CustomValidators::uint256HexStringValidator},
            {JS(limit),
             validation::Type<uint32_t>{},
             validation::Min(1u),
             modifiers::Clamp<int32_t>{kLIMIT_MIN, kLIMIT_MAX}},
            {JS(ledger_index), validation::CustomValidators::ledgerIndexValidator},
            {JS(marker), validation::CustomValidators::accountMarkerValidator},
            {JS(ledger), check::Deprecated{}},
        };

        return kRPC_SPEC;
    }

    /**
     * @brief Process the AccountMPTokens command.
     *
     * @param input The input data for the command.
     * @param ctx The context of the request.
     * @return The result of the operation.
     */
    Result
    process(Input const& input, Context const& ctx) const;

private:
    static void
    addMPToken(std::vector<MPTokenResponse>& mpts, ripple::SLE const& sle);

private:
    /**
     * @brief Convert the Output to a JSON object
     *
     * @param [out] jv The JSON object to convert to
     * @param output The output to convert
     */
    friend void
    tag_invoke(boost::json::value_from_tag, boost::json::value& jv, Output const& output);

    /**
     * @brief Convert a JSON object to Input type
     *
     * @param jv The JSON object to convert
     * @return Input parsed from the JSON object
     */
    friend Input
    tag_invoke(boost::json::value_to_tag<Input>, boost::json::value const& jv);

    /**
     * @brief Convert the MPTokenResponse to a JSON object
     *
     * @param [out] jv The JSON object to convert to
     * @param mptoken The MPToken response to convert
     */
    friend void
    tag_invoke(boost::json::value_from_tag, boost::json::value& jv, MPTokenResponse const& mptoken);
};

} // namespace rpc
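Because Input and Output integrate with Boost.JSON through the friend tag_invoke overloads declared above, conversions go through the standard value_to/value_from entry points. A minimal illustrative snippet (the account value is a placeholder; in the real request path the RpcSpec runs its validators and the Clamp modifier before this conversion happens):

// Illustrative round-trip through the declared conversion points.
auto const jv = boost::json::parse(R"({"account": "rExamplePlaceholder", "limit": 42})");
auto const input = boost::json::value_to<rpc::AccountMPTokensHandler::Input>(jv);
// input.account == "rExamplePlaceholder", input.limit == 42

rpc::AccountMPTokensHandler::Output output;
output.account = input.account;
auto const outJson = boost::json::value_from(output);  // uses the friend tag_invoke above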
@@ -333,13 +333,7 @@ tag_invoke(boost::json::value_to_tag<LedgerEntryHandler::Input>, boost::json::va
        {JS(mptoken), ripple::ltMPTOKEN},
        {JS(permissioned_domain), ripple::ltPERMISSIONED_DOMAIN},
        {JS(vault), ripple::ltVAULT},
        {JS(delegate), ripple::ltDELEGATE},
        {JS(amendments), ripple::ltAMENDMENTS},
        {JS(fee), ripple::ltFEE_SETTINGS},
        {JS(hashes), ripple::ltLEDGER_HASHES},
        {JS(nft_offer), ripple::ltNFTOKEN_OFFER},
        {JS(nunl), ripple::ltNEGATIVE_UNL},
        {JS(signer_list), ripple::ltSIGNER_LIST}
        {JS(delegate), ripple::ltDELEGATE}
    };

    auto const parseBridgeFromJson = [](boost::json::value const& bridgeJson) {

@@ -428,12 +428,6 @@ public:
             validation::CustomValidators::accountBase58Validator, Status(ClioError::RpcMalformedAddress)
         }}
        }}},
        {JS(amendments), kMALFORMED_REQUEST_HEX_STRING_VALIDATOR},
        {JS(fee), kMALFORMED_REQUEST_HEX_STRING_VALIDATOR},
        {JS(hashes), kMALFORMED_REQUEST_HEX_STRING_VALIDATOR},
        {JS(nft_offer), kMALFORMED_REQUEST_HEX_STRING_VALIDATOR},
        {JS(nunl), kMALFORMED_REQUEST_HEX_STRING_VALIDATOR},
        {JS(signer_list), kMALFORMED_REQUEST_HEX_STRING_VALIDATOR},
        {JS(ledger), check::Deprecated{}},
        {"include_deleted", validation::Type<bool>{}},
    };
@@ -1,7 +1,5 @@
# Async framework

@page "async" Async framework

## Introduction

Clio uses threads intensively. Multiple parts of Clio were, and still are, implemented by running a `std::thread` with some sort of loop inside, and each time this pattern is reimplemented in a slightly different way. State is managed using asynchronous queues, atomic flags, mutexes and other low-level primitives.
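To make the problem concrete, here is a hypothetical sketch (not actual Clio code) of the hand-rolled pattern described above: a `std::thread` running a loop, coordinated through a mutex, a condition variable and an atomic flag:

#include <atomic>
#include <condition_variable>
#include <mutex>
#include <queue>
#include <thread>

// Hypothetical example of the hand-rolled pattern: a thread with a loop
// inside, with state managed via low-level synchronization primitives.
class Worker {
    std::atomic_bool stopping_{false};
    std::mutex mtx_;
    std::condition_variable cv_;
    std::queue<int> tasks_;
    std::thread thread_{[this] { run(); }};  // last member, so deps exist

    void run()
    {
        while (not stopping_) {
            std::unique_lock lock{mtx_};
            cv_.wait(lock, [this] { return stopping_ or not tasks_.empty(); });
            while (not tasks_.empty()) {
                // ... subsystem-specific work on tasks_.front() ...
                tasks_.pop();
            }
        }
    }

public:
    ~Worker()
    {
        {
            // Set the flag under the mutex so the waiter cannot miss it.
            std::lock_guard lock{mtx_};
            stopping_ = true;
        }
        cv_.notify_all();
        thread_.join();
    }

    void push(int task)
    {
        {
            std::lock_guard lock{mtx_};
            tasks_.push(task);
        }
        cv_.notify_one();
    }
};

Every subsystem ends up re-deriving a slightly different version of this boilerplate, which is exactly the duplication the async framework is designed to remove.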
@@ -45,7 +45,7 @@ class ConfigValue;
/**
 * @brief specific values that are accepted for logger levels in config.
 */
static constexpr std::array<std::string_view, 6> kLOG_LEVELS = {
static constexpr std::array<char const*, 6> kLOG_LEVELS = {
    "trace",
    "debug",
    "info",
@@ -57,7 +57,7 @@ static constexpr std::array<std::string_view, 6> kLOG_LEVELS = {
/**
 * @brief specific values that are accepted for logger tag style in config.
 */
static constexpr std::array<std::string_view, 5> kLOG_TAGS = {
static constexpr std::array<char const*, 5> kLOG_TAGS = {
    "int",
    "uint",
    "null",
@@ -68,7 +68,7 @@ static constexpr std::array<std::string_view, 5> kLOG_TAGS = {
/**
 * @brief specific values that are accepted for cache loading in config.
 */
static constexpr std::array<std::string_view, 3> kLOAD_CACHE_MODE = {
static constexpr std::array<char const*, 3> kLOAD_CACHE_MODE = {
    "sync",
    "async",
    "none",
@@ -77,17 +77,12 @@ static constexpr std::array<std::string_view, 3> kLOAD_CACHE_MODE = {
/**
 * @brief specific values that are accepted for database type in config.
 */
static constexpr std::array<std::string_view, 1> kDATABASE_TYPE = {"cassandra"};
static constexpr std::array<char const*, 1> kDATABASE_TYPE = {"cassandra"};

/**
 * @brief specific values that are accepted for server's processing_policy in config.
 */
static constexpr std::array<std::string_view, 2> kPROCESSING_POLICY = {"parallel", "sequent"};

/**
 * @brief specific values that are accepted for database provider in config.
 */
static constexpr std::array<std::string_view, 2> kPROVIDER = {"cassandra", "aws_keyspace"};
static constexpr std::array<char const*, 2> kPROCESSING_POLICY = {"parallel", "sequent"};

/**
 * @brief An interface to enforce constraints on certain values within ClioConfigDefinition.
@@ -123,7 +118,7 @@ protected:
 */
template <std::size_t ArrSize>
constexpr std::string
makeErrorMsg(std::string_view key, Value const& value, std::array<std::string_view, ArrSize> arr) const
makeErrorMsg(std::string_view key, Value const& value, std::array<char const*, ArrSize> arr) const
{
    // Extract the value from the variant
    auto const valueStr = std::visit([](auto const& v) { return fmt::format("{}", v); }, value);
@@ -271,7 +266,7 @@ public:
 * @param key The key of the ConfigValue that has this constraint
 * @param arr The value that has this constraint must be of the values in arr
 */
constexpr OneOf(std::string_view key, std::array<std::string_view, ArrSize> arr) : key_{key}, arr_{arr}
constexpr OneOf(std::string_view key, std::array<char const*, ArrSize> arr) : key_{key}, arr_{arr}
{
}

@@ -318,7 +313,7 @@ private:
print(std::ostream& stream) const override
{
    std::string valuesStream;
    std::ranges::for_each(arr_, [&valuesStream](std::string_view elem) {
    std::ranges::for_each(arr_, [&valuesStream](std::string const& elem) {
        valuesStream += fmt::format(" `{}`,", elem);
    });
    // replace the last "," with "."
@@ -327,7 +322,7 @@ private:
}

std::string_view key_;
std::array<std::string_view, ArrSize> arr_;
std::array<char const*, ArrSize> arr_;
};

/**
@@ -475,7 +470,6 @@ static constinit OneOf gValidateCassandraName{"database.type", kDATABASE_TYPE};
static constinit OneOf gValidateLoadMode{"cache.load", kLOAD_CACHE_MODE};
static constinit OneOf gValidateLogTag{"log.tag_style", kLOG_TAGS};
static constinit OneOf gValidateProcessingPolicy{"server.processing_policy", kPROCESSING_POLICY};
static constinit OneOf gValidateProvider{"database.cassandra.provider", kPROVIDER};

static constinit PositiveDouble gValidatePositiveDouble{};
@@ -285,8 +285,6 @@ getClioConfig()
        {"database.cassandra.username", ConfigValue{ConfigType::String}.optional()},
        {"database.cassandra.password", ConfigValue{ConfigType::String}.optional()},
        {"database.cassandra.certfile", ConfigValue{ConfigType::String}.optional()},
        {"database.cassandra.provider",
         ConfigValue{ConfigType::String}.defaultValue("cassandra").withConstraint(gValidateProvider)},

        {"allow_no_etl", ConfigValue{ConfigType::Boolean}.defaultValue(false)},
        {"__ng_etl", ConfigValue{ConfigType::Boolean}.defaultValue(false)},
@@ -173,7 +173,6 @@ This document provides a list of all available Clio configuration properties in
             "Maximum number of outstanding read requests. Read requests are API calls that read from the database."},
        KV{.key = "database.cassandra.threads",
           .value = "Represents the number of threads that will be used for database operations."},
        KV{.key = "database.cassandra.provider", .value = "The specific database backend provider we are using."},
        KV{.key = "database.cassandra.core_connections_per_host",
           .value = "The number of core connections per host for the Cassandra database."},
        KV{.key = "database.cassandra.queue_size_io",
@@ -220,10 +220,10 @@ LogService::createFileSink(FileLoggingParams const& params, std::string const& f
 * @param defaultSeverity The default severity level to use if not overridden.
 * @return A map of channel names to their minimum severity levels, or an error message if parsing fails.
 */
static std::expected<std::unordered_map<std::string_view, Severity>, std::string>
static std::expected<std::unordered_map<std::string, Severity>, std::string>
getMinSeverity(config::ClioConfigDefinition const& config, Severity defaultSeverity)
{
    std::unordered_map<std::string_view, Severity> minSeverity;
    std::unordered_map<std::string, Severity> minSeverity;
    for (auto const& channel : Logger::kCHANNELS)
        minSeverity[channel] = defaultSeverity;

@@ -284,15 +284,13 @@ LogServiceState::reset()
}

std::shared_ptr<spdlog::logger>
LogServiceState::registerLogger(std::string_view channel, std::optional<Severity> severity)
LogServiceState::registerLogger(std::string const& channel, std::optional<Severity> severity)
{
    if (not initialized_) {
        throw std::logic_error("LogService is not initialized");
    }

    std::string const channelStr{channel};

    std::shared_ptr<spdlog::logger> existingLogger = spdlog::get(channelStr);
    std::shared_ptr<spdlog::logger> existingLogger = spdlog::get(channel);
    if (existingLogger != nullptr) {
        if (severity.has_value())
            existingLogger->set_level(toSpdlogLevel(*severity));
@@ -302,10 +300,10 @@ LogServiceState::registerLogger(std::string_view channel, std::optional<Severity
    std::shared_ptr<spdlog::logger> logger;
    if (isAsync_) {
        logger = std::make_shared<spdlog::async_logger>(
            channelStr, sinks_.begin(), sinks_.end(), spdlog::thread_pool(), spdlog::async_overflow_policy::block
            channel, sinks_.begin(), sinks_.end(), spdlog::thread_pool(), spdlog::async_overflow_policy::block
        );
    } else {
        logger = std::make_shared<spdlog::logger>(channelStr, sinks_.begin(), sinks_.end());
        logger = std::make_shared<spdlog::logger>(channel, sinks_.begin(), sinks_.end());
    }

    logger->set_level(toSpdlogLevel(severity.value_or(defaultSeverity_)));
@@ -429,25 +427,10 @@ LogServiceState::replaceSinks(std::vector<std::shared_ptr<spdlog::sinks::sink>>
    spdlog::apply_all([](std::shared_ptr<spdlog::logger> logger) { logger->sinks() = sinks_; });
}

Logger::Logger(std::string_view const channel) : logger_(LogServiceState::registerLogger(channel))
Logger::Logger(std::string channel) : logger_(LogServiceState::registerLogger(channel))
{
}

Logger::~Logger()
{
    // One reference is held by logger_ and the other by spdlog registry
    static constexpr size_t kLAST_LOGGER_REF_COUNT = 2;

    if (logger_ == nullptr) {
        return;
    }

    bool const isDynamic = !std::ranges::contains(kCHANNELS, logger_->name());
    if (isDynamic && logger_.use_count() == kLAST_LOGGER_REF_COUNT) {
        spdlog::drop(logger_->name());
    }
}

Logger::Pump::Pump(std::shared_ptr<spdlog::logger> logger, Severity sev, SourceLocationType const& loc)
    : logger_(std::move(logger))
    , severity_(sev)
@@ -29,7 +29,6 @@
#include <optional>
#include <sstream>
#include <string>
#include <string_view>
#include <vector>

// We forward declare spdlog::logger and spdlog::sinks::sink
@@ -92,7 +91,7 @@ enum class Severity {
 * otherwise. See @ref LogService::init() for setup of the logging core and
 * severity levels for each channel.
 */
class Logger {
class Logger final {
    std::shared_ptr<spdlog::logger> logger_;

    friend class LogService; // to expose the Pump interface
@@ -146,7 +145,7 @@ class Logger {
    };

public:
    static constexpr std::array<std::string_view, 8> kCHANNELS = {
    static constexpr std::array<char const*, 8> kCHANNELS = {
        "General",
        "WebServer",
        "Backend",
@@ -166,10 +165,10 @@ public:
     *
     * @param channel The channel this logger will report into.
     */
    Logger(std::string_view const channel);
    Logger(std::string channel);

    Logger(Logger const&) = default;
    ~Logger();
    ~Logger() = default;

    Logger(Logger&&) = default;
    Logger&
@@ -292,7 +291,7 @@ protected:
     * @return Shared pointer to the registered spdlog logger
     */
    static std::shared_ptr<spdlog::logger>
    registerLogger(std::string_view channel, std::optional<Severity> severity = std::nullopt);
    registerLogger(std::string const& channel, std::optional<Severity> severity = std::nullopt);

protected:
    static bool isAsync_; // NOLINT(readability-identifier-naming)
@@ -1,7 +1,5 @@
# Web server subsystem

@page "web" Web server subsystem

This folder contains all of the classes for running the web server.

The web server subsystem:
@@ -29,14 +29,13 @@

#include <algorithm>
#include <memory>
#include <string_view>

void
LoggerFixture::init()
{
    util::LogServiceState::init(false, util::Severity::FTL, {});

    std::ranges::for_each(util::Logger::kCHANNELS, [](std::string_view const channel) {
    std::ranges::for_each(util::Logger::kCHANNELS, [](char const* channel) {
        util::LogService::registerLogger(channel);
    });
@@ -25,5 +25,4 @@

struct MockAmendmentBlockHandler : etlng::AmendmentBlockHandlerInterface {
    MOCK_METHOD(void, notifyAmendmentBlocked, (), (override));
    MOCK_METHOD(void, stop, (), (override));
};
@@ -32,9 +32,7 @@
#include <org/xrpl/rpc/v1/get_ledger_entry.pb.h>
#include <xrpl/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>

#include <chrono>
#include <memory>
#include <optional>
#include <string>
#include <thread>

@@ -92,7 +90,8 @@ struct WithMockXrpLedgerAPIService : virtual ::testing::Test {

    ~WithMockXrpLedgerAPIService() override
    {
        shutdown();
        server_->Shutdown();
        serverThread_.join();
    }

    int
@@ -100,19 +99,6 @@ struct WithMockXrpLedgerAPIService : virtual ::testing::Test {
    {
        return port_;
    }

    void
    shutdown(std::optional<std::chrono::system_clock::duration> deadline = std::nullopt)
    {
        if (deadline.has_value()) {
            server_->Shutdown(std::chrono::system_clock::now() + *deadline);
        } else {
            server_->Shutdown();
        }
        if (serverThread_.joinable())
            serverThread_.join();
    }

    MockXrpLedgerAPIService mockXrpLedgerAPIService;

private:
@@ -1442,69 +1442,38 @@ createLptCurrency(std::string_view assetCurrency, std::string_view asset2Currenc
}

ripple::STObject
createMptIssuanceObject(
    std::string_view accountId,
    std::uint32_t seq,
    std::optional<std::string_view> metadata,
    std::uint32_t flags,
    std::uint64_t outstandingAmount,
    std::optional<std::uint16_t> transferFee,
    std::optional<std::uint8_t> assetScale,
    std::optional<std::uint64_t> maxAmount,
    std::optional<std::uint64_t> lockedAmount,
    std::optional<std::string_view> domainId
)
createMptIssuanceObject(std::string_view accountId, std::uint32_t seq, std::string_view metadata)
{
    ripple::STObject mptIssuance(ripple::sfLedgerEntry);
    mptIssuance.setAccountID(ripple::sfIssuer, getAccountIdWithString(accountId));
    mptIssuance.setFieldU16(ripple::sfLedgerEntryType, ripple::ltMPTOKEN_ISSUANCE);
    mptIssuance.setFieldU32(ripple::sfFlags, 0);
    mptIssuance.setFieldU32(ripple::sfSequence, seq);
    mptIssuance.setFieldU64(ripple::sfOwnerNode, 0);
    mptIssuance.setFieldH256(ripple::sfPreviousTxnID, ripple::uint256{});
    mptIssuance.setFieldU32(ripple::sfFlags, flags);
    mptIssuance.setFieldU32(ripple::sfPreviousTxnLgrSeq, 0);
    mptIssuance.setFieldU64(ripple::sfOutstandingAmount, outstandingAmount);

    if (transferFee.has_value())
        mptIssuance.setFieldU16(ripple::sfTransferFee, *transferFee);
    if (assetScale.has_value())
        mptIssuance.setFieldU8(ripple::sfAssetScale, *assetScale);
    if (maxAmount.has_value())
        mptIssuance.setFieldU64(ripple::sfMaximumAmount, *maxAmount);
    if (lockedAmount.has_value())
        mptIssuance.setFieldU64(ripple::sfLockedAmount, *lockedAmount);
    if (metadata.has_value()) {
        ripple::Slice const sliceMetadata(metadata->data(), metadata->size());
        mptIssuance.setFieldVL(ripple::sfMPTokenMetadata, sliceMetadata);
    }
    if (domainId.has_value())
        mptIssuance.setFieldH256(ripple::sfDomainID, ripple::uint256{*domainId});
    mptIssuance.setFieldU64(ripple::sfMaximumAmount, 0);
    mptIssuance.setFieldU64(ripple::sfOutstandingAmount, 0);
    ripple::Slice const sliceMetadata(metadata.data(), metadata.size());
    mptIssuance.setFieldVL(ripple::sfMPTokenMetadata, sliceMetadata);

    return mptIssuance;
}

ripple::STObject
createMpTokenObject(
    std::string_view accountId,
    ripple::uint192 issuanceID,
    std::uint64_t mptAmount,
    std::uint32_t flags,
    std::optional<uint64_t> lockedAmount
)
createMpTokenObject(std::string_view accountId, ripple::uint192 issuanceID, std::uint64_t mptAmount)
{
    ripple::STObject mptoken(ripple::sfLedgerEntry);
    mptoken.setAccountID(ripple::sfAccount, getAccountIdWithString(accountId));
    mptoken[ripple::sfMPTokenIssuanceID] = issuanceID;
    mptoken.setFieldU16(ripple::sfLedgerEntryType, ripple::ltMPTOKEN);
    mptoken.setFieldU32(ripple::sfFlags, flags);
    mptoken.setFieldU32(ripple::sfFlags, 0);
    mptoken.setFieldU64(ripple::sfOwnerNode, 0);
    mptoken.setFieldH256(ripple::sfPreviousTxnID, ripple::uint256{});
    mptoken.setFieldU32(ripple::sfPreviousTxnLgrSeq, 0);

    if (mptAmount != 0u)
        mptoken.setFieldU64(ripple::sfMPTAmount, mptAmount);
    if (lockedAmount.has_value())
        mptoken.setFieldU64(ripple::sfLockedAmount, *lockedAmount);

    return mptoken;
}
@@ -451,27 +451,10 @@ createDidObject(std::string_view accountId, std::string_view didDoc, std::string
createLptCurrency(std::string_view assetCurrency, std::string_view asset2Currency);

[[nodiscard]] ripple::STObject
createMptIssuanceObject(
    std::string_view accountId,
    std::uint32_t seq,
    std::optional<std::string_view> metadata = std::nullopt,
    std::uint32_t flags = 0,
    std::uint64_t outstandingAmount = 0,
    std::optional<std::uint16_t> transferFee = std::nullopt,
    std::optional<std::uint8_t> assetScale = std::nullopt,
    std::optional<std::uint64_t> maxAmount = std::nullopt,
    std::optional<std::uint64_t> lockedAmount = std::nullopt,
    std::optional<std::string_view> domainId = std::nullopt
);
createMptIssuanceObject(std::string_view accountId, std::uint32_t seq, std::string_view metadata);

[[nodiscard]] ripple::STObject
createMpTokenObject(
    std::string_view accountId,
    ripple::uint192 issuanceID,
    std::uint64_t mptAmount = 1,
    std::uint32_t flags = 0,
    std::optional<uint64_t> lockedAmount = std::nullopt
);
createMpTokenObject(std::string_view accountId, ripple::uint192 issuanceID, std::uint64_t mptAmount = 1);

[[nodiscard]] ripple::STObject
createMPTIssuanceCreateTx(std::string_view accountId, uint32_t fee, uint32_t seq);
@@ -44,7 +44,6 @@ using namespace util::config;

struct BackendCassandraFactoryTest : SyncAsioContextTest, util::prometheus::WithPrometheus {
    static constexpr auto kKEYSPACE = "factory_test";
    static constexpr auto kPROVIDER = "cassandra";

protected:
    ClioConfigDefinition cfg_{
@@ -54,7 +53,6 @@ protected:
        {"database.cassandra.secure_connect_bundle", ConfigValue{ConfigType::String}.optional()},
        {"database.cassandra.port", ConfigValue{ConfigType::Integer}.optional()},
        {"database.cassandra.keyspace", ConfigValue{ConfigType::String}.defaultValue(kKEYSPACE)},
        {"database.cassandra.provider", ConfigValue{ConfigType::String}.defaultValue(kPROVIDER)},
        {"database.cassandra.replication_factor", ConfigValue{ConfigType::Integer}.defaultValue(1)},
        {"database.cassandra.table_prefix", ConfigValue{ConfigType::String}.optional()},
        {"database.cassandra.max_write_requests_outstanding", ConfigValue{ConfigType::Integer}.defaultValue(10'000)},
@@ -85,17 +85,14 @@ using namespace data::cassandra;

class BackendCassandraTestBase : public SyncAsioContextTest, public WithPrometheus {
protected:
    static constexpr auto kCASSANDRA = "cassandra";

    ClioConfigDefinition cfg_{
        {"database.type", ConfigValue{ConfigType::String}.defaultValue(kCASSANDRA)},
        {"database.type", ConfigValue{ConfigType::String}.defaultValue("cassandra")},
        {"database.cassandra.contact_points",
         ConfigValue{ConfigType::String}.defaultValue(TestGlobals::instance().backendHost)},
        {"database.cassandra.secure_connect_bundle", ConfigValue{ConfigType::String}.optional()},
        {"database.cassandra.port", ConfigValue{ConfigType::Integer}.optional()},
        {"database.cassandra.keyspace",
         ConfigValue{ConfigType::String}.defaultValue(TestGlobals::instance().backendKeyspace)},
        {"database.cassandra.provider", ConfigValue{ConfigType::String}.defaultValue(kCASSANDRA)},
        {"database.cassandra.replication_factor", ConfigValue{ConfigType::Integer}.defaultValue(1)},
        {"database.cassandra.table_prefix", ConfigValue{ConfigType::String}.optional()},
        {"database.cassandra.max_write_requests_outstanding", ConfigValue{ConfigType::Integer}.defaultValue(10'000)},
@@ -95,15 +95,13 @@ class MigrationCassandraSimpleTest : public WithPrometheus {
    }

protected:
    static constexpr auto kCASSANDRA = "cassandra";

    ClioConfigDefinition cfg_{
        {{"database.type", ConfigValue{ConfigType::String}.defaultValue(kCASSANDRA)},

        {{"database.type", ConfigValue{ConfigType::String}.defaultValue("cassandra")},
        {"database.cassandra.contact_points",
         ConfigValue{ConfigType::String}.defaultValue(TestGlobals::instance().backendHost)},
        {"database.cassandra.keyspace",
         ConfigValue{ConfigType::String}.defaultValue(TestGlobals::instance().backendKeyspace)},
        {"database.cassandra.provider", ConfigValue{ConfigType::String}.defaultValue(kCASSANDRA)},
        {"database.cassandra.replication_factor", ConfigValue{ConfigType::Integer}.defaultValue(1)},
        {"database.cassandra.replication_factor", ConfigValue{ConfigType::Integer}.defaultValue(1)},
        {"database.cassandra.connect_timeout", ConfigValue{ConfigType::Integer}.defaultValue(2)},
@@ -100,8 +100,6 @@ target_sources(
    rpc/handlers/AccountCurrenciesTests.cpp
    rpc/handlers/AccountInfoTests.cpp
    rpc/handlers/AccountLinesTests.cpp
    rpc/handlers/AccountMPTokenIssuancesTests.cpp
    rpc/handlers/AccountMPTokensTests.cpp
    rpc/handlers/AccountNFTsTests.cpp
    rpc/handlers/AccountObjectsTests.cpp
    rpc/handlers/AccountOffersTests.cpp
@@ -58,7 +58,6 @@ getParseSettingsConfig(boost::json::value val)
        {"database.cassandra.certificate", ConfigValue{ConfigType::String}.optional()},
        {"database.cassandra.username", ConfigValue{ConfigType::String}.optional()},
        {"database.cassandra.password", ConfigValue{ConfigType::String}.optional()},
        {"database.cassandra.provider", ConfigValue{ConfigType::String}.defaultValue("cassandra")},
        {"database.cassandra.queue_size_io", ConfigValue{ConfigType::Integer}.optional()},
        {"database.cassandra.write_batch_size", ConfigValue{ConfigType::Integer}.defaultValue(20)},
        {"database.cassandra.connect_timeout", ConfigValue{ConfigType::Integer}.optional()},
@@ -36,10 +36,7 @@ struct AmendmentBlockHandlerTest : util::prometheus::WithPrometheus, SyncAsioCon
    etl::SystemState state;
};

// Note: This test can be flaky due to the way it was written (it depends on time).
// Since the old ETL is going to be replaced by ETLng, all tests including this one will be deleted anyway, so the fix
// for flakiness is to increase the context runtime to 50ms until then (to not waste time).
TEST_F(AmendmentBlockHandlerTest, CallToNotifyAmendmentBlockedSetsStateAndRepeatedlyCallsAction)
TEST_F(AmendmentBlockHandlerTest, CallTonotifyAmendmentBlockedSetsStateAndRepeatedlyCallsAction)
{
    AmendmentBlockHandler handler{ctx_, state, std::chrono::nanoseconds{1}, actionMock.AsStdFunction()};

@@ -48,7 +45,12 @@ TEST_F(AmendmentBlockHandlerTest, CallToNotifyAmendmentBlockedSetsStateAndRepeat
    handler.notifyAmendmentBlocked();
    EXPECT_TRUE(state.isAmendmentBlocked);

    runContextFor(std::chrono::milliseconds{50});
    // Code runs significantly slower when assertions are enabled
#ifdef _GLIBCXX_ASSERTIONS
    runContextFor(std::chrono::milliseconds{10});
#else
    runContextFor(std::chrono::milliseconds{1});
#endif
}

struct DefaultAmendmentBlockActionTest : LoggerFixture {};
Some files were not shown because too many files have changed in this diff