Mirror of https://github.com/XRPLF/clio.git (synced 2026-01-20 06:35:23 +00:00)

Compare commits: 43 commits, 2.5.0-b3 ... mathbunnyr
Commits:

b11c1c9987, 43a4d15d2d, 04c80c62f5, 92fdebf590, b054a8424d, 162b1305e0,
bdaa04d1ec, 6c69453bda, 7661ee6a3b, 6cabe89601, 70f7635dda, c8574ea42a,
e4fbf5131f, 87ee358297, 27e29d0421, 63ec563135, 2c6f52a0ed, 97956b1718,
ebfe4e6468, 534518f13e, 4ed51c22d0, 4364c07f1e, f20efae75a, 67b27ee344,
082f2fe21e, 7584a683dd, f58c85d203, 95698ee2de, 3d3db68508, 7fcabd1ce7,
59bb9a11ab, ac5fcc7f4b, 3d0e722176, 93add775b2, 0273ba0da3, 276477c494,
d0b2a24a30, e44a058b13, 743c9b92de, 35c90e64ec, 6e0d7a0fac, d3c98ab2a8,
2d172f470d
@@ -46,7 +46,7 @@ runs:
       - uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
         with:
           cache-image: false
-      - uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
+      - uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

       - uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5.7.0
         id: meta
.github/actions/generate/action.yml: 23 lines changed (vendored)

@@ -5,8 +5,8 @@ inputs:
   conan_profile:
     description: Conan profile name
     required: true
-  conan_cache_hit:
-    description: Whether conan cache has been downloaded
+  force_conan_source_build:
+    description: Whether conan should build all dependencies from source
     required: true
     default: "false"
   build_type:
@@ -25,15 +25,6 @@ inputs:
     description: Whether Clio is to be statically linked
     required: true
     default: "false"
-  sanitizer:
-    description: Sanitizer to use
-    required: true
-    default: "false"
-    choices:
-      - "false"
-      - "tsan"
-      - "asan"
-      - "ubsan"
   time_trace:
     description: Whether to enable compiler trace reports
     required: true
@@ -49,7 +40,7 @@ runs:
     - name: Run conan
       shell: bash
       env:
-        BUILD_OPTION: "${{ inputs.conan_cache_hit == 'true' && 'missing' || '*' }}"
+        CONAN_BUILD_OPTION: "${{ inputs.force_conan_source_build == 'true' && '*' || 'missing' }}"
         CODE_COVERAGE: "${{ inputs.code_coverage == 'true' && 'True' || 'False' }}"
         STATIC_OPTION: "${{ inputs.static == 'true' && 'True' || 'False' }}"
         INTEGRATION_TESTS_OPTION: "${{ inputs.build_integration_tests == 'true' && 'True' || 'False' }}"
@@ -59,7 +50,7 @@ runs:
         conan \
           install .. \
           -of . \
-          -b "$BUILD_OPTION" \
+          -b "$CONAN_BUILD_OPTION" \
           -s "build_type=${{ inputs.build_type }}" \
           -o "&:static=${STATIC_OPTION}" \
           -o "&:tests=True" \
@@ -74,9 +65,9 @@ runs:
       env:
         BUILD_TYPE: "${{ inputs.build_type }}"
         SANITIZER_OPTION: |-
-          ${{ inputs.sanitizer == 'tsan' && '-Dsan=thread' ||
-              inputs.sanitizer == 'ubsan' && '-Dsan=undefined' ||
-              inputs.sanitizer == 'asan' && '-Dsan=address' ||
+          ${{ endsWith(inputs.conan_profile, '.asan') && '-Dsan=address' ||
+              endsWith(inputs.conan_profile, '.tsan') && '-Dsan=thread' ||
+              endsWith(inputs.conan_profile, '.ubsan') && '-Dsan=undefined' ||
               '' }}
       run: |
         cd build
.github/actions/prepare_runner/action.yml: 6 lines changed (vendored)

@@ -13,7 +13,7 @@ runs:
       if: ${{ runner.os == 'macOS' }}
       shell: bash
       run: |
-        brew install \
+        brew install --quiet \
           bison \
           ca-certificates \
           ccache \
@@ -31,7 +31,7 @@ runs:
       shell: bash
       run: |
         # Uninstall any existing cmake
-        brew uninstall cmake --ignore-dependencies || true
+        brew uninstall --formula cmake --ignore-dependencies || true

         # Download specific cmake formula
         FORMULA_URL="https://raw.githubusercontent.com/Homebrew/homebrew-core/b4e46db74e74a8c1650b38b1da222284ce1ec5ce/Formula/c/cmake.rb"
@@ -43,7 +43,7 @@ runs:
         echo "$FORMULA_EXPECTED_SHA256 /tmp/homebrew-formula/cmake.rb" | shasum -a 256 -c

         # Install cmake from the specific formula with force flag
-        brew install --formula --force /tmp/homebrew-formula/cmake.rb
+        brew install --formula --quiet --force /tmp/homebrew-formula/cmake.rb

     - name: Fix git permissions on Linux
       if: ${{ runner.os == 'Linux' }}
.github/actions/restore_cache/action.yml: 30 lines changed (vendored)

@@ -1,10 +1,7 @@
 name: Restore cache
-description: Find and restores conan and ccache cache
+description: Find and restores ccache cache

 inputs:
-  conan_dir:
-    description: Path to Conan directory
-    required: true
   conan_profile:
     description: Conan profile name
     required: true
@@ -19,13 +16,8 @@ inputs:
     description: Whether code coverage is on
     required: true
     default: "false"

 outputs:
-  conan_hash:
-    description: Hash to use as a part of conan cache key
-    value: ${{ steps.conan_hash.outputs.hash }}
-  conan_cache_hit:
-    description: True if conan cache has been downloaded
-    value: ${{ steps.conan_cache.outputs.cache-hit }}
   ccache_cache_hit:
     description: True if ccache cache has been downloaded
     value: ${{ steps.ccache_cache.outputs.cache-hit }}
@@ -37,24 +29,6 @@ runs:
       id: git_common_ancestor
       uses: ./.github/actions/git_common_ancestor

-    - name: Calculate conan hash
-      id: conan_hash
-      shell: bash
-      run: |
-        conan graph info . --format json --out-file info.json -o '&:tests=True' --profile:all ${{ inputs.conan_profile }}
-        packages_info="$(cat info.json | jq -r '.graph.nodes[]?.ref' | grep -v 'clio')"
-        echo "$packages_info"
-        hash="$(echo "$packages_info" | shasum -a 256 | cut -d ' ' -f 1)"
-        rm info.json
-        echo "hash=$hash" >> $GITHUB_OUTPUT
-
-    - name: Restore conan cache
-      uses: actions/cache/restore@v4
-      id: conan_cache
-      with:
-        path: ${{ inputs.conan_dir }}/p
-        key: clio-conan_data-${{ runner.os }}-${{ inputs.build_type }}-${{ inputs.conan_profile }}-develop-${{ steps.conan_hash.outputs.hash }}
-
     - name: Restore ccache cache
       uses: actions/cache/restore@v4
       id: ccache_cache
.github/actions/save_cache/action.yml: 35 lines changed (vendored)

@@ -1,27 +1,13 @@
 name: Save cache
-description: Save conan and ccache cache for develop branch
+description: Save ccache cache for develop branch

 inputs:
-  conan_dir:
-    description: Path to .conan directory
-    required: true
   conan_profile:
     description: Conan profile name
     required: true
-  conan_hash:
-    description: Hash to use as a part of conan cache key
-    required: true
-  conan_cache_hit:
-    description: Whether conan cache has been downloaded
-    required: true
   ccache_dir:
     description: Path to .ccache directory
     required: true
-  ccache_cache_hit:
-    description: Whether conan cache has been downloaded
-    required: true
-  ccache_cache_miss_rate:
-    description: How many cache misses happened
   build_type:
     description: Current build type (e.g. Release, Debug)
     required: true
@@ -31,6 +17,12 @@ inputs:
     required: true
     default: "false"

+  ccache_cache_hit:
+    description: Whether ccache cache has been downloaded
+    required: true
+  ccache_cache_miss_rate:
+    description: How many ccache cache misses happened
+
 runs:
   using: composite
   steps:
@@ -38,19 +30,6 @@ runs:
       id: git_common_ancestor
       uses: ./.github/actions/git_common_ancestor

-    - name: Cleanup conan directory from extra data
-      if: ${{ inputs.conan_cache_hit != 'true' }}
-      shell: bash
-      run: |
-        conan cache clean --source --build --temp
-
-    - name: Save conan cache
-      if: ${{ inputs.conan_cache_hit != 'true' }}
-      uses: actions/cache/save@v4
-      with:
-        path: ${{ inputs.conan_dir }}/p
-        key: clio-conan_data-${{ runner.os }}-${{ inputs.build_type }}-${{ inputs.conan_profile }}-develop-${{ inputs.conan_hash }}
-
     - name: Save ccache cache
       if: ${{ inputs.ccache_cache_hit != 'true' || inputs.ccache_cache_miss_rate == '100.0' }}
       uses: actions/cache/save@v4
.github/actions/setup_conan/action.yml: 22 lines deleted (vendored)

@@ -1,22 +0,0 @@
-name: Setup conan
-description: Setup conan profile and artifactory
-
-inputs:
-  conan_profile:
-    description: Conan profile name
-    required: true
-
-runs:
-  using: composite
-  steps:
-    - name: Create conan profile on macOS
-      if: ${{ runner.os == 'macOS' }}
-      shell: bash
-      run: |
-        conan profile detect --name "${{ inputs.conan_profile }}" --force
-        sed -i '' 's/compiler.cppstd=[^ ]*/compiler.cppstd=20/' "${{ env.CONAN_HOME }}/profiles/${{ inputs.conan_profile }}"
-
-    - name: Add artifactory remote
-      shell: bash
-      run: |
-        conan remote add --index 0 --force ripple http://18.143.149.228:8081/artifactory/api/conan/dev
.github/actions/setup_conan_macos/action.yml: 39 lines added (new file, vendored)

@@ -0,0 +1,39 @@
+name: Setup conan
+description: Setup conan profiles and artifactory on macOS runner
+
+inputs:
+  conan_files_dir:
+    description: Directory with conan files
+    required: true
+
+runs:
+  using: composite
+  steps:
+    - name: Fail on non-macOS
+      if: runner.os != 'macOS'
+      shell: bash
+      run: exit 1
+
+    - name: Copy global.conf
+      shell: bash
+      run: |
+        cp "${{ inputs.conan_files_dir }}/global.conf" "${{ env.CONAN_HOME }}/global.conf"
+
+    - name: Create apple-clang conan profile
+      shell: bash
+      run: |
+        mkdir -p "${{ env.CONAN_HOME }}/profiles"
+        cp .github/actions/setup_conan_macos/apple-clang.profile "${{ env.CONAN_HOME }}/profiles/apple-clang"
+
+    - name: Create conan profiles for sanitizers
+      shell: bash
+      working-directory: ${{ inputs.conan_files_dir }}
+      run: |
+        cp ./sanitizer_template.profile "${{ env.CONAN_HOME }}/profiles/apple-clang.asan"
+        cp ./sanitizer_template.profile "${{ env.CONAN_HOME }}/profiles/apple-clang.tsan"
+        cp ./sanitizer_template.profile "${{ env.CONAN_HOME }}/profiles/apple-clang.ubsan"
+
+    - name: Add artifactory remote
+      shell: bash
+      run: |
+        conan remote add --index 0 ripple http://18.143.149.228:8081/artifactory/api/conan/dev
.github/actions/setup_conan_macos/apple-clang.profile: 8 lines added (new file, vendored)

@@ -0,0 +1,8 @@
+[settings]
+arch={{detect_api.detect_arch()}}
+build_type=Release
+compiler=apple-clang
+compiler.cppstd=20
+compiler.libcxx=libc++
+compiler.version=16
+os=Macos
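The `{{detect_api.detect_arch()}}` placeholder is a Conan 2 profile jinja expression that is evaluated when the profile is loaded, so one profile file serves both Apple Silicon and Intel runners. A quick way to see the rendered result (the reported `arch` value below is an assumption for an arm64 machine):

```sh
# Render and print the profile; Conan evaluates the jinja expression on load.
conan profile show --profile:all apple-clang
# Expect e.g. arch=armv8 on an Apple Silicon runner.
```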
.github/dependabot.yml: 2 lines changed (vendored)

@@ -144,7 +144,7 @@ updates:
     target-branch: develop

   - package-ecosystem: github-actions
-    directory: .github/actions/setup_conan/
+    directory: .github/actions/setup_conan_macos/
     schedule:
       interval: weekly
       day: monday
.github/scripts/generate_conan_matrix.py: 42 lines added (new executable file, vendored)

@@ -0,0 +1,42 @@
+#!/usr/bin/env python3
+import itertools
+import json
+
+LINUX_OS = ["heavy", "heavy-arm64"]
+LINUX_CONTAINERS = ['{ "image": "ghcr.io/xrplf/clio-ci:latest" }']
+LINUX_COMPILERS = ["gcc", "clang"]
+
+MACOS_OS = ["macos15"]
+MACOS_CONTAINERS = [""]
+MACOS_COMPILERS = ["apple-clang"]
+
+BUILD_TYPES = ["Release", "Debug"]
+SANITIZER_EXT = [".asan", ".tsan", ".ubsan", ""]
+
+
+def generate_matrix():
+    configurations = []
+
+    for os, container, compiler in itertools.chain(
+        itertools.product(LINUX_OS, LINUX_CONTAINERS, LINUX_COMPILERS),
+        itertools.product(MACOS_OS, MACOS_CONTAINERS, MACOS_COMPILERS),
+    ):
+        for sanitizer_ext, build_type in itertools.product(SANITIZER_EXT, BUILD_TYPES):
+            # libbacktrace doesn't build on arm64 with gcc.tsan
+            if os == "heavy-arm64" and compiler == "gcc" and sanitizer_ext == ".tsan":
+                continue
+            configurations.append(
+                {
+                    "os": os,
+                    "container": container,
+                    "compiler": compiler,
+                    "sanitizer_ext": sanitizer_ext,
+                    "build_type": build_type,
+                }
+            )
+
+    return {"include": configurations}
+
+
+if __name__ == "__main__":
+    print(f"matrix={json.dumps(generate_matrix())}")
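Run locally, the script prints the matrix in the `key=value` form that the workflow further below appends to `$GITHUB_OUTPUT`; the output shown here is abbreviated to the first entry:

```sh
$ .github/scripts/generate_conan_matrix.py
matrix={"include": [{"os": "heavy", "container": "{ \"image\": \"ghcr.io/xrplf/clio-ci:latest\" }", "compiler": "gcc", "sanitizer_ext": ".asan", "build_type": "Release"}, ...]}
```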
.github/scripts/update-libxrpl-version: 28 lines deleted (vendored)

@@ -1,28 +0,0 @@
-#!/bin/bash
-
-# Note: This script is intended to be run from the root of the repository.
-#
-# This script modifies conanfile.py such that the specified version of libXRPL is used.
-
-if [[ -z "$1" ]]; then
-  cat <<EOF
-
-ERROR
------------------------------------------------------------------------------
- Version should be passed as first argument to the script.
------------------------------------------------------------------------------
-
-EOF
-  exit 1
-fi
-
-VERSION=$1
-GNU_SED=$(sed --version 2>&1 | grep -q 'GNU' && echo true || echo false)
-
-echo "+ Updating required libXRPL version to $VERSION"
-
-if [[ "$GNU_SED" == "false" ]]; then
-  sed -i '' -E "s|'xrpl/[a-zA-Z0-9\\.\\-]+'|'xrpl/$VERSION'|g" conanfile.py
-else
-  sed -i -E "s|'xrpl/[a-zA-Z0-9\\.\\-]+'|'xrpl/$VERSION'|g" conanfile.py
-fi
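The GNU/BSD branching above is the reason this helper existed: GNU sed accepts a bare `sed -i`, while BSD/macOS sed requires an explicit (possibly empty) backup suffix. The workflow change further below sidesteps the divergence with the one form both implementations accept, a suffix attached directly to `-i` (a minimal sketch; the version string is a placeholder):

```sh
# Portable in-place edit: both GNU and BSD sed accept -i.bak (no space).
sed -i.bak -E "s|'xrpl/[a-zA-Z0-9\\.\\-]+'|'xrpl/2.5.0-rc1'|g" conanfile.py
rm -f conanfile.py.bak  # drop the backup once the edit succeeds
```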
.github/workflows/build.yml: 22 lines changed (vendored)

@@ -19,6 +19,7 @@ on:
       - CMakeLists.txt
       - conanfile.py
+      - conan.lock
       - "cmake/**"
       - "src/**"
       - "tests/**"
@@ -46,7 +47,7 @@ jobs:
       include:
         - os: macos15
-          conan_profile: default_apple_clang
+          conan_profile: apple-clang
           build_type: Release
           container: ""
           static: false
@@ -76,7 +77,6 @@ jobs:
       static: true
       upload_clio_server: false
       targets: all
-      sanitizer: "false"
       analyze_build_time: false
     secrets:
       CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
@@ -99,23 +99,9 @@ jobs:
         shell: bash
         run: |
           repoConfigFile=docs/config-description.md
-          if ! [ -f "${repoConfigFile}" ]; then
-            echo "Config Description markdown file is missing in docs folder"
-            exit 1
-          fi
-          configDescriptionFile=config_description_new.md
-
-          chmod +x ./clio_server
+          configDescriptionFile=config_description_new.md
           ./clio_server -d "${configDescriptionFile}"

-          configDescriptionHash=$(sha256sum "${configDescriptionFile}" | cut -d' ' -f1)
-          repoConfigHash=$(sha256sum "${repoConfigFile}" | cut -d' ' -f1)
-
-          if [ "${configDescriptionHash}" != "${repoConfigHash}" ]; then
-            echo "Markdown file is not up to date"
-            diff -u "${repoConfigFile}" "${configDescriptionFile}"
-            rm -f "${configDescriptionFile}"
-            exit 1
-          fi
-          rm -f "${configDescriptionFile}"
-          exit 0
+          diff -u "${repoConfigFile}" "${configDescriptionFile}"
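The simplified check leans on `diff` exiting non-zero when the generated description drifts from the committed one, which fails the step on its own; the explicit hash comparison became redundant. Reproducing the check locally (a sketch, assuming a built `clio_server` in the current directory):

```sh
./clio_server -d config_description_new.md                      # regenerate the config description
diff -u docs/config-description.md config_description_new.md    # non-zero exit => docs out of date
```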
.github/workflows/build_and_test.yml: 10 lines changed (vendored)

@@ -24,7 +24,7 @@ on:
         type: string

       disable_cache:
-        description: Whether ccache and conan cache should be disabled
+        description: Whether ccache should be disabled
         required: false
         type: boolean
         default: false
@@ -57,12 +57,6 @@ on:
         type: string
         default: all

-      sanitizer:
-        description: Sanitizer to use
-        required: false
-        type: string
-        default: "false"
-
 jobs:
   build:
     uses: ./.github/workflows/build_impl.yml
@@ -76,7 +70,6 @@ jobs:
       static: ${{ inputs.static }}
       upload_clio_server: ${{ inputs.upload_clio_server }}
       targets: ${{ inputs.targets }}
-      sanitizer: ${{ inputs.sanitizer }}
       analyze_build_time: false

   test:
@@ -89,4 +82,3 @@ jobs:
       build_type: ${{ inputs.build_type }}
       run_unit_tests: ${{ inputs.run_unit_tests }}
       run_integration_tests: ${{ inputs.run_integration_tests }}
-      sanitizer: ${{ inputs.sanitizer }}
.github/workflows/build_impl.yml: 31 lines changed (vendored)

@@ -24,7 +24,7 @@ on:
         type: string

       disable_cache:
-        description: Whether ccache and conan cache should be disabled
+        description: Whether ccache should be disabled
         required: false
         type: boolean

@@ -48,11 +48,6 @@ on:
         required: true
         type: string

-      sanitizer:
-        description: Sanitizer to use
-        required: true
-        type: string
-
       analyze_build_time:
         description: Whether to enable build time analysis
         required: true
@@ -64,7 +59,7 @@ on:

 jobs:
   build:
-    name: Build ${{ inputs.container != '' && 'in container' || 'natively' }}
+    name: Build
     runs-on: ${{ inputs.runs_on }}
     container: ${{ inputs.container != '' && fromJson(inputs.container) || null }}

@@ -83,16 +78,16 @@ jobs:
           disable_ccache: ${{ inputs.disable_cache }}

       - name: Setup conan
-        uses: ./.github/actions/setup_conan
+        if: runner.os == 'macOS'
+        uses: ./.github/actions/setup_conan_macos
         with:
-          conan_profile: ${{ inputs.conan_profile }}
+          conan_files_dir: docker/ci/conan/

       - name: Restore cache
         if: ${{ !inputs.disable_cache }}
         uses: ./.github/actions/restore_cache
         id: restore_cache
         with:
-          conan_dir: ${{ env.CONAN_HOME }}
           conan_profile: ${{ inputs.conan_profile }}
           ccache_dir: ${{ env.CCACHE_DIR }}
           build_type: ${{ inputs.build_type }}
@@ -102,11 +97,9 @@ jobs:
         uses: ./.github/actions/generate
         with:
           conan_profile: ${{ inputs.conan_profile }}
-          conan_cache_hit: ${{ !inputs.disable_cache && steps.restore_cache.outputs.conan_cache_hit }}
           build_type: ${{ inputs.build_type }}
           code_coverage: ${{ inputs.code_coverage }}
           static: ${{ inputs.static }}
-          sanitizer: ${{ inputs.sanitizer }}
           time_trace: ${{ inputs.analyze_build_time }}

       - name: Build Clio
@@ -140,11 +133,11 @@ jobs:
           cat /tmp/ccache.stats

       - name: Strip unit_tests
-        if: inputs.sanitizer == 'false' && !inputs.code_coverage && !inputs.analyze_build_time
+        if: ${{ !endsWith(inputs.conan_profile, 'san') && !inputs.code_coverage && !inputs.analyze_build_time }}
         run: strip build/clio_tests

       - name: Strip integration_tests
-        if: inputs.sanitizer == 'false' && !inputs.code_coverage && !inputs.analyze_build_time
+        if: ${{ !endsWith(inputs.conan_profile, 'san') && !inputs.code_coverage && !inputs.analyze_build_time }}
         run: strip build/clio_integration_tests

       - name: Upload clio_server
@@ -172,15 +165,13 @@ jobs:
         if: ${{ !inputs.disable_cache && github.ref == 'refs/heads/develop' }}
         uses: ./.github/actions/save_cache
         with:
-          conan_dir: ${{ env.CONAN_HOME }}
-          conan_hash: ${{ steps.restore_cache.outputs.conan_hash }}
-          conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
-          conan_profile: ${{ inputs.conan_profile }}
           ccache_dir: ${{ env.CCACHE_DIR }}
-          ccache_cache_hit: ${{ steps.restore_cache.outputs.ccache_cache_hit }}
-          ccache_cache_miss_rate: ${{ steps.ccache_stats.outputs.miss_rate }}
           build_type: ${{ inputs.build_type }}
           code_coverage: ${{ inputs.code_coverage }}
+          conan_profile: ${{ inputs.conan_profile }}
+
+          ccache_cache_hit: ${{ steps.restore_cache.outputs.ccache_cache_hit }}
+          ccache_cache_miss_rate: ${{ steps.ccache_stats.outputs.miss_rate }}

       # This is run as part of the build job, because it requires the following:
       # - source code
.github/workflows/check_libxrpl.yml: 19 lines changed (vendored)

@@ -15,7 +15,7 @@ env:
 jobs:
   build:
     name: Build Clio / `libXRPL ${{ github.event.client_payload.version }}`
-    runs-on: [self-hosted, heavy]
+    runs-on: heavy
     container:
       image: ghcr.io/xrplf/clio-ci:latest

@@ -27,24 +27,23 @@ jobs:
     - name: Update libXRPL version requirement
       shell: bash
       run: |
-        ./.github/scripts/update-libxrpl-version ${{ github.event.client_payload.version }}
+        sed -i.bak -E "s|'xrpl/[a-zA-Z0-9\\.\\-]+'|'xrpl/${{ github.event.client_payload.version }}'|g" conanfile.py
+        rm -f conanfile.py.bak

+    - name: Update conan lockfile
+      shell: bash
+      run: |
+        conan lock create . -o '&:tests=True' -o '&:benchmark=True'
+
     - name: Prepare runner
       uses: ./.github/actions/prepare_runner
       with:
         disable_ccache: true

-    - name: Setup conan
-      uses: ./.github/actions/setup_conan
-      with:
-        conan_profile: ${{ env.CONAN_PROFILE }}
-
     - name: Run conan and cmake
       uses: ./.github/actions/generate
       with:
         conan_profile: ${{ env.CONAN_PROFILE }}
-        conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
         build_type: Release

     - name: Build Clio
       uses: ./.github/actions/build_clio
@@ -61,7 +60,7 @@ jobs:
   run_tests:
     name: Run tests
     needs: build
-    runs-on: [self-hosted, heavy]
+    runs-on: heavy
     container:
       image: ghcr.io/xrplf/clio-ci:latest
.github/workflows/clang-tidy.yml: 10 lines changed (vendored)

@@ -40,25 +40,17 @@ jobs:
         with:
           disable_ccache: true

-      - name: Setup conan
-        uses: ./.github/actions/setup_conan
-        with:
-          conan_profile: ${{ env.CONAN_PROFILE }}
-
       - name: Restore cache
         uses: ./.github/actions/restore_cache
         id: restore_cache
         with:
-          conan_dir: ${{ env.CONAN_HOME }}
-          ccache_dir: ${{ env.CCACHE_DIR }}
           conan_profile: ${{ env.CONAN_PROFILE }}
+          ccache_dir: ${{ env.CCACHE_DIR }}

       - name: Run conan and cmake
         uses: ./.github/actions/generate
         with:
           conan_profile: ${{ env.CONAN_PROFILE }}
-          conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
           build_type: Release

       - name: Get number of threads
         uses: ./.github/actions/get_number_of_threads
.github/workflows/nightly.yml: 10 lines changed (vendored)

@@ -32,28 +32,24 @@ jobs:
       matrix:
         include:
           - os: macos15
-            conan_profile: default_apple_clang
+            conan_profile: apple-clang
             build_type: Release
             static: false
-            sanitizer: "false"
           - os: heavy
             conan_profile: gcc
             build_type: Release
             static: true
             container: '{ "image": "ghcr.io/xrplf/clio-ci:latest" }'
-            sanitizer: "false"
           - os: heavy
             conan_profile: gcc
             build_type: Debug
             static: true
             container: '{ "image": "ghcr.io/xrplf/clio-ci:latest" }'
-            sanitizer: "false"
           - os: heavy
             conan_profile: gcc.ubsan
             build_type: Release
             static: false
             container: '{ "image": "ghcr.io/xrplf/clio-ci:latest" }'
-            sanitizer: "ubsan"

     uses: ./.github/workflows/build_and_test.yml
     with:
@@ -66,7 +62,6 @@ jobs:
       run_integration_tests: true
       upload_clio_server: true
       disable_cache: true
-      sanitizer: ${{ matrix.sanitizer }}

   analyze_build_time:
     name: Analyze Build Time
@@ -80,7 +75,7 @@ jobs:
             container: '{ "image": "ghcr.io/xrplf/clio-ci:latest" }'
             static: true
           - os: macos15
-            conan_profile: default_apple_clang
+            conan_profile: apple-clang
             container: ""
             static: false
     uses: ./.github/workflows/build_impl.yml
@@ -94,7 +89,6 @@ jobs:
       static: ${{ matrix.static }}
       upload_clio_server: false
       targets: all
-      sanitizer: "false"
       analyze_build_time: true

   nightly_release:
.github/workflows/release.yml: 2 lines changed (vendored)

@@ -22,7 +22,7 @@ jobs:
       matrix:
         include:
           - os: macos15
-            conan_profile: default_apple_clang
+            conan_profile: apple-clang
             build_type: Release
             static: false
           - os: heavy
.github/workflows/sanitizers.yml: 17 lines changed (vendored)

@@ -19,6 +19,7 @@ on:
       - CMakeLists.txt
       - conanfile.py
+      - conan.lock
       - "cmake/**"
       # We don't run sanitizer on code change, because it takes too long
       # - "src/**"
@@ -36,24 +37,22 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        include:
-          - sanitizer: tsan
-            compiler: gcc
-          - sanitizer: asan
-            compiler: gcc
-          - sanitizer: ubsan
-            compiler: gcc
+        compiler: ["gcc", "clang"]
+        sanitizer_ext: [".asan", ".tsan", ".ubsan"]
+        exclude:
+          # Currently, clang.tsan unit tests hang
+          - compiler: clang
+            sanitizer_ext: .tsan

     uses: ./.github/workflows/build_and_test.yml
     with:
       runs_on: heavy
       container: '{ "image": "ghcr.io/xrplf/clio-ci:latest" }'
       disable_cache: true
-      conan_profile: ${{ matrix.compiler }}.${{ matrix.sanitizer }}
+      conan_profile: ${{ matrix.compiler }}${{ matrix.sanitizer_ext }}
       build_type: Release
       static: false
       run_unit_tests: true
       run_integration_tests: false
       upload_clio_server: false
       targets: clio_tests clio_integration_tests
-      sanitizer: ${{ matrix.sanitizer }}
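With the full `compiler` × `sanitizer_ext` product minus the exclusion, the matrix now yields five Conan profiles instead of the previous three gcc-only entries (a worked expansion of the matrix above, not additional workflow code):

```text
gcc.asan, gcc.tsan, gcc.ubsan, clang.asan, clang.ubsan   # clang.tsan excluded (unit tests hang)
```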
.github/workflows/test_impl.yml: 13 lines changed (vendored)

@@ -33,22 +33,17 @@ on:
         required: true
         type: boolean

-      sanitizer:
-        description: Sanitizer to use
-        required: true
-        type: string
-
 jobs:
   unit_tests:
-    name: Unit testing ${{ inputs.container != '' && 'in container' || 'natively' }}
+    name: Unit testing
     runs-on: ${{ inputs.runs_on }}
     container: ${{ inputs.container != '' && fromJson(inputs.container) || null }}

     if: inputs.run_unit_tests

     env:
-      # TODO: remove when we have fixed all currently existing issues from sanitizers
-      SANITIZER_IGNORE_ERRORS: ${{ inputs.sanitizer != 'false' && inputs.sanitizer != 'ubsan' }}
+      # TODO: remove completely when we have fixed all currently existing issues with sanitizers
+      SANITIZER_IGNORE_ERRORS: ${{ endsWith(inputs.conan_profile, '.asan') || endsWith(inputs.conan_profile, '.tsan') }}

     steps:
       - name: Clean workdir
@@ -109,7 +104,7 @@ jobs:
           Reports are available as artifacts.

   integration_tests:
-    name: Integration testing ${{ inputs.container != '' && 'in container' || 'natively' }}
+    name: Integration testing
     runs-on: ${{ inputs.runs_on }}
     container: ${{ inputs.container != '' && fromJson(inputs.container) || null }}
.github/workflows/update_docker_ci.yml: 124 lines changed (vendored)

@@ -25,12 +25,16 @@ on:
 concurrency:
   # Only cancel in-progress jobs or runs for the current workflow - matches against branch & tags
   group: ${{ github.workflow }}-${{ github.ref }}
-  cancel-in-progress: true
+  # We want to execute all builds sequentially in develop
+  cancel-in-progress: false
+
+env:
+  GHCR_REPO: ghcr.io/${{ github.repository_owner }}

 jobs:
-  gcc:
-    name: Build and push GCC docker image
-    runs-on: [self-hosted, heavy]
+  gcc-amd64:
+    name: Build and push GCC docker image (amd64)
+    runs-on: heavy

     steps:
       - uses: actions/checkout@v4
@@ -42,30 +46,112 @@ jobs:
           files: "docker/compilers/gcc/**"

       - uses: ./.github/actions/build_docker_image
-        # Skipping this build for now, because CI environment is not stable
-        if: false && steps.changed-files.outputs.any_changed == 'true'
+        if: steps.changed-files.outputs.any_changed == 'true'
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
           DOCKERHUB_PW: ${{ secrets.DOCKERHUB_PW }}
         with:
           images: |
-            ghcr.io/xrplf/clio-gcc
+            ${{ env.GHCR_REPO }}/clio-gcc
             rippleci/clio_gcc
           push_image: ${{ github.event_name != 'pull_request' }}
           directory: docker/compilers/gcc
           tags: |
-            type=raw,value=latest
-            type=raw,value=12
-            type=raw,value=12.3.0
-            type=raw,value=${{ github.sha }}
-          platforms: linux/amd64,linux/arm64
+            type=raw,value=amd64-latest
+            type=raw,value=amd64-12
+            type=raw,value=amd64-12.3.0
+            type=raw,value=amd64-${{ github.sha }}
+          platforms: linux/amd64
           dockerhub_repo: rippleci/clio_gcc
           dockerhub_description: GCC compiler for XRPLF/clio.

+  gcc-arm64:
+    name: Build and push GCC docker image (arm64)
+    runs-on: heavy-arm64
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Get changed files
+        id: changed-files
+        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        with:
+          files: "docker/compilers/gcc/**"
+
+      - uses: ./.github/actions/build_docker_image
+        if: steps.changed-files.outputs.any_changed == 'true'
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
+          DOCKERHUB_PW: ${{ secrets.DOCKERHUB_PW }}
+        with:
+          images: |
+            ${{ env.GHCR_REPO }}/clio-gcc
+            rippleci/clio_gcc
+          push_image: ${{ github.event_name != 'pull_request' }}
+          directory: docker/compilers/gcc
+          tags: |
+            type=raw,value=arm64-latest
+            type=raw,value=arm64-12
+            type=raw,value=arm64-12.3.0
+            type=raw,value=arm64-${{ github.sha }}
+          platforms: linux/arm64
+          dockerhub_repo: rippleci/clio_gcc
+          dockerhub_description: GCC compiler for XRPLF/clio.
+
+  gcc-merge:
+    name: Merge and push multi-arch GCC docker image
+    runs-on: heavy
+    needs: [gcc-amd64, gcc-arm64]
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Get changed files
+        id: changed-files
+        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        with:
+          files: "docker/compilers/gcc/**"
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+
+      - name: Login to GitHub Container Registry
+        if: github.event_name != 'pull_request'
+        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
+        with:
+          registry: ghcr.io
+          username: ${{ github.repository_owner }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Login to DockerHub
+        if: github.event_name != 'pull_request'
+        uses: docker/login-action@v3
+        with:
+          username: ${{ secrets.DOCKERHUB_USER }}
+          password: ${{ secrets.DOCKERHUB_PW }}
+
+      - name: Make GHCR_REPO lowercase
+        run: |
+          echo "GHCR_REPO_LC=$(echo ${{env.GHCR_REPO}} | tr '[:upper:]' '[:lower:]')" >> ${GITHUB_ENV}
+
+      - name: Create and push multi-arch manifest
+        if: github.event_name != 'pull_request' && steps.changed-files.outputs.any_changed == 'true'
+        run: |
+          for image in ${{ env.GHCR_REPO_LC }}/clio-gcc rippleci/clio_gcc; do
+            docker buildx imagetools create \
+              -t $image:latest \
+              -t $image:12 \
+              -t $image:12.3.0 \
+              -t $image:${{ github.sha }} \
+              $image:arm64-latest \
+              $image:amd64-latest
+          done
+
   clang:
     name: Build and push Clang docker image
-    runs-on: [self-hosted, heavy]
+    runs-on: heavy

     steps:
       - uses: actions/checkout@v4
@@ -84,7 +170,7 @@ jobs:
           DOCKERHUB_PW: ${{ secrets.DOCKERHUB_PW }}
         with:
           images: |
-            ghcr.io/xrplf/clio-clang
+            ${{ env.GHCR_REPO }}/clio-clang
             rippleci/clio_clang
           push_image: ${{ github.event_name != 'pull_request' }}
           directory: docker/compilers/clang
@@ -98,7 +184,7 @@ jobs:

   tools:
     name: Build and push tools docker image
-    runs-on: [self-hosted, heavy]
+    runs-on: heavy

     steps:
       - uses: actions/checkout@v4
@@ -115,7 +201,7 @@ jobs:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         with:
           images: |
-            ghcr.io/xrplf/clio-tools
+            ${{ env.GHCR_REPO }}/clio-tools
           push_image: ${{ github.event_name != 'pull_request' }}
           directory: docker/tools
           tags: |
@@ -125,8 +211,8 @@ jobs:

   ci:
     name: Build and push CI docker image
-    runs-on: [self-hosted, heavy]
-    needs: [gcc, clang, tools]
+    runs-on: heavy
+    needs: [gcc-merge, clang, tools]

     steps:
       - uses: actions/checkout@v4
@@ -137,8 +223,8 @@ jobs:
           DOCKERHUB_PW: ${{ secrets.DOCKERHUB_PW }}
         with:
           images: |
+            ${{ env.GHCR_REPO }}/clio-ci
             rippleci/clio_ci
-            ghcr.io/xrplf/clio-ci
           push_image: ${{ github.event_name != 'pull_request' }}
           directory: docker/ci
           tags: |
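The `docker buildx imagetools create` step in `gcc-merge` stitches the per-architecture tags pushed by `gcc-amd64` and `gcc-arm64` into a single multi-arch manifest list, so consumers pull one tag on either architecture. One way to check the result after a push (a sketch; exact output shape depends on the registry):

```sh
# List the manifest entries; expect both linux/amd64 and linux/arm64 platforms.
docker buildx imagetools inspect ghcr.io/xrplf/clio-gcc:latest
```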
.github/workflows/upload_conan_deps.yml: 93 lines added (new file, vendored)

@@ -0,0 +1,93 @@
+name: Upload Conan Dependencies
+
+on:
+  schedule:
+    - cron: "0 9 * * 1-5"
+  workflow_dispatch:
+    inputs:
+      force_source_build:
+        description: "Force source build of all dependencies"
+        required: false
+        default: false
+        type: boolean
+  pull_request:
+    branches:
+      - develop
+    paths:
+      - .github/workflows/upload_conan_deps.yml
+      - .github/scripts/generate_conan_matrix.py
+      - conanfile.py
+      - conan.lock
+  push:
+    branches:
+      - develop
+    paths:
+      - .github/workflows/upload_conan_deps.yml
+      - .github/scripts/generate_conan_matrix.py
+      - conanfile.py
+      - conan.lock
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  generate-matrix:
+    runs-on: ubuntu-latest
+    outputs:
+      matrix: ${{ steps.set-matrix.outputs.matrix }}
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Calculate conan matrix
+        id: set-matrix
+        run: .github/scripts/generate_conan_matrix.py >> "${GITHUB_OUTPUT}"
+
+  upload-conan-deps:
+    name: Build ${{ matrix.compiler }}${{ matrix.sanitizer_ext }} ${{ matrix.build_type }}
+
+    needs: generate-matrix
+
+    strategy:
+      fail-fast: false
+      matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
+
+    runs-on: ${{ matrix.os }}
+    container: ${{ matrix.container != '' && fromJson(matrix.container) || null }}
+
+    env:
+      CONAN_PROFILE: ${{ matrix.compiler }}${{ matrix.sanitizer_ext }}
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Prepare runner
+        uses: ./.github/actions/prepare_runner
+        with:
+          disable_ccache: true
+
+      - name: Setup conan
+        if: runner.os == 'macOS'
+        uses: ./.github/actions/setup_conan_macos
+        with:
+          conan_files_dir: docker/ci/conan/
+
+      - name: Show conan profile
+        run: conan profile show --profile:all ${{ env.CONAN_PROFILE }}
+
+      - name: Run conan and cmake
+        uses: ./.github/actions/generate
+        with:
+          conan_profile: ${{ env.CONAN_PROFILE }}
+          # We check that everything builds fine from source on scheduled runs
+          # But we do build and upload packages with build=missing by default
+          force_conan_source_build: ${{ github.event_name == 'schedule' || github.event.inputs.force_source_build == 'true' }}
+          build_type: ${{ matrix.build_type }}
+
+      - name: Login to Conan
+        if: github.event_name != 'pull_request'
+        run: conan remote login -p ${{ secrets.CONAN_PASSWORD }} ripple ${{ secrets.CONAN_USERNAME }}
+
+      - name: Upload Conan packages
+        if: github.event_name != 'pull_request' && github.event_name != 'schedule'
+        run: conan upload "*" -r=ripple --confirm
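Besides the weekday cron, the workflow exposes a manual trigger through `workflow_dispatch`; one way to invoke it with the force flag set (a sketch using the GitHub CLI):

```sh
# Dispatch the workflow on develop with all dependencies rebuilt from source.
gh workflow run upload_conan_deps.yml --ref develop -f force_source_build=true
```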
@@ -11,7 +11,7 @@
 #
 # See https://pre-commit.com for more information
 # See https://pre-commit.com/hooks.html for more hooks
-exclude: ^docs/doxygen-awesome-theme/
+exclude: ^(docs/doxygen-awesome-theme/|conan\.lock$)

 repos:
   # `pre-commit sample-config` default hooks
@@ -1,21 +1,22 @@
 set(COMPILER_FLAGS
-    -pedantic
     -Wall
     -Wcast-align
     -Wdouble-promotion
-    -Wextra
     -Werror
+    -Wextra
     -Wformat=2
     -Wimplicit-fallthrough
     -Wmisleading-indentation
-    -Wno-narrowing
-    -Wno-deprecated-declarations
     -Wno-dangling-else
+    -Wno-deprecated-declarations
+    -Wno-narrowing
     -Wno-unused-but-set-variable
     -Wnon-virtual-dtor
     -Wnull-dereference
     -Wold-style-cast
+    -pedantic
+    -Wpedantic
     -Wunreachable-code
     -Wunused
     # FIXME: The following bunch are needed for gcc12 atm.
     -Wno-missing-requires
conan.lock: 57 lines added (new file)

@@ -0,0 +1,57 @@
+{
+  "version": "0.5",
+  "requires": [
+    "zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1750263732.782",
+    "xxhash/0.8.2#7856c968c985b2981b707ee8f2413b2b%1750263730.908",
+    "xrpl/2.5.0-rc1#e5897e048ea5712d2c71561c507d949d%1750263725.455",
+    "sqlite3/3.47.0#7a0904fd061f5f8a2366c294f9387830%1750263721.79",
+    "soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1750263717.455",
+    "re2/20230301#dfd6e2bf050eb90ddd8729cfb4c844a4%1750263715.145",
+    "rapidjson/cci.20220822#1b9d8c2256876a154172dc5cfbe447c6%1750263713.526",
+    "protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1750263698.841",
+    "openssl/1.1.1v#216374e4fb5b2e0f5ab1fb6f27b5b434%1750263685.885",
+    "nudb/2.0.8#63990d3e517038e04bf529eb8167f69f%1750263683.814",
+    "minizip/1.2.13#9e87d57804bd372d6d1e32b1871517a3%1750263681.745",
+    "lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1750263679.891",
+    "libuv/1.46.0#78565d142ac7102776256328a26cdf60%1750263677.819",
+    "libiconv/1.17#1ae2f60ab5d08de1643a22a81b360c59%1750257497.552",
+    "libbacktrace/cci.20210118#a7691bfccd8caaf66309df196790a5a1%1750263675.748",
+    "libarchive/3.7.6#e0453864b2a4d225f06b3304903cb2b7%1750263671.05",
+    "http_parser/2.9.4#98d91690d6fd021e9e624218a85d9d97%1750263668.751",
+    "gtest/1.14.0#f8f0757a574a8dd747d16af62d6eb1b7%1750263666.833",
+    "grpc/1.50.1#02291451d1e17200293a409410d1c4e1%1750263646.614",
+    "fmt/10.1.1#021e170cf81db57da82b5f737b6906c1%1750263644.741",
+    "date/3.0.3#cf28fe9c0aab99fe12da08aa42df65e1%1750263643.099",
+    "cassandra-cpp-driver/2.17.0#e50919efac8418c26be6671fd702540a%1750263632.157",
+    "c-ares/1.34.5#b78b91e7cfb1f11ce777a285bbf169c6%1750263630.06",
+    "bzip2/1.0.8#00b4a4658791c1f06914e087f0e792f5%1750263627.95",
+    "boost/1.83.0#8eb22f36ddfb61f54bbc412c4555bd66%1750263616.444",
+    "benchmark/1.8.3#1a2ce62c99e2b3feaa57b1f0c15a8c46%1724323740.181",
+    "abseil/20230802.1#f0f91485b111dc9837a68972cb19ca7b%1750263609.776"
+  ],
+  "build_requires": [
+    "zlib/1.3.1#b8bc2603263cf7eccbd6e17e66b0ed76%1750263732.782",
+    "protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1750263698.841",
+    "protobuf/3.21.9#64ce20e1d9ea24f3d6c504015d5f6fa8%1750263690.822",
+    "cmake/3.31.6#ed0e6c1d49bd564ce6fed1a19653b86d%1750263636.055",
+    "b2/5.3.2#7b5fabfe7088ae933fb3e78302343ea0%1750263614.565"
+  ],
+  "python_requires": [],
+  "overrides": {
+    "boost/1.83.0": [
+      null,
+      "boost/1.83.0#8eb22f36ddfb61f54bbc412c4555bd66"
+    ],
+    "protobuf/3.21.9": [
+      null,
+      "protobuf/3.21.12"
+    ],
+    "lz4/1.9.4": [
+      "lz4/1.10.0"
+    ],
+    "sqlite3/3.44.2": [
+      "sqlite3/3.47.0"
+    ]
+  },
+  "config_requires": []
+}
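The lockfile pins every dependency to an exact revision (`name/version#rrev%timestamp`), so CI resolves identical dependency graphs across runs. When a dependency changes, the file is regenerated with the same command the docs below add, run from the repository root:

```sh
conan lock create . -o '&:tests=True' -o '&:benchmark=True'
```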
conanfile.py: 19 lines changed

@@ -11,7 +11,6 @@ class ClioConan(ConanFile):
     settings = 'os', 'compiler', 'build_type', 'arch'
     options = {
         'static': [True, False],  # static linkage
-        'fPIC': [True, False],  # unused?
         'verbose': [True, False],
         'tests': [True, False],  # build unit tests; create `clio_tests` binary
         'integration_tests': [True, False],  # build integration tests; create `clio_integration_tests` binary
@@ -38,7 +37,6 @@ class ClioConan(ConanFile):

     default_options = {
         'static': False,
-        'fPIC': True,
         'verbose': False,
         'tests': False,
         'integration_tests': False,
@@ -89,21 +87,8 @@ class ClioConan(ConanFile):

     def generate(self):
         tc = CMakeToolchain(self)
-        tc.variables['verbose'] = self.options.verbose
-        tc.variables['static'] = self.options.static
-        tc.variables['tests'] = self.options.tests
-        tc.variables['integration_tests'] = self.options.integration_tests
-        tc.variables['coverage'] = self.options.coverage
-        tc.variables['lint'] = self.options.lint
-        tc.variables['docs'] = self.options.docs
-        tc.variables['packaging'] = self.options.packaging
-        tc.variables['benchmark'] = self.options.benchmark
-        tc.variables['snapshot'] = self.options.snapshot
-        tc.variables['time_trace'] = self.options.time_trace
-
-        if self.settings.compiler == 'clang' and self.settings.compiler.version == 16:
-            tc.extra_cxxflags = ["-DBOOST_ASIO_DISABLE_CONCEPTS"]
-
+        for option_name, option_value in self.options.items():
+            tc.variables[option_name] = option_value
         tc.generate()

     def build(self):
@@ -1,5 +1,4 @@
-# TODO: change this when we are able to push gcc image to ghcr.io
-FROM rippleci/clio_gcc:12.3.0 AS clio-gcc
+FROM ghcr.io/xrplf/clio-gcc:12.3.0 AS clio-gcc
 FROM ghcr.io/xrplf/clio-tools:latest AS clio-tools

 FROM ghcr.io/xrplf/clio-clang:16
@@ -33,10 +32,8 @@ RUN apt-get update \
 # Install packages
 RUN apt-get update \
     && apt-get install -y --no-install-recommends --no-install-suggests \
     bison \
-    clang-tidy-${LLVM_TOOLS_VERSION} \
-    clang-tools-${LLVM_TOOLS_VERSION} \
     flex \
     git \
     git-lfs \
     graphviz \
@@ -90,6 +87,9 @@ WORKDIR /root
 # Setup conan
 RUN conan remote add --index 0 ripple http://18.143.149.228:8081/artifactory/api/conan/dev

+WORKDIR /root/.conan2
+COPY conan/global.conf ./global.conf
+
 WORKDIR /root/.conan2/profiles

 COPY conan/clang.profile ./clang
@@ -1,5 +1,5 @@
 [settings]
-arch=x86_64
+arch={{detect_api.detect_arch()}}
 build_type=Release
 compiler=clang
 compiler.cppstd=20
@@ -1,5 +1,5 @@
 [settings]
-arch=x86_64
+arch={{detect_api.detect_arch()}}
 build_type=Release
 compiler=gcc
 compiler.cppstd=20
docker/ci/conan/global.conf: 3 lines added (new file)

@@ -0,0 +1,3 @@
+core.download:parallel={{os.cpu_count()}}
+core.upload:parallel={{os.cpu_count()}}
+tools.info.package_id:confs = ["tools.build:cflags", "tools.build:cxxflags", "tools.build:exelinkflags", "tools.build:sharedlinkflags"]
@@ -2,14 +2,19 @@

 {% set sanitizer_opt_map = {'asan': 'address', 'tsan': 'thread', 'ubsan': 'undefined'} %}
 {% set sanitizer = sanitizer_opt_map[sani] %}
+{% set sanitizer_build_flags_str = "-fsanitize=" ~ sanitizer ~ " -g -O1 -fno-omit-frame-pointer" %}
+{% set sanitizer_build_flags = sanitizer_build_flags_str.split(' ') %}
+{% set sanitizer_link_flags_str = "-fsanitize=" ~ sanitizer %}
+{% set sanitizer_link_flags = sanitizer_link_flags_str.split(' ') %}

 include({{ compiler }})

 [options]
-boost/*:extra_b2_flags="cxxflags=\"-fsanitize={{ sanitizer }}\" linkflags=\"-fsanitize={{ sanitizer }}\""
-boost/*:without_stacktrace=True
+boost/*:extra_b2_flags = "cxxflags=\"{{ sanitizer_build_flags_str }}\" linkflags=\"{{ sanitizer_link_flags_str }}\""
+boost/*:without_stacktrace = True

 [conf]
-tools.build:cflags+=["-fsanitize={{ sanitizer }}"]
-tools.build:cxxflags+=["-fsanitize={{ sanitizer }}"]
-tools.build:exelinkflags+=["-fsanitize={{ sanitizer }}"]
+tools.build:cflags += {{ sanitizer_build_flags }}
+tools.build:cxxflags += {{ sanitizer_build_flags }}
+tools.build:exelinkflags += {{ sanitizer_link_flags }}
+tools.build:sharedlinkflags += {{ sanitizer_link_flags }}
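Rendered with `sani = 'asan'`, the template expands to compile flags that keep usable stack traces (`-g -O1 -fno-omit-frame-pointer`) while link lines only carry `-fsanitize=address`. A sketch of the resulting `[conf]` fragment (jinja renders the split lists in Python list syntax):

```text
[conf]
tools.build:cflags += ['-fsanitize=address', '-g', '-O1', '-fno-omit-frame-pointer']
tools.build:cxxflags += ['-fsanitize=address', '-g', '-O1', '-fno-omit-frame-pointer']
tools.build:exelinkflags += ['-fsanitize=address']
tools.build:sharedlinkflags += ['-fsanitize=address']
```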
@@ -1,10 +1,19 @@
-FROM ubuntu:20.04 AS build
+ARG UBUNTU_VERSION=20.04
+
+ARG GCC_MAJOR_VERSION=12
+
+FROM ubuntu:$UBUNTU_VERSION AS build
+
+ARG UBUNTU_VERSION
+
+ARG GCC_MAJOR_VERSION
+ARG GCC_MINOR_VERSION=3
+ARG GCC_PATCH_VERSION=0
+ARG GCC_VERSION=${GCC_MAJOR_VERSION}.${GCC_MINOR_VERSION}.${GCC_PATCH_VERSION}
+ARG BUILD_VERSION=6

 ARG DEBIAN_FRONTEND=noninteractive
 ARG TARGETARCH
-ARG UBUNTU_VERSION=20.04
-ARG GCC_VERSION=12.3.0
-ARG BUILD_VERSION=2

 RUN apt-get update \
     && apt-get install -y --no-install-recommends --no-install-suggests \
@@ -18,18 +27,21 @@ RUN apt-get update \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*

 WORKDIR /
 RUN wget --progress=dot:giga https://gcc.gnu.org/pub/gcc/releases/gcc-$GCC_VERSION/gcc-$GCC_VERSION.tar.gz \
-    && tar xf gcc-$GCC_VERSION.tar.gz \
-    && cd /gcc-$GCC_VERSION && ./contrib/download_prerequisites
+    && tar xf gcc-$GCC_VERSION.tar.gz

-RUN mkdir /${TARGETARCH}-gcc-12
-WORKDIR /${TARGETARCH}-gcc-12
+WORKDIR /gcc-$GCC_VERSION
+RUN ./contrib/download_prerequisites
+
+RUN mkdir /gcc-build
+WORKDIR /gcc-build
 RUN /gcc-$GCC_VERSION/configure \
+    --with-pkgversion="clio-build-$BUILD_VERSION https://github.com/XRPLF/clio" \
     --enable-languages=c,c++ \
     --prefix=/usr \
     --with-gcc-major-version-only \
-    --program-suffix=-12 \
+    --program-suffix=-${GCC_MAJOR_VERSION} \
     --enable-shared \
     --enable-linker-build-id \
     --libexecdir=/usr/lib \
@@ -53,38 +65,54 @@ RUN /gcc-$GCC_VERSION/configure \
     --enable-cet \
     --disable-multilib \
     --without-cuda-driver \
-    --enable-checking=release \
-    && make -j "$(nproc)" \
-    && make install-strip DESTDIR=/gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION \
+    --enable-checking=release
+
+RUN make -j "$(nproc)"
+
+RUN make install-strip DESTDIR=/gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION \
     && mkdir -p /gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION/usr/share/gdb/auto-load/usr/lib64 \
-    && mv /gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION/usr/lib64/libstdc++.so.6.0.30-gdb.py /gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION/usr/share/gdb/auto-load/usr/lib64/libstdc++.so.6.0.30-gdb.py
+    && mv \
+        /gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION/usr/lib64/libstdc++.so.6.0.30-gdb.py \
+        /gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION/usr/share/gdb/auto-load/usr/lib64/libstdc++.so.6.0.30-gdb.py

 # Generate deb
 WORKDIR /
 COPY control.m4 /
-COPY ld.so.conf /gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION/etc/ld.so.conf.d/1-gcc-12.conf
+COPY ld.so.conf /gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION/etc/ld.so.conf.d/1-gcc-${GCC_MAJOR_VERSION}.conf

 RUN mkdir /gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION/DEBIAN \
-    && m4 -P -DUBUNTU_VERSION=$UBUNTU_VERSION -DVERSION=$GCC_VERSION-$BUILD_VERSION -DTARGETARCH=$TARGETARCH control.m4 > /gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION/DEBIAN/control \
-    && dpkg-deb --build --root-owner-group /gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION /gcc12.deb
+    && m4 \
+        -P \
+        -DUBUNTU_VERSION=$UBUNTU_VERSION \
+        -DVERSION=$GCC_VERSION-$BUILD_VERSION \
+        -DTARGETARCH=$TARGETARCH \
+        control.m4 > /gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION/DEBIAN/control \
+    && dpkg-deb \
+        --build \
+        --root-owner-group \
+        /gcc-$GCC_VERSION-$BUILD_VERSION-ubuntu-$UBUNTU_VERSION \
+        /gcc${GCC_MAJOR_VERSION}.deb

 # Create final image
-FROM ubuntu:20.04
-COPY --from=build /gcc12.deb /
+FROM ubuntu:$UBUNTU_VERSION

-# Make gcc-12 available but also leave gcc12.deb for others to copy if needed
+ARG GCC_MAJOR_VERSION
+
+COPY --from=build /gcc${GCC_MAJOR_VERSION}.deb /
+
+# Install gcc-${GCC_MAJOR_VERSION}, but also leave gcc${GCC_MAJOR_VERSION}.deb for others to copy if needed
 RUN apt-get update \
     && apt-get install -y --no-install-recommends --no-install-suggests \
     binutils \
     libc6-dev \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/* \
-    && dpkg -i /gcc12.deb
+    && dpkg -i /gcc${GCC_MAJOR_VERSION}.deb

-RUN update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-12 100 \
-    && update-alternatives --install /usr/bin/c++ c++ /usr/bin/g++-12 100 \
-    && update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 100 \
-    && update-alternatives --install /usr/bin/cc cc /usr/bin/gcc-12 100 \
-    && update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-12 100 \
-    && update-alternatives --install /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-12 100 \
-    && update-alternatives --install /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-12 100
+RUN update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-${GCC_MAJOR_VERSION} 100 \
+    && update-alternatives --install /usr/bin/c++ c++ /usr/bin/g++-${GCC_MAJOR_VERSION} 100 \
+    && update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-${GCC_MAJOR_VERSION} 100 \
+    && update-alternatives --install /usr/bin/cc cc /usr/bin/gcc-${GCC_MAJOR_VERSION} 100 \
+    && update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-${GCC_MAJOR_VERSION} 100 \
+    && update-alternatives --install /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-${GCC_MAJOR_VERSION} 100 \
+    && update-alternatives --install /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-${GCC_MAJOR_VERSION} 100
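With the version string split into build args, a different GCC can be produced without editing the Dockerfile; a sketch of such an invocation (the overridden value is illustrative, the defaults already encode 12.3.0):

```sh
# Build the image for the local architecture, overriding only the patch level.
docker build \
  --build-arg GCC_PATCH_VERSION=1 \
  -t clio-gcc:12.3.1 \
  docker/compilers/gcc
```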
@@ -1,3 +1,3 @@
-# gcc compiler
+# GCC compiler

-This image contains gcc compiler to build <https://github.com/XRPLF/clio>.
+This image contains GCC compiler to build <https://github.com/XRPLF/clio>.
@@ -2,5 +2,6 @@ Package: gcc-12-ubuntu-UBUNTUVERSION
 Version: VERSION
 Architecture: TARGETARCH
 Maintainer: Alex Kremer <akremer@ripple.com>
-Description: Gcc VERSION build for ubuntu UBUNTUVERSION
+Uploaders: Ayaz Salikhov <asalikhov@ripple.com>
+Description: GCC VERSION build for ubuntu UBUNTUVERSION
 Depends: binutils, libc6-dev
@@ -2,7 +2,7 @@ services:
   clio_develop:
     image: ghcr.io/xrplf/clio-ci:latest
     volumes:
-      - clio_develop_conan_data:/root/.conan/data
+      - clio_develop_conan_data:/root/.conan2/p
       - clio_develop_ccache:/root/.ccache
       - ../../:/root/clio
       - clio_develop_build:/root/clio/build_docker
@@ -19,7 +19,7 @@

### Conan Configuration

Clio requires `compiler.cppstd=20` in your Conan profile (`~/.conan/profiles/default`).
Clio requires `compiler.cppstd=20` in your Conan profile (`~/.conan2/profiles/default`).

> [!NOTE]
> Although Clio is built using C++23, it's required to set `compiler.cppstd=20` for the time being, as some of Clio's dependencies are not yet capable of building under C++23.

@@ -28,7 +28,7 @@ Clio requires `compiler.cppstd=20` in your Conan profile (`~/.conan/profiles/def

```text
[settings]
arch=armv8
arch={{detect_api.detect_arch()}}
build_type=Release
compiler=apple-clang
compiler.cppstd=20
@@ -37,14 +37,14 @@ compiler.version=16
os=Macos

[conf]
tools.build:cxxflags+=["-Wno-missing-template-arg-list-after-template-kw"]
grpc/1.50.1:tools.build:cxxflags+=["-Wno-missing-template-arg-list-after-template-kw"]
```

**Linux gcc-12 example**:

```text
[settings]
arch=x86_64
arch={{detect_api.detect_arch()}}
build_type=Release
compiler=gcc
compiler.cppstd=20
@@ -56,6 +56,12 @@ os=Linux
tools.build:compiler_executables={'c': '/usr/bin/gcc-12', 'cpp': '/usr/bin/g++-12'}
```

Add the following to the `~/.conan2/global.conf` file:

```text
tools.info.package_id:confs = ["tools.build:cflags", "tools.build:cxxflags", "tools.build:exelinkflags", "tools.build:sharedlinkflags"]
```

#### Artifactory

Make sure Artifactory is set up with Conan.
@@ -67,12 +73,27 @@ conan remote add --index 0 ripple http://18.143.149.228:8081/artifactory/api/con
Now you should be able to download the prebuilt `xrpl` package on some platforms.

> [!NOTE]
> You may need to edit the `~/.conan/remotes.json` file to ensure that this newly added artifactory is listed last. Otherwise, you could see compilation errors when building the project with gcc version 13 (or newer).
> You may need to edit the `~/.conan2/remotes.json` file to ensure that this newly added artifactory is listed last. Otherwise, you could see compilation errors when building the project with gcc version 13 (or newer).

Remove old packages you may have cached.
Remove old packages you may have cached interactively.

```sh
conan remove -f xrpl
conan remove xrpl
```

#### Conan lockfile

To achieve reproducible dependencies, we use a [Conan lockfile](https://docs.conan.io/2/tutorial/versioning/lockfiles.html).

The `conan.lock` file in the repository contains a "snapshot" of the current dependencies.
It is implicitly used when running `conan` commands; you don't need to specify it.

You have to update this file every time you add a new dependency or change a revision or version of an existing dependency.

To do that, run the following command in the repository root:

```bash
conan lock create . -o '&:tests=True' -o '&:benchmark=True'
```

## Building Clio
@@ -152,24 +173,24 @@ If you wish to develop against a `rippled` instance running in standalone mode t

Sometimes, during development, you need to build against a custom version of `libxrpl`. (For example, you may be developing compatibility for a proposed amendment that is not yet merged to the main `rippled` codebase.) To build Clio with compatibility for a custom fork or branch of `rippled`, follow these steps:

1. First, pull/clone the appropriate `rippled` fork and switch to the branch you want to build.
   The following example uses an in-development build with [XLS-33d Multi-Purpose Tokens](https://github.com/XRPLF/XRPL-Standards/tree/master/XLS-0033d-multi-purpose-tokens):
1. First, pull/clone the appropriate `rippled` version and switch to the branch you want to build.
   The following example uses the `2.5.0-rc1` tag of `rippled` on the main branch:

   ```sh
   git clone https://github.com/shawnxie999/rippled/
   git clone https://github.com/XRPLF/rippled/
   cd rippled
   git switch mpt-1.1
   git checkout 2.5.0-rc1
   ```

2. Export a custom package to your local Conan store using a user/channel:

   ```sh
   conan export . my/feature
   conan export . --user=my --channel=feature
   ```

3. Patch your local Clio build to use the right package.

   Edit `conanfile.py` (from the Clio repository root). Replace the `xrpl` requirement with the custom package version from the previous step. This must also include the current version number from your `rippled` branch. For example:
   Edit `conanfile.py` in the Clio repository root. Replace the `xrpl` requirement with the custom package version from the previous step. This must also include the current version number from your `rippled` branch. For example:

   ```py
   # ... (excerpt from conanfile.py)
@@ -180,7 +201,7 @@ Sometimes, during development, you need to build against a custom version of `li
   'protobuf/3.21.9',
   'grpc/1.50.1',
   'openssl/1.1.1v',
   'xrpl/2.3.0-b1@my/feature', # Update this line
   'xrpl/2.5.0-rc1@my/feature', # Use your exported version here
   'zlib/1.3.1',
   'libbacktrace/cci.20210118'
   ]

@@ -22,7 +22,6 @@
#include "data/BackendInterface.hpp"
#include "etl/CacheLoader.hpp"
#include "etl/CorruptionDetector.hpp"
#include "etl/ETLState.hpp"
#include "etl/LoadBalancer.hpp"
#include "etl/NetworkValidatedLedgersInterface.hpp"
#include "etl/SystemState.hpp"

@@ -38,6 +37,7 @@
#include "etlng/LoadBalancer.hpp"
#include "etlng/LoadBalancerInterface.hpp"
#include "etlng/impl/LedgerPublisher.hpp"
#include "etlng/impl/MonitorProvider.hpp"
#include "etlng/impl/TaskManagerProvider.hpp"
#include "etlng/impl/ext/Cache.hpp"
#include "etlng/impl/ext/Core.hpp"

@@ -86,6 +86,7 @@ ETLService::makeETLService(
    );

    auto state = std::make_shared<etl::SystemState>();
    state->isStrictReadonly = config.get<bool>("read_only");

    auto fetcher = std::make_shared<etl::impl::LedgerFetcher>(backend, balancer);
    auto extractor = std::make_shared<etlng::impl::Extractor>(fetcher);

@@ -93,6 +94,9 @@ ETLService::makeETLService(
    auto cacheLoader = std::make_shared<etl::CacheLoader<>>(config, backend, backend->cache());
    auto cacheUpdater = std::make_shared<etlng::impl::CacheUpdater>(backend->cache());
    auto amendmentBlockHandler = std::make_shared<etlng::impl::AmendmentBlockHandler>(ctx, *state);
    auto monitorProvider = std::make_shared<etlng::impl::MonitorProvider>();

    backend->setCorruptionDetector(CorruptionDetector{*state, backend->cache()});

    auto loader = std::make_shared<etlng::impl::Loader>(
        backend,

@@ -104,7 +108,8 @@ ETLService::makeETLService(
            etlng::impl::NFTExt{backend},
            etlng::impl::MPTExt{backend}
        ),
        amendmentBlockHandler
        amendmentBlockHandler,
        state
    );

    auto taskManagerProvider = std::make_shared<etlng::impl::TaskManagerProvider>(*ledgers, extractor, loader);

@@ -122,6 +127,7 @@ ETLService::makeETLService(
            loader,  // loader itself
            loader,  // initial load observer
            taskManagerProvider,
            monitorProvider,
            state
        );
    } else {

@@ -346,7 +352,7 @@ ETLService::doWork()
    worker_ = std::thread([this]() {
        beast::setCurrentThreadName("ETLService worker");

        if (state_.isReadOnly) {
        if (state_.isStrictReadonly) {
            monitorReadOnly();
        } else {
            monitor();

@@ -373,7 +379,7 @@ ETLService::ETLService(
{
    startSequence_ = config.maybeValue<uint32_t>("start_sequence");
    finishSequence_ = config.maybeValue<uint32_t>("finish_sequence");
    state_.isReadOnly = config.get<bool>("read_only");
    state_.isStrictReadonly = config.get<bool>("read_only");
    extractorThreads_ = config.get<uint32_t>("extractor_threads");

    // This should probably be done in the backend factory but we don't have state available until here

@@ -239,7 +239,7 @@ public:

    result["etl_sources"] = loadBalancer_->toJson();
    result["is_writer"] = static_cast<int>(state_.isWriting);
    result["read_only"] = static_cast<int>(state_.isReadOnly);
    result["read_only"] = static_cast<int>(state_.isStrictReadonly);
    auto last = ledgerPublisher_.getLastPublish();
    if (last.time_since_epoch().count() != 0)
        result["last_publish_age_seconds"] = std::to_string(ledgerPublisher_.lastPublishAgeSeconds());

@@ -177,14 +177,14 @@ public:

    /**
     * @brief Load the initial ledger, writing data to the queue.
     * @note This function will retry indefinitely until the ledger is downloaded.
     * @note This function will retry indefinitely until the ledger is downloaded or the download is cancelled.
     *
     * @param sequence Sequence of ledger to download
     * @param observer The observer to notify of progress
     * @param retryAfter Time to wait between retries (2 seconds by default)
     * @return A std::vector<std::string> The ledger data
     * @return A std::expected with ledger edge keys on success, or InitialLedgerLoadError on failure
     */
    std::vector<std::string>
    etlng::InitialLedgerLoadResult
    loadInitialLedger(
        [[maybe_unused]] uint32_t sequence,
        [[maybe_unused]] etlng::InitialLoadObserverInterface& observer,

@@ -37,7 +37,7 @@ struct SystemState {
     * In strict read-only mode, the process will never attempt to become the ETL writer, and will only publish ledgers
     * as they are written to the database.
     */
    util::prometheus::Bool isReadOnly = PrometheusService::boolMetric(
    util::prometheus::Bool isStrictReadonly = PrometheusService::boolMetric(
        "read_only",
        util::prometheus::Labels{},
        "Whether the process is in strict read-only mode"

@@ -242,8 +242,8 @@ public:
        }

        prev = cur->key;
        static constexpr std::size_t kLOG_INTERVAL = 100000;
        if (numWrites % kLOG_INTERVAL == 0 && numWrites != 0)
        static constexpr std::size_t kLOG_STRIDE = 100000;
        if (numWrites % kLOG_STRIDE == 0 && numWrites != 0)
            LOG(log_.info()) << "Wrote " << numWrites << " book successors";
    }

@@ -35,13 +35,13 @@
#include "etlng/LoadBalancerInterface.hpp"
#include "etlng/LoaderInterface.hpp"
#include "etlng/MonitorInterface.hpp"
#include "etlng/MonitorProviderInterface.hpp"
#include "etlng/TaskManagerProviderInterface.hpp"
#include "etlng/impl/AmendmentBlockHandler.hpp"
#include "etlng/impl/CacheUpdater.hpp"
#include "etlng/impl/Extraction.hpp"
#include "etlng/impl/LedgerPublisher.hpp"
#include "etlng/impl/Loading.hpp"
#include "etlng/impl/Monitor.hpp"
#include "etlng/impl/Registry.hpp"
#include "etlng/impl/Scheduling.hpp"
#include "etlng/impl/TaskManager.hpp"

@@ -57,6 +57,7 @@
#include <boost/json/object.hpp>
#include <boost/signals2/connection.hpp>
#include <fmt/core.h>
#include <xrpl/protocol/LedgerHeader.h>

#include <chrono>
#include <cstddef>

@@ -82,6 +83,7 @@ ETLService::ETLService(
    std::shared_ptr<LoaderInterface> loader,
    std::shared_ptr<InitialLoadObserverInterface> initialLoadObserver,
    std::shared_ptr<etlng::TaskManagerProviderInterface> taskManagerProvider,
    std::shared_ptr<etlng::MonitorProviderInterface> monitorProvider,
    std::shared_ptr<etl::SystemState> state
)
    : ctx_(std::move(ctx))

@@ -96,9 +98,20 @@ ETLService::ETLService(
    , loader_(std::move(loader))
    , initialLoadObserver_(std::move(initialLoadObserver))
    , taskManagerProvider_(std::move(taskManagerProvider))
    , monitorProvider_(std::move(monitorProvider))
    , state_(std::move(state))
    , startSequence_(config.get().maybeValue<uint32_t>("start_sequence"))
    , finishSequence_(config.get().maybeValue<uint32_t>("finish_sequence"))
{
    LOG(log_.info()) << "Creating ETLng...";
    ASSERT(not state_->isWriting, "ETL should never start in writer mode");

    if (startSequence_.has_value())
        LOG(log_.info()) << "Start sequence: " << *startSequence_;

    if (finishSequence_.has_value())
        LOG(log_.info()) << "Finish sequence: " << *finishSequence_;

    LOG(log_.info()) << "Starting in " << (state_->isStrictReadonly ? "STRICT READONLY MODE" : "WRITE MODE");
}

ETLService::~ETLService()

@@ -112,12 +125,7 @@ ETLService::run()
{
    LOG(log_.info()) << "Running ETLng...";

    // TODO: write-enabled node should start in readonly and do the 10 second dance to become a writer
    mainLoop_.emplace(ctx_.execute([this] {
        state_->isWriting =
            not state_->isReadOnly;  // TODO: this is now needed because we don't have a mechanism for readonly or
                                     // ETL writer node. remove later in favor of real mechanism

        auto const rng = loadInitialLedgerIfNeeded();

        LOG(log_.info()) << "Waiting for next ledger to be validated by network...";

@@ -129,15 +137,18 @@ ETLService::run()
            return;
        }

        ASSERT(rng.has_value(), "Ledger range can't be null");
        if (not rng.has_value()) {
            LOG(log_.warn()) << "Initial ledger download got cancelled - stopping ETL service";
            return;
        }

        auto const nextSequence = rng->maxSequence + 1;

        LOG(log_.debug()) << "Database is populated. Starting monitor loop. sequence = " << nextSequence;
        startMonitor(nextSequence);

        // TODO: we only want to run the full ETL task man if we are POSSIBLY a write node
        // but definitely not in strict readonly
        if (not state_->isReadOnly)
        // If we are a writer as the result of loading the initial ledger - start loading
        if (state_->isWriting)
            startLoading(nextSequence);
    }));
}

@@ -147,6 +158,8 @@ ETLService::stop()
{
    LOG(log_.info()) << "Stop called";

    if (mainLoop_)
        mainLoop_->wait();
    if (taskMan_)
        taskMan_->stop();
    if (monitor_)

@@ -160,7 +173,7 @@ ETLService::getInfo() const

    result["etl_sources"] = balancer_->toJson();
    result["is_writer"] = static_cast<int>(state_->isWriting);
    result["read_only"] = static_cast<int>(state_->isReadOnly);
    result["read_only"] = static_cast<int>(state_->isStrictReadonly);
    auto last = publisher_->getLastPublish();
    if (last.time_since_epoch().count() != 0)
        result["last_publish_age_seconds"] = std::to_string(publisher_->lastPublishAgeSeconds());

@@ -196,21 +209,40 @@ ETLService::loadInitialLedgerIfNeeded()
{
    auto rng = backend_->hardFetchLedgerRangeNoThrow();
    if (not rng.has_value()) {
        LOG(log_.info()) << "Database is empty. Will download a ledger from the network.";
        ASSERT(
            not state_->isStrictReadonly,
            "Database is empty but this node is in strict readonly mode. Can't write initial ledger."
        );

        LOG(log_.info()) << "Waiting for next ledger to be validated by network...";
        if (auto const mostRecentValidated = ledgers_->getMostRecent(); mostRecentValidated.has_value()) {
            auto const seq = *mostRecentValidated;
            LOG(log_.info()) << "Ledger " << seq << " has been validated. Downloading... ";
        LOG(log_.info()) << "Database is empty. Will download a ledger from the network.";
        state_->isWriting = true;  // immediately become writer as the db is empty

        auto const getMostRecent = [this]() {
            LOG(log_.info()) << "Waiting for next ledger to be validated by network...";
            return ledgers_->getMostRecent();
        };

        if (auto const maybeSeq = startSequence_.or_else(getMostRecent); maybeSeq.has_value()) {
            auto const seq = *maybeSeq;
            LOG(log_.info()) << "Starting from sequence " << seq
                             << ". Initial ledger download and extraction can take a while...";

            auto [ledger, timeDiff] = ::util::timed<std::chrono::duration<double>>([this, seq]() {
                return extractor_->extractLedgerOnly(seq).and_then([this, seq](auto&& data) {
                    // TODO: loadInitialLedger in balancer should be called fetchEdgeKeys or similar
                    data.edgeKeys = balancer_->loadInitialLedger(seq, *initialLoadObserver_);
                return extractor_->extractLedgerOnly(seq).and_then(
                    [this, seq](auto&& data) -> std::optional<ripple::LedgerHeader> {
                        // TODO: loadInitialLedger in balancer should be called fetchEdgeKeys or similar
                        auto res = balancer_->loadInitialLedger(seq, *initialLoadObserver_);
                        if (not res.has_value() and res.error() == InitialLedgerLoadError::Cancelled) {
                            LOG(log_.debug()) << "Initial ledger load got cancelled";
                            return std::nullopt;
                        }

                    // TODO: this should be interruptible for graceful shutdown
                    return loader_->loadInitialLedger(data);
                });
                        ASSERT(res.has_value(), "Initial ledger retry logic failed");
                        data.edgeKeys = std::move(res).value();

                        return loader_->loadInitialLedger(data);
                    }
                );
            });

            if (not ledger.has_value()) {
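
The hunk above replaces an explicit "wait for a validated ledger" branch with `std::optional::or_else`, so a configured `start_sequence` takes priority and the network lookup only runs as a fallback. A minimal sketch of that C++23 pattern (names here are illustrative, not from the diff):

```cpp
#include <cstdint>
#include <optional>

// Stand-in for ledgers_->getMostRecent(): only called when no override is set.
std::optional<uint32_t> mostRecentValidated() {
    return 12345u;
}

int main() {
    std::optional<uint32_t> startSequence;  // empty unless set via config

    // or_else returns startSequence if it holds a value,
    // otherwise the result of the fallback callable.
    auto const seq = startSequence.or_else(mostRecentValidated);
    return seq.has_value() ? 0 : 1;
}
```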

@@ -238,28 +270,64 @@ ETLService::loadInitialLedgerIfNeeded()
void
ETLService::startMonitor(uint32_t seq)
{
    monitor_ = std::make_unique<impl::Monitor>(ctx_, backend_, ledgers_, seq);
    monitorSubscription_ = monitor_->subscribe([this](uint32_t seq) {
        log_.info() << "MONITOR got new seq from db: " << seq;
    monitor_ = monitorProvider_->make(ctx_, backend_, ledgers_, seq);

    monitorNewSeqSubscription_ = monitor_->subscribeToNewSequence([this](uint32_t seq) {
        LOG(log_.info()) << "ETLService (via Monitor) got new seq from db: " << seq;

        if (state_->writeConflict) {
            LOG(log_.info()) << "Got a write conflict; Giving up writer seat immediately";
            giveUpWriter();
        }

        // FIXME: is this the best way?
        if (not state_->isWriting) {
            auto const diff = data::synchronousAndRetryOnTimeout([this, seq](auto yield) {
                return backend_->fetchLedgerDiff(seq, yield);
            });

            cacheUpdater_->update(seq, diff);
            backend_->updateRange(seq);
        }

        publisher_->publish(seq, {});
    });

    monitorDbStalledSubscription_ = monitor_->subscribeToDbStalled([this]() {
        LOG(log_.warn()) << "ETLService received DbStalled signal from Monitor";
        if (not state_->isStrictReadonly and not state_->isWriting)
            attemptTakeoverWriter();
    });

    monitor_->run();
}

void
ETLService::startLoading(uint32_t seq)
{
    taskMan_ = taskManagerProvider_->make(ctx_, *monitor_, seq);
    ASSERT(not state_->isStrictReadonly, "This should only happen on writer nodes");
    taskMan_ = taskManagerProvider_->make(ctx_, *monitor_, seq, finishSequence_);
    taskMan_->run(config_.get().get<std::size_t>("extractor_threads"));
}

void
ETLService::attemptTakeoverWriter()
{
    ASSERT(not state_->isStrictReadonly, "This should only happen on writer nodes");
    auto rng = backend_->hardFetchLedgerRangeNoThrow();
    ASSERT(rng.has_value(), "Ledger range can't be null");

    state_->isWriting = true;  // switch to writer
    LOG(log_.info()) << "Taking over the ETL writer seat";
    startLoading(rng->maxSequence + 1);
}

void
ETLService::giveUpWriter()
{
    ASSERT(not state_->isStrictReadonly, "This should only happen on writer nodes");
    state_->isWriting = false;
    state_->writeConflict = false;
    taskMan_ = nullptr;
}

} // namespace etlng
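
Taken together, these handlers form a small writer-seat protocol: a non-strict reader takes over writing when the database stalls, and yields the seat again on a write conflict. A condensed, hypothetical restatement of just the transitions (the real logic lives in the methods above; only the flag names mirror `etl::SystemState`):

```cpp
// Standalone sketch; NodeState and the free functions are assumptions.
struct NodeState {
    bool isStrictReadonly = false;
    bool isWriting = false;
    bool writeConflict = false;
};

void onDbStalled(NodeState& s) {
    // attemptTakeoverWriter(): claim the seat if we are allowed to write
    if (not s.isStrictReadonly and not s.isWriting)
        s.isWriting = true;
}

void onNewSequence(NodeState& s) {
    // giveUpWriter(): another node committed first, fall back to following
    if (s.writeConflict) {
        s.isWriting = false;
        s.writeConflict = false;
    }
}
```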

@@ -35,6 +35,7 @@
#include "etlng/LoadBalancerInterface.hpp"
#include "etlng/LoaderInterface.hpp"
#include "etlng/MonitorInterface.hpp"
#include "etlng/MonitorProviderInterface.hpp"
#include "etlng/TaskManagerInterface.hpp"
#include "etlng/TaskManagerProviderInterface.hpp"
#include "etlng/impl/AmendmentBlockHandler.hpp"

@@ -42,7 +43,6 @@
#include "etlng/impl/Extraction.hpp"
#include "etlng/impl/LedgerPublisher.hpp"
#include "etlng/impl/Loading.hpp"
#include "etlng/impl/Monitor.hpp"
#include "etlng/impl/Registry.hpp"
#include "etlng/impl/Scheduling.hpp"
#include "etlng/impl/TaskManager.hpp"

@@ -106,12 +106,17 @@ class ETLService : public ETLServiceInterface {
    std::shared_ptr<LoaderInterface> loader_;
    std::shared_ptr<InitialLoadObserverInterface> initialLoadObserver_;
    std::shared_ptr<etlng::TaskManagerProviderInterface> taskManagerProvider_;
    std::shared_ptr<etlng::MonitorProviderInterface> monitorProvider_;
    std::shared_ptr<etl::SystemState> state_;

    std::optional<uint32_t> startSequence_;
    std::optional<uint32_t> finishSequence_;

    std::unique_ptr<MonitorInterface> monitor_;
    std::unique_ptr<TaskManagerInterface> taskMan_;

    boost::signals2::scoped_connection monitorSubscription_;
    boost::signals2::scoped_connection monitorNewSeqSubscription_;
    boost::signals2::scoped_connection monitorDbStalledSubscription_;

    std::optional<util::async::AnyOperation<void>> mainLoop_;

@@ -131,6 +136,7 @@ public:
     * @param loader Interface for loading data
     * @param initialLoadObserver The observer for initial data loading
     * @param taskManagerProvider The provider of the task manager instance
     * @param monitorProvider The provider of the monitor instance
     * @param state System state tracking object
     */
    ETLService(

@@ -146,6 +152,7 @@ public:
        std::shared_ptr<LoaderInterface> loader,
        std::shared_ptr<InitialLoadObserverInterface> initialLoadObserver,
        std::shared_ptr<etlng::TaskManagerProviderInterface> taskManagerProvider,
        std::shared_ptr<etlng::MonitorProviderInterface> monitorProvider,
        std::shared_ptr<etl::SystemState> state
    );

@@ -173,7 +180,6 @@ public:
    lastCloseAgeSeconds() const override;

private:
    // TODO: this better be std::expected
    std::optional<data::LedgerRange>
    loadInitialLedgerIfNeeded();

@@ -182,6 +188,12 @@ private:

    void
    startLoading(uint32_t seq);

    void
    attemptTakeoverWriter();

    void
    giveUpWriter();
};

} // namespace etlng

@@ -210,30 +210,32 @@ LoadBalancer::LoadBalancer(
    }
}

std::vector<std::string>
InitialLedgerLoadResult
LoadBalancer::loadInitialLedger(
    uint32_t sequence,
    etlng::InitialLoadObserverInterface& loadObserver,
    std::chrono::steady_clock::duration retryAfter
)
{
    std::vector<std::string> response;
    InitialLedgerLoadResult response;

    execute(
        [this, &response, &sequence, &loadObserver](auto& source) {
            auto [data, res] = source->loadInitialLedger(sequence, downloadRanges_, loadObserver);
            auto res = source->loadInitialLedger(sequence, downloadRanges_, loadObserver);

            if (!res) {
            if (not res.has_value() and res.error() == InitialLedgerLoadError::Errored) {
                LOG(log_.error()) << "Failed to download initial ledger."
                                  << " Sequence = " << sequence << " source = " << source->toString();
            } else {
                response = std::move(data);
                return false;  // should retry on error
            }

            return res;
            response = std::move(res);  // cancelled or data received
            return true;
        },
        sequence,
        retryAfter
    );

    return response;
}
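
The callback contract in `execute` above is worth spelling out: returning `false` asks the balancer to retry with another source after `retryAfter`, while returning `true` (data received or load cancelled) ends the loop. A simplified, self-contained sketch of that pattern (the real `execute` also handles timing and logging):

```cpp
#include <functional>
#include <vector>

// Illustrative only: rotate through sources until one attempt reports "done".
template <typename Source>
void executeUntilDone(std::vector<Source>& sources, std::function<bool(Source&)> attempt) {
    while (true) {
        for (auto& source : sources) {
            if (attempt(source))
                return;  // true: success or cancellation, stop retrying
            // false: this source errored, try the next one
        }
    }
}
```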
@@ -49,6 +49,7 @@
#include <concepts>
#include <cstdint>
#include <expected>
#include <functional>
#include <memory>
#include <optional>
#include <string>

@@ -183,14 +184,14 @@ public:

    /**
     * @brief Load the initial ledger, writing data to the queue.
     * @note This function will retry indefinitely until the ledger is downloaded.
     * @note This function will retry indefinitely until the ledger is downloaded or the download is cancelled.
     *
     * @param sequence Sequence of ledger to download
     * @param observer The observer to notify of progress
     * @param retryAfter Time to wait between retries (2 seconds by default)
     * @return A std::vector<std::string> The ledger data
     * @return A std::expected with ledger edge keys on success, or InitialLedgerLoadError on failure
     */
    std::vector<std::string>
    InitialLedgerLoadResult
    loadInitialLedger(
        uint32_t sequence,
        etlng::InitialLoadObserverInterface& observer,

@@ -39,6 +39,20 @@

namespace etlng {

/**
 * @brief Represents possible errors for initial ledger load
 */
enum class InitialLedgerLoadError {
    Cancelled, /*< Indicating the initial load got cancelled by user */
    Errored,   /*< Indicating some error happened during initial ledger load */
};

/**
 * @brief The result type of the initial ledger load
 * @note The successful value represents edge keys
 */
using InitialLedgerLoadResult = std::expected<std::vector<std::string>, InitialLedgerLoadError>;
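
Since `InitialLedgerLoadResult` is a `std::expected`, callers branch on `has_value()` and then on `error()`. A small hypothetical consumer (only the enum and alias above are real; the function is illustrative):

```cpp
#include <expected>
#include <string>
#include <vector>

// Assumes the declarations above are visible via the etlng header.
int countEdgeKeys(etlng::InitialLedgerLoadResult result) {
    if (result.has_value())
        return static_cast<int>(result->size());  // edge keys downloaded

    // Cancellation is a clean shutdown path; anything else is a real failure.
    return result.error() == etlng::InitialLedgerLoadError::Cancelled ? 0 : -1;
}
```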

/**
 * @brief An interface for LoadBalancer
 */
@@ -52,14 +66,14 @@ public:

    /**
     * @brief Load the initial ledger, writing data to the queue.
     * @note This function will retry indefinitely until the ledger is downloaded.
     * @note This function will retry indefinitely until the ledger is downloaded or the download is cancelled.
     *
     * @param sequence Sequence of ledger to download
     * @param loader InitialLoadObserverInterface implementation
     * @param retryAfter Time to wait between retries (2 seconds by default)
     * @return A std::vector<std::string> The ledger data
     * @return A std::expected with ledger edge keys on success, or InitialLedgerLoadError on failure
     */
    virtual std::vector<std::string>
    [[nodiscard]] virtual InitialLedgerLoadResult
    loadInitialLedger(
        uint32_t sequence,
        etlng::InitialLoadObserverInterface& loader,

@@ -74,7 +88,7 @@ public:
     * @param retryAfter Time to wait between retries (2 seconds by default)
     * @return A std::vector<std::string> The ledger data
     */
    virtual std::vector<std::string>
    [[nodiscard]] virtual std::vector<std::string>
    loadInitialLedger(uint32_t sequence, std::chrono::steady_clock::duration retryAfter = std::chrono::seconds{2}) = 0;

    /**
@@ -90,7 +104,7 @@ public:
     * @return The extracted data, if extraction was successful. If the ledger was found
     * in the database or the server is shutting down, the optional will be empty
     */
    virtual OptionalGetLedgerResponseType
    [[nodiscard]] virtual OptionalGetLedgerResponseType
    fetchLedger(
        uint32_t ledgerSequence,
        bool getObjects,

@@ -103,7 +117,7 @@ public:
     *
     * @return JSON representation of the state of this load balancer.
     */
    virtual boost::json::value
    [[nodiscard]] virtual boost::json::value
    toJson() const = 0;

    /**
@@ -115,7 +129,7 @@ public:
     * @param yield The coroutine context
     * @return Response received from rippled node as JSON object on success or error on failure
     */
    virtual std::expected<boost::json::object, rpc::CombinedError>
    [[nodiscard]] virtual std::expected<boost::json::object, rpc::CombinedError>
    forwardToRippled(
        boost::json::object const& request,
        std::optional<std::string> const& clientIp,

@@ -127,7 +141,7 @@ public:
     * @brief Return state of ETL nodes.
     * @return ETL state, nullopt if etl nodes not available
     */
    virtual std::optional<etl::ETLState>
    [[nodiscard]] virtual std::optional<etl::ETLState>
    getETLState() noexcept = 0;

    /**

@@ -23,10 +23,19 @@

#include <xrpl/protocol/LedgerHeader.h>

#include <expected>
#include <optional>

namespace etlng {

/**
 * @brief Enumeration of possible errors that can occur during loading operations
 */
enum class LoaderError {
    AmendmentBlocked, /*< Error indicating that an operation is blocked by an amendment */
    WriteConflict,    /*< Error indicating that a write operation resulted in a conflict */
};

/**
 * @brief An interface for an ETL Loader
 */
@@ -36,8 +45,9 @@ struct LoaderInterface {
    /**
     * @brief Load ledger data
     * @param data The data to load
     * @return Nothing or error as std::expected
     */
    virtual void
    [[nodiscard]] virtual std::expected<void, LoaderError>
    load(model::LedgerData const& data) = 0;
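
With `load` now returning `std::expected<void, LoaderError>`, callers must handle both error kinds explicitly. A hypothetical consumer (only `LoaderInterface` and `LoaderError` come from the header above):

```cpp
// Sketch: how ETL-side code might react to the two LoaderError values.
void loadAndReact(etlng::LoaderInterface& loader, etlng::model::LedgerData const& data) {
    if (auto const res = loader.load(data); not res.has_value()) {
        switch (res.error()) {
        case etlng::LoaderError::WriteConflict:
            // another writer committed this ledger first: give up the writer seat
            break;
        case etlng::LoaderError::AmendmentBlocked:
            // this build cannot process the ledger: halt writes until upgraded
            break;
        }
    }
}
```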

    /**

@@ -36,7 +36,8 @@ namespace etlng {
class MonitorInterface {
public:
    static constexpr auto kDEFAULT_REPEAT_INTERVAL = std::chrono::seconds{1};
    using SignalType = boost::signals2::signal<void(uint32_t)>;
    using NewSequenceSignalType = boost::signals2::signal<void(uint32_t)>;
    using DbStalledSignalType = boost::signals2::signal<void()>;

    virtual ~MonitorInterface() = default;

@@ -45,7 +46,14 @@ public:
     * @param seq The ledger sequence loaded
     */
    virtual void
    notifyLedgerLoaded(uint32_t seq) = 0;
    notifySequenceLoaded(uint32_t seq) = 0;

    /**
     * @brief Notifies the monitor of a write conflict
     * @param seq The sequence number of the ledger that encountered a write conflict
     */
    virtual void
    notifyWriteConflict(uint32_t seq) = 0;

    /**
     * @brief Allows clients to get notified when a new ledger becomes available in Clio's database
@@ -54,7 +62,16 @@ public:
     * @return A connection object that automatically disconnects the subscription once destroyed
     */
    [[nodiscard]] virtual boost::signals2::scoped_connection
    subscribe(SignalType::slot_type const& subscriber) = 0;
    subscribeToNewSequence(NewSequenceSignalType::slot_type const& subscriber) = 0;

    /**
     * @brief Allows clients to get notified when no database update is detected for a configured period.
     *
     * @param subscriber The slot to connect
     * @return A connection object that automatically disconnects the subscription once destroyed
     */
    [[nodiscard]] virtual boost::signals2::scoped_connection
    subscribeToDbStalled(DbStalledSignalType::slot_type const& subscriber) = 0;
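
Both subscribe methods return a `boost::signals2::scoped_connection`, so a subscription lives exactly as long as the connection object. A minimal, self-contained demonstration of that RAII behaviour:

```cpp
#include <boost/signals2.hpp>
#include <cstdint>
#include <iostream>

int main() {
    boost::signals2::signal<void(uint32_t)> newSequence;
    {
        boost::signals2::scoped_connection subscription =
            newSequence.connect([](uint32_t seq) { std::cout << "got seq " << seq << '\n'; });
        newSequence(7);  // slot fires
    }                    // subscription destroyed: automatically disconnected
    newSequence(8);      // nothing fires
}
```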

    /**
     * @brief Run the monitor service

64
src/etlng/MonitorProviderInterface.hpp
Normal file
@@ -0,0 +1,64 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2025, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include "data/BackendInterface.hpp"
#include "etl/NetworkValidatedLedgersInterface.hpp"
#include "etlng/MonitorInterface.hpp"
#include "util/async/AnyExecutionContext.hpp"

#include <chrono>
#include <cstdint>
#include <memory>

namespace etlng {

/**
 * @brief An interface for providing Monitor instances
 */
struct MonitorProviderInterface {
    /**
     * @brief The time Monitor should wait before reporting absence of updates to the database
     */
    static constexpr auto kDEFAULT_DB_STALLED_REPORT_DELAY = std::chrono::seconds{10};

    virtual ~MonitorProviderInterface() = default;

    /**
     * @brief Create a new Monitor instance
     *
     * @param ctx The execution context for asynchronous operations
     * @param backend Interface to the backend database
     * @param validatedLedgers Interface for accessing network validated ledgers
     * @param startSequence The sequence number to start monitoring from
     * @param dbStalledReportDelay The timeout duration after which to signal no database updates
     * @return A unique pointer to a Monitor implementation
     */
    [[nodiscard]] virtual std::unique_ptr<MonitorInterface>
    make(
        util::async::AnyExecutionContext ctx,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<etl::NetworkValidatedLedgersInterface> validatedLedgers,
        uint32_t startSequence,
        std::chrono::steady_clock::duration dbStalledReportDelay = kDEFAULT_DB_STALLED_REPORT_DELAY
    ) = 0;
};

} // namespace etlng
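
A provider indirection like this mainly buys testability: production code registers the real `MonitorProvider`, while tests can hand `ETLService` a double. A hypothetical test stub (nothing below is part of the diff; `FakeMonitor` is an assumed test double implementing `MonitorInterface`):

```cpp
// Sketch of a test-only provider returning a canned monitor.
class FakeMonitorProvider : public etlng::MonitorProviderInterface {
public:
    [[nodiscard]] std::unique_ptr<etlng::MonitorInterface>
    make(
        util::async::AnyExecutionContext,
        std::shared_ptr<BackendInterface>,
        std::shared_ptr<etl::NetworkValidatedLedgersInterface>,
        uint32_t,
        std::chrono::steady_clock::duration
    ) override
    {
        return std::make_unique<FakeMonitor>();  // assumed test double
    }
};
```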

@@ -20,8 +20,8 @@
#include "etlng/Source.hpp"

#include "etl/NetworkValidatedLedgersInterface.hpp"
#include "etl/impl/ForwardingSource.hpp"
#include "etl/impl/SubscriptionSource.hpp"
#include "etlng/impl/ForwardingSource.hpp"
#include "etlng/impl/GrpcSource.hpp"
#include "etlng/impl/SourceImpl.hpp"
#include "feed/SubscriptionManagerInterface.hpp"

@@ -52,7 +52,7 @@ makeSource(
    auto const wsPort = config.get<std::string>("ws_port");
    auto const grpcPort = config.get<std::string>("grpc_port");

    etl::impl::ForwardingSource forwardingSource{ip, wsPort, forwardingTimeout};
    etlng::impl::ForwardingSource forwardingSource{ip, wsPort, forwardingTimeout};
    impl::GrpcSource grpcSource{ip, grpcPort};
    auto subscriptionSource = std::make_unique<etl::impl::SubscriptionSource>(
        ioc,

@@ -19,9 +19,9 @@

#pragma once

#include "data/BackendInterface.hpp"
#include "etl/NetworkValidatedLedgersInterface.hpp"
#include "etlng/InitialLoadObserverInterface.hpp"
#include "etlng/LoadBalancerInterface.hpp"
#include "feed/SubscriptionManagerInterface.hpp"
#include "rpc/Errors.hpp"
#include "util/config/ObjectView.hpp"

@@ -131,7 +131,7 @@ public:
     * @param loader InitialLoadObserverInterface implementation
     * @return A std::pair of the data and a bool indicating whether the download was successful
     */
    virtual std::pair<std::vector<std::string>, bool>
    virtual InitialLedgerLoadResult
    loadInitialLedger(uint32_t sequence, std::uint32_t numMarkers, etlng::InitialLoadObserverInterface& loader) = 0;

    /**

@@ -27,6 +27,7 @@
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>

namespace etlng {

@@ -41,11 +42,17 @@ struct TaskManagerProviderInterface {
     *
     * @param ctx The async context to associate the task manager instance with
     * @param monitor The monitor to notify when ledger is loaded
     * @param seq The sequence to start at
     * @param startSeq The sequence to start at
     * @param finishSeq The sequence to stop at if specified
     * @return A unique pointer to a TaskManager implementation
     */
    virtual std::unique_ptr<TaskManagerInterface>
    make(util::async::AnyExecutionContext ctx, std::reference_wrapper<MonitorInterface> monitor, uint32_t seq) = 0;
    [[nodiscard]] virtual std::unique_ptr<TaskManagerInterface>
    make(
        util::async::AnyExecutionContext ctx,
        std::reference_wrapper<MonitorInterface> monitor,
        uint32_t startSeq,
        std::optional<uint32_t> finishSeq = std::nullopt
    ) = 0;
};

} // namespace etlng

@@ -20,11 +20,13 @@
#include "etlng/impl/GrpcSource.hpp"

#include "etlng/InitialLoadObserverInterface.hpp"
#include "etlng/LoadBalancerInterface.hpp"
#include "etlng/impl/AsyncGrpcCall.hpp"
#include "util/Assert.hpp"
#include "util/log/Logger.hpp"
#include "web/Resolver.hpp"

#include <boost/asio/spawn.hpp>
#include <fmt/core.h>
#include <grpcpp/client_context.h>
#include <grpcpp/security/credentials.h>

@@ -33,9 +35,12 @@
#include <org/xrpl/rpc/v1/get_ledger.pb.h>
#include <org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <exception>
#include <expected>
#include <memory>
#include <stdexcept>
#include <string>
#include <utility>

@@ -60,6 +65,7 @@ namespace etlng::impl {

GrpcSource::GrpcSource(std::string const& ip, std::string const& grpcPort)
    : log_(fmt::format("ETL_Grpc[{}:{}]", ip, grpcPort))
    , initialLoadShouldStop_(std::make_unique<std::atomic_bool>(false))
{
    try {
        grpc::ChannelArguments chArgs;

@@ -103,15 +109,18 @@ GrpcSource::fetchLedger(uint32_t sequence, bool getObjects, bool getObjectNeighb
    return {status, std::move(response)};
}

std::pair<std::vector<std::string>, bool>
InitialLedgerLoadResult
GrpcSource::loadInitialLedger(
    uint32_t const sequence,
    uint32_t const numMarkers,
    etlng::InitialLoadObserverInterface& observer
)
{
    if (*initialLoadShouldStop_)
        return std::unexpected{InitialLedgerLoadError::Cancelled};

    if (!stub_)
        return {{}, false};
        return std::unexpected{InitialLedgerLoadError::Errored};

    std::vector<AsyncGrpcCall> calls = AsyncGrpcCall::makeAsyncCalls(sequence, numMarkers);

@@ -131,9 +140,9 @@ GrpcSource::loadInitialLedger(
        ASSERT(tag != nullptr, "Tag can't be null.");
        auto ptr = static_cast<AsyncGrpcCall*>(tag);

        if (!ok) {
            LOG(log_.error()) << "loadInitialLedger - ok is false";
            return {{}, false};  // cancelled
        if (not ok or *initialLoadShouldStop_) {
            LOG(log_.error()) << "loadInitialLedger cancelled";
            return std::unexpected{InitialLedgerLoadError::Cancelled};
        }

        LOG(log_.trace()) << "Marker prefix = " << ptr->getMarkerPrefix();

@@ -151,7 +160,16 @@ GrpcSource::loadInitialLedger(
        abort = true;
    }

    return {std::move(edgeKeys), !abort};
    if (abort)
        return std::unexpected{InitialLedgerLoadError::Errored};

    return edgeKeys;
}

void
GrpcSource::stop(boost::asio::yield_context)
{
    initialLoadShouldStop_->store(true);
}

} // namespace etlng::impl
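
The cancellation mechanism here reduces to a shared atomic flag that the download loop polls between completions, while `stop()` merely sets it. The same pattern in a self-contained sketch:

```cpp
#include <atomic>
#include <thread>

int main() {
    std::atomic_bool shouldStop{false};

    std::thread worker([&shouldStop] {
        while (not shouldStop.load()) {
            // ... download / process the next chunk ...
        }
        // flag observed: unwind cooperatively (the diff reports "Cancelled")
    });

    shouldStop.store(true);  // what GrpcSource::stop() does
    worker.join();
}
```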

@@ -20,23 +20,26 @@
#pragma once

#include "etlng/InitialLoadObserverInterface.hpp"
#include "etlng/LoadBalancerInterface.hpp"
#include "util/log/Logger.hpp"

#include <boost/asio/spawn.hpp>
#include <grpcpp/support/status.h>
#include <org/xrpl/rpc/v1/get_ledger.pb.h>
#include <xrpl/proto/org/xrpl/rpc/v1/xrp_ledger.grpc.pb.h>

#include <atomic>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>

namespace etlng::impl {

class GrpcSource {
    util::Logger log_;
    std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub> stub_;
    std::unique_ptr<std::atomic_bool> initialLoadShouldStop_;

public:
    GrpcSource(std::string const& ip, std::string const& grpcPort);

@@ -61,10 +64,18 @@ public:
     * @param sequence Sequence of the ledger to download
     * @param numMarkers Number of markers to generate for async calls
     * @param observer InitialLoadObserverInterface implementation
     * @return A std::pair of the data and a bool indicating whether the download was successful
     * @return Downloaded data or an indication of error or cancellation
     */
    std::pair<std::vector<std::string>, bool>
    InitialLedgerLoadResult
    loadInitialLedger(uint32_t sequence, uint32_t numMarkers, etlng::InitialLoadObserverInterface& observer);

    /**
     * @brief Stop any ongoing operations
     * @note This is used to cancel any ongoing initial ledger downloads
     * @param yield The coroutine context
     */
    void
    stop(boost::asio::yield_context yield);
};

} // namespace etlng::impl

@@ -21,7 +21,6 @@

#include "data/BackendInterface.hpp"
#include "data/DBHelpers.hpp"
#include "data/Types.hpp"
#include "etl/SystemState.hpp"
#include "etlng/LedgerPublisherInterface.hpp"
#include "etlng/impl/Loading.hpp"

@@ -35,6 +34,7 @@
#include <boost/asio/io_context.hpp>
#include <boost/asio/post.hpp>
#include <boost/asio/strand.hpp>
#include <fmt/core.h>
#include <xrpl/basics/chrono.h>
#include <xrpl/protocol/Fees.h>
#include <xrpl/protocol/LedgerHeader.h>

@@ -164,10 +164,6 @@ public:
        boost::asio::post(publishStrand_, [this, lgrInfo = lgrInfo]() {
            LOG(log_.info()) << "Publishing ledger " << std::to_string(lgrInfo.seq);

            // TODO: This should probably not be part of publisher in the future
            if (not state_.get().isWriting)
                backend_->updateRange(lgrInfo.seq);  // This can't be unit tested atm.

            setLastClose(lgrInfo.closeTime);
            auto age = lastCloseAgeSeconds();

@@ -20,11 +20,14 @@
#include "etlng/impl/Loading.hpp"

#include "data/BackendInterface.hpp"
#include "etl/SystemState.hpp"
#include "etl/impl/LedgerLoader.hpp"
#include "etlng/AmendmentBlockHandlerInterface.hpp"
#include "etlng/LoaderInterface.hpp"
#include "etlng/Models.hpp"
#include "etlng/RegistryInterface.hpp"
#include "util/Assert.hpp"
#include "util/Constants.hpp"
#include "util/LedgerUtils.hpp"
#include "util/Profiler.hpp"
#include "util/log/Logger.hpp"

@@ -46,29 +49,45 @@ namespace etlng::impl {
Loader::Loader(
    std::shared_ptr<BackendInterface> backend,
    std::shared_ptr<RegistryInterface> registry,
    std::shared_ptr<AmendmentBlockHandlerInterface> amendmentBlockHandler
    std::shared_ptr<AmendmentBlockHandlerInterface> amendmentBlockHandler,
    std::shared_ptr<etl::SystemState> state
)
    : backend_(std::move(backend))
    , registry_(std::move(registry))
    , amendmentBlockHandler_(std::move(amendmentBlockHandler))
    , state_(std::move(state))
{
}

void
std::expected<void, LoaderError>
Loader::load(model::LedgerData const& data)
{
    try {
        // perform cache updates and all writes from extensions
        // Perform cache updates and all writes from extensions
        // TODO: maybe this readonly logic should be removed?
        registry_->dispatch(data);

        auto [success, duration] =
            ::util::timed<std::chrono::duration<double>>([&]() { return backend_->finishWrites(data.seq); });
        LOG(log_.info()) << "Finished writes to DB for " << data.seq << ": " << (success ? "YES" : "NO") << "; took "
                         << duration;
        // Only a writer should attempt to commit to DB
        // This is also where conflicts with other writer nodes will be detected
        if (state_->isWriting) {
            auto [success, duration] =
                ::util::timed<std::chrono::milliseconds>([&]() { return backend_->finishWrites(data.seq); });
            LOG(log_.info()) << "Finished writes to DB for " << data.seq << ": " << (success ? "YES" : "NO")
                             << "; took " << duration << "ms";

            if (not success) {
                state_->writeConflict = true;
                LOG(log_.warn()) << "Another node wrote a ledger into the DB - we have a write conflict";
                return std::unexpected(LoaderError::WriteConflict);
            }
        }
    } catch (std::runtime_error const& e) {
        LOG(log_.fatal()) << "Failed to load " << data.seq << ": " << e.what();
        amendmentBlockHandler_->notifyAmendmentBlocked();
        return std::unexpected(LoaderError::AmendmentBlocked);
    }

    return {};
};

void
@@ -78,13 +97,32 @@ Loader::onInitialLoadGotMoreObjects(
    std::optional<std::string> lastKey
)
{
    static constexpr std::size_t kLOG_STRIDE = 1000u;
    static auto kINITIAL_LOAD_START_TIME = std::chrono::steady_clock::now();

    try {
        LOG(log_.debug()) << "On initial load: got more objects for seq " << seq << ". size = " << data.size();
        LOG(log_.trace()) << "On initial load: got more objects for seq " << seq << ". size = " << data.size();
        registry_->dispatchInitialObjects(
            seq,
            data,
            std::move(lastKey).value_or(std::string{})  // TODO: perhaps use optional all the way to extensions?
        );

        initialLoadWrittenObjects_ += data.size();
        ++initialLoadWrites_;
        if (initialLoadWrites_ % kLOG_STRIDE == 0u && initialLoadWrites_ != 0u) {
            auto elapsedSinceStart = std::chrono::duration_cast<std::chrono::milliseconds>(
                std::chrono::steady_clock::now() - kINITIAL_LOAD_START_TIME
            );
            auto elapsedSeconds = elapsedSinceStart.count() / static_cast<double>(util::kMILLISECONDS_PER_SECOND);
            auto objectsPerSecond =
                elapsedSeconds > 0.0 ? static_cast<double>(initialLoadWrittenObjects_) / elapsedSeconds : 0.0;

            LOG(log_.info()) << "Wrote " << initialLoadWrittenObjects_
                             << " initial ledger objects so far with average rate of " << objectsPerSecond
                             << " objects per second";
        }

    } catch (std::runtime_error const& e) {
        LOG(log_.fatal()) << "Failed to load initial objects for " << seq << ": " << e.what();
        amendmentBlockHandler_->notifyAmendmentBlocked();

@@ -95,9 +133,7 @@ std::optional<ripple::LedgerHeader>
Loader::loadInitialLedger(model::LedgerData const& data)
{
    try {
        // check that database is actually empty
        auto rng = backend_->hardFetchLedgerRangeNoThrow();
        if (rng) {
        if (auto const rng = backend_->hardFetchLedgerRangeNoThrow(); rng.has_value()) {
            ASSERT(false, "Database is not empty");
            return std::nullopt;
        }

@@ -20,7 +20,7 @@
#pragma once

#include "data/BackendInterface.hpp"
#include "etl/LedgerFetcherInterface.hpp"
#include "etl/SystemState.hpp"
#include "etl/impl/LedgerLoader.hpp"
#include "etlng/AmendmentBlockHandlerInterface.hpp"
#include "etlng/InitialLoadObserverInterface.hpp"

@@ -39,6 +39,7 @@
#include <xrpl/protocol/Serializer.h>
#include <xrpl/protocol/TxMeta.h>

#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>

@@ -51,7 +52,10 @@ class Loader : public LoaderInterface, public InitialLoadObserverInterface {
    std::shared_ptr<BackendInterface> backend_;
    std::shared_ptr<RegistryInterface> registry_;
    std::shared_ptr<AmendmentBlockHandlerInterface> amendmentBlockHandler_;
    std::shared_ptr<etl::SystemState> state_;

    std::size_t initialLoadWrittenObjects_{0u};
    std::size_t initialLoadWrites_{0u};
    util::Logger log_{"ETL"};

public:

@@ -62,7 +66,8 @@ public:
    Loader(
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<RegistryInterface> registry,
        std::shared_ptr<AmendmentBlockHandlerInterface> amendmentBlockHandler
        std::shared_ptr<AmendmentBlockHandlerInterface> amendmentBlockHandler,
        std::shared_ptr<etl::SystemState> state
    );

    Loader(Loader const&) = delete;

@@ -72,7 +77,7 @@ public:
    Loader&
    operator=(Loader&&) = delete;

    void
    std::expected<void, LoaderError>
    load(model::LedgerData const& data) override;

    void

@@ -23,11 +23,11 @@
#include "etl/NetworkValidatedLedgersInterface.hpp"
#include "util/Assert.hpp"
#include "util/async/AnyExecutionContext.hpp"
#include "util/async/AnyOperation.hpp"
#include "util/log/Logger.hpp"

#include <boost/signals2/connection.hpp>

#include <algorithm>
#include <chrono>
#include <cstddef>
#include <cstdint>

@@ -41,12 +41,18 @@ Monitor::Monitor(
    util::async::AnyExecutionContext ctx,
    std::shared_ptr<BackendInterface> backend,
    std::shared_ptr<etl::NetworkValidatedLedgersInterface> validatedLedgers,
    uint32_t startSequence
    uint32_t startSequence,
    std::chrono::steady_clock::duration dbStalledReportDelay
)
    : strand_(ctx.makeStrand())
    , backend_(std::move(backend))
    , validatedLedgers_(std::move(validatedLedgers))
    , nextSequence_(startSequence)
    , updateData_({
          .dbStalledReportDelay = dbStalledReportDelay,
          .lastDbCheckTime = std::chrono::steady_clock::now(),
          .lastSeenMaxSeqInDb = startSequence > 0 ? startSequence - 1 : 0,
      })
{
}

@@ -55,20 +61,37 @@ Monitor::~Monitor()
    stop();
}

// TODO: think about using signals perhaps? maybe combining with onNextSequence?
// also, how do we not double invoke or does it not matter
void
Monitor::notifyLedgerLoaded(uint32_t seq)
Monitor::notifySequenceLoaded(uint32_t seq)
{
    LOG(log_.debug()) << "Loader notified about newly committed ledger " << seq;
    repeatedTask_->invoke();  // force-invoke immediately
    LOG(log_.debug()) << "Loader notified Monitor about newly committed ledger " << seq;
    {
        auto lck = updateData_.lock();
        lck->lastSeenMaxSeqInDb = std::max(seq, lck->lastSeenMaxSeqInDb);
        lck->lastDbCheckTime = std::chrono::steady_clock::now();
    }
    repeatedTask_->invoke();  // force-invoke doWork immediately
};

void
Monitor::notifyWriteConflict(uint32_t seq)
{
    LOG(log_.warn()) << "Loader notified Monitor about write conflict at " << seq;
    nextSequence_ = seq + 1;  // we already loaded the cache for seq just before we detected conflict
    LOG(log_.warn()) << "Resume monitoring from " << nextSequence_;
}

void
Monitor::run(std::chrono::steady_clock::duration repeatInterval)
{
    ASSERT(not repeatedTask_.has_value(), "Monitor attempted to run more than once");
    LOG(log_.debug()) << "Starting monitor";
    {
        auto lck = updateData_.lock();
        LOG(log_.debug()) << "Starting monitor with repeat interval: "
                          << std::chrono::duration_cast<std::chrono::seconds>(repeatInterval).count()
                          << "s and dbStalledReportDelay: "
                          << std::chrono::duration_cast<std::chrono::seconds>(lck->dbStalledReportDelay).count() << "s";
    }

    repeatedTask_ = strand_.executeRepeatedly(repeatInterval, std::bind_front(&Monitor::doWork, this));
    subscription_ = validatedLedgers_->subscribe(std::bind_front(&Monitor::onNextSequence, this));

@@ -80,28 +103,65 @@ Monitor::stop()
    if (repeatedTask_.has_value())
        repeatedTask_->abort();

    subscription_ = std::nullopt;
    repeatedTask_ = std::nullopt;
}

boost::signals2::scoped_connection
Monitor::subscribe(SignalType::slot_type const& subscriber)
Monitor::subscribeToNewSequence(NewSequenceSignalType::slot_type const& subscriber)
{
    return notificationChannel_.connect(subscriber);
}

boost::signals2::scoped_connection
Monitor::subscribeToDbStalled(DbStalledSignalType::slot_type const& subscriber)
{
    return dbStalledChannel_.connect(subscriber);
}

void
Monitor::onNextSequence(uint32_t seq)
{
    LOG(log_.debug()) << "rippled published sequence " << seq;
    ASSERT(repeatedTask_.has_value(), "Ledger subscription without repeated task is a logic error");
    LOG(log_.debug()) << "Notified about new sequence on the network: " << seq;
    repeatedTask_->invoke();  // force-invoke immediately
}

void
Monitor::doWork()
{
    if (auto rng = backend_->hardFetchLedgerRangeNoThrow(); rng) {
        while (rng->maxSequence >= nextSequence_)
    auto rng = backend_->hardFetchLedgerRangeNoThrow();
    bool dbProgressedThisCycle = false;
    auto lck = updateData_.lock();

    if (rng.has_value()) {
        if (rng->maxSequence > lck->lastSeenMaxSeqInDb) {
            LOG(log_.trace()) << "DB progressed. Old max seq = " << lck->lastSeenMaxSeqInDb
                              << ", new max seq = " << rng->maxSequence;
            lck->lastSeenMaxSeqInDb = rng->maxSequence;
            dbProgressedThisCycle = true;
        }

        while (lck->lastSeenMaxSeqInDb >= nextSequence_) {
            LOG(log_.trace()) << "Publishing from Monitor::doWork. nextSequence_ = " << nextSequence_
                              << ", lastSeenMaxSeqInDb_ = " << lck->lastSeenMaxSeqInDb;
            notificationChannel_(nextSequence_++);
            dbProgressedThisCycle = true;
        }
    } else {
        LOG(log_.trace()) << "DB range is not available or empty. lastSeenMaxSeqInDb_ = " << lck->lastSeenMaxSeqInDb
                          << ", nextSequence_ = " << nextSequence_;
    }

    if (dbProgressedThisCycle) {
        lck->lastDbCheckTime = std::chrono::steady_clock::now();
    } else if (std::chrono::steady_clock::now() - lck->lastDbCheckTime > lck->dbStalledReportDelay) {
        LOG(log_.info()) << "No DB update detected for "
                         << std::chrono::duration_cast<std::chrono::seconds>(lck->dbStalledReportDelay).count()
                         << " seconds. Firing dbStalledChannel. Last seen max seq in DB: " << lck->lastSeenMaxSeqInDb
                         << ". Expecting next: " << nextSequence_;
        dbStalledChannel_();
        lck->lastDbCheckTime = std::chrono::steady_clock::now();
    }
}
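
The stall detection in `doWork` is a classic watchdog: record when progress was last seen, fire once the quiet period exceeds a threshold, then rearm. A self-contained sketch of just that logic (names are illustrative):

```cpp
#include <chrono>

struct Watchdog {
    std::chrono::steady_clock::duration delay;
    std::chrono::steady_clock::time_point lastProgress = std::chrono::steady_clock::now();

    void onProgress() { lastProgress = std::chrono::steady_clock::now(); }

    // Returns true when the "stalled" signal should fire.
    bool poll() {
        if (std::chrono::steady_clock::now() - lastProgress > delay) {
            lastProgress = std::chrono::steady_clock::now();  // rearm, as doWork does
            return true;
        }
        return false;
    }
};
```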
@@ -22,6 +22,7 @@
#include "data/BackendInterface.hpp"
#include "etl/NetworkValidatedLedgersInterface.hpp"
#include "etlng/MonitorInterface.hpp"
#include "util/Mutex.hpp"
#include "util/async/AnyExecutionContext.hpp"
#include "util/async/AnyOperation.hpp"
#include "util/async/AnyStrand.hpp"

@@ -30,6 +31,7 @@
#include <boost/signals2/connection.hpp>
#include <xrpl/protocol/TxFormats.h>

#include <atomic>
#include <chrono>
#include <cstddef>
#include <cstdint>

@@ -43,11 +45,20 @@ class Monitor : public MonitorInterface {
    std::shared_ptr<BackendInterface> backend_;
    std::shared_ptr<etl::NetworkValidatedLedgersInterface> validatedLedgers_;

    uint32_t nextSequence_;
    std::atomic_uint32_t nextSequence_;
    std::optional<util::async::AnyOperation<void>> repeatedTask_;
    std::optional<boost::signals2::scoped_connection> subscription_;  // network validated ledgers subscription

    SignalType notificationChannel_;
    NewSequenceSignalType notificationChannel_;
    DbStalledSignalType dbStalledChannel_;

    struct UpdateData {
        std::chrono::steady_clock::duration dbStalledReportDelay;
        std::chrono::steady_clock::time_point lastDbCheckTime;
        uint32_t lastSeenMaxSeqInDb = 0u;
    };

    util::Mutex<UpdateData> updateData_;

    util::Logger log_{"ETL"};

@@ -56,12 +67,16 @@ public:
        util::async::AnyExecutionContext ctx,
        std::shared_ptr<BackendInterface> backend,
        std::shared_ptr<etl::NetworkValidatedLedgersInterface> validatedLedgers,
        uint32_t startSequence
        uint32_t startSequence,
        std::chrono::steady_clock::duration dbStalledReportDelay
    );
    ~Monitor() override;

    void
    notifyLedgerLoaded(uint32_t seq) override;
    notifySequenceLoaded(uint32_t seq) override;

    void
    notifyWriteConflict(uint32_t seq) override;

    void
    run(std::chrono::steady_clock::duration repeatInterval) override;

@@ -70,7 +85,10 @@ public:
    stop() override;

    boost::signals2::scoped_connection
    subscribe(SignalType::slot_type const& subscriber) override;
    subscribeToNewSequence(NewSequenceSignalType::slot_type const& subscriber) override;

    boost::signals2::scoped_connection
    subscribeToDbStalled(DbStalledSignalType::slot_type const& subscriber) override;

private:
    void

53
src/etlng/impl/MonitorProvider.hpp
Normal file
@@ -0,0 +1,53 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2025, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "etl/NetworkValidatedLedgersInterface.hpp"
|
||||
#include "etlng/MonitorInterface.hpp"
|
||||
#include "etlng/MonitorProviderInterface.hpp"
|
||||
#include "etlng/impl/Monitor.hpp"
|
||||
#include "util/async/AnyExecutionContext.hpp"
|
||||
|
||||
#include <chrono>
|
||||
#include <cstdint>
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
|
||||
namespace etlng::impl {
|
||||
|
||||
class MonitorProvider : public MonitorProviderInterface {
|
||||
public:
|
||||
std::unique_ptr<MonitorInterface>
|
||||
make(
|
||||
util::async::AnyExecutionContext ctx,
|
||||
std::shared_ptr<BackendInterface> backend,
|
||||
std::shared_ptr<etl::NetworkValidatedLedgersInterface> validatedLedgers,
|
||||
uint32_t startSequence,
|
||||
std::chrono::steady_clock::duration dbStalledReportDelay
|
||||
) override
|
||||
{
|
||||
return std::make_unique<Monitor>(
|
||||
std::move(ctx), std::move(backend), std::move(validatedLedgers), startSequence, dbStalledReportDelay
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace etlng::impl
|
||||
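This provider indirection exists so that Monitor construction can be deferred until the start sequence is known at run time, and so that tests can inject a mock in its place (the MockMonitorProvider in the ETLService tests further down does exactly that). A hypothetical wiring sketch, assuming a ctx, backend, and ledgers are already set up and with illustrative durations:

auto provider = std::make_shared<etlng::impl::MonitorProvider>();
auto monitor = provider->make(ctx, backend, ledgers, startSequence, std::chrono::seconds{10});
monitor->run(std::chrono::seconds{1});  // repeat interval; values here are illustrative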
@@ -19,10 +19,11 @@

#pragma once

#include "etl/impl/ForwardingSource.hpp"
#include "etl/impl/SubscriptionSource.hpp"
#include "etlng/InitialLoadObserverInterface.hpp"
#include "etlng/LoadBalancerInterface.hpp"
#include "etlng/Source.hpp"
#include "etlng/impl/ForwardingSource.hpp"
#include "etlng/impl/GrpcSource.hpp"
#include "rpc/Errors.hpp"

@@ -53,7 +54,7 @@ namespace etlng::impl {
template <
    typename GrpcSourceType = GrpcSource,
    typename SubscriptionSourceTypePtr = std::unique_ptr<etl::impl::SubscriptionSource>,
    typename ForwardingSourceType = etl::impl::ForwardingSource>
    typename ForwardingSourceType = etlng::impl::ForwardingSource>
class SourceImpl : public SourceBase {
    std::string ip_;
    std::string wsPort_;
@@ -107,6 +108,7 @@ public:
    stop(boost::asio::yield_context yield) final
    {
        subscriptionSource_->stop(yield);
        grpcSource_.stop(yield);
    }

    /**
@@ -202,7 +204,7 @@ public:
     * @param loader InitialLoadObserverInterface implementation
     * @return A std::pair of the data and a bool indicating whether the download was successful
     */
    std::pair<std::vector<std::string>, bool>
    InitialLedgerLoadResult
    loadInitialLedger(uint32_t sequence, std::uint32_t numMarkers, etlng::InitialLoadObserverInterface& loader) final
    {
        return grpcSource_.loadInitialLedger(sequence, numMarkers, loader);
@@ -26,6 +26,7 @@
#include "etlng/SchedulerInterface.hpp"
#include "etlng/impl/Monitor.hpp"
#include "etlng/impl/TaskQueue.hpp"
#include "util/Constants.hpp"
#include "util/LedgerUtils.hpp"
#include "util/Profiler.hpp"
#include "util/async/AnyExecutionContext.hpp"
@@ -102,29 +103,49 @@ TaskManager::spawnExtractor(TaskQueue& queue)
                        if (stopRequested)
                            break;
                    }
                } else {
                    // TODO: how do we signal to the loaders that it's time to shutdown? some special task?
                    break;  // TODO: handle server shutdown or other node took over ETL
                }
            } else {
                // TODO (https://github.com/XRPLF/clio/issues/1852)
                std::this_thread::sleep_for(kDELAY_BETWEEN_ATTEMPTS);
            }
        }

        LOG(log_.info()) << "Extractor (one of) coroutine stopped";
    });
}

util::async::AnyOperation<void>
TaskManager::spawnLoader(TaskQueue& queue)
{
    static constexpr auto kNANO_TO_SECOND = 1.0e9;

    return ctx_.execute([this, &queue](auto stopRequested) {
        while (not stopRequested) {
            // TODO (https://github.com/XRPLF/clio/issues/66): does not tell the loader whether it's out of order or not
            if (auto data = queue.dequeue(); data.has_value()) {
                auto nanos = util::timed<std::chrono::nanoseconds>([this, data = *data] { loader_.get().load(data); });
                auto const seconds = nanos / kNANO_TO_SECOND;
                auto [expectedSuccess, nanos] =
                    util::timed<std::chrono::nanoseconds>([&] { return loader_.get().load(*data); });

                auto const shouldExitOnError = [&] {
                    if (expectedSuccess.has_value())
                        return false;

                    switch (expectedSuccess.error()) {
                        case LoaderError::WriteConflict:
                            LOG(log_.warn()) << "Immediately stopping loader on write conflict"
                                             << "; latest ledger cache loaded for " << data->seq;
                            monitor_.get().notifyWriteConflict(data->seq);
                            return true;
                        case LoaderError::AmendmentBlocked:
                            LOG(log_.warn()) << "Immediately stopping loader on amendment block";
                            return true;
                    }

                    std::unreachable();
                }();

                if (shouldExitOnError)
                    break;

                auto const seconds = nanos / util::kNANO_PER_SECOND;
                auto const txnCount = data->transactions.size();
                auto const objCount = data->objects.size();

@@ -133,9 +154,11 @@ TaskManager::spawnLoader(TaskQueue& queue)
                    << " seconds;"
                    << " tps[" << txnCount / seconds << "], ops[" << objCount / seconds << "]";

                monitor_.get().notifyLedgerLoaded(data->seq);
                monitor_.get().notifySequenceLoaded(data->seq);
            }
        }

        LOG(log_.info()) << "Loader coroutine stopped";
    });
}

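The loader rewrite above leans on two things worth spelling out: util::timed now forwards the callable's own return value next to the elapsed time (so auto [expectedSuccess, nanos] destructures both), and load() now reports failure through an expected-style error instead of returning void. A self-contained sketch of the timing idiom — timedCall is an invented stand-in, not Clio's util::timed:

#include <chrono>
#include <utility>

// Stand-in for a timing helper that returns {result, elapsed ticks}.
template <typename DurationT, typename Fn>
auto
timedCall(Fn&& fn)
{
    auto const start = std::chrono::steady_clock::now();
    auto result = std::forward<Fn>(fn)();  // keep the callable's own result
    auto const elapsed = std::chrono::duration_cast<DurationT>(std::chrono::steady_clock::now() - start);
    return std::pair{std::move(result), static_cast<double>(elapsed.count())};
}

With that shape, dividing the nanosecond count by util::kNANO_PER_SECOND (added to util/Constants.hpp below) yields seconds as a double for the tps/ops log line.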
@@ -32,6 +32,7 @@
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <utility>

namespace etlng::impl {
@@ -62,12 +63,19 @@ public:
    }

    std::unique_ptr<TaskManagerInterface>
    make(util::async::AnyExecutionContext ctx, std::reference_wrapper<MonitorInterface> monitor, uint32_t seq) override
    make(
        util::async::AnyExecutionContext ctx,
        std::reference_wrapper<MonitorInterface> monitor,
        uint32_t startSeq,
        std::optional<uint32_t> finishSeq
    ) override
    {
        auto scheduler = impl::makeScheduler(impl::ForwardScheduler{ledgers_, seq});
        // TODO: add impl::BackfillScheduler{seq - 1, seq - 1000},
        auto scheduler = impl::makeScheduler(impl::ForwardScheduler{ledgers_, startSeq, finishSeq});
        // TODO: add impl::BackfillScheduler{startSeq - 1, startSeq - ...},

        return std::make_unique<TaskManager>(std::move(ctx), std::move(scheduler), *extractor_, *loader_, monitor, seq);
        return std::make_unique<TaskManager>(
            std::move(ctx), std::move(scheduler), *extractor_, *loader_, monitor, startSeq
        );
    }
};

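The new std::optional<uint32_t> finishSeq parameter threads the finish_sequence config value down to the forward scheduler. A sketch of the intended behaviour, under the assumption (names invented) that the scheduler simply stops handing out work past the bound:

// Hypothetical scheduler step honoring an optional upper bound.
std::optional<uint32_t>
nextSequence(uint32_t& current, std::optional<uint32_t> finish)
{
    if (finish.has_value() and current > *finish)
        return std::nullopt;  // finish_sequence reached: no more tasks
    return current++;
}

An unset optional means "follow the network indefinitely", which matches finish_sequence being an optional key in the ETLServiceTests config further down.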
@@ -265,6 +265,9 @@ tag_invoke(boost::json::value_from_tag, boost::json::value& jv, BookChange const
        {JS(open), to_string(change.openRate.iou())},
        {JS(close), to_string(change.closeRate.iou())},
    };

    if (change.domain.has_value())
        jv.as_object()[JS(domain)] = ripple::to_string(*change.domain);
}

/**
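Emitting a field only when its optional is engaged, as tag_invoke does for domain above, is a common Boost.JSON pattern. A minimal standalone example (field names and values are illustrative):

#include <boost/json.hpp>
#include <optional>
#include <string>

boost::json::object
makeBookChange(std::optional<std::string> const& domain)
{
    boost::json::object jv{{"open", "1.0"}, {"close", "2.0"}};
    if (domain.has_value())
        jv["domain"] = *domain;  // key present only for domain-scoped books
    return jv;
}

Consumers can then treat the absence of the key as "no domain" rather than having to parse a sentinel value.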
@@ -74,7 +74,7 @@ makeWarning(WarningCode code)
ClioErrorInfo const&
getErrorInfo(ClioError code)
{
    constexpr static ClioErrorInfo kINFOS[]{
    static constexpr ClioErrorInfo kINFOS[]{
        {.code = ClioError::RpcMalformedCurrency, .error = "malformedCurrency", .message = "Malformed currency."},
        {.code = ClioError::RpcMalformedRequest, .error = "malformedRequest", .message = "Malformed request."},
        {.code = ClioError::RpcMalformedOwner, .error = "malformedOwner", .message = "Malformed owner."},

@@ -36,7 +36,8 @@ namespace rpc::validation {
 * @return true if convertible; false otherwise
 */
template <typename Expected>
[[nodiscard]] bool static checkType(boost::json::value const& value)
[[nodiscard]] static bool
checkType(boost::json::value const& value)
{
    auto hasError = false;
    if constexpr (std::is_same_v<Expected, bool>) {

@@ -148,11 +148,14 @@ public:
            validation::CustomValidators::accountValidator,
            Status(RippledError::rpcINVALID_PARAMS, "Invalid field 'taker'.")
        }},
        {
            JS(domain),
            validation::Type<std::string>{},
            validation::CustomValidators::uint256HexStringValidator,
        },
        {JS(domain),
         meta::WithCustomError{
             validation::Type<std::string>{}, Status(RippledError::rpcDOMAIN_MALFORMED, "Unable to parse domain.")
         },
         meta::WithCustomError{
             validation::CustomValidators::uint256HexStringValidator,
             Status(RippledError::rpcDOMAIN_MALFORMED, "Unable to parse domain.")
         }},
        {JS(limit),
         validation::Type<uint32_t>{},
         validation::Min(1u),

@@ -147,9 +147,9 @@ public:
        {JS(ledger_index), validation::CustomValidators::ledgerIndexValidator}
    };

    auto static const kSPEC_V1 =
    static auto const kSPEC_V1 =
        RpcSpec{kSPEC_COMMON, {{JS(hotwallet), getHotWalletValidator(ripple::rpcINVALID_HOTWALLET)}}};
    auto static const kSPEC_V2 =
    static auto const kSPEC_V2 =
        RpcSpec{kSPEC_COMMON, {{JS(hotwallet), getHotWalletValidator(ripple::rpcINVALID_PARAMS)}}};

    return apiVersion == 1 ? kSPEC_V1 : kSPEC_V2;

@@ -23,4 +23,5 @@

namespace util {
static constexpr std::size_t kMILLISECONDS_PER_SECOND = 1000;
static constexpr double kNANO_PER_SECOND = 1.0e9;
} // namespace util

@@ -19,6 +19,7 @@

#pragma once

#include <boost/asio/any_io_executor.hpp>
#include <boost/asio/associated_executor.hpp>
#include <boost/asio/post.hpp>
#include <boost/asio/spawn.hpp>

@@ -82,7 +82,8 @@ public:
    override;

private:
    std::unique_ptr<MetricBase> static makeMetric(std::string name, std::string labelsString, MetricType type);
    static std::unique_ptr<MetricBase>
    makeMetric(std::string name, std::string labelsString, MetricType type);

    template <typename ValueType>
        requires std::same_as<ValueType, std::int64_t> || std::same_as<ValueType, double>

@@ -257,7 +257,8 @@ public:
     *
     * @param config The configuration to use
     */
    void static init(util::config::ClioConfigDefinition const& config);
    static void
    init(util::config::ClioConfigDefinition const& config);

    /**
     * @brief Get a bool based metric. It will be created if it doesn't exist

@@ -42,7 +42,7 @@ struct MockNgLoadBalancer : etlng::LoadBalancerInterface {
    using RawLedgerObjectType = FakeLedgerObject;

    MOCK_METHOD(
        std::vector<std::string>,
        etlng::InitialLedgerLoadResult,
        loadInitialLedger,
        (uint32_t, etlng::InitialLoadObserverInterface&, std::chrono::steady_clock::duration),
        (override)

@@ -20,6 +20,7 @@

#include "etl/NetworkValidatedLedgersInterface.hpp"
#include "etlng/InitialLoadObserverInterface.hpp"
#include "etlng/LoadBalancerInterface.hpp"
#include "etlng/Source.hpp"
#include "feed/SubscriptionManagerInterface.hpp"
#include "rpc/Errors.hpp"
@@ -61,7 +62,7 @@ struct MockSourceNg : etlng::SourceBase {
        (override)
    );
    MOCK_METHOD(
        (std::pair<std::vector<std::string>, bool>),
        etlng::InitialLedgerLoadResult,
        loadInitialLedger,
        (uint32_t, uint32_t, etlng::InitialLoadObserverInterface&),
        (override)
@@ -136,7 +137,7 @@ public:
        return mock_->fetchLedger(sequence, getObjects, getObjectNeighbors);
    }

    std::pair<std::vector<std::string>, bool>
    etlng::InitialLedgerLoadResult
    loadInitialLedger(uint32_t sequence, uint32_t maxLedger, etlng::InitialLoadObserverInterface& observer) override
    {
        return mock_->loadInitialLedger(sequence, maxLedger, observer);

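A detail visible in the mock above: because MOCK_METHOD is a macro, a return type containing a bare comma, such as std::pair<std::vector<std::string>, bool>, has to be wrapped in an extra pair of parentheses, while the new InitialLedgerLoadResult alias needs no shielding. A compilable illustration with invented method names:

#include <gmock/gmock.h>
#include <string>
#include <utility>
#include <vector>

using LoadResult = std::pair<std::vector<std::string>, bool>;  // the alias hides the comma

struct MockExample {
    // The comma inside std::pair<...> must be parenthesized for the macro:
    MOCK_METHOD((std::pair<std::vector<std::string>, bool>), loadRaw, (uint32_t));
    // An alias (or an expected-style wrapper type) sidesteps the issue:
    MOCK_METHOD(LoadResult, loadAliased, (uint32_t));
};

Switching the interface to InitialLedgerLoadResult therefore simplifies every mock that has to spell out the type.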
@@ -328,18 +328,21 @@ createMetaDataForBookChange(
    std::string_view issueId,
    uint32_t transactionIndex,
    int finalTakerGets,
    int perviousTakerGets,
    int previousTakerGets,
    int finalTakerPays,
    int perviousTakerPays
    int previousTakerPays,
    std::optional<std::string_view> domain
)
{
    ripple::STObject finalFields(ripple::sfFinalFields);
    ripple::Issue const issue1 = getIssue(currency, issueId);
    finalFields.setFieldAmount(ripple::sfTakerPays, ripple::STAmount(issue1, finalTakerPays));
    finalFields.setFieldAmount(ripple::sfTakerGets, ripple::STAmount(finalTakerGets, false));
    if (domain.has_value())
        finalFields.setFieldH256(ripple::sfDomainID, ripple::uint256{*domain});
    ripple::STObject previousFields(ripple::sfPreviousFields);
    previousFields.setFieldAmount(ripple::sfTakerPays, ripple::STAmount(issue1, perviousTakerPays));
    previousFields.setFieldAmount(ripple::sfTakerGets, ripple::STAmount(perviousTakerGets, false));
    previousFields.setFieldAmount(ripple::sfTakerPays, ripple::STAmount(issue1, previousTakerPays));
    previousFields.setFieldAmount(ripple::sfTakerGets, ripple::STAmount(previousTakerGets, false));
    ripple::STObject metaObj(ripple::sfTransactionMetaData);
    ripple::STArray metaArray{1};
    ripple::STObject node(ripple::sfModifiedNode);
@@ -484,7 +487,7 @@ createOfferLedgerObject(
    std::string_view getsIssueId,
    std::string_view paysIssueId,
    std::string_view dirId,
    std::optional<std::string_view> const& domain
    std::optional<std::string_view> domain
)
{
    ripple::STObject offer(ripple::sfLedgerEntry);

@@ -182,9 +182,10 @@ createMetaDataForBookChange(
    std::string_view issueId,
    uint32_t transactionIndex,
    int finalTakerGets,
    int perviousTakerGets,
    int previousTakerGets,
    int finalTakerPays,
    int perviousTakerPays
    int previousTakerPays,
    std::optional<std::string_view> domain = std::nullopt
);

/*
@@ -258,7 +259,7 @@ createOfferLedgerObject(
    std::string_view getsIssueId,
    std::string_view paysIssueId,
    std::string_view bookDirId,
    std::optional<std::string_view> const& domain = std::nullopt
    std::optional<std::string_view> domain = std::nullopt
);

[[nodiscard]] ripple::STObject

@@ -43,7 +43,7 @@
using namespace util::config;

struct BackendCassandraFactoryTest : SyncAsioContextTest, util::prometheus::WithPrometheus {
    constexpr static auto kKEYSPACE = "factory_test";
    static constexpr auto kKEYSPACE = "factory_test";

protected:
    ClioConfigDefinition cfg_{

@@ -1424,7 +1424,7 @@ TEST_F(BackendCassandraNodeMessageTest, UpdatingMessageKeepsItAlive)
{
#if defined(__APPLE__)
    GTEST_SKIP() << "Skipping test on Apple platform due to slow DB";
#endif
#else
    static boost::uuids::uuid const kUUID = generateUuid();
    static std::string const kUPDATED_MESSAGE = "updated message";

@@ -1442,4 +1442,5 @@ TEST_F(BackendCassandraNodeMessageTest, UpdatingMessageKeepsItAlive)
        EXPECT_EQ(uuid, kUUID);
        EXPECT_EQ(message, kUPDATED_MESSAGE);
    });
#endif
}

@@ -72,7 +72,7 @@ public:
    void
    writeTxIndexExample(std::string const& hash, std::string const& txType)
    {
        auto static kINSERT_TX_INDEX_EXAMPLE = [this]() {
        static auto kINSERT_TX_INDEX_EXAMPLE = [this]() {
            return handle_.prepare(fmt::format(
                R"(
                INSERT INTO {}
@@ -96,7 +96,7 @@ public:
    std::optional<std::string>
    fetchTxTypeViaID(std::string const& hash, boost::asio::yield_context ctx)
    {
        auto static kFETCH_TX_TYPE = [this]() {
        static auto kFETCH_TX_TYPE = [this]() {
            return handle_.prepare(fmt::format(
                R"(
                SELECT tx_type FROM {} WHERE hash = ?
@@ -129,7 +129,7 @@ public:
    std::optional<std::uint64_t>
    fetchTxIndexTableSize(boost::asio::yield_context ctx)
    {
        auto static kINSERT_TX_INDEX_EXAMPLE = [this]() {
        static auto kINSERT_TX_INDEX_EXAMPLE = [this]() {
            return handle_.prepare(fmt::format(
                R"(
                SELECT COUNT(*) FROM {}
@@ -168,7 +168,7 @@ public:
    void
    writeLedgerAccountHash(std::uint64_t sequence, std::string const& accountHash)
    {
        auto static kINSERT_LEDGER_EXAMPLE = [this]() {
        static auto kINSERT_LEDGER_EXAMPLE = [this]() {
            return handle_.prepare(fmt::format(
                R"(
                INSERT INTO {}
@@ -192,7 +192,7 @@ public:
    std::optional<ripple::uint256>
    fetchAccountHashViaSequence(std::uint64_t sequence, boost::asio::yield_context ctx)
    {
        auto static kFETCH_ACCOUNT_HASH = [this]() {
        static auto kFETCH_ACCOUNT_HASH = [this]() {
            return handle_.prepare(fmt::format(
                R"(
                SELECT account_hash FROM {} WHERE sequence = ?
@@ -225,7 +225,7 @@ public:
    std::optional<std::uint64_t>
    fetchLedgerTableSize(boost::asio::yield_context ctx)
    {
        auto static kINSERT_LEDGER_EXAMPLE = [this]() {
        static auto kINSERT_LEDGER_EXAMPLE = [this]() {
            return handle_.prepare(fmt::format(
                R"(
                SELECT COUNT(*) FROM {}
@@ -280,7 +280,7 @@ public:
    std::optional<std::uint64_t>
    fetchDiffTableSize(boost::asio::yield_context ctx)
    {
        auto static kCOUNT_DIFF = [this]() {
        static auto kCOUNT_DIFF = [this]() {
            return handle_.prepare(fmt::format(
                R"(
                SELECT COUNT(*) FROM {}

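Every hunk in this test helper is the same mechanical change: auto static reordered to the conventional static auto. The underlying idiom is worth noting: a function-local static holding a prepared statement is initialized exactly once, on first call, under C++11's thread-safe "magic statics", so the CQL is prepared a single time and reused across calls. A reduced sketch of the idiom, where expensivePrepare stands in for handle_.prepare(fmt::format(...)):

#include <string>

std::string
expensivePrepare()  // stand-in for a one-time, costly preparation step
{
    return "SELECT tx_type FROM tx_index WHERE hash = ?";
}

std::string const&
preparedStatement()
{
    static auto const kPREPARED = expensivePrepare();  // runs once, thread-safe
    return kPREPARED;
}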
@@ -43,7 +43,7 @@ struct ETLExtractorTest : util::prometheus::WithPrometheus, NoLoggerFixture {
    {
        state_.isStopping = false;
        state_.writeConflict = false;
        state_.isReadOnly = false;
        state_.isStrictReadonly = false;
        state_.isWriting = false;
    }

@@ -62,7 +62,7 @@ using namespace util::config;
using testing::Return;
using namespace util::prometheus;

constexpr static auto const kTWO_SOURCES_LEDGER_RESPONSE = R"JSON({
static constexpr auto kTWO_SOURCES_LEDGER_RESPONSE = R"JSON({
    "etl_sources": [
        {
            "ip": "127.0.0.1",
@@ -77,7 +77,7 @@ constexpr static auto const kTWO_SOURCES_LEDGER_RESPONSE = R"JSON({
    ]
})JSON";

constexpr static auto const kTHREE_SOURCES_LEDGER_RESPONSE = R"JSON({
static constexpr auto kTHREE_SOURCES_LEDGER_RESPONSE = R"JSON({
    "etl_sources": [
        {
            "ip": "127.0.0.1",

@@ -64,7 +64,7 @@ struct ETLTransformerTest : util::prometheus::WithPrometheus, MockBackendTest {
    {
        state_.isStopping = false;
        state_.writeConflict = false;
        state_.isReadOnly = false;
        state_.isStrictReadonly = false;
        state_.isWriting = false;
    }

@@ -42,7 +42,7 @@ protected:

TEST_F(AmendmentBlockHandlerNgTests, CallTonotifyAmendmentBlockedSetsStateAndRepeatedlyCallsAction)
{
    constexpr static auto kMAX_ITERATIONS = 10uz;
    static constexpr auto kMAX_ITERATIONS = 10uz;
    etlng::impl::AmendmentBlockHandler handler{ctx_, state_, std::chrono::nanoseconds{1}, actionMock_.AsStdFunction()};
    auto counter = 0uz;
    std::binary_semaphore stop{0};

@@ -17,21 +17,24 @@
 */
//==============================================================================

#include "data/BackendInterface.hpp"
#include "data/Types.hpp"
#include "etl/ETLState.hpp"
#include "etl/NetworkValidatedLedgersInterface.hpp"
#include "etl/SystemState.hpp"
#include "etlng/CacheLoaderInterface.hpp"
#include "etlng/CacheUpdaterInterface.hpp"
#include "etlng/ETLService.hpp"
#include "etlng/ExtractorInterface.hpp"
#include "etlng/InitialLoadObserverInterface.hpp"
#include "etlng/LoadBalancerInterface.hpp"
#include "etlng/LoaderInterface.hpp"
#include "etlng/Models.hpp"
#include "etlng/MonitorInterface.hpp"
#include "etlng/MonitorProviderInterface.hpp"
#include "etlng/TaskManagerInterface.hpp"
#include "etlng/TaskManagerProviderInterface.hpp"
#include "util/BinaryTestObject.hpp"
#include "util/MockAssert.hpp"
#include "util/MockBackendTestFixture.hpp"
#include "util/MockLedgerPublisher.hpp"
#include "util/MockLoadBalancer.hpp"
@@ -43,6 +46,7 @@
#include "util/async/context/BasicExecutionContext.hpp"
#include "util/async/context/SyncExecutionContext.hpp"
#include "util/async/impl/ErasedOperation.hpp"
#include "util/config/ConfigConstraints.hpp"
#include "util/config/ConfigDefinition.hpp"
#include "util/config/ConfigValue.hpp"
#include "util/config/Types.hpp"
@@ -62,6 +66,7 @@
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>

using namespace util::config;
@@ -71,8 +76,20 @@ constinit auto const kSEQ = 100;
constinit auto const kLEDGER_HASH = "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652";

struct MockMonitor : public etlng::MonitorInterface {
    MOCK_METHOD(void, notifyLedgerLoaded, (uint32_t), (override));
    MOCK_METHOD(boost::signals2::scoped_connection, subscribe, (SignalType::slot_type const&), (override));
    MOCK_METHOD(void, notifySequenceLoaded, (uint32_t), (override));
    MOCK_METHOD(void, notifyWriteConflict, (uint32_t), (override));
    MOCK_METHOD(
        boost::signals2::scoped_connection,
        subscribeToNewSequence,
        (NewSequenceSignalType::slot_type const&),
        (override)
    );
    MOCK_METHOD(
        boost::signals2::scoped_connection,
        subscribeToDbStalled,
        (DbStalledSignalType::slot_type const&),
        (override)
    );
    MOCK_METHOD(void, run, (std::chrono::steady_clock::duration), (override));
    MOCK_METHOD(void, stop, (), (override));
};
@@ -83,7 +100,8 @@ struct MockExtractor : etlng::ExtractorInterface {
};

struct MockLoader : etlng::LoaderInterface {
    MOCK_METHOD(void, load, (etlng::model::LedgerData const&), (override));
    using ExpectedType = std::expected<void, etlng::LoaderError>;
    MOCK_METHOD(ExpectedType, load, (etlng::model::LedgerData const&), (override));
    MOCK_METHOD(std::optional<ripple::LedgerHeader>, loadInitialLedger, (etlng::model::LedgerData const&), (override));
};

@@ -118,7 +136,23 @@ struct MockTaskManagerProvider : etlng::TaskManagerProviderInterface {
    MOCK_METHOD(
        std::unique_ptr<etlng::TaskManagerInterface>,
        make,
        (util::async::AnyExecutionContext, std::reference_wrapper<etlng::MonitorInterface>, uint32_t),
        (util::async::AnyExecutionContext,
         std::reference_wrapper<etlng::MonitorInterface>,
         uint32_t,
         std::optional<uint32_t>),
        (override)
    );
};

struct MockMonitorProvider : etlng::MonitorProviderInterface {
    MOCK_METHOD(
        std::unique_ptr<etlng::MonitorInterface>,
        make,
        (util::async::AnyExecutionContext,
         std::shared_ptr<BackendInterface>,
         std::shared_ptr<etl::NetworkValidatedLedgersInterface>,
         uint32_t,
         std::chrono::steady_clock::duration),
        (override)
    );
};
@@ -134,7 +168,7 @@ createTestData(uint32_t seq)
        .edgeKeys = {},
        .header = header,
        .rawHeader = {},
        .seq = seq
        .seq = seq,
    };
}
} // namespace
@@ -150,6 +184,9 @@ struct ETLServiceTests : util::prometheus::WithPrometheus, MockBackendTest {
protected:
    SameThreadTestContext ctx_;
    util::config::ClioConfigDefinition config_{
        {"read_only", ConfigValue{ConfigType::Boolean}.defaultValue(false)},
        {"start_sequence", ConfigValue{ConfigType::Integer}.optional().withConstraint(gValidateUint32)},
        {"finish_sequence", ConfigValue{ConfigType::Integer}.optional().withConstraint(gValidateUint32)},
        {"extractor_threads", ConfigValue{ConfigType::Integer}.defaultValue(4)},
        {"io_threads", ConfigValue{ConfigType::Integer}.defaultValue(2)},
        {"cache.num_diffs", ConfigValue{ConfigType::Integer}.defaultValue(32)},
@@ -159,7 +196,7 @@ protected:
        {"cache.page_fetch_size", ConfigValue{ConfigType::Integer}.defaultValue(512)},
        {"cache.load", ConfigValue{ConfigType::String}.defaultValue("async")}
    };
    StrictMockSubscriptionManagerSharedPtr subscriptions_;
    MockSubscriptionManagerSharedPtr subscriptions_;
    std::shared_ptr<testing::NiceMock<MockLoadBalancer>> balancer_ =
        std::make_shared<testing::NiceMock<MockLoadBalancer>>();
    std::shared_ptr<testing::NiceMock<MockNetworkValidatedLedgers>> ledgers_ =
@@ -176,6 +213,8 @@ protected:
        std::make_shared<testing::NiceMock<MockInitialLoadObserver>>();
    std::shared_ptr<testing::NiceMock<MockTaskManagerProvider>> taskManagerProvider_ =
        std::make_shared<testing::NiceMock<MockTaskManagerProvider>>();
    std::shared_ptr<testing::NiceMock<MockMonitorProvider>> monitorProvider_ =
        std::make_shared<testing::NiceMock<MockMonitorProvider>>();
    std::shared_ptr<etl::SystemState> systemState_ = std::make_shared<etl::SystemState>();

    etlng::ETLService service_{
@@ -191,6 +230,7 @@ protected:
        loader_,
        initialLoadObserver_,
        taskManagerProvider_,
        monitorProvider_,
        systemState_
    };
};
@@ -258,82 +298,244 @@ TEST_F(ETLServiceTests, LastCloseAgeSeconds)
TEST_F(ETLServiceTests, RunWithEmptyDatabase)
{
    auto mockTaskManager = std::make_unique<testing::NiceMock<MockTaskManager>>();
    auto& mockTaskManagerRef = *mockTaskManager;
    auto ledgerData = createTestData(kSEQ);

    testing::Sequence const s;
    EXPECT_CALL(*backend_, hardFetchLedgerRange(testing::_)).InSequence(s).WillOnce(testing::Return(std::nullopt));
    EXPECT_CALL(*backend_, hardFetchLedgerRange).InSequence(s).WillOnce(testing::Return(std::nullopt));
    EXPECT_CALL(*ledgers_, getMostRecent()).WillRepeatedly(testing::Return(kSEQ));
    EXPECT_CALL(*extractor_, extractLedgerOnly(kSEQ)).WillOnce(testing::Return(ledgerData));
    EXPECT_CALL(*balancer_, loadInitialLedger(kSEQ, testing::_, testing::_))
        .WillOnce(testing::Return(std::vector<std::string>{}));
    EXPECT_CALL(*loader_, loadInitialLedger(testing::_)).WillOnce(testing::Return(ripple::LedgerHeader{}));
    EXPECT_CALL(*backend_, hardFetchLedgerRange(testing::_))
    EXPECT_CALL(*loader_, loadInitialLedger).WillOnce(testing::Return(ripple::LedgerHeader{}));
    EXPECT_CALL(*backend_, hardFetchLedgerRange)
        .InSequence(s)
        .WillOnce(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
    EXPECT_CALL(*mockTaskManager, run(testing::_));
    EXPECT_CALL(*taskManagerProvider_, make(testing::_, testing::_, kSEQ + 1))
    EXPECT_CALL(mockTaskManagerRef, run);
    EXPECT_CALL(*taskManagerProvider_, make(testing::_, testing::_, kSEQ + 1, testing::_))
        .WillOnce(testing::Return(std::unique_ptr<etlng::TaskManagerInterface>(mockTaskManager.release())));
    EXPECT_CALL(*monitorProvider_, make(testing::_, testing::_, testing::_, testing::_, testing::_))
        .WillOnce([](auto, auto, auto, auto, auto) { return std::make_unique<testing::NiceMock<MockMonitor>>(); });

    service_.run();
}

TEST_F(ETLServiceTests, RunWithPopulatedDatabase)
{
    auto mockTaskManager = std::make_unique<testing::NiceMock<MockTaskManager>>();

    EXPECT_CALL(*backend_, hardFetchLedgerRange(testing::_))
    EXPECT_CALL(*backend_, hardFetchLedgerRange)
        .WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
    EXPECT_CALL(*monitorProvider_, make).WillOnce([](auto, auto, auto, auto, auto) {
        return std::make_unique<testing::NiceMock<MockMonitor>>();
    });
    EXPECT_CALL(*ledgers_, getMostRecent()).WillRepeatedly(testing::Return(kSEQ));
    EXPECT_CALL(*cacheLoader_, load(kSEQ));
    EXPECT_CALL(*mockTaskManager, run(testing::_));
    EXPECT_CALL(*taskManagerProvider_, make(testing::_, testing::_, kSEQ + 1))
        .WillOnce(testing::Return(std::unique_ptr<etlng::TaskManagerInterface>(mockTaskManager.release())));

    service_.run();
}

TEST_F(ETLServiceTests, WaitForValidatedLedgerIsAborted)
{
    EXPECT_CALL(*backend_, hardFetchLedgerRange(testing::_)).WillOnce(testing::Return(std::nullopt));
    EXPECT_CALL(*backend_, hardFetchLedgerRange).WillOnce(testing::Return(std::nullopt));
    EXPECT_CALL(*ledgers_, getMostRecent()).Times(2).WillRepeatedly(testing::Return(std::nullopt));

    // No other calls should happen because we exit early
    EXPECT_CALL(*extractor_, extractLedgerOnly(testing::_)).Times(0);
    EXPECT_CALL(*extractor_, extractLedgerOnly).Times(0);
    EXPECT_CALL(*balancer_, loadInitialLedger(testing::_, testing::_, testing::_)).Times(0);
    EXPECT_CALL(*loader_, loadInitialLedger(testing::_)).Times(0);
    EXPECT_CALL(*taskManagerProvider_, make(testing::_, testing::_, testing::_)).Times(0);
    EXPECT_CALL(*loader_, loadInitialLedger).Times(0);
    EXPECT_CALL(*taskManagerProvider_, make).Times(0);

    service_.run();
}

struct ETLServiceAssertTests : common::util::WithMockAssert, ETLServiceTests {};

TEST_F(ETLServiceAssertTests, FailToLoadInitialLedger)
TEST_F(ETLServiceTests, HandlesWriteConflictInMonitorSubscription)
{
    EXPECT_CALL(*backend_, hardFetchLedgerRange(testing::_)).WillOnce(testing::Return(std::nullopt));
    auto mockMonitor = std::make_unique<testing::NiceMock<MockMonitor>>();
    auto& mockMonitorRef = *mockMonitor;
    std::function<void(uint32_t)> capturedCallback;

    EXPECT_CALL(*monitorProvider_, make).WillOnce([&mockMonitor](auto, auto, auto, auto, auto) {
        return std::move(mockMonitor);
    });

    EXPECT_CALL(mockMonitorRef, subscribeToNewSequence).WillOnce([&capturedCallback](auto&& callback) {
        capturedCallback = callback;
        return boost::signals2::scoped_connection{};
    });
    EXPECT_CALL(mockMonitorRef, subscribeToDbStalled);
    EXPECT_CALL(mockMonitorRef, run);

    EXPECT_CALL(*backend_, hardFetchLedgerRange)
        .WillOnce(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
    EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
    EXPECT_CALL(*cacheLoader_, load(kSEQ));

    service_.run();
    systemState_->writeConflict = true;

    EXPECT_CALL(*publisher_, publish(kSEQ + 1, testing::_, testing::_));
    ASSERT_TRUE(capturedCallback);
    capturedCallback(kSEQ + 1);

    EXPECT_FALSE(systemState_->writeConflict);
    EXPECT_FALSE(systemState_->isWriting);
}

TEST_F(ETLServiceTests, NormalFlowInMonitorSubscription)
{
    auto mockMonitor = std::make_unique<testing::NiceMock<MockMonitor>>();
    auto& mockMonitorRef = *mockMonitor;
    std::function<void(uint32_t)> capturedCallback;

    EXPECT_CALL(*monitorProvider_, make).WillOnce([&mockMonitor](auto, auto, auto, auto, auto) {
        return std::move(mockMonitor);
    });

    EXPECT_CALL(mockMonitorRef, subscribeToNewSequence).WillOnce([&capturedCallback](auto callback) {
        capturedCallback = callback;
        return boost::signals2::scoped_connection{};
    });
    EXPECT_CALL(mockMonitorRef, subscribeToDbStalled);
    EXPECT_CALL(mockMonitorRef, run);

    EXPECT_CALL(*backend_, hardFetchLedgerRange)
        .WillOnce(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
    EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
    EXPECT_CALL(*cacheLoader_, load(kSEQ));

    service_.run();
    systemState_->isWriting = false;
    std::vector<data::LedgerObject> const dummyDiff = {};

    EXPECT_CALL(*backend_, fetchLedgerDiff(kSEQ + 1, testing::_)).WillOnce(testing::Return(dummyDiff));
    EXPECT_CALL(*cacheUpdater_, update(kSEQ + 1, testing::A<std::vector<data::LedgerObject> const&>()));
    EXPECT_CALL(*publisher_, publish(kSEQ + 1, testing::_, testing::_));

    ASSERT_TRUE(capturedCallback);
    capturedCallback(kSEQ + 1);
}

TEST_F(ETLServiceTests, AttemptTakeoverWriter)
{
    auto mockMonitor = std::make_unique<testing::NiceMock<MockMonitor>>();
    auto& mockMonitorRef = *mockMonitor;
    std::function<void()> capturedDbStalledCallback;

    EXPECT_CALL(*monitorProvider_, make).WillOnce([&mockMonitor](auto, auto, auto, auto, auto) {
        return std::move(mockMonitor);
    });

    EXPECT_CALL(mockMonitorRef, subscribeToNewSequence);
    EXPECT_CALL(mockMonitorRef, subscribeToDbStalled).WillOnce([&capturedDbStalledCallback](auto callback) {
        capturedDbStalledCallback = callback;
        return boost::signals2::scoped_connection{};
    });
    EXPECT_CALL(mockMonitorRef, run);

    EXPECT_CALL(*backend_, hardFetchLedgerRange)
        .WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
    EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
    EXPECT_CALL(*cacheLoader_, load(kSEQ));

    service_.run();
    systemState_->isStrictReadonly = false;  // writer node
    systemState_->isWriting = false;         // but starts in readonly as usual

    auto mockTaskManager = std::make_unique<testing::NiceMock<MockTaskManager>>();
    auto& mockTaskManagerRef = *mockTaskManager;
    EXPECT_CALL(mockTaskManagerRef, run);

    EXPECT_CALL(*taskManagerProvider_, make(testing::_, testing::_, kSEQ + 1, testing::_))
        .WillOnce(testing::Return(std::move(mockTaskManager)));

    ASSERT_TRUE(capturedDbStalledCallback);
    capturedDbStalledCallback();

    EXPECT_TRUE(systemState_->isWriting);  // should attempt to become writer
}

TEST_F(ETLServiceTests, GiveUpWriterAfterWriteConflict)
{
    auto mockMonitor = std::make_unique<testing::NiceMock<MockMonitor>>();
    auto& mockMonitorRef = *mockMonitor;

    std::function<void(uint32_t)> capturedCallback;

    EXPECT_CALL(*monitorProvider_, make).WillOnce([&mockMonitor](auto, auto, auto, auto, auto) {
        return std::move(mockMonitor);
    });
    EXPECT_CALL(mockMonitorRef, subscribeToNewSequence).WillOnce([&capturedCallback](auto callback) {
        capturedCallback = callback;
        return boost::signals2::scoped_connection{};
    });
    EXPECT_CALL(mockMonitorRef, subscribeToDbStalled);
    EXPECT_CALL(mockMonitorRef, run);

    EXPECT_CALL(*backend_, hardFetchLedgerRange)
        .WillOnce(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
    EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
    EXPECT_CALL(*cacheLoader_, load(kSEQ));

    service_.run();
    systemState_->isWriting = true;
    systemState_->writeConflict = true;  // got a write conflict along the way

    EXPECT_CALL(*publisher_, publish(kSEQ + 1, testing::_, testing::_));

    ASSERT_TRUE(capturedCallback);
    capturedCallback(kSEQ + 1);

    EXPECT_FALSE(systemState_->isWriting);      // gives up writing
    EXPECT_FALSE(systemState_->writeConflict);  // and removes write conflict flag
}

TEST_F(ETLServiceTests, CancelledLoadInitialLedger)
{
    EXPECT_CALL(*backend_, hardFetchLedgerRange).WillOnce(testing::Return(std::nullopt));
    EXPECT_CALL(*ledgers_, getMostRecent()).WillRepeatedly(testing::Return(kSEQ));
    EXPECT_CALL(*extractor_, extractLedgerOnly(kSEQ)).WillOnce(testing::Return(std::nullopt));

    // These calls should not happen because loading the initial ledger fails
    EXPECT_CALL(*balancer_, loadInitialLedger(testing::_, testing::_, testing::_)).Times(0);
    EXPECT_CALL(*loader_, loadInitialLedger(testing::_)).Times(0);
    EXPECT_CALL(*taskManagerProvider_, make(testing::_, testing::_, testing::_)).Times(0);
    EXPECT_CALL(*loader_, loadInitialLedger).Times(0);
    EXPECT_CALL(*taskManagerProvider_, make).Times(0);

    EXPECT_CLIO_ASSERT_FAIL({ service_.run(); });
    service_.run();
}

TEST_F(ETLServiceAssertTests, WaitForValidatedLedgerIsAbortedLeadToFailToLoadInitialLedger)
TEST_F(ETLServiceTests, WaitForValidatedLedgerIsAbortedLeadToFailToLoadInitialLedger)
{
    testing::Sequence const s;
    EXPECT_CALL(*backend_, hardFetchLedgerRange(testing::_)).WillOnce(testing::Return(std::nullopt));
    EXPECT_CALL(*backend_, hardFetchLedgerRange).WillOnce(testing::Return(std::nullopt));
    EXPECT_CALL(*ledgers_, getMostRecent()).InSequence(s).WillOnce(testing::Return(std::nullopt));
    EXPECT_CALL(*ledgers_, getMostRecent()).InSequence(s).WillOnce(testing::Return(kSEQ));

    // No other calls should happen because we exit early
    EXPECT_CALL(*extractor_, extractLedgerOnly(testing::_)).Times(0);
    EXPECT_CALL(*extractor_, extractLedgerOnly).Times(0);
    EXPECT_CALL(*balancer_, loadInitialLedger(testing::_, testing::_, testing::_)).Times(0);
    EXPECT_CALL(*loader_, loadInitialLedger(testing::_)).Times(0);
    EXPECT_CALL(*taskManagerProvider_, make(testing::_, testing::_, testing::_)).Times(0);
    EXPECT_CALL(*loader_, loadInitialLedger).Times(0);
    EXPECT_CALL(*taskManagerProvider_, make).Times(0);

    EXPECT_CLIO_ASSERT_FAIL({ service_.run(); });
    service_.run();
}

TEST_F(ETLServiceTests, RunStopsIfInitialLoadIsCancelledByBalancer)
{
    constexpr uint32_t kMOCK_START_SEQUENCE = 123u;
    systemState_->isStrictReadonly = false;

    testing::Sequence const s;
    EXPECT_CALL(*backend_, hardFetchLedgerRange).WillOnce(testing::Return(std::nullopt));
    EXPECT_CALL(*ledgers_, getMostRecent).InSequence(s).WillOnce(testing::Return(kMOCK_START_SEQUENCE));
    EXPECT_CALL(*ledgers_, getMostRecent).InSequence(s).WillOnce(testing::Return(kMOCK_START_SEQUENCE + 10));

    auto const dummyLedgerData = createTestData(kMOCK_START_SEQUENCE);
    EXPECT_CALL(*extractor_, extractLedgerOnly(kMOCK_START_SEQUENCE)).WillOnce(testing::Return(dummyLedgerData));
    EXPECT_CALL(*balancer_, loadInitialLedger(testing::_, testing::_, testing::_))
        .WillOnce(testing::Return(std::unexpected{etlng::InitialLedgerLoadError::Cancelled}));

    service_.run();

    EXPECT_TRUE(systemState_->isWriting);
    EXPECT_FALSE(service_.isAmendmentBlocked());
    EXPECT_FALSE(service_.isCorruptionDetected());
}

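The monitor-subscription tests above all use the same trick: a gMock action captures the callback that the service registers on the mock monitor, and the test later invokes that callback by hand to simulate the signal firing. A reduced, compilable illustration of the pattern with invented types (Emitter, MockEmitter):

#include <gmock/gmock.h>
#include <functional>

struct Emitter {
    virtual ~Emitter() = default;
    virtual void subscribe(std::function<void(int)> const& cb) = 0;
};

struct MockEmitter : Emitter {
    MOCK_METHOD(void, subscribe, (std::function<void(int)> const&), (override));
};

TEST(CallbackCapture, InvokeLater)
{
    MockEmitter emitter;
    std::function<void(int)> captured;
    EXPECT_CALL(emitter, subscribe).WillOnce([&](auto const& cb) { captured = cb; });

    emitter.subscribe([](int) {});  // the system under test would register its handler here

    ASSERT_TRUE(captured);
    captured(42);  // the test drives the "signal" by hand, synchronously
}

This keeps the tests deterministic: instead of waiting for a real Monitor to fire, the test controls exactly when and with which sequence number the handler runs.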
@@ -21,14 +21,17 @@
#include "etl/ETLHelpers.hpp"
#include "etl/impl/GrpcSource.hpp"
#include "etlng/InitialLoadObserverInterface.hpp"
#include "etlng/LoadBalancerInterface.hpp"
#include "etlng/Models.hpp"
#include "etlng/impl/GrpcSource.hpp"
#include "util/AsioContextTestFixture.hpp"
#include "util/Assert.hpp"
#include "util/LoggerFixtures.hpp"
#include "util/MockXrpLedgerAPIService.hpp"
#include "util/Mutex.hpp"
#include "util/TestObject.hpp"

#include <boost/asio/spawn.hpp>
#include <gmock/gmock.h>
#include <grpcpp/server_context.h>
#include <grpcpp/support/status.h>
@@ -39,9 +42,11 @@
#include <xrpl/basics/strHex.h>

#include <atomic>
#include <condition_variable>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <future>
#include <map>
#include <mutex>
#include <optional>
@@ -62,7 +67,7 @@ struct MockLoadObserver : etlng::InitialLoadObserverInterface {
    );
};

struct GrpcSourceNgTests : NoLoggerFixture, tests::util::WithMockXrpLedgerAPIService {
struct GrpcSourceNgTests : virtual NoLoggerFixture, tests::util::WithMockXrpLedgerAPIService {
    GrpcSourceNgTests()
        : WithMockXrpLedgerAPIService("localhost:0"), grpcSource_("localhost", std::to_string(getXRPLMockPort()))
    {
@@ -184,9 +189,8 @@ TEST_F(GrpcSourceNgLoadInitialLedgerTests, GetLedgerDataNotFound)
        return grpc::Status{grpc::StatusCode::NOT_FOUND, "Not found"};
    });

    auto const [data, success] = grpcSource_.loadInitialLedger(sequence_, numMarkers_, observer_);
    EXPECT_TRUE(data.empty());
    EXPECT_FALSE(success);
    auto const res = grpcSource_.loadInitialLedger(sequence_, numMarkers_, observer_);
    EXPECT_FALSE(res.has_value());
}

TEST_F(GrpcSourceNgLoadInitialLedgerTests, ObserverCalledCorrectly)
@@ -219,12 +223,12 @@ TEST_F(GrpcSourceNgLoadInitialLedgerTests, ObserverCalledCorrectly)
        EXPECT_EQ(data.size(), 1);
    });

    auto const [data, success] = grpcSource_.loadInitialLedger(sequence_, numMarkers_, observer_);
    auto const res = grpcSource_.loadInitialLedger(sequence_, numMarkers_, observer_);

    EXPECT_TRUE(success);
    EXPECT_EQ(data.size(), numMarkers_);
    EXPECT_TRUE(res.has_value());
    EXPECT_EQ(res.value().size(), numMarkers_);

    EXPECT_EQ(data, std::vector<std::string>(4, keyStr));
    EXPECT_EQ(res.value(), std::vector<std::string>(4, keyStr));
}

TEST_F(GrpcSourceNgLoadInitialLedgerTests, DataTransferredAndObserverCalledCorrectly)
@@ -284,12 +288,73 @@ TEST_F(GrpcSourceNgLoadInitialLedgerTests, DataTransferredAndObserverCalledCorre
        total += data.size();
    });

    auto const [data, success] = grpcSource_.loadInitialLedger(sequence_, numMarkers_, observer_);
    auto const res = grpcSource_.loadInitialLedger(sequence_, numMarkers_, observer_);

    EXPECT_TRUE(success);
    EXPECT_EQ(data.size(), numMarkers_);
    EXPECT_TRUE(res.has_value());
    EXPECT_EQ(res.value().size(), numMarkers_);
    EXPECT_EQ(total, totalKeys);
    EXPECT_EQ(totalWithLastKey + totalWithoutLastKey, numMarkers_ * batchesPerMarker);
    EXPECT_EQ(totalWithoutLastKey, numMarkers_);
    EXPECT_EQ(totalWithLastKey, (numMarkers_ - 1) * batchesPerMarker);
}

struct GrpcSourceStopTests : GrpcSourceNgTests, SyncAsioContextTest {};

TEST_F(GrpcSourceStopTests, LoadInitialLedgerStopsWhenRequested)
{
    uint32_t const sequence = 123u;
    uint32_t const numMarkers = 1;

    std::mutex mtx;
    std::condition_variable cvGrpcCallActive;
    std::condition_variable cvStopCalled;
    bool grpcCallIsActive = false;
    bool stopHasBeenCalled = false;

    EXPECT_CALL(mockXrpLedgerAPIService, GetLedgerData)
        .WillOnce([&](grpc::ServerContext*,
                      org::xrpl::rpc::v1::GetLedgerDataRequest const* request,
                      org::xrpl::rpc::v1::GetLedgerDataResponse* response) {
            EXPECT_EQ(request->ledger().sequence(), sequence);
            EXPECT_EQ(request->user(), "ETL");

            {
                std::unique_lock const lk(mtx);
                grpcCallIsActive = true;
            }
            cvGrpcCallActive.notify_one();

            {
                std::unique_lock lk(mtx);
                cvStopCalled.wait(lk, [&] { return stopHasBeenCalled; });
            }

            response->set_is_unlimited(true);
            return grpc::Status::OK;
        });

    EXPECT_CALL(observer_, onInitialLoadGotMoreObjects).Times(0);

    auto loadTask = std::async(std::launch::async, [&]() {
        return grpcSource_.loadInitialLedger(sequence, numMarkers, observer_);
    });

    {
        std::unique_lock lk(mtx);
        cvGrpcCallActive.wait(lk, [&] { return grpcCallIsActive; });
    }

    runSyncOperation([&](boost::asio::yield_context yield) {
        grpcSource_.stop(yield);
        {
            std::unique_lock const lk(mtx);
            stopHasBeenCalled = true;
        }
        cvStopCalled.notify_one();
    });

    auto const res = loadTask.get();

    ASSERT_FALSE(res.has_value());
    EXPECT_EQ(res.error(), etlng::InitialLedgerLoadError::Cancelled);
}

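GrpcSourceStopTests coordinates the blocked gRPC handler and the stopping thread with a pair of condition variables: the worker announces it has entered the call, the test then issues stop(), and only afterwards releases the handler. The general shape of that two-phase handshake, as a self-contained example:

#include <condition_variable>
#include <mutex>
#include <thread>

int
main()
{
    std::mutex mtx;
    std::condition_variable cvStarted, cvRelease;
    bool started = false, release = false;

    std::thread worker([&] {
        {
            std::lock_guard const lk(mtx);
            started = true;  // phase 1: announce we are inside the critical call
        }
        cvStarted.notify_one();

        std::unique_lock lk(mtx);
        cvRelease.wait(lk, [&] { return release; });  // phase 2: wait to be released
    });

    {
        std::unique_lock lk(mtx);
        cvStarted.wait(lk, [&] { return started; });  // wait until worker is blocked
    }
    {
        std::lock_guard const lk(mtx);
        release = true;  // e.g. set after calling stop() on the object under test
    }
    cvRelease.notify_one();
    worker.join();
}

Both predicates are checked under the mutex, so the handshake is free of lost-wakeup races regardless of which thread runs first.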
@@ -69,52 +69,64 @@ struct ETLLedgerPublisherNgTest : util::prometheus::WithPrometheus, MockBackendT
    StrictMockSubscriptionManagerSharedPtr mockSubscriptionManagerPtr;
};

TEST_F(ETLLedgerPublisherNgTest, PublishLedgerHeaderIsWritingFalseAndCacheDisabled)
TEST_F(ETLLedgerPublisherNgTest, PublishLedgerHeaderSkipDueToAge)
{
    etl::SystemState dummyState;
    dummyState.isWriting = false;
    // Use kAGE (800) which is > MAX_LEDGER_AGE_SECONDS (600) to test skipping
    auto const dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, kAGE);
    impl::LedgerPublisher publisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
    publisher.publish(dummyLedgerHeader);
    EXPECT_CALL(*backend_, fetchLedgerDiff(kSEQ, _)).Times(0);
    auto dummyState = etl::SystemState{};
    auto publisher = impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);

    // setLastPublishedSequence not in strand, should verify before run
    backend_->setRange(kSEQ - 1, kSEQ);
    publisher.publish(dummyLedgerHeader);

    // Verify last published sequence is set immediately
    EXPECT_TRUE(publisher.getLastPublishedSequence());
    EXPECT_EQ(publisher.getLastPublishedSequence().value(), kSEQ);

    // Since age > MAX_LEDGER_AGE_SECONDS, these should not be called
    EXPECT_CALL(*backend_, doFetchLedgerObject).Times(0);
    EXPECT_CALL(*backend_, fetchAllTransactionsInLedger).Times(0);
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubLedger).Times(0);
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubBookChanges).Times(0);
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubTransaction).Times(0);

    ctx_.run();
    EXPECT_TRUE(backend_->fetchLedgerRange());
    EXPECT_EQ(backend_->fetchLedgerRange().value().minSequence, kSEQ);
    EXPECT_EQ(backend_->fetchLedgerRange().value().maxSequence, kSEQ);
}

TEST_F(ETLLedgerPublisherNgTest, PublishLedgerHeaderIsWritingFalseAndCacheEnabled)
TEST_F(ETLLedgerPublisherNgTest, PublishLedgerHeaderWithinAgeLimit)
{
    etl::SystemState dummyState;
    dummyState.isWriting = false;
    auto const dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, kAGE);
    impl::LedgerPublisher publisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
    // Use age 0 which is < MAX_LEDGER_AGE_SECONDS to ensure publishing happens
    auto const dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, 0);
    auto dummyState = etl::SystemState{};
    auto publisher = impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);

    backend_->setRange(kSEQ - 1, kSEQ);
    publisher.publish(dummyLedgerHeader);

    // setLastPublishedSequence not in strand, should verify before run
    // Verify last published sequence is set immediately
    EXPECT_TRUE(publisher.getLastPublishedSequence());
    EXPECT_EQ(publisher.getLastPublishedSequence().value(), kSEQ);

    EXPECT_CALL(*backend_, doFetchLedgerObject(ripple::keylet::fees().key, kSEQ, _))
        .WillOnce(Return(createLegacyFeeSettingBlob(1, 2, 3, 4, 0)));
    EXPECT_CALL(*backend_, fetchAllTransactionsInLedger(kSEQ, _))
        .WillOnce(Return(std::vector<TransactionAndMetadata>{}));

    EXPECT_CALL(*mockSubscriptionManagerPtr, pubLedger(_, _, fmt::format("{}-{}", kSEQ - 1, kSEQ), 0));
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubBookChanges);

    ctx_.run();
    EXPECT_TRUE(backend_->fetchLedgerRange());
    EXPECT_EQ(backend_->fetchLedgerRange().value().minSequence, kSEQ);
    EXPECT_EQ(backend_->fetchLedgerRange().value().maxSequence, kSEQ);
    EXPECT_TRUE(publisher.lastPublishAgeSeconds() <= 1);
}

TEST_F(ETLLedgerPublisherNgTest, PublishLedgerHeaderIsWritingTrue)
{
    etl::SystemState dummyState;
    auto dummyState = etl::SystemState{};
    dummyState.isWriting = true;
    auto const dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, kAGE);
    impl::LedgerPublisher publisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
    auto publisher = impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
    publisher.publish(dummyLedgerHeader);

    // setLastPublishedSequence not in strand, should verify before run
    EXPECT_TRUE(publisher.getLastPublishedSequence());
    EXPECT_EQ(publisher.getLastPublishedSequence().value(), kSEQ);

@@ -124,16 +136,15 @@ TEST_F(ETLLedgerPublisherNgTest, PublishLedgerHeaderIsWritingTrue)
|
||||
|
||||
TEST_F(ETLLedgerPublisherNgTest, PublishLedgerHeaderInRange)
|
||||
{
|
||||
etl::SystemState dummyState;
|
||||
auto dummyState = etl::SystemState{};
|
||||
dummyState.isWriting = true;
|
||||
|
||||
auto const dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, 0); // age is 0
|
||||
impl::LedgerPublisher publisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
|
||||
auto publisher = impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
|
||||
backend_->setRange(kSEQ - 1, kSEQ);
|
||||
|
||||
publisher.publish(dummyLedgerHeader);
|
||||
|
||||
// mock fetch fee
|
||||
EXPECT_CALL(*backend_, doFetchLedgerObject(ripple::keylet::fees().key, kSEQ, _))
|
||||
.WillOnce(Return(createLegacyFeeSettingBlob(1, 2, 3, 4, 0)));
|
||||
|
||||
@@ -145,10 +156,8 @@ TEST_F(ETLLedgerPublisherNgTest, PublishLedgerHeaderInRange)
|
||||
.peekData();
|
||||
t1.ledgerSequence = kSEQ;
|
||||
|
||||
// mock fetch transactions
|
||||
EXPECT_CALL(*backend_, fetchAllTransactionsInLedger).WillOnce(Return(std::vector<TransactionAndMetadata>{t1}));
|
||||
|
||||
// setLastPublishedSequence not in strand, should verify before run
|
||||
EXPECT_TRUE(publisher.getLastPublishedSequence());
|
||||
EXPECT_EQ(publisher.getLastPublishedSequence().value(), kSEQ);
|
||||
|
||||
@@ -158,26 +167,24 @@ TEST_F(ETLLedgerPublisherNgTest, PublishLedgerHeaderInRange)
|
||||
EXPECT_CALL(*mockSubscriptionManagerPtr, pubTransaction);
|
||||
|
||||
ctx_.run();
|
||||
// last publish time should be set
|
||||
EXPECT_TRUE(publisher.lastPublishAgeSeconds() <= 1);
|
||||
}
|
||||
|
||||
TEST_F(ETLLedgerPublisherNgTest, PublishLedgerHeaderCloseTimeGreaterThanNow)
|
||||
{
|
||||
etl::SystemState dummyState;
|
||||
auto dummyState = etl::SystemState{};
|
||||
dummyState.isWriting = true;
|
||||
|
||||
ripple::LedgerHeader dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, 0);
|
||||
auto dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, 0);
|
||||
auto const nowPlus10 = system_clock::now() + seconds(10);
|
||||
auto const closeTime = duration_cast<seconds>(nowPlus10.time_since_epoch()).count() - kRIPPLE_EPOCH_START;
|
||||
dummyLedgerHeader.closeTime = ripple::NetClock::time_point{seconds{closeTime}};
|
||||
|
||||
backend_->setRange(kSEQ - 1, kSEQ);
|
||||
|
||||
impl::LedgerPublisher publisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
|
||||
auto publisher = impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
|
||||
publisher.publish(dummyLedgerHeader);
|
||||
|
||||
// mock fetch fee
|
||||
EXPECT_CALL(*backend_, doFetchLedgerObject(ripple::keylet::fees().key, kSEQ, _))
|
||||
.WillOnce(Return(createLegacyFeeSettingBlob(1, 2, 3, 4, 0)));
|
||||
|
||||
@@ -189,37 +196,33 @@ TEST_F(ETLLedgerPublisherNgTest, PublishLedgerHeaderCloseTimeGreaterThanNow)
|
||||
.peekData();
|
||||
t1.ledgerSequence = kSEQ;
|
||||
|
||||
// mock fetch transactions
|
||||
EXPECT_CALL(*backend_, fetchAllTransactionsInLedger(kSEQ, _))
|
||||
.WillOnce(Return(std::vector<TransactionAndMetadata>{t1}));
|
||||
|
||||
// setLastPublishedSequence not in strand, should verify before run
EXPECT_TRUE(publisher.getLastPublishedSequence());
EXPECT_EQ(publisher.getLastPublishedSequence().value(), kSEQ);

EXPECT_CALL(*mockSubscriptionManagerPtr, pubLedger(_, _, fmt::format("{}-{}", kSEQ - 1, kSEQ), 1));
EXPECT_CALL(*mockSubscriptionManagerPtr, pubBookChanges);
// mock 1 transaction
EXPECT_CALL(*mockSubscriptionManagerPtr, pubTransaction);

ctx_.run();
// last publish time should be set
EXPECT_TRUE(publisher.lastPublishAgeSeconds() <= 1);
}

TEST_F(ETLLedgerPublisherNgTest, PublishLedgerSeqStopIsTrue)
{
etl::SystemState dummyState;
auto dummyState = etl::SystemState{};
dummyState.isStopping = true;
impl::LedgerPublisher publisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
auto publisher = impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
EXPECT_FALSE(publisher.publish(kSEQ, {}));
}

TEST_F(ETLLedgerPublisherNgTest, PublishLedgerSeqMaxAttempt)
{
etl::SystemState dummyState;
auto dummyState = etl::SystemState{};
dummyState.isStopping = false;
impl::LedgerPublisher publisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
auto publisher = impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);

static constexpr auto kMAX_ATTEMPT = 2;

@@ -231,9 +234,9 @@ TEST_F(ETLLedgerPublisherNgTest, PublishLedgerSeqMaxAttempt)

TEST_F(ETLLedgerPublisherNgTest, PublishLedgerSeqStopIsFalse)
{
etl::SystemState dummyState;
auto dummyState = etl::SystemState{};
dummyState.isStopping = false;
impl::LedgerPublisher publisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
auto publisher = impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);

LedgerRange const range{.minSequence = kSEQ, .maxSequence = kSEQ};
EXPECT_CALL(*backend_, hardFetchLedgerRange).WillOnce(Return(range));
@@ -247,16 +250,15 @@ TEST_F(ETLLedgerPublisherNgTest, PublishLedgerSeqStopIsFalse)

TEST_F(ETLLedgerPublisherNgTest, PublishMultipleTxInOrder)
{
etl::SystemState dummyState;
auto dummyState = etl::SystemState{};
dummyState.isWriting = true;

auto const dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, 0); // age is 0
impl::LedgerPublisher publisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
auto publisher = impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
backend_->setRange(kSEQ - 1, kSEQ);

publisher.publish(dummyLedgerHeader);

// mock fetch fee
EXPECT_CALL(*backend_, doFetchLedgerObject(ripple::keylet::fees().key, kSEQ, _))
.WillOnce(Return(createLegacyFeeSettingBlob(1, 2, 3, 4, 0)));

@@ -278,34 +280,31 @@ TEST_F(ETLLedgerPublisherNgTest, PublishMultipleTxInOrder)
t2.ledgerSequence = kSEQ;
t2.date = 2;

// mock fetch transactions
EXPECT_CALL(*backend_, fetchAllTransactionsInLedger(kSEQ, _))
.WillOnce(Return(std::vector<TransactionAndMetadata>{t1, t2}));

// setLastPublishedSequence not in strand, should verify before run
EXPECT_TRUE(publisher.getLastPublishedSequence());
EXPECT_EQ(publisher.getLastPublishedSequence().value(), kSEQ);

EXPECT_CALL(*mockSubscriptionManagerPtr, pubLedger(_, _, fmt::format("{}-{}", kSEQ - 1, kSEQ), 2));
EXPECT_CALL(*mockSubscriptionManagerPtr, pubBookChanges);
// should call pubTransaction t2 first (greater tx index)

Sequence const s;
EXPECT_CALL(*mockSubscriptionManagerPtr, pubTransaction(t2, _)).InSequence(s);
EXPECT_CALL(*mockSubscriptionManagerPtr, pubTransaction(t1, _)).InSequence(s);

ctx_.run();
// last publish time should be set
EXPECT_TRUE(publisher.lastPublishAgeSeconds() <= 1);
}

TEST_F(ETLLedgerPublisherNgTest, PublishVeryOldLedgerShouldSkip)
{
etl::SystemState dummyState;
auto dummyState = etl::SystemState{};
dummyState.isWriting = true;

// Create a ledger header with age (800) greater than MAX_LEDGER_AGE_SECONDS (600)
auto const dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, 800);
impl::LedgerPublisher publisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
auto publisher = impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
backend_->setRange(kSEQ - 1, kSEQ);

publisher.publish(dummyLedgerHeader);
@@ -322,12 +321,12 @@ TEST_F(ETLLedgerPublisherNgTest, PublishVeryOldLedgerShouldSkip)

TEST_F(ETLLedgerPublisherNgTest, PublishMultipleLedgersInQuickSuccession)
{
etl::SystemState dummyState;
auto dummyState = etl::SystemState{};
dummyState.isWriting = true;

auto const dummyLedgerHeader1 = createLedgerHeader(kLEDGER_HASH, kSEQ, 0);
auto const dummyLedgerHeader2 = createLedgerHeader(kLEDGER_HASH, kSEQ + 1, 0);
impl::LedgerPublisher publisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
auto publisher = impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
backend_->setRange(kSEQ - 1, kSEQ + 1);

// Publish two ledgers in quick succession
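
The skip behavior pinned down by PublishVeryOldLedgerShouldSkip comes down to a simple age gate; a minimal sketch, assuming the 600-second cutoff named in the test comment (the constant and helper below are illustrative stand-ins, not the actual LedgerPublisher internals):

#include <cstdint>

constexpr std::uint32_t kMAX_LEDGER_AGE_SECONDS = 600;  // cutoff the test comment refers to

// Returns true when a freshly committed ledger should be fanned out to subscribers.
inline bool shouldFanOut(std::uint32_t ledgerAgeSeconds)
{
    return ledgerAgeSeconds < kMAX_LEDGER_AGE_SECONDS;  // age 800 -> skip, age 0 -> publish
}

With that gate, the age-0 headers in the other tests publish immediately, while the age-800 header is persisted but never reaches pubLedger.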
@@ -19,6 +19,7 @@

#include "etlng/InitialLoadObserverInterface.hpp"
#include "etlng/LoadBalancer.hpp"
#include "etlng/LoadBalancerInterface.hpp"
#include "etlng/Models.hpp"
#include "etlng/Source.hpp"
#include "rpc/Errors.hpp"
@@ -459,7 +460,7 @@ struct LoadBalancerLoadInitialLedgerNgTests : LoadBalancerOnConnectHookNgTests {
protected:
uint32_t const sequence_ = 123;
uint32_t const numMarkers_ = 16;
std::pair<std::vector<std::string>, bool> const response_ = {{"1", "2", "3"}, true};
InitialLedgerLoadResult const response_{std::vector<std::string>{"1", "2", "3"}};
testing::StrictMock<InitialLoadObserverMock> observer_;
};

@@ -469,7 +470,7 @@ TEST_F(LoadBalancerLoadInitialLedgerNgTests, load)
EXPECT_CALL(sourceFactory_.sourceAt(0), loadInitialLedger(sequence_, numMarkers_, testing::_))
.WillOnce(Return(response_));

EXPECT_EQ(loadBalancer_->loadInitialLedger(sequence_, observer_, std::chrono::milliseconds{1}), response_.first);
EXPECT_EQ(loadBalancer_->loadInitialLedger(sequence_, observer_, std::chrono::milliseconds{1}), response_.value());
}

TEST_F(LoadBalancerLoadInitialLedgerNgTests, load_source0DoesntHaveLedger)
@@ -479,7 +480,7 @@ TEST_F(LoadBalancerLoadInitialLedgerNgTests, load_source0DoesntHaveLedger)
EXPECT_CALL(sourceFactory_.sourceAt(1), loadInitialLedger(sequence_, numMarkers_, testing::_))
.WillOnce(Return(response_));

EXPECT_EQ(loadBalancer_->loadInitialLedger(sequence_, observer_, std::chrono::milliseconds{1}), response_.first);
EXPECT_EQ(loadBalancer_->loadInitialLedger(sequence_, observer_, std::chrono::milliseconds{1}), response_.value());
}

TEST_F(LoadBalancerLoadInitialLedgerNgTests, load_bothSourcesDontHaveLedger)
@@ -489,26 +490,26 @@ TEST_F(LoadBalancerLoadInitialLedgerNgTests, load_bothSourcesDontHaveLedger)
EXPECT_CALL(sourceFactory_.sourceAt(1), loadInitialLedger(sequence_, numMarkers_, testing::_))
.WillOnce(Return(response_));

EXPECT_EQ(loadBalancer_->loadInitialLedger(sequence_, observer_, std::chrono::milliseconds{1}), response_.first);
EXPECT_EQ(loadBalancer_->loadInitialLedger(sequence_, observer_, std::chrono::milliseconds{1}), response_.value());
}

TEST_F(LoadBalancerLoadInitialLedgerNgTests, load_source0ReturnsStatusFalse)
{
EXPECT_CALL(sourceFactory_.sourceAt(0), hasLedger(sequence_)).WillOnce(Return(true));
EXPECT_CALL(sourceFactory_.sourceAt(0), loadInitialLedger(sequence_, numMarkers_, testing::_))
.WillOnce(Return(std::make_pair(std::vector<std::string>{}, false)));
.WillOnce(Return(std::unexpected{InitialLedgerLoadError::Errored}));
EXPECT_CALL(sourceFactory_.sourceAt(1), hasLedger(sequence_)).WillOnce(Return(true));
EXPECT_CALL(sourceFactory_.sourceAt(1), loadInitialLedger(sequence_, numMarkers_, testing::_))
.WillOnce(Return(response_));

EXPECT_EQ(loadBalancer_->loadInitialLedger(sequence_, observer_, std::chrono::milliseconds{1}), response_.first);
EXPECT_EQ(loadBalancer_->loadInitialLedger(sequence_, observer_, std::chrono::milliseconds{1}), response_.value());
}

struct LoadBalancerLoadInitialLedgerCustomNumMarkersNgTests : LoadBalancerConstructorNgTests {
protected:
uint32_t const numMarkers_ = 16;
uint32_t const sequence_ = 123;
std::pair<std::vector<std::string>, bool> const response_ = {{"1", "2", "3"}, true};
InitialLedgerLoadResult const response_{std::vector<std::string>{"1", "2", "3"}};
testing::StrictMock<InitialLoadObserverMock> observer_;
};

@@ -527,7 +528,7 @@ TEST_F(LoadBalancerLoadInitialLedgerCustomNumMarkersNgTests, loadInitialLedger)
EXPECT_CALL(sourceFactory_.sourceAt(0), loadInitialLedger(sequence_, numMarkers_, testing::_))
.WillOnce(Return(response_));

EXPECT_EQ(loadBalancer->loadInitialLedger(sequence_, observer_, std::chrono::milliseconds{1}), response_.first);
EXPECT_EQ(loadBalancer->loadInitialLedger(sequence_, observer_, std::chrono::milliseconds{1}), response_.value());
}

struct LoadBalancerFetchLegerNgTests : LoadBalancerOnConnectHookNgTests {
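
The recurring substitution in this file — response_.first becoming response_.value(), and the failure case returning std::unexpected — is the std::pair-to-std::expected migration of the initial-load result. A minimal sketch of the shape these tests assume (the real alias lives in etlng/Models.hpp; Errored is the only enumerator visible above, any others are omitted here):

#include <expected>
#include <string>
#include <vector>

namespace etlng {
enum class InitialLedgerLoadError { Errored };
using InitialLedgerLoadResult = std::expected<std::vector<std::string>, InitialLedgerLoadError>;
}  // namespace etlng

int main()
{
    etlng::InitialLedgerLoadResult const ok{std::vector<std::string>{"1", "2", "3"}};
    etlng::InitialLedgerLoadResult const bad = std::unexpected{etlng::InitialLedgerLoadError::Errored};
    return ok.has_value() && !bad.has_value() ? 0 : 1;  // success carries data, failure carries the reason
}

Compared with the old pair, a failed load can no longer be confused with an empty-but-successful one.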
@@ -18,6 +18,7 @@
//==============================================================================

#include "data/Types.hpp"
#include "etl/SystemState.hpp"
#include "etlng/InitialLoadObserverInterface.hpp"
#include "etlng/Models.hpp"
#include "etlng/RegistryInterface.hpp"
@@ -67,7 +68,8 @@ struct MockLoadObserver : etlng::InitialLoadObserverInterface {
struct LoadingTests : util::prometheus::WithPrometheus, MockBackendTest, MockAmendmentBlockHandlerTest {
protected:
std::shared_ptr<MockRegistry> mockRegistryPtr_ = std::make_shared<MockRegistry>();
Loader loader_{backend_, mockRegistryPtr_, mockAmendmentBlockHandlerPtr_};
std::shared_ptr<etl::SystemState> state_ = std::make_shared<etl::SystemState>();
Loader loader_{backend_, mockRegistryPtr_, mockAmendmentBlockHandlerPtr_, state_};
};

struct LoadingAssertTest : common::util::WithMockAssert, LoadingTests {};
@@ -104,6 +106,7 @@ TEST_F(LoadingTests, LoadInitialLedger)

TEST_F(LoadingTests, LoadSuccess)
{
state_->isWriting = true; // writer is active
auto const data = createTestData();

EXPECT_CALL(*backend_, doFinishWrites());
@@ -114,6 +117,7 @@ TEST_F(LoadingTests, LoadSuccess)

TEST_F(LoadingTests, LoadFailure)
{
state_->isWriting = true; // writer is active
auto const data = createTestData();

EXPECT_CALL(*backend_, doFinishWrites()).Times(0);
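
The fixture change above threads a shared etl::SystemState into the Loader, and both LoadSuccess and LoadFailure start by flagging the writer as active, so doFinishWrites() is only ever expected while isWriting holds. A compact sketch of that gate (hypothetical shape suggested by the fixture wiring, not the actual Loader::load):

#include <memory>

struct SystemState { bool isWriting = false; };  // stand-in for etl::SystemState

// Hypothetical guard: writes are only finalized while this node is the active writer.
inline bool mayFinishWrites(std::shared_ptr<SystemState> const& state)
{
    return state && state->isWriting;
}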
@@ -33,6 +33,7 @@
#include <cstddef>
#include <cstdint>
#include <functional>
#include <optional>
#include <semaphore>

using namespace etlng::impl;
@@ -40,6 +41,7 @@ using namespace data;

namespace {
constexpr auto kSTART_SEQ = 123u;
constexpr auto kNO_NEW_LEDGER_REPORT_DELAY = std::chrono::milliseconds(1u);
} // namespace

struct MonitorTests : util::prometheus::WithPrometheus, MockBackendTest {
@@ -47,8 +49,10 @@ protected:
util::async::CoroExecutionContext ctx_;
StrictMockNetworkValidatedLedgersPtr ledgers_;
testing::StrictMock<testing::MockFunction<void(uint32_t)>> actionMock_;
testing::StrictMock<testing::MockFunction<void()>> dbStalledMock_;

etlng::impl::Monitor monitor_ = etlng::impl::Monitor(ctx_, backend_, ledgers_, kSTART_SEQ);
etlng::impl::Monitor monitor_ =
etlng::impl::Monitor(ctx_, backend_, ledgers_, kSTART_SEQ, kNO_NEW_LEDGER_REPORT_DELAY);
};

TEST_F(MonitorTests, ConsumesAndNotifiesForAllOutstandingSequencesAtOnce)
@@ -65,7 +69,7 @@ TEST_F(MonitorTests, ConsumesAndNotifiesForAllOutstandingSequencesAtOnce)
unblock.release();
});

auto subscription = monitor_.subscribe(actionMock_.AsStdFunction());
auto subscription = monitor_.subscribeToNewSequence(actionMock_.AsStdFunction());
monitor_.run(std::chrono::milliseconds{10});
unblock.acquire();
}
@@ -88,7 +92,7 @@ TEST_F(MonitorTests, NotifiesForEachSequence)
unblock.release();
});

auto subscription = monitor_.subscribe(actionMock_.AsStdFunction());
auto subscription = monitor_.subscribeToNewSequence(actionMock_.AsStdFunction());
monitor_.run(std::chrono::milliseconds{1});
unblock.acquire();
}
@@ -106,7 +110,7 @@ TEST_F(MonitorTests, NotifiesWhenForcedByNewSequenceAvailableFromNetwork)
EXPECT_CALL(*backend_, hardFetchLedgerRange(testing::_)).WillOnce(testing::Return(range));
EXPECT_CALL(actionMock_, Call).WillOnce([&] { unblock.release(); });

auto subscription = monitor_.subscribe(actionMock_.AsStdFunction());
auto subscription = monitor_.subscribeToNewSequence(actionMock_.AsStdFunction());
monitor_.run(std::chrono::seconds{10}); // expected to be force-invoked sooner than in 10 sec
pusher(kSTART_SEQ); // pretend network validated a new ledger
unblock.acquire();
@@ -121,8 +125,49 @@ TEST_F(MonitorTests, NotifiesWhenForcedByLedgerLoaded)
EXPECT_CALL(*backend_, hardFetchLedgerRange(testing::_)).WillOnce(testing::Return(range));
EXPECT_CALL(actionMock_, Call).WillOnce([&] { unblock.release(); });

auto subscription = monitor_.subscribe(actionMock_.AsStdFunction());
monitor_.run(std::chrono::seconds{10}); // expected to be force-invoked sooner than in 10 sec
monitor_.notifyLedgerLoaded(kSTART_SEQ); // notify about newly committed ledger
auto subscription = monitor_.subscribeToNewSequence(actionMock_.AsStdFunction());
monitor_.run(std::chrono::seconds{10}); // expected to be force-invoked sooner than in 10 sec
monitor_.notifySequenceLoaded(kSTART_SEQ); // notify about newly committed ledger
unblock.acquire();
}

TEST_F(MonitorTests, ResumesMonitoringFromNextSequenceAfterWriteConflict)
{
constexpr uint32_t kCONFLICT_SEQ = 456u;
constexpr uint32_t kEXPECTED_NEXT_SEQ = kCONFLICT_SEQ + 1;

LedgerRange const rangeBeforeConflict(kSTART_SEQ, kSTART_SEQ);
LedgerRange const rangeAfterConflict(kEXPECTED_NEXT_SEQ, kEXPECTED_NEXT_SEQ);
std::binary_semaphore unblock(0);

EXPECT_CALL(*ledgers_, subscribe(testing::_));

{
testing::InSequence const seq; // second call will produce conflict
EXPECT_CALL(*backend_, hardFetchLedgerRange(testing::_)).WillOnce(testing::Return(rangeBeforeConflict));
EXPECT_CALL(*backend_, hardFetchLedgerRange(testing::_)).WillRepeatedly(testing::Return(rangeAfterConflict));
}

EXPECT_CALL(actionMock_, Call(kEXPECTED_NEXT_SEQ)).WillOnce([&](uint32_t seq) {
EXPECT_EQ(seq, kEXPECTED_NEXT_SEQ);
unblock.release();
});

auto subscription = monitor_.subscribeToNewSequence(actionMock_.AsStdFunction());
monitor_.run(std::chrono::nanoseconds{100});
monitor_.notifyWriteConflict(kCONFLICT_SEQ);
unblock.acquire();
}

TEST_F(MonitorTests, DbStalledChannelTriggeredWhenTimeoutExceeded)
{
std::binary_semaphore unblock(0);

EXPECT_CALL(*ledgers_, subscribe(testing::_));
EXPECT_CALL(*backend_, hardFetchLedgerRange(testing::_)).WillRepeatedly(testing::Return(std::nullopt));
EXPECT_CALL(dbStalledMock_, Call()).WillOnce([&]() { unblock.release(); });

auto subscription = monitor_.subscribeToDbStalled(dbStalledMock_.AsStdFunction());
monitor_.run(std::chrono::nanoseconds{100});
unblock.acquire();
}
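
The rename from subscribe to subscribeToNewSequence pairs with a second channel, subscribeToDbStalled, and both hand back a boost::signals2::scoped_connection so a subscription dies with its holder. A self-contained sketch of that two-channel pattern (plain signals here; the real Monitor drives them from its timer loop):

#include <boost/signals2.hpp>
#include <cstdint>
#include <iostream>

struct Channels {
    boost::signals2::signal<void(std::uint32_t)> newSequence;  // fired per consumed sequence
    boost::signals2::signal<void()> dbStalled;                 // fired when no progress is seen in time
};

int main()
{
    Channels ch;
    boost::signals2::scoped_connection const onSeq =
        ch.newSequence.connect([](std::uint32_t seq) { std::cout << "new sequence " << seq << '\n'; });
    boost::signals2::scoped_connection const onStall =
        ch.dbStalled.connect([] { std::cout << "db stalled\n"; });

    ch.newSequence(124);  // delivered while the connection is alive
    ch.dbStalled();
}  // both connections are dropped automatically here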
@@ -672,16 +672,28 @@ TEST_F(RegistryTest, MixedReadonlyAndRegularExtensions)
TEST_F(RegistryTest, MonitorInterfaceExecution)
{
struct MockMonitor : etlng::MonitorInterface {
MOCK_METHOD(void, notifyLedgerLoaded, (uint32_t), (override));
MOCK_METHOD(boost::signals2::scoped_connection, subscribe, (SignalType::slot_type const&), (override));
MOCK_METHOD(void, notifySequenceLoaded, (uint32_t), (override));
MOCK_METHOD(void, notifyWriteConflict, (uint32_t), (override));
MOCK_METHOD(
boost::signals2::scoped_connection,
subscribeToNewSequence,
(NewSequenceSignalType::slot_type const&),
(override)
);
MOCK_METHOD(
boost::signals2::scoped_connection,
subscribeToDbStalled,
(DbStalledSignalType::slot_type const&),
(override)
);
MOCK_METHOD(void, run, (std::chrono::steady_clock::duration), (override));
MOCK_METHOD(void, stop, (), (override));
};

auto monitor = MockMonitor{};
EXPECT_CALL(monitor, notifyLedgerLoaded(kSEQ)).Times(1);
EXPECT_CALL(monitor, notifySequenceLoaded(kSEQ)).Times(1);

monitor.notifyLedgerLoaded(kSEQ);
monitor.notifySequenceLoaded(kSEQ);
}

TEST_F(RegistryTest, ReadonlyModeWithAllowInReadonlyTest)
@@ -18,6 +18,7 @@
//==============================================================================

#include "etlng/InitialLoadObserverInterface.hpp"
#include "etlng/LoadBalancerInterface.hpp"
#include "etlng/Models.hpp"
#include "etlng/impl/SourceImpl.hpp"
#include "rpc/Errors.hpp"
@@ -33,6 +34,7 @@

#include <chrono>
#include <cstdint>
#include <expected>
#include <memory>
#include <optional>
#include <string>
@@ -51,8 +53,10 @@ struct GrpcSourceMock {
using FetchLedgerReturnType = std::pair<grpc::Status, org::xrpl::rpc::v1::GetLedgerResponse>;
MOCK_METHOD(FetchLedgerReturnType, fetchLedger, (uint32_t, bool, bool));

using LoadLedgerReturnType = std::pair<std::vector<std::string>, bool>;
using LoadLedgerReturnType = etlng::InitialLedgerLoadResult;
MOCK_METHOD(LoadLedgerReturnType, loadInitialLedger, (uint32_t, uint32_t, etlng::InitialLoadObserverInterface&));

MOCK_METHOD(void, stop, (boost::asio::yield_context), ());
};

struct SubscriptionSourceMock {
@@ -127,6 +131,7 @@ TEST_F(SourceImplNgTest, run)
TEST_F(SourceImplNgTest, stop)
{
EXPECT_CALL(*subscriptionSourceMock_, stop);
EXPECT_CALL(grpcSourceMock_, stop);
boost::asio::io_context ctx;
boost::asio::spawn(ctx, [&](boost::asio::yield_context yield) { source_.stop(yield); });
ctx.run();
@@ -190,7 +195,7 @@ TEST_F(SourceImplNgTest, fetchLedger)
EXPECT_EQ(actualStatus.error_code(), grpc::StatusCode::OK);
}

TEST_F(SourceImplNgTest, loadInitialLedger)
TEST_F(SourceImplNgTest, loadInitialLedgerErrorPath)
{
uint32_t const ledgerSeq = 123;
uint32_t const numMarkers = 3;
@@ -198,11 +203,25 @@ TEST_F(SourceImplNgTest, loadInitialLedger)
auto observerMock = testing::StrictMock<InitialLoadObserverMock>();

EXPECT_CALL(grpcSourceMock_, loadInitialLedger(ledgerSeq, numMarkers, testing::_))
.WillOnce(Return(std::make_pair(std::vector<std::string>{}, true)));
auto const [actualLedgers, actualSuccess] = source_.loadInitialLedger(ledgerSeq, numMarkers, observerMock);
.WillOnce(Return(std::unexpected{etlng::InitialLedgerLoadError::Errored}));
auto const res = source_.loadInitialLedger(ledgerSeq, numMarkers, observerMock);

EXPECT_TRUE(actualLedgers.empty());
EXPECT_TRUE(actualSuccess);
EXPECT_FALSE(res.has_value());
}

TEST_F(SourceImplNgTest, loadInitialLedgerSuccessPath)
{
uint32_t const ledgerSeq = 123;
uint32_t const numMarkers = 3;
auto response = etlng::InitialLedgerLoadResult{{"1", "2", "3"}};

auto observerMock = testing::StrictMock<InitialLoadObserverMock>();

EXPECT_CALL(grpcSourceMock_, loadInitialLedger(ledgerSeq, numMarkers, testing::_)).WillOnce(Return(response));
auto const res = source_.loadInitialLedger(ledgerSeq, numMarkers, observerMock);

EXPECT_TRUE(res.has_value());
EXPECT_EQ(res, response);
}

TEST_F(SourceImplNgTest, forwardToRippled)
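
One detail worth noting in GrpcSourceMock above: MOCK_METHOD is a macro, so a return type containing a bare comma (like the old std::pair) must be hidden behind an alias or wrapped in an extra set of parentheses; keeping the LoadLedgerReturnType alias and merely re-pointing it at etlng::InitialLedgerLoadResult is the low-friction way to migrate. A tiny illustration of both spellings (hypothetical mock, for shape only):

#include <gmock/gmock.h>
#include <cstdint>
#include <utility>

struct SketchMock {
    using PairResult = std::pair<int, bool>;                              // the alias hides the comma
    MOCK_METHOD(PairResult, viaAlias, (std::uint32_t), ());
    MOCK_METHOD((std::pair<int, bool>), viaParens, (std::uint32_t), ());  // or parenthesize the type
};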
@@ -62,13 +62,26 @@ struct MockExtractor : etlng::ExtractorInterface {
};

struct MockLoader : etlng::LoaderInterface {
MOCK_METHOD(void, load, (LedgerData const&), (override));
using ExpectedType = std::expected<void, etlng::LoaderError>;
MOCK_METHOD(ExpectedType, load, (LedgerData const&), (override));
MOCK_METHOD(std::optional<ripple::LedgerHeader>, loadInitialLedger, (LedgerData const&), (override));
};

struct MockMonitor : etlng::MonitorInterface {
MOCK_METHOD(void, notifyLedgerLoaded, (uint32_t), (override));
MOCK_METHOD(boost::signals2::scoped_connection, subscribe, (SignalType::slot_type const&), (override));
MOCK_METHOD(void, notifySequenceLoaded, (uint32_t), (override));
MOCK_METHOD(void, notifyWriteConflict, (uint32_t), (override));
MOCK_METHOD(
boost::signals2::scoped_connection,
subscribeToNewSequence,
(NewSequenceSignalType::slot_type const&),
(override)
);
MOCK_METHOD(
boost::signals2::scoped_connection,
subscribeToDbStalled,
(DbStalledSignalType::slot_type const&),
(override)
);
MOCK_METHOD(void, run, (std::chrono::steady_clock::duration), (override));
MOCK_METHOD(void, stop, (), (override));
};
@@ -127,21 +140,130 @@ TEST_F(TaskManagerTests, LoaderGetsDataIfNextSequenceIsExtracted)
return createTestData(seq);
});

EXPECT_CALL(*mockLoaderPtr_, load(testing::_)).Times(kTOTAL).WillRepeatedly([&](LedgerData data) {
loaded.push_back(data.seq);
if (loaded.size() == kTOTAL) {
done.release();
}
});
EXPECT_CALL(*mockLoaderPtr_, load(testing::_))
.Times(kTOTAL)
.WillRepeatedly([&](LedgerData data) -> std::expected<void, etlng::LoaderError> {
loaded.push_back(data.seq);
if (loaded.size() == kTOTAL)
done.release();

EXPECT_CALL(*mockMonitorPtr_, notifyLedgerLoaded(testing::_)).Times(kTOTAL);
return {};
});

EXPECT_CALL(*mockMonitorPtr_, notifySequenceLoaded(testing::_)).Times(kTOTAL);

taskManager_.run(kEXTRACTORS);
done.acquire();
taskManager_.stop();

EXPECT_EQ(loaded.size(), kTOTAL);
for (std::size_t i = 0; i < loaded.size(); ++i) {
for (std::size_t i = 0; i < loaded.size(); ++i)
EXPECT_EQ(loaded[i], kSEQ + i);
}

TEST_F(TaskManagerTests, WriteConflictHandling)
{
static constexpr auto kTOTAL = 64uz;
static constexpr auto kCONFLICT_AFTER = 32uz; // Conflict after 32 ledgers
static constexpr auto kEXTRACTORS = 4uz;

std::atomic_uint32_t seq = kSEQ;
std::vector<uint32_t> loaded;
std::binary_semaphore done{0};
bool conflictOccurred = false;

EXPECT_CALL(*mockSchedulerPtr_, next()).WillRepeatedly([&]() {
return Task{.priority = Task::Priority::Higher, .seq = seq++};
});

EXPECT_CALL(*mockExtractorPtr_, extractLedgerWithDiff(testing::_))
.WillRepeatedly([](uint32_t seq) -> std::optional<LedgerData> {
if (seq > kSEQ + kTOTAL - 1)
return std::nullopt;

return createTestData(seq);
});

// First kCONFLICT_AFTER calls succeed, then we get a write conflict
EXPECT_CALL(*mockLoaderPtr_, load(testing::_))
.WillRepeatedly([&](LedgerData data) -> std::expected<void, etlng::LoaderError> {
loaded.push_back(data.seq);

if (loaded.size() == kCONFLICT_AFTER) {
conflictOccurred = true;
done.release();
return std::unexpected(etlng::LoaderError::WriteConflict);
}

if (loaded.size() == kTOTAL)
done.release();

return {};
});

EXPECT_CALL(*mockMonitorPtr_, notifySequenceLoaded(testing::_)).Times(kCONFLICT_AFTER - 1);
EXPECT_CALL(*mockMonitorPtr_, notifyWriteConflict(kSEQ + kCONFLICT_AFTER - 1));

taskManager_.run(kEXTRACTORS);
done.acquire();
taskManager_.stop();

EXPECT_EQ(loaded.size(), kCONFLICT_AFTER);
EXPECT_TRUE(conflictOccurred);

for (std::size_t i = 0; i < loaded.size(); ++i)
EXPECT_EQ(loaded[i], kSEQ + i);
}

TEST_F(TaskManagerTests, AmendmentBlockedHandling)
{
static constexpr auto kTOTAL = 64uz;
static constexpr auto kAMENDMENT_BLOCKED_AFTER = 20uz; // Amendment block after 20 ledgers
static constexpr auto kEXTRACTORS = 2uz;

std::atomic_uint32_t seq = kSEQ;
std::vector<uint32_t> loaded;
std::binary_semaphore done{0};
bool amendmentBlockedOccurred = false;

EXPECT_CALL(*mockSchedulerPtr_, next()).WillRepeatedly([&]() {
return Task{.priority = Task::Priority::Higher, .seq = seq++};
});

EXPECT_CALL(*mockExtractorPtr_, extractLedgerWithDiff(testing::_))
.WillRepeatedly([](uint32_t seq) -> std::optional<LedgerData> {
if (seq > kSEQ + kTOTAL - 1)
return std::nullopt;

return createTestData(seq);
});

EXPECT_CALL(*mockLoaderPtr_, load(testing::_))
.WillRepeatedly([&](LedgerData data) -> std::expected<void, etlng::LoaderError> {
loaded.push_back(data.seq);

if (loaded.size() == kAMENDMENT_BLOCKED_AFTER) {
amendmentBlockedOccurred = true;
done.release();
return std::unexpected(etlng::LoaderError::AmendmentBlocked);
}

if (loaded.size() == kTOTAL)
done.release();

return {};
});

EXPECT_CALL(*mockMonitorPtr_, notifySequenceLoaded(testing::_)).Times(kAMENDMENT_BLOCKED_AFTER - 1);
EXPECT_CALL(*mockMonitorPtr_, notifyWriteConflict(testing::_)).Times(0);

taskManager_.run(kEXTRACTORS);
done.acquire();
taskManager_.stop();

EXPECT_EQ(loaded.size(), kAMENDMENT_BLOCKED_AFTER);
EXPECT_TRUE(amendmentBlockedOccurred);

for (std::size_t i = 0; i < loaded.size(); ++i)
EXPECT_EQ(loaded[i], kSEQ + i);
}
}
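
Taken together, WriteConflictHandling and AmendmentBlockedHandling fix the contract for consuming the loader's std::expected<void, etlng::LoaderError>: success advances the monitor, a write conflict is reported so monitoring resumes from the next sequence, and an amendment block stops loading without any conflict notification. A self-contained sketch of that dispatch (names mirror the tests; the real routing lives in the task manager):

#include <cstdint>
#include <expected>
#include <iostream>

enum class LoaderError { WriteConflict, AmendmentBlocked };  // stand-in for etlng::LoaderError

struct MonitorStub {
    void notifySequenceLoaded(std::uint32_t seq) { std::cout << "loaded " << seq << '\n'; }
    void notifyWriteConflict(std::uint32_t seq) { std::cout << "conflict at " << seq << '\n'; }
};

// Returns false when loading should stop.
bool onLoadResult(MonitorStub& monitor, std::uint32_t seq, std::expected<void, LoaderError> res)
{
    if (res.has_value()) {
        monitor.notifySequenceLoaded(seq);  // normal progress
        return true;
    }
    if (res.error() == LoaderError::WriteConflict)
        monitor.notifyWriteConflict(seq);   // monitor then resumes from seq + 1
    return false;                           // AmendmentBlocked: stop quietly, no conflict notification
}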
@@ -179,7 +179,7 @@ TEST_F(RPCAMMInfoHandlerTest, AccountNotFound)
ON_CALL(*backend_, doFetchLedgerObject(accountKey, testing::_, testing::_))
.WillByDefault(Return(accountRoot.getSerializer().peekData()));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"amm_account": "{}",
"account": "{}"
@@ -205,7 +205,7 @@ TEST_F(RPCAMMInfoHandlerTest, AMMAccountNotExist)
ON_CALL(*backend_, fetchLedgerBySequence).WillByDefault(Return(lgrInfo));
ON_CALL(*backend_, doFetchLedgerObject).WillByDefault(Return(std::optional<Blob>{}));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"amm_account": "{}"
}})JSON",
@@ -228,7 +228,7 @@ TEST_F(RPCAMMInfoHandlerTest, AMMAccountNotInDBIsMalformed)
ON_CALL(*backend_, fetchLedgerBySequence).WillByDefault(Return(lgrInfo));
ON_CALL(*backend_, doFetchLedgerObject).WillByDefault(Return(std::optional<Blob>{}));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"amm_account": "{}"
}})JSON",
@@ -254,7 +254,7 @@ TEST_F(RPCAMMInfoHandlerTest, AMMAccountNotFoundMissingAmmField)
ON_CALL(*backend_, fetchLedgerBySequence).WillByDefault(Return(lgrInfo));
ON_CALL(*backend_, doFetchLedgerObject).WillByDefault(Return(accountRoot.getSerializer().peekData()));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"amm_account": "{}"
}})JSON",
@@ -289,7 +289,7 @@ TEST_F(RPCAMMInfoHandlerTest, AMMAccountAmmBlobNotFound)
ON_CALL(*backend_, doFetchLedgerObject(ammKeylet.key, testing::_, testing::_))
.WillByDefault(Return(std::optional<Blob>{}));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"amm_account": "{}"
}})JSON",
@@ -328,7 +328,7 @@ TEST_F(RPCAMMInfoHandlerTest, AMMAccountAccBlobNotFound)
ON_CALL(*backend_, doFetchLedgerObject(account2Key, testing::_, testing::_))
.WillByDefault(Return(std::optional<Blob>{}));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"amm_account": "{}"
}})JSON",
@@ -373,7 +373,7 @@ TEST_F(RPCAMMInfoHandlerTest, HappyPathMinimalFirstXRPNoTrustline)
ON_CALL(*backend_, doFetchLedgerObject(feesKey, kSEQ, _)).WillByDefault(Return(feesObj));
ON_CALL(*backend_, doFetchLedgerObject(issue2LineKey, kSEQ, _)).WillByDefault(Return(std::optional<Blob>{}));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"amm_account": "{}"
}})JSON",
@@ -453,7 +453,7 @@ TEST_F(RPCAMMInfoHandlerTest, HappyPathWithAccount)
ON_CALL(*backend_, doFetchLedgerObject(accountHoldsKeylet.key, kSEQ, _))
.WillByDefault(Return(trustline.getSerializer().peekData()));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"amm_account": "{}",
"account": "{}"
@@ -527,7 +527,7 @@ TEST_F(RPCAMMInfoHandlerTest, HappyPathMinimalSecondXRPNoTrustline)
ON_CALL(*backend_, doFetchLedgerObject(feesKey, kSEQ, _)).WillByDefault(Return(feesObj));
ON_CALL(*backend_, doFetchLedgerObject(issue2LineKey, kSEQ, _)).WillByDefault(Return(std::optional<Blob>{}));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"amm_account": "{}"
}})JSON",
@@ -597,7 +597,7 @@ TEST_F(RPCAMMInfoHandlerTest, HappyPathNonXRPNoTrustlines)
ON_CALL(*backend_, doFetchLedgerObject(feesKey, kSEQ, _)).WillByDefault(Return(feesObj));
ON_CALL(*backend_, doFetchLedgerObject(issue2LineKey, kSEQ, _)).WillByDefault(Return(std::optional<Blob>{}));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"amm_account": "{}"
}})JSON",
@@ -686,7 +686,7 @@ TEST_F(RPCAMMInfoHandlerTest, HappyPathFrozen)
ON_CALL(*backend_, doFetchLedgerObject(issue2LineKey, kSEQ, _))
.WillByDefault(Return(trustline2BalanceFrozen.getSerializer().peekData()));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"amm_account": "{}"
}})JSON",
@@ -776,7 +776,7 @@ TEST_F(RPCAMMInfoHandlerTest, HappyPathFrozenIssuer)
ON_CALL(*backend_, doFetchLedgerObject(issue2LineKey, kSEQ, _))
.WillByDefault(Return(trustline2BalanceFrozen.getSerializer().peekData()));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"amm_account": "{}"
}})JSON",
@@ -858,7 +858,7 @@ TEST_F(RPCAMMInfoHandlerTest, HappyPathWithTrustline)
ON_CALL(*backend_, doFetchLedgerObject(issue2LineKey, kSEQ, _))
.WillByDefault(Return(trustlineBalance.getSerializer().peekData()));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"amm_account": "{}"
}})JSON",
@@ -935,7 +935,7 @@ TEST_F(RPCAMMInfoHandlerTest, HappyPathWithVoteSlots)
ON_CALL(*backend_, doFetchLedgerObject(issue2LineKey, kSEQ, _))
.WillByDefault(Return(trustlineBalance.getSerializer().peekData()));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"amm_account": "{}"
}})JSON",
@@ -1028,7 +1028,7 @@ TEST_F(RPCAMMInfoHandlerTest, HappyPathWithAuctionSlot)
ON_CALL(*backend_, doFetchLedgerObject(issue2LineKey, kSEQ, _))
.WillByDefault(Return(trustlineBalance.getSerializer().peekData()));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"amm_account": "{}"
}})JSON",
@@ -1116,7 +1116,7 @@ TEST_F(RPCAMMInfoHandlerTest, HappyPathWithAssetsMatchingInputOrder)
ON_CALL(*backend_, doFetchLedgerObject(ammKeylet.key, testing::_, testing::_))
.WillByDefault(Return(ammObj.getSerializer().peekData()));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"asset": {{
"currency": "JPY",
@@ -1226,7 +1226,7 @@ TEST_F(RPCAMMInfoHandlerTest, HappyPathWithAssetsPreservesInputOrder)
ON_CALL(*backend_, doFetchLedgerObject(ammKeylet.key, testing::_, testing::_))
.WillByDefault(Return(ammObj.getSerializer().peekData()));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"asset": {{
"currency": "USD",
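
Every remaining hunk in these handler tests is the same one-token move: auto static const kINPUT becomes static auto const kINPUT. Both spellings are legal C++ (declaration specifiers may appear in any order), so this is purely a readability and convention fix that puts the storage-class specifier first. A minimal illustration:

#include <string>

static auto const kEXAMPLE = std::string{"{}"};  // storage-class specifier first, then type specifiers

The same substitution repeats through the AccountCurrencies, AccountInfo, AccountNFTs, AccountObjects, AccountOffers, and AccountTx tests below.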
@@ -73,7 +73,7 @@ TEST_F(RPCAccountCurrenciesHandlerTest, AccountNotExist)
ON_CALL(*backend_, doFetchLedgerObject).WillByDefault(Return(std::optional<Blob>{}));
EXPECT_CALL(*backend_, doFetchLedgerObject).Times(1);

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}"
}})JSON",
@@ -95,7 +95,7 @@ TEST_F(RPCAccountCurrenciesHandlerTest, LedgerNonExistViaIntSequence)
// return empty ledgerHeader
ON_CALL(*backend_, fetchLedgerBySequence(30, _)).WillByDefault(Return(std::optional<ripple::LedgerHeader>{}));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}"
}})JSON",
@@ -119,7 +119,7 @@ TEST_F(RPCAccountCurrenciesHandlerTest, LedgerNonExistViaStringSequence)
// return empty ledgerHeader
ON_CALL(*backend_, fetchLedgerBySequence(12, _)).WillByDefault(Return(std::optional<ripple::LedgerHeader>{}));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}",
"ledger_index":"{}"
@@ -144,7 +144,7 @@ TEST_F(RPCAccountCurrenciesHandlerTest, LedgerNonExistViaHash)
ON_CALL(*backend_, fetchLedgerByHash(ripple::uint256{kLEDGER_HASH}, _))
.WillByDefault(Return(std::optional<ripple::LedgerHeader>{}));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}",
"ledger_hash":"{}"
@@ -210,7 +210,7 @@ TEST_F(RPCAccountCurrenciesHandlerTest, DefaultParameter)

ON_CALL(*backend_, doFetchLedgerObjects).WillByDefault(Return(bbs));
EXPECT_CALL(*backend_, doFetchLedgerObjects).Times(1);
auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}"
}})JSON",
@@ -245,7 +245,7 @@ TEST_F(RPCAccountCurrenciesHandlerTest, RequestViaLegderHash)

ON_CALL(*backend_, doFetchLedgerObjects).WillByDefault(Return(bbs));
EXPECT_CALL(*backend_, doFetchLedgerObjects).Times(1);
auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}",
"ledger_hash":"{}"
@@ -282,7 +282,7 @@ TEST_F(RPCAccountCurrenciesHandlerTest, RequestViaLegderSeq)

ON_CALL(*backend_, doFetchLedgerObjects).WillByDefault(Return(bbs));
EXPECT_CALL(*backend_, doFetchLedgerObjects).Times(1);
auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}",
"ledger_index":{}
@@ -187,7 +187,7 @@ TEST_F(RPCAccountInfoHandlerTest, LedgerNonExistViaIntSequence)
// return empty ledgerHeader
ON_CALL(*backend_, fetchLedgerBySequence(30, _)).WillByDefault(Return(std::optional<ripple::LedgerHeader>{}));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account": "{}",
"ledger_index": 30
@@ -210,7 +210,7 @@ TEST_F(RPCAccountInfoHandlerTest, LedgerNonExistViaStringSequence)
// return empty ledgerHeader
ON_CALL(*backend_, fetchLedgerBySequence(30, _)).WillByDefault(Return(std::nullopt));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account": "{}",
"ledger_index": "30"
@@ -234,7 +234,7 @@ TEST_F(RPCAccountInfoHandlerTest, LedgerNonExistViaHash)
ON_CALL(*backend_, fetchLedgerByHash(ripple::uint256{kLEDGER_HASH}, _))
.WillByDefault(Return(std::optional<ripple::LedgerHeader>{}));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account": "{}",
"ledger_hash": "{}"
@@ -261,7 +261,7 @@ TEST_F(RPCAccountInfoHandlerTest, AccountNotExist)
ON_CALL(*backend_, doFetchLedgerObject).WillByDefault(Return(std::optional<Blob>{}));
EXPECT_CALL(*backend_, doFetchLedgerObject).Times(1);

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account": "{}"
}})JSON",
@@ -287,7 +287,7 @@ TEST_F(RPCAccountInfoHandlerTest, AccountInvalid)
ON_CALL(*backend_, doFetchLedgerObject).WillByDefault(Return(createLegacyFeeSettingBlob(1, 2, 3, 4, 0)));
EXPECT_CALL(*backend_, doFetchLedgerObject).Times(1);

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account": "{}"
}})JSON",
@@ -321,7 +321,7 @@ TEST_F(RPCAccountInfoHandlerTest, SignerListsInvalid)
EXPECT_CALL(*mockAmendmentCenterPtr_, isEnabled(_, Amendments::Clawback, _)).WillOnce(Return(false));
EXPECT_CALL(*backend_, doFetchLedgerObject).Times(2);

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account": "{}",
"signer_lists": true
@@ -424,7 +424,7 @@ TEST_F(RPCAccountInfoHandlerTest, SignerListsTrueV2)
EXPECT_CALL(*mockAmendmentCenterPtr_, isEnabled(_, Amendments::Clawback, _)).WillOnce(Return(false));
EXPECT_CALL(*backend_, doFetchLedgerObject).Times(2);

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account": "{}",
"signer_lists": true
@@ -525,7 +525,7 @@ TEST_F(RPCAccountInfoHandlerTest, SignerListsTrueV1)
EXPECT_CALL(*mockAmendmentCenterPtr_, isEnabled(_, Amendments::Clawback, _)).WillOnce(Return(false));
EXPECT_CALL(*backend_, doFetchLedgerObject).Times(2);

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account": "{}",
"signer_lists": true
@@ -599,7 +599,7 @@ TEST_F(RPCAccountInfoHandlerTest, Flags)
EXPECT_CALL(*mockAmendmentCenterPtr_, isEnabled(_, Amendments::Clawback, _)).WillOnce(Return(false));
EXPECT_CALL(*backend_, doFetchLedgerObject);

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account": "{}"
}})JSON",
@@ -628,7 +628,7 @@ TEST_F(RPCAccountInfoHandlerTest, IdentAndSignerListsFalse)
EXPECT_CALL(*mockAmendmentCenterPtr_, isEnabled(_, Amendments::Clawback, _)).WillOnce(Return(false));
EXPECT_CALL(*backend_, doFetchLedgerObject);

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"ident": "{}"
}})JSON",
@@ -706,7 +706,7 @@ TEST_F(RPCAccountInfoHandlerTest, DisallowIncoming)
EXPECT_CALL(*mockAmendmentCenterPtr_, isEnabled(_, Amendments::Clawback, _)).WillOnce(Return(false));
EXPECT_CALL(*backend_, doFetchLedgerObject);

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account": "{}"
}})JSON",
@@ -780,7 +780,7 @@ TEST_F(RPCAccountInfoHandlerTest, Clawback)
EXPECT_CALL(*mockAmendmentCenterPtr_, isEnabled(_, Amendments::Clawback, _)).WillOnce(Return(true));
EXPECT_CALL(*backend_, doFetchLedgerObject);

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account": "{}"
}})JSON",
@@ -181,7 +181,7 @@ TEST_F(RPCAccountNFTsHandlerTest, LedgerNotFoundViaHash)
ON_CALL(*backend_, fetchLedgerByHash(ripple::uint256{kLEDGER_HASH}, _))
.WillByDefault(Return(std::optional<ripple::LedgerHeader>{}));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}",
"ledger_hash":"{}"
@@ -207,7 +207,7 @@ TEST_F(RPCAccountNFTsHandlerTest, LedgerNotFoundViaStringIndex)
// return empty ledgerHeader
ON_CALL(*backend_, fetchLedgerBySequence(kSEQ, _)).WillByDefault(Return(std::optional<ripple::LedgerHeader>{}));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}",
"ledger_index":"{}"
@@ -233,7 +233,7 @@ TEST_F(RPCAccountNFTsHandlerTest, LedgerNotFoundViaIntIndex)
// return empty ledgerHeader
ON_CALL(*backend_, fetchLedgerBySequence(kSEQ, _)).WillByDefault(Return(std::optional<ripple::LedgerHeader>{}));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}",
"ledger_index":{}
@@ -260,7 +260,7 @@ TEST_F(RPCAccountNFTsHandlerTest, AccountNotFound)
ON_CALL(*backend_, doFetchLedgerObject).WillByDefault(Return(std::optional<Blob>{}));
EXPECT_CALL(*backend_, doFetchLedgerObject).Times(1);

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}"
}})JSON",
@@ -323,7 +323,7 @@ TEST_F(RPCAccountNFTsHandlerTest, NormalPath)
.WillByDefault(Return(pageObject.getSerializer().peekData()));
EXPECT_CALL(*backend_, doFetchLedgerObject).Times(2);

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}"
}})JSON",
@@ -357,7 +357,7 @@ TEST_F(RPCAccountNFTsHandlerTest, Limit)
.WillByDefault(Return(pageObject.getSerializer().peekData()));
EXPECT_CALL(*backend_, doFetchLedgerObject).Times(1 + kLIMIT);

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}",
"limit":{}
@@ -392,7 +392,7 @@ TEST_F(RPCAccountNFTsHandlerTest, Marker)
.WillByDefault(Return(pageObject.getSerializer().peekData()));
EXPECT_CALL(*backend_, doFetchLedgerObject).Times(2);

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}",
"marker":"{}"
@@ -419,7 +419,7 @@ TEST_F(RPCAccountNFTsHandlerTest, InvalidMarker)
ON_CALL(*backend_, doFetchLedgerObject(ripple::keylet::account(accountID).key, 30, _))
.WillByDefault(Return(accountObject.getSerializer().peekData()));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}",
"marker":"{}"
@@ -448,7 +448,7 @@ TEST_F(RPCAccountNFTsHandlerTest, AccountWithNoNFT)
ON_CALL(*backend_, doFetchLedgerObject(ripple::keylet::account(accountID).key, 30, _))
.WillByDefault(Return(accountObject.getSerializer().peekData()));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}"
}})JSON",
@@ -480,7 +480,7 @@ TEST_F(RPCAccountNFTsHandlerTest, invalidPage)
.WillByDefault(Return(accountObject.getSerializer().peekData()));
EXPECT_CALL(*backend_, doFetchLedgerObject).Times(2);

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}",
"marker":"{}"
@@ -546,7 +546,7 @@ TEST_F(RPCAccountNFTsHandlerTest, LimitLessThanMin)
.WillByDefault(Return(pageObject.getSerializer().peekData()));
EXPECT_CALL(*backend_, doFetchLedgerObject).Times(2);

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}",
"limit":{}
@@ -610,7 +610,7 @@ TEST_F(RPCAccountNFTsHandlerTest, LimitMoreThanMax)
.WillByDefault(Return(pageObject.getSerializer().peekData()));
EXPECT_CALL(*backend_, doFetchLedgerObject).Times(2);

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}",
"limit":{}
@@ -223,7 +223,7 @@ TEST_F(RPCAccountObjectsHandlerTest, LedgerNonExistViaIntSequence)
// return empty ledgerHeader
EXPECT_CALL(*backend_, fetchLedgerBySequence(kMAX_SEQ, _)).WillOnce(Return(std::optional<ripple::LedgerHeader>{}));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}",
"ledger_index":30
@@ -245,7 +245,7 @@ TEST_F(RPCAccountObjectsHandlerTest, LedgerNonExistViaStringSequence)
// return empty ledgerHeader
EXPECT_CALL(*backend_, fetchLedgerBySequence(kMAX_SEQ, _)).WillOnce(Return(std::nullopt));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}",
"ledger_index":"30"
@@ -268,7 +268,7 @@ TEST_F(RPCAccountObjectsHandlerTest, LedgerNonExistViaHash)
EXPECT_CALL(*backend_, fetchLedgerByHash(ripple::uint256{kLEDGER_HASH}, _))
.WillOnce(Return(std::optional<ripple::LedgerHeader>{}));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}",
"ledger_hash":"{}"
@@ -293,7 +293,7 @@ TEST_F(RPCAccountObjectsHandlerTest, AccountNotExist)
EXPECT_CALL(*backend_, fetchLedgerBySequence).WillOnce(Return(ledgerHeader));
EXPECT_CALL(*backend_, doFetchLedgerObject).WillOnce(Return(std::optional<Blob>{}));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}"
}})JSON",
@@ -365,7 +365,7 @@ TEST_F(RPCAccountObjectsHandlerTest, DefaultParameterNoNFTFound)

EXPECT_CALL(*backend_, doFetchLedgerObjects).WillOnce(Return(bbs));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}"
}})JSON",
@@ -409,7 +409,7 @@ TEST_F(RPCAccountObjectsHandlerTest, Limit)
}
EXPECT_CALL(*backend_, doFetchLedgerObjects).WillOnce(Return(bbs));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}",
"limit":{}
@@ -453,7 +453,7 @@ TEST_F(RPCAccountObjectsHandlerTest, Marker)
}
EXPECT_CALL(*backend_, doFetchLedgerObjects).WillOnce(Return(bbs));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}",
"marker":"{},{}"
@@ -507,7 +507,7 @@ TEST_F(RPCAccountObjectsHandlerTest, MultipleDirNoNFT)
}
EXPECT_CALL(*backend_, doFetchLedgerObjects).WillOnce(Return(bbs));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}",
"limit":{}
@@ -561,7 +561,7 @@ TEST_F(RPCAccountObjectsHandlerTest, TypeFilter)

EXPECT_CALL(*backend_, doFetchLedgerObjects).WillOnce(Return(bbs));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}",
"type":"offer"
@@ -605,7 +605,7 @@ TEST_F(RPCAccountObjectsHandlerTest, TypeFilterAmmType)

EXPECT_CALL(*backend_, doFetchLedgerObjects).WillOnce(Return(bbs));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account": "{}",
"type": "amm"
@@ -658,7 +658,7 @@ TEST_F(RPCAccountObjectsHandlerTest, TypeFilterReturnEmpty)

EXPECT_CALL(*backend_, doFetchLedgerObjects).WillOnce(Return(bbs));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}",
"type": "check"
@@ -713,7 +713,7 @@ TEST_F(RPCAccountObjectsHandlerTest, DeletionBlockersOnlyFilter)

EXPECT_CALL(*backend_, doFetchLedgerObjects).WillOnce(Return(bbs));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account": "{}",
"deletion_blockers_only": true
@@ -756,7 +756,7 @@ TEST_F(RPCAccountObjectsHandlerTest, DeletionBlockersOnlyFilterWithTypeFilter)

EXPECT_CALL(*backend_, doFetchLedgerObjects).WillOnce(Return(bbs));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account": "{}",
"deletion_blockers_only": true,
@@ -818,7 +818,7 @@ TEST_F(RPCAccountObjectsHandlerTest, DeletionBlockersOnlyFilterEmptyResult)

EXPECT_CALL(*backend_, doFetchLedgerObjects).WillOnce(Return(bbs));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account": "{}",
"deletion_blockers_only": true
@@ -878,7 +878,7 @@ TEST_F(RPCAccountObjectsHandlerTest, DeletionBlockersOnlyFilterWithIncompatibleT

EXPECT_CALL(*backend_, doFetchLedgerObjects).WillOnce(Return(bbs));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account": "{}",
"deletion_blockers_only": true,
@@ -992,7 +992,7 @@ TEST_F(RPCAccountObjectsHandlerTest, NFTMixOtherObjects)

EXPECT_CALL(*backend_, doFetchLedgerObjects).WillOnce(Return(bbs));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}"
}})JSON",
@@ -1031,7 +1031,7 @@ TEST_F(RPCAccountObjectsHandlerTest, NFTReachLimitReturnMarker)
current = previous;
}

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}",
"limit":{}
@@ -1080,7 +1080,7 @@ TEST_F(RPCAccountObjectsHandlerTest, NFTReachLimitNoMarker)
);
EXPECT_CALL(*backend_, doFetchLedgerObject(current, 30, _)).WillOnce(Return(nftpage11.getSerializer().peekData()));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}",
"limit":{}
@@ -1158,7 +1158,7 @@ TEST_F(RPCAccountObjectsHandlerTest, NFTMarker)

EXPECT_CALL(*backend_, doFetchLedgerObjects).WillOnce(Return(bbs));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}",
"marker":"{},{}"
@@ -1214,7 +1214,7 @@ TEST_F(RPCAccountObjectsHandlerTest, NFTMarkerNoMoreNFT)

EXPECT_CALL(*backend_, doFetchLedgerObjects).WillOnce(Return(bbs));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}",
"marker":"{},{}"
@@ -1242,7 +1242,7 @@ TEST_F(RPCAccountObjectsHandlerTest, NFTMarkerNotInRange)
auto const accountKk = ripple::keylet::account(account).key;
EXPECT_CALL(*backend_, doFetchLedgerObject(accountKk, kMAX_SEQ, _)).WillOnce(Return(Blob{'f', 'a', 'k', 'e'}));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account": "{}",
"marker" : "{},{}"
@@ -1275,7 +1275,7 @@ TEST_F(RPCAccountObjectsHandlerTest, NFTMarkerNotExist)
auto const accountNftMax = ripple::keylet::nftpage_max(account).key;
EXPECT_CALL(*backend_, doFetchLedgerObject(accountNftMax, kMAX_SEQ, _)).WillOnce(Return(std::nullopt));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account": "{}",
"marker" : "{},{}"
@@ -1349,7 +1349,7 @@ TEST_F(RPCAccountObjectsHandlerTest, NFTLimitAdjust)

EXPECT_CALL(*backend_, doFetchLedgerObjects).WillOnce(Return(bbs));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}",
"marker":"{},{}",
@@ -1445,7 +1445,7 @@ TEST_F(RPCAccountObjectsHandlerTest, FilterNFT)

EXPECT_CALL(*backend_, doFetchLedgerObjects).WillOnce(Return(bbs));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}",
"type": "nft_page"
@@ -1486,7 +1486,7 @@ TEST_F(RPCAccountObjectsHandlerTest, NFTZeroMarkerNotAffectOtherMarker)
}
EXPECT_CALL(*backend_, doFetchLedgerObjects).WillOnce(Return(bbs));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}",
"limit":{},
@@ -1566,7 +1566,7 @@ TEST_F(RPCAccountObjectsHandlerTest, LimitLessThanMin)

EXPECT_CALL(*backend_, doFetchLedgerObjects).WillOnce(Return(bbs));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}",
"limit": {}
@@ -1642,7 +1642,7 @@ TEST_F(RPCAccountObjectsHandlerTest, LimitMoreThanMax)

EXPECT_CALL(*backend_, doFetchLedgerObjects).WillOnce(Return(bbs));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account":"{}",
"limit": {}
@@ -1684,7 +1684,7 @@ TEST_F(RPCAccountObjectsHandlerTest, TypeFilterMPTIssuanceType)

EXPECT_CALL(*backend_, doFetchLedgerObjects).WillOnce(Return(bbs));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account": "{}",
"type": "mpt_issuance"
@@ -1733,7 +1733,7 @@ TEST_F(RPCAccountObjectsHandlerTest, TypeFilterMPTokenType)

EXPECT_CALL(*backend_, doFetchLedgerObjects).WillOnce(Return(bbs));

auto static const kINPUT = json::parse(fmt::format(
static auto const kINPUT = json::parse(fmt::format(
R"JSON({{
"account": "{}",
"type": "mptoken"
@@ -176,7 +176,7 @@ TEST_F(RPCAccountOffersHandlerTest, LedgerNotFoundViaHash)
|
||||
ON_CALL(*backend_, fetchLedgerByHash(ripple::uint256{kLEDGER_HASH}, _))
|
||||
.WillByDefault(Return(std::optional<ripple::LedgerHeader>{}));
|
||||
|
||||
auto static const kINPUT = json::parse(fmt::format(
|
||||
static auto const kINPUT = json::parse(fmt::format(
|
||||
R"JSON({{
|
||||
"account":"{}",
|
||||
"ledger_hash":"{}"
|
||||
@@ -202,7 +202,7 @@ TEST_F(RPCAccountOffersHandlerTest, LedgerNotFoundViaStringIndex)
|
||||
// return empty ledgerHeader
|
||||
ON_CALL(*backend_, fetchLedgerBySequence(kSEQ, _)).WillByDefault(Return(std::optional<ripple::LedgerHeader>{}));
|
||||
|
||||
auto static const kINPUT = json::parse(fmt::format(
|
||||
static auto const kINPUT = json::parse(fmt::format(
|
||||
R"JSON({{
|
||||
"account":"{}",
|
||||
"ledger_index":"{}"
|
||||
@@ -228,7 +228,7 @@ TEST_F(RPCAccountOffersHandlerTest, LedgerNotFoundViaIntIndex)
|
||||
// return empty ledgerHeader
|
||||
ON_CALL(*backend_, fetchLedgerBySequence(kSEQ, _)).WillByDefault(Return(std::optional<ripple::LedgerHeader>{}));
|
||||
|
||||
auto static const kINPUT = json::parse(fmt::format(
|
||||
static auto const kINPUT = json::parse(fmt::format(
|
||||
R"JSON({{
|
||||
"account":"{}",
|
||||
"ledger_index":{}
|
||||
@@ -255,7 +255,7 @@ TEST_F(RPCAccountOffersHandlerTest, AccountNotFound)
|
||||
ON_CALL(*backend_, doFetchLedgerObject).WillByDefault(Return(std::optional<Blob>{}));
|
||||
EXPECT_CALL(*backend_, doFetchLedgerObject).Times(1);
|
||||
|
||||
auto static const kINPUT = json::parse(fmt::format(
|
||||
static auto const kINPUT = json::parse(fmt::format(
|
||||
R"JSON({{
|
||||
"account":"{}"
|
||||
}})JSON",
|
||||
@@ -332,7 +332,7 @@ TEST_F(RPCAccountOffersHandlerTest, DefaultParams)
|
||||
ON_CALL(*backend_, doFetchLedgerObjects).WillByDefault(Return(bbs));
|
||||
EXPECT_CALL(*backend_, doFetchLedgerObjects).Times(1);
|
||||
|
||||
auto static const kINPUT = json::parse(fmt::format(
|
||||
static auto const kINPUT = json::parse(fmt::format(
|
||||
R"JSON({{
|
||||
"account":"{}"
|
||||
}})JSON",
|
||||
@@ -380,7 +380,7 @@ TEST_F(RPCAccountOffersHandlerTest, Limit)
|
||||
ON_CALL(*backend_, doFetchLedgerObjects).WillByDefault(Return(bbs));
|
||||
EXPECT_CALL(*backend_, doFetchLedgerObjects).Times(1);
|
||||
|
||||
auto static const kINPUT = json::parse(fmt::format(
|
||||
static auto const kINPUT = json::parse(fmt::format(
|
||||
R"JSON({{
|
||||
"account":"{}",
|
||||
"limit":10
|
||||
@@ -433,7 +433,7 @@ TEST_F(RPCAccountOffersHandlerTest, Marker)
|
||||
ON_CALL(*backend_, doFetchLedgerObjects).WillByDefault(Return(bbs));
|
||||
EXPECT_CALL(*backend_, doFetchLedgerObjects).Times(1);
|
||||
|
||||
auto static const kINPUT = json::parse(fmt::format(
|
||||
static auto const kINPUT = json::parse(fmt::format(
|
||||
R"JSON({{
|
||||
"account":"{}",
|
||||
"marker":"{},{}"
|
||||
@@ -469,7 +469,7 @@ TEST_F(RPCAccountOffersHandlerTest, MarkerNotExists)
|
||||
ON_CALL(*backend_, doFetchLedgerObject(hintIndex, kLEDGER_SEQ, _)).WillByDefault(Return(std::nullopt));
|
||||
EXPECT_CALL(*backend_, doFetchLedgerObject).Times(2);
|
||||
|
||||
auto static const kINPUT = json::parse(fmt::format(
|
||||
static auto const kINPUT = json::parse(fmt::format(
|
||||
R"JSON({{
|
||||
"account":"{}",
|
||||
"marker":"{},{}"
|
||||
@@ -527,7 +527,7 @@ TEST_F(RPCAccountOffersHandlerTest, LimitLessThanMin)
|
||||
ON_CALL(*backend_, doFetchLedgerObjects).WillByDefault(Return(bbs));
|
||||
EXPECT_CALL(*backend_, doFetchLedgerObjects).Times(1);
|
||||
|
||||
auto static const kINPUT = json::parse(fmt::format(
|
||||
static auto const kINPUT = json::parse(fmt::format(
|
||||
R"JSON({{
|
||||
"account":"{}",
|
||||
"limit":{}
|
||||
@@ -582,7 +582,7 @@ TEST_F(RPCAccountOffersHandlerTest, LimitMoreThanMax)
|
||||
ON_CALL(*backend_, doFetchLedgerObjects).WillByDefault(Return(bbs));
|
||||
EXPECT_CALL(*backend_, doFetchLedgerObjects).Times(1);
|
||||
|
||||
auto static const kINPUT = json::parse(fmt::format(
|
||||
static auto const kINPUT = json::parse(fmt::format(
|
||||
R"JSON({{
|
||||
"account":"{}",
|
||||
"limit":{}
|
||||
|
||||
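Note: the RPCAccountOffersHandlerTest hunks all sit next to the same gmock pairing: ON_CALL(...).WillByDefault(...) installs a default action, and a separate EXPECT_CALL(...).Times(n) asserts the call count without restating that action. A compilable sketch of the idiom against a hypothetical single-method mock (the Backend interface and Blob alias merely mirror names in the tests; link with gmock_main):

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <optional>
#include <vector>

using Blob = std::vector<unsigned char>;  // stand-in for the tests' Blob type

struct Backend {
    virtual ~Backend() = default;
    virtual std::optional<Blob> doFetchLedgerObject() = 0;
};

struct MockBackend : Backend {
    MOCK_METHOD(std::optional<Blob>, doFetchLedgerObject, (), (override));
};

TEST(OnCallExpectCallSketch, DefaultActionPlusCardinality) {
    MockBackend backend;
    // Default behaviour: pretend the ledger object does not exist.
    ON_CALL(backend, doFetchLedgerObject).WillByDefault(testing::Return(std::nullopt));
    // Cardinality check only; the returned value still comes from ON_CALL.
    EXPECT_CALL(backend, doFetchLedgerObject).Times(1);
    EXPECT_EQ(backend.doFetchLedgerObject(), std::nullopt);
}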
@@ -512,7 +512,7 @@ TEST_F(RPCAccountTxHandlerTest, IndexSpecificForwardTrue)

 runSpawn([&, this](auto yield) {
 auto const handler = AnyHandler{AccountTxHandler{backend_, mockETLServicePtr_}};
-auto static const kINPUT = json::parse(fmt::format(
+static auto const kINPUT = json::parse(fmt::format(
 R"JSON({{
 "account": "{}",
 "ledger_index_min": {},

@@ -554,7 +554,7 @@ TEST_F(RPCAccountTxHandlerTest, IndexSpecificForwardFalse)

 runSpawn([&, this](auto yield) {
 auto const handler = AnyHandler{AccountTxHandler{backend_, mockETLServicePtr_}};
-auto static const kINPUT = json::parse(fmt::format(
+static auto const kINPUT = json::parse(fmt::format(
 R"JSON({{
 "account": "{}",
 "ledger_index_min": {},

@@ -596,7 +596,7 @@ TEST_F(RPCAccountTxHandlerTest, IndexNotSpecificForwardTrue)

 runSpawn([&, this](auto yield) {
 auto const handler = AnyHandler{AccountTxHandler{backend_, mockETLServicePtr_}};
-auto static const kINPUT = json::parse(fmt::format(
+static auto const kINPUT = json::parse(fmt::format(
 R"JSON({{
 "account": "{}",
 "ledger_index_min": {},

@@ -638,7 +638,7 @@ TEST_F(RPCAccountTxHandlerTest, IndexNotSpecificForwardFalse)

 runSpawn([&, this](auto yield) {
 auto const handler = AnyHandler{AccountTxHandler{backend_, mockETLServicePtr_}};
-auto static const kINPUT = json::parse(fmt::format(
+static auto const kINPUT = json::parse(fmt::format(
 R"JSON({{
 "account": "{}",
 "ledger_index_min": {},

@@ -678,7 +678,7 @@ TEST_F(RPCAccountTxHandlerTest, BinaryTrue)

 runSpawn([&, this](auto yield) {
 auto const handler = AnyHandler{AccountTxHandler{backend_, mockETLServicePtr_}};
-auto static const kINPUT = json::parse(fmt::format(
+static auto const kINPUT = json::parse(fmt::format(
 R"JSON({{
 "account": "{}",
 "ledger_index_min": {},

@@ -732,7 +732,7 @@ TEST_F(RPCAccountTxHandlerTest, BinaryTrueV2)

 runSpawn([&, this](auto yield) {
 auto const handler = AnyHandler{AccountTxHandler{backend_, mockETLServicePtr_}};
-auto static const kINPUT = json::parse(fmt::format(
+static auto const kINPUT = json::parse(fmt::format(
 R"JSON({{
 "account": "{}",
 "ledger_index_min": {},

@@ -784,7 +784,7 @@ TEST_F(RPCAccountTxHandlerTest, LimitAndMarker)

 runSpawn([&, this](auto yield) {
 auto const handler = AnyHandler{AccountTxHandler{backend_, mockETLServicePtr_}};
-auto static const kINPUT = json::parse(fmt::format(
+static auto const kINPUT = json::parse(fmt::format(
 R"JSON({{
 "account": "{}",
 "ledger_index_min": {},

@@ -818,7 +818,7 @@ TEST_F(RPCAccountTxHandlerTest, LimitIsCapped)

 runSpawn([&, this](auto yield) {
 auto const handler = AnyHandler{AccountTxHandler{backend_, mockETLServicePtr_}};
-auto static const kINPUT = json::parse(fmt::format(
+static auto const kINPUT = json::parse(fmt::format(
 R"JSON({{
 "account": "{}",
 "ledger_index_min": {},

@@ -850,7 +850,7 @@ TEST_F(RPCAccountTxHandlerTest, LimitAllowedUpToCap)

 runSpawn([&, this](auto yield) {
 auto const handler = AnyHandler{AccountTxHandler{backend_, mockETLServicePtr_}};
-auto static const kINPUT = json::parse(fmt::format(
+static auto const kINPUT = json::parse(fmt::format(
 R"JSON({{
 "account": "{}",
 "ledger_index_min": {},

@@ -898,7 +898,7 @@ TEST_F(RPCAccountTxHandlerTest, SpecificLedgerIndex)

 runSpawn([&, this](auto yield) {
 auto const handler = AnyHandler{AccountTxHandler{backend_, mockETLServicePtr_}};
-auto static const kINPUT = json::parse(fmt::format(
+static auto const kINPUT = json::parse(fmt::format(
 R"JSON({{
 "account": "{}",
 "ledger_index": {}

@@ -924,7 +924,7 @@ TEST_F(RPCAccountTxHandlerTest, SpecificNonexistLedgerIntIndex)

 runSpawn([&, this](auto yield) {
 auto const handler = AnyHandler{AccountTxHandler{backend_, mockETLServicePtr_}};
-auto static const kINPUT = json::parse(fmt::format(
+static auto const kINPUT = json::parse(fmt::format(
 R"JSON({{
 "account": "{}",
 "ledger_index": {}

@@ -947,7 +947,7 @@ TEST_F(RPCAccountTxHandlerTest, SpecificNonexistLedgerStringIndex)

 runSpawn([&, this](auto yield) {
 auto const handler = AnyHandler{AccountTxHandler{backend_, mockETLServicePtr_}};
-auto static const kINPUT = json::parse(fmt::format(
+static auto const kINPUT = json::parse(fmt::format(
 R"JSON({{
 "account": "{}",
 "ledger_index": "{}"

@@ -989,7 +989,7 @@ TEST_F(RPCAccountTxHandlerTest, SpecificLedgerHash)

 runSpawn([&, this](auto yield) {
 auto const handler = AnyHandler{AccountTxHandler{backend_, mockETLServicePtr_}};
-auto static const kINPUT = json::parse(fmt::format(
+static auto const kINPUT = json::parse(fmt::format(
 R"JSON({{
 "account": "{}",
 "ledger_hash": "{}"

@@ -1033,7 +1033,7 @@ TEST_F(RPCAccountTxHandlerTest, SpecificLedgerIndexValidated)

 runSpawn([&, this](auto yield) {
 auto const handler = AnyHandler{AccountTxHandler{backend_, mockETLServicePtr_}};
-auto static const kINPUT = json::parse(fmt::format(
+static auto const kINPUT = json::parse(fmt::format(
 R"JSON({{
 "account": "{}",
 "ledger_index": "validated"

@@ -1071,7 +1071,7 @@ TEST_F(RPCAccountTxHandlerTest, TxLessThanMinSeq)

 runSpawn([&, this](auto yield) {
 auto const handler = AnyHandler{AccountTxHandler{backend_, mockETLServicePtr_}};
-auto static const kINPUT = json::parse(fmt::format(
+static auto const kINPUT = json::parse(fmt::format(
 R"JSON({{
 "account": "{}",
 "ledger_index_min": {},

@@ -1113,7 +1113,7 @@ TEST_F(RPCAccountTxHandlerTest, TxLargerThanMaxSeq)

 runSpawn([&, this](auto yield) {
 auto const handler = AnyHandler{AccountTxHandler{backend_, mockETLServicePtr_}};
-auto static const kINPUT = json::parse(fmt::format(
+static auto const kINPUT = json::parse(fmt::format(
 R"JSON({{
 "account": "{}",
 "ledger_index_min": {},

@@ -1355,7 +1355,7 @@ TEST_F(RPCAccountTxHandlerTest, NFTTxs_API_v1)

 runSpawn([&, this](auto yield) {
 auto const handler = AnyHandler{AccountTxHandler{backend_, mockETLServicePtr_}};
-auto static const kINPUT = json::parse(fmt::format(
+static auto const kINPUT = json::parse(fmt::format(
 R"JSON({{
 "account": "{}",
 "ledger_index_min": {},

@@ -1603,7 +1603,7 @@ TEST_F(RPCAccountTxHandlerTest, NFTTxs_API_v2)

 runSpawn([&, this](auto yield) {
 auto const handler = AnyHandler{AccountTxHandler{backend_, mockETLServicePtr_}};
-auto static const kINPUT = json::parse(fmt::format(
+static auto const kINPUT = json::parse(fmt::format(
 R"JSON({{
 "account": "{}",
 "ledger_index_min": {},
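Note: the kINPUT locals above live inside lambdas handed to runSpawn, so the block-scope static matters: a function-local static auto const is initialized exactly once, on the first pass through the declaration, even if the enclosing lambda runs repeatedly, and since C++11 that initialization is thread-safe. A tiny sketch, where parseOnce is a hypothetical stand-in for the json::parse call:

#include <iostream>

static int parseOnce() {
    std::cout << "initialized\n";  // printed a single time overall
    return 42;
}

static void handlerBody() {
    static auto const kInput = parseOnce();  // initializer runs once, across all calls
    std::cout << kInput << "\n";
}

int main() {
    handlerBody();  // prints "initialized" then 42
    handlerBody();  // prints only 42
}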
@@ -74,11 +74,11 @@ using ::testing::Types;
 using namespace rpc;
 using TestServerInfoHandler = BaseServerInfoHandler<MockCounters>;

-constexpr static auto kINDEX1 = "05FB0EB4B899F056FA095537C5817163801F544BAFCEA39C995D76DB4D16F9DD";
-constexpr static auto kAMM_ACCOUNT = "rLcS7XL6nxRAi7JcbJcn1Na179oF3vdfbh";
-constexpr static auto kACCOUNT = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn";
-constexpr static auto kNFT_ID = "00010000A7CAD27B688D14BA1A9FA5366554D6ADCF9CE0875B974D9F00000004";
-constexpr static auto kCURRENCY = "0158415500000000C1F76FF6ECB0BAC600000000";
+static constexpr auto kINDEX1 = "05FB0EB4B899F056FA095537C5817163801F544BAFCEA39C995D76DB4D16F9DD";
+static constexpr auto kAMM_ACCOUNT = "rLcS7XL6nxRAi7JcbJcn1Na179oF3vdfbh";
+static constexpr auto kACCOUNT = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn";
+static constexpr auto kNFT_ID = "00010000A7CAD27B688D14BA1A9FA5366554D6ADCF9CE0875B974D9F00000004";
+static constexpr auto kCURRENCY = "0158415500000000C1F76FF6ECB0BAC600000000";

 using AnyHandlerType = Types<
 AccountChannelsHandler,
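Note: as in the other files, the change here is purely stylistic. C++ attaches no meaning to the order of declaration specifiers, so constexpr static and static constexpr (and likewise auto static const and static auto const) declare exactly the same entity; the diff only normalizes to the conventional static-first order. A two-line check:

constexpr static int kOldOrder = 1;  // legal, identical meaning
static constexpr int kNewOrder = 1;  // the order this diff normalizes to
static_assert(kOldOrder == kNewOrder, "specifier order does not change semantics");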
Some files were not shown because too many files have changed in this diff