Mirror of https://github.com/XRPLF/clio.git, synced 2026-02-04 14:05:27 +00:00

Compare commits (4 commits):

| SHA1       |
| ---------- |
| e8c977e7de |
| c59fcf343f |
| c35649eb6e |
| 4da4b49eda |
@@ -2,6 +2,15 @@ _help_parse: Options affecting listfile parsing
 parse:
   _help_additional_commands:
   - Specify structure for custom cmake functions
+  additional_commands:
+    foo:
+      flags:
+      - BAR
+      - BAZ
+      kwargs:
+        HEADERS: "*"
+        SOURCES: "*"
+        DEPENDS: "*"
   _help_override_spec:
   - Override configurations per-command where available
   override_spec: {}

@@ -21,7 +30,7 @@ format:
   line_width: 120
   _help_tab_size:
   - How many spaces to tab for indent
-  tab_size: 4
+  tab_size: 2
   _help_use_tabchars:
   - If true, lines are indented using tab characters (utf-8
   - 0x09) instead of <tab_size> space characters (utf-8 0x20).

@@ -61,19 +70,19 @@ format:
   _help_dangle_parens:
   - If a statement is wrapped to more than one line, than dangle
   - the closing parenthesis on its own line.
-  dangle_parens: false
+  dangle_parens: true
   _help_dangle_align:
   - If the trailing parenthesis must be 'dangled' on its on
   - "line, then align it to this reference: `prefix`: the start"
   - "of the statement, `prefix-indent`: the start of the"
   - "statement, plus one indentation level, `child`: align to"
   - the column of the arguments
   dangle_align: prefix
   _help_min_prefix_chars:
   - If the statement spelling length (including space and
   - parenthesis) is smaller than this amount, then force reject
   - nested layouts.
-  min_prefix_chars: 18
+  min_prefix_chars: 4
   _help_max_prefix_chars:
   - If the statement spelling length (including space and
   - parenthesis) is larger than the tab width by more than this

@@ -118,7 +127,7 @@ _help_markup: Options affecting comment reflow and formatting.
 markup:
   _help_bullet_char:
   - What character to use for bulleted lists
-  bullet_char: "-"
+  bullet_char: "*"
   _help_enum_char:
   - What character to use as punctuation after numerals in an
   - enumerated list
@@ -34,14 +34,14 @@ runs:
   steps:
     - name: Login to DockerHub
      if: ${{ inputs.push_image == 'true' && inputs.dockerhub_repo != '' }}
-      uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
+      uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
       with:
         username: ${{ env.DOCKERHUB_USER }}
         password: ${{ env.DOCKERHUB_PW }}

     - name: Login to GitHub Container Registry
       if: ${{ inputs.push_image == 'true' }}
-      uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
+      uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
       with:
         registry: ghcr.io
         username: ${{ github.repository_owner }}
46  .github/scripts/execute-tests-under-sanitizer.sh  vendored  Executable file

@@ -0,0 +1,46 @@
+#!/bin/bash
+
+set -o pipefail
+
+# Note: This script is intended to be run from the root of the repository.
+#
+# This script runs each unit-test separately and generates reports from the currently active sanitizer.
+# Output is saved in ./.sanitizer-report in the root of the repository
+
+if [[ -z "$1" ]]; then
+    cat <<EOF
+
+ERROR
+-----------------------------------------------------------------------------
+Path to clio_tests should be passed as first argument to the script.
+-----------------------------------------------------------------------------
+
+EOF
+    exit 1
+fi
+
+TEST_BINARY=$1
+
+if [[ ! -f "$TEST_BINARY" ]]; then
+    echo "Test binary not found: $TEST_BINARY"
+    exit 1
+fi
+
+TESTS=$($TEST_BINARY --gtest_list_tests | awk '/^ / {print suite $1} !/^ / {suite=$1}')
+
+OUTPUT_DIR="./.sanitizer-report"
+mkdir -p "$OUTPUT_DIR"
+
+export TSAN_OPTIONS="die_after_fork=0"
+export MallocNanoZone='0' # for MacOSX
+
+for TEST in $TESTS; do
+    OUTPUT_FILE="$OUTPUT_DIR/${TEST//\//_}.log"
+    $TEST_BINARY --gtest_filter="$TEST" >"$OUTPUT_FILE" 2>&1
+
+    if [ $? -ne 0 ]; then
+        echo "'$TEST' failed a sanitizer check."
+    else
+        rm "$OUTPUT_FILE"
+    fi
+done
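For local debugging the same helper can be run by hand against any sanitizer-instrumented clio_tests binary, mirroring the way reusable-test.yml invokes it in CI. A minimal sketch (the build directory and sanitizer choice are assumptions, not part of this change):

    # assumes clio_tests was already built with a sanitizer enabled, e.g. configured with -Dsan=thread
    ./.github/scripts/execute-tests-under-sanitizer.sh ./.build/clio_tests
    ls .sanitizer-report/   # a log file remains for each test that failed a sanitizer check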
@@ -48,7 +48,7 @@ jobs:
     runs-on: ubuntu-latest

     steps:
-      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - name: Download Clio binary from artifact
        if: ${{ inputs.artifact_name != null }}
3  .github/workflows/build.yml  vendored

@@ -23,7 +23,6 @@ on:
       - "cmake/**"
       - "src/**"
       - "tests/**"
-      - "benchmarks/**"

       - docs/config-description.md
   workflow_dispatch:

@@ -101,7 +100,7 @@ jobs:
       image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696

     steps:
-      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
        with:
6  .github/workflows/check-libxrpl.yml  vendored

@@ -24,12 +24,12 @@ jobs:
       image: ghcr.io/xrplf/clio-ci:14342e087ceb8b593027198bf9ef06a43833c696

     steps:
-      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          fetch-depth: 0

      - name: Prepare runner
-        uses: XRPLF/actions/prepare-runner@2cbf481018d930656e9276fcc20dc0e3a0be5b6d
+        uses: XRPLF/actions/prepare-runner@f05cab7b8541eee6473aa42beb9d2fe35608a190
        with:
          enable_ccache: false

@@ -92,7 +92,7 @@ jobs:
       issues: write

     steps:
-      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - name: Create an issue
        uses: ./.github/actions/create-issue
6  .github/workflows/clang-tidy.yml  vendored

@@ -39,12 +39,12 @@ jobs:
       pull-requests: write

     steps:
-      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          fetch-depth: 0

      - name: Prepare runner
-        uses: XRPLF/actions/prepare-runner@2cbf481018d930656e9276fcc20dc0e3a0be5b6d
+        uses: XRPLF/actions/prepare-runner@f05cab7b8541eee6473aa42beb9d2fe35608a190
        with:
          enable_ccache: false

@@ -107,7 +107,7 @@ jobs:

      - name: Create PR with fixes
        if: ${{ steps.files_changed.outcome != 'success' && github.event_name != 'pull_request' }}
-        uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
+        uses: peter-evans/create-pull-request@98357b18bf14b5342f975ff684046ec3b2a07725 # v8.0.0
        env:
          GH_REPO: ${{ github.repository }}
          GH_TOKEN: ${{ github.token }}
4  .github/workflows/docs.yml  vendored

@@ -22,12 +22,12 @@ jobs:

     steps:
       - name: Checkout
-        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          lfs: true

      - name: Prepare runner
-        uses: XRPLF/actions/prepare-runner@2cbf481018d930656e9276fcc20dc0e3a0be5b6d
+        uses: XRPLF/actions/prepare-runner@f05cab7b8541eee6473aa42beb9d2fe35608a190
        with:
          enable_ccache: false
2  .github/workflows/nightly.yml  vendored

@@ -169,7 +169,7 @@ jobs:
       issues: write

     steps:
-      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - name: Create an issue
        uses: ./.github/actions/create-issue
2  .github/workflows/pre-commit-autoupdate.yml  vendored

@@ -12,7 +12,7 @@ on:

 jobs:
   auto-update:
-    uses: XRPLF/actions/.github/workflows/pre-commit-autoupdate.yml@8b19d3462e52cd8ea4d76b4c8d0f7533e7469c15
+    uses: XRPLF/actions/.github/workflows/pre-commit-autoupdate.yml@ad4ab1ae5a54a4bab0e87294c31fc0729f788b2b
     with:
       sign_commit: true
       committer: "Clio CI <skuznetsov@ripple.com>"
2  .github/workflows/pre-commit.yml  vendored

@@ -8,7 +8,7 @@ on:

 jobs:
   run-hooks:
-    uses: XRPLF/actions/.github/workflows/pre-commit.yml@320be44621ca2a080f05aeb15817c44b84518108
+    uses: XRPLF/actions/.github/workflows/pre-commit.yml@282890f46d6921249d5659dd38babcb0bd8aef48
     with:
       runs_on: heavy
       container: '{ "image": "ghcr.io/xrplf/clio-pre-commit:14342e087ceb8b593027198bf9ef06a43833c696" }'
8  .github/workflows/reusable-build.yml  vendored

@@ -90,12 +90,12 @@ jobs:
         if: ${{ runner.os == 'macOS' }}
         uses: XRPLF/actions/cleanup-workspace@cf0433aa74563aead044a1e395610c96d65a37cf

-      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          fetch-depth: 0

      - name: Prepare runner
-        uses: XRPLF/actions/prepare-runner@2cbf481018d930656e9276fcc20dc0e3a0be5b6d
+        uses: XRPLF/actions/prepare-runner@f05cab7b8541eee6473aa42beb9d2fe35608a190
        with:
          enable_ccache: ${{ inputs.download_ccache }}

@@ -113,7 +113,7 @@ jobs:

      - name: Restore ccache cache
        if: ${{ inputs.download_ccache && github.ref != 'refs/heads/develop' }}
-        uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
+        uses: actions/cache/restore@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
        with:
          path: ${{ env.CCACHE_DIR }}
          key: ${{ steps.cache_key.outputs.key }}

@@ -164,7 +164,7 @@ jobs:

      - name: Save ccache cache
        if: ${{ inputs.upload_ccache && github.ref == 'refs/heads/develop' }}
-        uses: actions/cache/save@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
+        uses: actions/cache/save@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
        with:
          path: ${{ env.CCACHE_DIR }}
          key: ${{ steps.cache_key.outputs.key }}
4  .github/workflows/reusable-release.yml  vendored

@@ -55,12 +55,12 @@ jobs:
       contents: write

     steps:
-      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          fetch-depth: 0

      - name: Prepare runner
-        uses: XRPLF/actions/prepare-runner@2cbf481018d930656e9276fcc20dc0e3a0be5b6d
+        uses: XRPLF/actions/prepare-runner@f05cab7b8541eee6473aa42beb9d2fe35608a190
        with:
          enable_ccache: false
60  .github/workflows/reusable-test.yml  vendored

@@ -45,12 +45,16 @@

     if: ${{ inputs.run_unit_tests }}

+    env:
+      # TODO: remove completely when we have fixed all currently existing issues with sanitizers
+      SANITIZER_IGNORE_ERRORS: ${{ endsWith(inputs.conan_profile, '.tsan') }}
+
     steps:
       - name: Cleanup workspace
         if: ${{ runner.os == 'macOS' }}
         uses: XRPLF/actions/cleanup-workspace@cf0433aa74563aead044a1e395610c96d65a37cf

-      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          fetch-depth: 0

@@ -61,13 +65,34 @@
       - name: Make clio_tests executable
         run: chmod +x ./clio_tests

-      - name: Run clio_tests
-        continue-on-error: true
-        id: run_clio_tests
+      - name: Run clio_tests (regular)
+        if: ${{ env.SANITIZER_IGNORE_ERRORS == 'false' }}
         run: ./clio_tests

+      - name: Run clio_tests (sanitizer errors ignored)
+        if: ${{ env.SANITIZER_IGNORE_ERRORS == 'true' }}
+        run: ./.github/scripts/execute-tests-under-sanitizer.sh ./clio_tests
+
+      - name: Check for sanitizer report
+        if: ${{ env.SANITIZER_IGNORE_ERRORS == 'true' }}
+        id: check_report
+        run: |
+          if ls .sanitizer-report/* 1> /dev/null 2>&1; then
+            echo "found_report=true" >> $GITHUB_OUTPUT
+          else
+            echo "found_report=false" >> $GITHUB_OUTPUT
+          fi
+
+      - name: Upload sanitizer report
+        if: ${{ env.SANITIZER_IGNORE_ERRORS == 'true' && steps.check_report.outputs.found_report == 'true' }}
+        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
+        with:
+          name: sanitizer_report_${{ runner.os }}_${{ inputs.build_type }}_${{ inputs.conan_profile }}
+          path: .sanitizer-report/*
+          include-hidden-files: true
+
       - name: Create an issue
-        if: ${{ steps.run_clio_tests.outcome == 'failure' && endsWith(inputs.conan_profile, 'san') }}
+        if: ${{ false && env.SANITIZER_IGNORE_ERRORS == 'true' && steps.check_report.outputs.found_report == 'true' }}
         uses: ./.github/actions/create-issue
         env:
           GH_TOKEN: ${{ github.token }}

@@ -75,13 +100,10 @@
           labels: "bug"
           title: "[${{ inputs.conan_profile }}] reported issues"
           body: >
-            Clio tests failed one or more sanitizer checks when built with `${{ inputs.conan_profile }}`.
+            Clio tests failed one or more sanitizer checks when built with ${{ inputs.conan_profile }}`.

             Workflow: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}/
+            Reports are available as artifacts.
-      - name: Fail the job if clio_tests failed
-        if: ${{ steps.run_clio_tests.outcome == 'failure' }}
-        run: exit 1

   integration_tests:
     name: Integration testing

@@ -104,17 +126,11 @@
         if: ${{ runner.os == 'macOS' }}
         uses: XRPLF/actions/cleanup-workspace@cf0433aa74563aead044a1e395610c96d65a37cf

-      - name: Delete and start colima (macOS)
-        # This is a temporary workaround for colima issues on macOS runners
+      - name: Spin up scylladb
         if: ${{ runner.os == 'macOS' }}
+        timeout-minutes: 3
         run: |
-          colima delete --force
-          colima start
-
-      - name: Spin up scylladb (macOS)
-        if: ${{ runner.os == 'macOS' }}
-        timeout-minutes: 1
-        run: |
+          docker rm --force scylladb || true
           docker run \
             --detach \
             --name scylladb \

@@ -126,12 +142,8 @@
             --memory 16G \
             scylladb/scylla

-      - name: Wait for scylladb container to be healthy (macOS)
-        if: ${{ runner.os == 'macOS' }}
-        timeout-minutes: 1
-        run: |
           until [ "$(docker inspect -f '{{.State.Health.Status}}' scylladb)" == "healthy" ]; do
-            sleep 1
+            sleep 5
           done

      - uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
@@ -16,7 +16,7 @@ jobs:
     runs-on: ubuntu-latest

     steps:
-      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          fetch-depth: 0
1  .github/workflows/sanitizers.yml  vendored

@@ -15,6 +15,7 @@ on:
       - ".github/actions/**"
       - "!.github/actions/build-docker-image/**"
       - "!.github/actions/create-issue/**"
+      - .github/scripts/execute-tests-under-sanitizer.sh

       - CMakeLists.txt
       - conanfile.py
24  .github/workflows/update-docker-ci.yml  vendored

@@ -56,7 +56,7 @@ jobs:
     needs: repo

     steps:
-      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - name: Get changed files
        id: changed-files

@@ -94,7 +94,7 @@ jobs:
     needs: repo

     steps:
-      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - name: Get changed files
        id: changed-files

@@ -132,7 +132,7 @@ jobs:
     needs: [repo, gcc-amd64, gcc-arm64]

     steps:
-      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - name: Get changed files
        id: changed-files

@@ -145,7 +145,7 @@ jobs:

      - name: Login to GitHub Container Registry
        if: ${{ github.event_name != 'pull_request' }}
-        uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
+        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}

@@ -153,7 +153,7 @@ jobs:

      - name: Login to DockerHub
        if: ${{ github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' }}
-        uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
+        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          username: ${{ secrets.DOCKERHUB_USER }}
          password: ${{ secrets.DOCKERHUB_PW }}

@@ -183,7 +183,7 @@ jobs:
     needs: repo

     steps:
-      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - name: Get changed files
        id: changed-files

@@ -219,7 +219,7 @@ jobs:
     needs: [repo, gcc-merge]

     steps:
-      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - name: Get changed files
        id: changed-files

@@ -250,7 +250,7 @@ jobs:
     needs: [repo, gcc-merge]

     steps:
-      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - name: Get changed files
        id: changed-files

@@ -281,7 +281,7 @@ jobs:
     needs: [repo, tools-amd64, tools-arm64]

     steps:
-      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - name: Get changed files
        id: changed-files

@@ -294,7 +294,7 @@ jobs:

      - name: Login to GitHub Container Registry
        if: ${{ github.event_name != 'pull_request' }}
-        uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
+        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}

@@ -316,7 +316,7 @@ jobs:
     needs: [repo, tools-merge]

     steps:
-      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
      - uses: ./.github/actions/build-docker-image
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

@@ -338,7 +338,7 @@ jobs:
     needs: [repo, gcc-merge, clang, tools-merge]

     steps:
-      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
      - uses: ./.github/actions/build-docker-image
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
6  .github/workflows/upload-conan-deps.yml  vendored

@@ -52,7 +52,7 @@ jobs:
     outputs:
       matrix: ${{ steps.set-matrix.outputs.matrix }}
     steps:
-      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - name: Calculate conan matrix
        id: set-matrix

@@ -75,10 +75,10 @@ jobs:
       CONAN_PROFILE: ${{ matrix.compiler }}${{ matrix.sanitizer_ext }}

     steps:
-      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - name: Prepare runner
-        uses: XRPLF/actions/prepare-runner@2cbf481018d930656e9276fcc20dc0e3a0be5b6d
+        uses: XRPLF/actions/prepare-runner@f05cab7b8541eee6473aa42beb9d2fe35608a190
        with:
          enable_ccache: false
1  .gitignore  vendored

@@ -4,7 +4,6 @@
 .build
 .cache
 .vscode
-.zed
 .python-version
 .DS_Store
 .sanitizer-report
@@ -29,7 +29,7 @@ repos:

   # Autoformat: YAML, JSON, Markdown, etc.
   - repo: https://github.com/rbubley/mirrors-prettier
-    rev: c2bc67fe8f8f549cc489e00ba8b45aa18ee713b1 # frozen: v3.8.1
+    rev: 14abee445aea04b39069c19b4bd54efff6775819 # frozen: v3.7.4
     hooks:
       - id: prettier

@@ -59,7 +59,7 @@ repos:
         ]

   - repo: https://github.com/psf/black-pre-commit-mirror
-    rev: ea488cebbfd88a5f50b8bd95d5c829d0bb76feb8 # frozen: 26.1.0
+    rev: 831207fd435b47aeffdf6af853097e64322b4d44 # frozen: 25.12.0
     hooks:
       - id: black
@@ -1,7 +1,8 @@
 cmake_minimum_required(VERSION 3.20)

 project(clio VERSION ${CLIO_VERSION} HOMEPAGE_URL "https://github.com/XRPLF/clio"
-        DESCRIPTION "An XRP Ledger API Server")
+        DESCRIPTION "An XRP Ledger API Server"
+)

 # =========================== Options ====================================== #
 option(verbose "Verbose build" FALSE)

@@ -35,7 +36,7 @@ target_compile_features(clio_options INTERFACE cxx_std_23) # Clio needs c++23 bu
 target_include_directories(clio_options INTERFACE ${CMAKE_SOURCE_DIR}/src)

 if (verbose)
   set(CMAKE_VERBOSE_MAKEFILE TRUE)
 endif ()

 # Clio tweaks and checks
(indentation-only change in this hunk; both sides read identically with whitespace stripped)

@@ -57,36 +58,36 @@ add_subdirectory(src)
 add_subdirectory(tests)

 if (benchmark)
   add_subdirectory(benchmarks)
 endif ()

 # Enable selected sanitizer if enabled via `san`
 if (san)
   set(SUPPORTED_SANITIZERS "address" "thread" "memory" "undefined")
   if (NOT san IN_LIST SUPPORTED_SANITIZERS)
     message(FATAL_ERROR "Error: Unsupported sanitizer '${san}'. Supported values are: ${SUPPORTED_SANITIZERS}.")
   endif ()

   # Sanitizers recommend minimum of -O1 for reasonable performance so we enable it for debug builds
   set(SAN_OPTIMIZATION_FLAG "")
   if (CMAKE_BUILD_TYPE STREQUAL "Debug")
     set(SAN_OPTIMIZATION_FLAG -O1)
   endif ()
   target_compile_options(clio_options INTERFACE ${SAN_OPTIMIZATION_FLAG} ${SAN_FLAG} -fno-omit-frame-pointer)

   target_link_libraries(clio_options INTERFACE ${SAN_FLAG} ${SAN_LIB})
 endif ()

 # Generate `docs` target for doxygen documentation if enabled Note: use `make docs` to generate the documentation
 if (docs)
   add_subdirectory(docs)
 endif ()

 include(install/install)
 if (package)
   include(ClioPackage)
 endif ()

 if (snapshot)
   add_subdirectory(tools/snapshot)
 endif ()
(indentation-only change in this hunk; both sides read identically with whitespace stripped)
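As a rough local illustration of the `san` option handled above, a sanitizer build could be configured along these lines (the build directory, build type and sanitizer value are only examples, and the project's dependencies are assumed to be installed already, normally via Conan):

    cmake -B .build -DCMAKE_BUILD_TYPE=Debug -Dsan=address
    cmake --build .build --target clio_tests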
@@ -1,18 +1,20 @@
 add_executable(clio_benchmark)

-target_sources(clio_benchmark
+target_sources(
+  clio_benchmark
   PRIVATE # Common
           Main.cpp
           Playground.cpp
           # ExecutionContext
           util/async/ExecutionContextBenchmarks.cpp
           # Logger
           util/log/LoggerBenchmark.cpp
           # WorkQueue
-          rpc/WorkQueueBenchmarks.cpp)
+          rpc/WorkQueueBenchmarks.cpp
+)

 include(deps/gbench)

 target_include_directories(clio_benchmark PRIVATE .)
-target_link_libraries(clio_benchmark PRIVATE clio_rpc clio_util benchmark::benchmark_main spdlog::spdlog)
+target_link_libraries(clio_benchmark PUBLIC clio_util clio_rpc benchmark::benchmark_main spdlog::spdlog)
 set_target_properties(clio_benchmark PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})
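For reference, the resulting clio_benchmark binary is a regular Google Benchmark executable, so individual benchmarks can be selected with the standard filter flag; the path and regex below are illustrative, not part of the change:

    # run only the ExecutionContext benchmarks from the build directory configured earlier
    ./.build/clio_benchmark --benchmark_filter='ExecutionContext'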
@@ -29,6 +29,8 @@

 #include <benchmark/benchmark.h>
 #include <boost/asio/steady_timer.hpp>
+#include <boost/asio/thread_pool.hpp>
+#include <boost/json/object.hpp>

 #include <algorithm>
 #include <atomic>
@@ -1,5 +1,5 @@
 find_program(CCACHE_PATH "ccache")
 if (CCACHE_PATH)
   set(CMAKE_CXX_COMPILER_LAUNCHER "${CCACHE_PATH}")
   message(STATUS "Using ccache: ${CCACHE_PATH}")
 endif ()
(indentation-only change in this hunk; both sides read identically with whitespace stripped)
@@ -1,42 +1,42 @@
 if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
   if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 16)
     message(FATAL_ERROR "Clang 16+ required for building clio")
   endif ()
   set(is_clang TRUE)
 elseif (CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")
   if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 15)
     message(FATAL_ERROR "AppleClang 15+ required for building clio")
   endif ()
   set(is_appleclang TRUE)
 elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
   if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 12)
     message(FATAL_ERROR "GCC 12+ required for building clio")
   endif ()
   set(is_gcc TRUE)
 else ()
   message(FATAL_ERROR "Supported compilers: AppleClang 15+, Clang 16+, GCC 12+")
 endif ()

 if (san)
   string(TOLOWER ${san} san)
   set(SAN_FLAG "-fsanitize=${san}")
   set(SAN_LIB "")
   if (is_gcc)
     if (san STREQUAL "address")
       set(SAN_LIB "asan")
     elseif (san STREQUAL "thread")
       set(SAN_LIB "tsan")
     elseif (san STREQUAL "memory")
       set(SAN_LIB "msan")
     elseif (san STREQUAL "undefined")
       set(SAN_LIB "ubsan")
-    endif ()
-  endif ()
-  set(_saved_CRL ${CMAKE_REQUIRED_LIBRARIES})
-  set(CMAKE_REQUIRED_LIBRARIES "${SAN_FLAG};${SAN_LIB}")
-  check_cxx_compiler_flag(${SAN_FLAG} COMPILER_SUPPORTS_SAN)
-  set(CMAKE_REQUIRED_LIBRARIES ${_saved_CRL})
-  if (NOT COMPILER_SUPPORTS_SAN)
-    message(FATAL_ERROR "${san} sanitizer does not seem to be supported by your compiler")
     endif ()
+  endif ()
+  set(_saved_CRL ${CMAKE_REQUIRED_LIBRARIES})
+  set(CMAKE_REQUIRED_LIBRARIES "${SAN_FLAG};${SAN_LIB}")
+  check_cxx_compiler_flag(${SAN_FLAG} COMPILER_SUPPORTS_SAN)
+  set(CMAKE_REQUIRED_LIBRARIES ${_saved_CRL})
+  if (NOT COMPILER_SUPPORTS_SAN)
+    message(FATAL_ERROR "${san} sanitizer does not seem to be supported by your compiler")
+  endif ()
 endif ()
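The SAN_LIB values chosen above are GCC's sanitizer runtime libraries. A hand-written equivalent of the flags this module adds for san=address might look roughly as follows; the file names are hypothetical and this is only a sketch of the compile and link lines, not the project's actual build commands:

    # compile with the sanitizer and frame pointers kept, as the CMake above does
    g++ -O1 -fsanitize=address -fno-omit-frame-pointer -c Example.cpp
    # link with the sanitizer flag and the matching runtime (libasan for address)
    g++ -fsanitize=address Example.o -lasan -o example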
@@ -1,30 +1,33 @@
 if (lint)

   # Find clang-tidy binary
   if (DEFINED ENV{CLIO_CLANG_TIDY_BIN})
     set(_CLANG_TIDY_BIN $ENV{CLIO_CLANG_TIDY_BIN})
     if ((NOT EXISTS ${_CLANG_TIDY_BIN}) OR IS_DIRECTORY ${_CLANG_TIDY_BIN})
       message(FATAL_ERROR "$ENV{CLIO_CLANG_TIDY_BIN} no such file. Check CLIO_CLANG_TIDY_BIN env variable")
-    endif ()
-    message(STATUS "Using clang-tidy from CLIO_CLANG_TIDY_BIN")
-  else ()
-    find_program(_CLANG_TIDY_BIN NAMES "clang-tidy-20" "clang-tidy" REQUIRED)
     endif ()
+    message(STATUS "Using clang-tidy from CLIO_CLANG_TIDY_BIN")
+  else ()
+    find_program(_CLANG_TIDY_BIN NAMES "clang-tidy-20" "clang-tidy" REQUIRED)
+  endif ()

   if (NOT _CLANG_TIDY_BIN)
-    message(FATAL_ERROR "clang-tidy binary not found. Please set the CLIO_CLANG_TIDY_BIN environment variable or install clang-tidy."
-    )
-  endif ()
+    message(
+      FATAL_ERROR
+        "clang-tidy binary not found. Please set the CLIO_CLANG_TIDY_BIN environment variable or install clang-tidy."
+    )
+  endif ()

   # Support for https://github.com/matus-chochlik/ctcache
   find_program(CLANG_TIDY_CACHE_PATH NAMES "clang-tidy-cache")
   if (CLANG_TIDY_CACHE_PATH)
     set(_CLANG_TIDY_CMD "${CLANG_TIDY_CACHE_PATH};${_CLANG_TIDY_BIN}"
-        CACHE STRING "A combined command to run clang-tidy with caching wrapper")
-  else ()
-    set(_CLANG_TIDY_CMD "${_CLANG_TIDY_BIN}")
-  endif ()
+        CACHE STRING "A combined command to run clang-tidy with caching wrapper"
+    )
+  else ()
+    set(_CLANG_TIDY_CMD "${_CLANG_TIDY_BIN}")
+  endif ()

   set(CMAKE_CXX_CLANG_TIDY "${_CLANG_TIDY_CMD};--quiet")
   message(STATUS "Using clang-tidy: ${CMAKE_CXX_CLANG_TIDY}")
 endif ()
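To reproduce the lint setup above outside CI, the clang-tidy binary can be supplied through the environment variable this module checks; the path below is an example and, as with the sanitizer sketch earlier, dependencies are assumed to be installed already:

    export CLIO_CLANG_TIDY_BIN=/usr/bin/clang-tidy-20
    cmake -B .build -Dlint=TRUE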
@@ -1,41 +1,47 @@
 find_package(Git REQUIRED)

 if (DEFINED ENV{GITHUB_BRANCH_NAME})
   set(GIT_BUILD_BRANCH $ENV{GITHUB_BRANCH_NAME})
   set(GIT_COMMIT_HASH $ENV{GITHUB_HEAD_SHA})
 else ()
   set(GIT_COMMAND branch --show-current)
-  execute_process(COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
-                  OUTPUT_VARIABLE GIT_BUILD_BRANCH OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY)
+  execute_process(
+    COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE GIT_BUILD_BRANCH
+    OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY
+  )

   set(GIT_COMMAND rev-parse HEAD)
-  execute_process(COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
-                  OUTPUT_VARIABLE GIT_COMMIT_HASH OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY)
+  execute_process(
+    COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE GIT_COMMIT_HASH
+    OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY
+  )
 endif ()

-execute_process(COMMAND date +%Y%m%d%H%M%S WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE BUILD_DATE
-                OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY)
+execute_process(
+  COMMAND date +%Y%m%d%H%M%S WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE BUILD_DATE
+  OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY
+)

 message(STATUS "Git branch: ${GIT_BUILD_BRANCH}")
 message(STATUS "Git commit hash: ${GIT_COMMIT_HASH}")
 message(STATUS "Build date: ${BUILD_DATE}")

 if (DEFINED ENV{FORCE_CLIO_VERSION} AND NOT "$ENV{FORCE_CLIO_VERSION}" STREQUAL "")
   message(STATUS "Using explicitly provided '${FORCE_CLIO_VERSION}' as Clio version")

   set(CLIO_VERSION "$ENV{FORCE_CLIO_VERSION}")
   set(DOC_CLIO_VERSION "$ENV{FORCE_CLIO_VERSION}")
 else ()
   message(STATUS "Using 'YYYYMMDDHMS-<branch>-<git short rev>' as Clio version")

   string(SUBSTRING ${GIT_COMMIT_HASH} 0 7 GIT_COMMIT_HASH_SHORT)

   set(CLIO_VERSION "${BUILD_DATE}-${GIT_BUILD_BRANCH}-${GIT_COMMIT_HASH_SHORT}")
   set(DOC_CLIO_VERSION "develop")
 endif ()

 if (CMAKE_BUILD_TYPE MATCHES Debug)
   set(CLIO_VERSION "${CLIO_VERSION}+DEBUG")
 endif ()

 message(STATUS "Build version: ${CLIO_VERSION}")
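The FORCE_CLIO_VERSION branch above allows pinning the reported version instead of deriving it from the git branch, commit and build date. A sketch of how it might be used locally; the version string is illustrative:

    FORCE_CLIO_VERSION=2.4.0-rc1 cmake -B .build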
@@ -107,69 +107,76 @@ option(CODE_COVERAGE_VERBOSE "Verbose information" FALSE)
 find_program(GCOVR_PATH gcovr PATHS ${CMAKE_SOURCE_DIR}/scripts/test)

 if (DEFINED CODE_COVERAGE_GCOV_TOOL)
   set(GCOV_TOOL "${CODE_COVERAGE_GCOV_TOOL}")
 elseif (DEFINED ENV{CODE_COVERAGE_GCOV_TOOL})
   set(GCOV_TOOL "$ENV{CODE_COVERAGE_GCOV_TOOL}")
 elseif ("${CMAKE_CXX_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang")
   if (APPLE)
     execute_process(COMMAND xcrun -f llvm-cov OUTPUT_VARIABLE LLVMCOV_PATH OUTPUT_STRIP_TRAILING_WHITESPACE)
   else ()
     find_program(LLVMCOV_PATH llvm-cov)
   endif ()
   if (LLVMCOV_PATH)
     set(GCOV_TOOL "${LLVMCOV_PATH} gcov")
   endif ()
 elseif ("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU")
   find_program(GCOV_PATH gcov)
   set(GCOV_TOOL "${GCOV_PATH}")
 endif ()

 # Check supported compiler (Clang, GNU and Flang)
 get_property(LANGUAGES GLOBAL PROPERTY ENABLED_LANGUAGES)
 foreach (LANG ${LANGUAGES})
   if ("${CMAKE_${LANG}_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang")
     if ("${CMAKE_${LANG}_COMPILER_VERSION}" VERSION_LESS 3)
       message(FATAL_ERROR "Clang version must be 3.0.0 or greater! Aborting...")
-    endif ()
-  elseif (NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES "GNU" AND NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES
-          "(LLVM)?[Ff]lang")
-    message(FATAL_ERROR "Compiler is not GNU or Flang! Aborting...")
   endif ()
+  elseif (NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES "GNU" AND NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES
+          "(LLVM)?[Ff]lang"
+  )
+    message(FATAL_ERROR "Compiler is not GNU or Flang! Aborting...")
+  endif ()
 endforeach ()

 set(COVERAGE_COMPILER_FLAGS "-g --coverage" CACHE INTERNAL "")
 if (CMAKE_CXX_COMPILER_ID MATCHES "(GNU|Clang)")
   include(CheckCXXCompilerFlag)
   check_cxx_compiler_flag(-fprofile-abs-path HAVE_cxx_fprofile_abs_path)
   if (HAVE_cxx_fprofile_abs_path)
     set(COVERAGE_CXX_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-abs-path")
   endif ()
   include(CheckCCompilerFlag)
   check_c_compiler_flag(-fprofile-abs-path HAVE_c_fprofile_abs_path)
   if (HAVE_c_fprofile_abs_path)
     set(COVERAGE_C_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-abs-path")
   endif ()
 endif ()

 set(CMAKE_Fortran_FLAGS_COVERAGE ${COVERAGE_COMPILER_FLAGS}
-    CACHE STRING "Flags used by the Fortran compiler during coverage builds." FORCE)
+    CACHE STRING "Flags used by the Fortran compiler during coverage builds." FORCE
+)
 set(CMAKE_CXX_FLAGS_COVERAGE ${COVERAGE_COMPILER_FLAGS}
-    CACHE STRING "Flags used by the C++ compiler during coverage builds." FORCE)
+    CACHE STRING "Flags used by the C++ compiler during coverage builds." FORCE
+)
 set(CMAKE_C_FLAGS_COVERAGE ${COVERAGE_COMPILER_FLAGS}
-    CACHE STRING "Flags used by the C compiler during coverage builds." FORCE)
+    CACHE STRING "Flags used by the C compiler during coverage builds." FORCE
+)
 set(CMAKE_EXE_LINKER_FLAGS_COVERAGE "" CACHE STRING "Flags used for linking binaries during coverage builds." FORCE)
 set(CMAKE_SHARED_LINKER_FLAGS_COVERAGE ""
-    CACHE STRING "Flags used by the shared libraries linker during coverage builds." FORCE)
-mark_as_advanced(CMAKE_Fortran_FLAGS_COVERAGE CMAKE_CXX_FLAGS_COVERAGE CMAKE_C_FLAGS_COVERAGE
-                 CMAKE_EXE_LINKER_FLAGS_COVERAGE CMAKE_SHARED_LINKER_FLAGS_COVERAGE)
+    CACHE STRING "Flags used by the shared libraries linker during coverage builds." FORCE
+)
+mark_as_advanced(
+  CMAKE_Fortran_FLAGS_COVERAGE CMAKE_CXX_FLAGS_COVERAGE CMAKE_C_FLAGS_COVERAGE CMAKE_EXE_LINKER_FLAGS_COVERAGE
+  CMAKE_SHARED_LINKER_FLAGS_COVERAGE
+)

 get_property(GENERATOR_IS_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
 if (NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR GENERATOR_IS_MULTI_CONFIG))
   message(WARNING "Code coverage results with an optimised (non-Debug) build may be misleading")
 endif () # NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR GENERATOR_IS_MULTI_CONFIG)

 if (CMAKE_C_COMPILER_ID STREQUAL "GNU" OR CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
   link_libraries(gcov)
 endif ()

 # Defines a target for running and collection code coverage information Builds dependencies, runs the given executable

@@ -185,164 +192,170 @@ endif ()
 # with CMake 3.4+) ) The user can set the variable GCOVR_ADDITIONAL_ARGS to supply additional flags to the GCVOR
 # command.
 function (setup_target_for_coverage_gcovr)
   set(options NONE)
   set(oneValueArgs BASE_DIRECTORY NAME FORMAT)
   set(multiValueArgs EXCLUDE EXECUTABLE EXECUTABLE_ARGS DEPENDENCIES)
   cmake_parse_arguments(Coverage "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})

   if (NOT GCOV_TOOL)
     message(FATAL_ERROR "Could not find gcov or llvm-cov tool! Aborting...")
   endif ()

   if (NOT GCOVR_PATH)
     message(FATAL_ERROR "Could not find gcovr tool! Aborting...")
   endif ()

   # Set base directory (as absolute path), or default to PROJECT_SOURCE_DIR
   if (DEFINED Coverage_BASE_DIRECTORY)
     get_filename_component(BASEDIR ${Coverage_BASE_DIRECTORY} ABSOLUTE)
   else ()
     set(BASEDIR ${PROJECT_SOURCE_DIR})
   endif ()

   if (NOT DEFINED Coverage_FORMAT)
     set(Coverage_FORMAT xml)
   endif ()

   if ("--output" IN_LIST GCOVR_ADDITIONAL_ARGS)
     message(FATAL_ERROR "Unsupported --output option detected in GCOVR_ADDITIONAL_ARGS! Aborting...")
   else ()
     if ((Coverage_FORMAT STREQUAL "html-details") OR (Coverage_FORMAT STREQUAL "html-nested"))
       set(GCOVR_OUTPUT_FILE ${PROJECT_BINARY_DIR}/${Coverage_NAME}/index.html)
       set(GCOVR_CREATE_FOLDER ${PROJECT_BINARY_DIR}/${Coverage_NAME})
-    elseif (Coverage_FORMAT STREQUAL "html-single")
-      set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.html)
-    elseif ((Coverage_FORMAT STREQUAL "json-summary") OR (Coverage_FORMAT STREQUAL "json-details")
-            OR (Coverage_FORMAT STREQUAL "coveralls"))
-      set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.json)
-    elseif (Coverage_FORMAT STREQUAL "txt")
-      set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.txt)
-    elseif (Coverage_FORMAT STREQUAL "csv")
-      set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.csv)
-    else ()
-      set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.xml)
-    endif ()
-  endif ()

-  if ((Coverage_FORMAT STREQUAL "cobertura") OR (Coverage_FORMAT STREQUAL "xml"))
-    list(APPEND GCOVR_ADDITIONAL_ARGS --cobertura "${GCOVR_OUTPUT_FILE}")
-    list(APPEND GCOVR_ADDITIONAL_ARGS --cobertura-pretty)
-    set(Coverage_FORMAT cobertura) # overwrite xml
-  elseif (Coverage_FORMAT STREQUAL "sonarqube")
-    list(APPEND GCOVR_ADDITIONAL_ARGS --sonarqube "${GCOVR_OUTPUT_FILE}")
-  elseif (Coverage_FORMAT STREQUAL "json-summary")
-    list(APPEND GCOVR_ADDITIONAL_ARGS --json-summary "${GCOVR_OUTPUT_FILE}")
-    list(APPEND GCOVR_ADDITIONAL_ARGS --json-summary-pretty)
-  elseif (Coverage_FORMAT STREQUAL "json-details")
-    list(APPEND GCOVR_ADDITIONAL_ARGS --json "${GCOVR_OUTPUT_FILE}")
-    list(APPEND GCOVR_ADDITIONAL_ARGS --json-pretty)
-  elseif (Coverage_FORMAT STREQUAL "coveralls")
-    list(APPEND GCOVR_ADDITIONAL_ARGS --coveralls "${GCOVR_OUTPUT_FILE}")
-    list(APPEND GCOVR_ADDITIONAL_ARGS --coveralls-pretty)
-  elseif (Coverage_FORMAT STREQUAL "csv")
-    list(APPEND GCOVR_ADDITIONAL_ARGS --csv "${GCOVR_OUTPUT_FILE}")
-  elseif (Coverage_FORMAT STREQUAL "txt")
-    list(APPEND GCOVR_ADDITIONAL_ARGS --txt "${GCOVR_OUTPUT_FILE}")
     elseif (Coverage_FORMAT STREQUAL "html-single")
-      list(APPEND GCOVR_ADDITIONAL_ARGS --html "${GCOVR_OUTPUT_FILE}")
-      list(APPEND GCOVR_ADDITIONAL_ARGS --html-self-contained)
-    elseif (Coverage_FORMAT STREQUAL "html-nested")
-      list(APPEND GCOVR_ADDITIONAL_ARGS --html-nested "${GCOVR_OUTPUT_FILE}")
-    elseif (Coverage_FORMAT STREQUAL "html-details")
-      list(APPEND GCOVR_ADDITIONAL_ARGS --html-details "${GCOVR_OUTPUT_FILE}")
+      set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.html)
+    elseif ((Coverage_FORMAT STREQUAL "json-summary") OR (Coverage_FORMAT STREQUAL "json-details")
+            OR (Coverage_FORMAT STREQUAL "coveralls")
+    )
+      set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.json)
+    elseif (Coverage_FORMAT STREQUAL "txt")
+      set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.txt)
+    elseif (Coverage_FORMAT STREQUAL "csv")
+      set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.csv)
     else ()
-      message(FATAL_ERROR "Unsupported output style ${Coverage_FORMAT}! Aborting...")
+      set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.xml)
+    endif ()
+  endif ()
+
+  if ((Coverage_FORMAT STREQUAL "cobertura") OR (Coverage_FORMAT STREQUAL "xml"))
+    list(APPEND GCOVR_ADDITIONAL_ARGS --cobertura "${GCOVR_OUTPUT_FILE}")
+    list(APPEND GCOVR_ADDITIONAL_ARGS --cobertura-pretty)
+    set(Coverage_FORMAT cobertura) # overwrite xml
+  elseif (Coverage_FORMAT STREQUAL "sonarqube")
+    list(APPEND GCOVR_ADDITIONAL_ARGS --sonarqube "${GCOVR_OUTPUT_FILE}")
+  elseif (Coverage_FORMAT STREQUAL "json-summary")
+    list(APPEND GCOVR_ADDITIONAL_ARGS --json-summary "${GCOVR_OUTPUT_FILE}")
|
||||||
|
list(APPEND GCOVR_ADDITIONAL_ARGS --json-summary-pretty)
|
||||||
|
elseif (Coverage_FORMAT STREQUAL "json-details")
|
||||||
|
list(APPEND GCOVR_ADDITIONAL_ARGS --json "${GCOVR_OUTPUT_FILE}")
|
||||||
|
list(APPEND GCOVR_ADDITIONAL_ARGS --json-pretty)
|
||||||
|
elseif (Coverage_FORMAT STREQUAL "coveralls")
|
||||||
|
list(APPEND GCOVR_ADDITIONAL_ARGS --coveralls "${GCOVR_OUTPUT_FILE}")
|
||||||
|
list(APPEND GCOVR_ADDITIONAL_ARGS --coveralls-pretty)
|
||||||
|
elseif (Coverage_FORMAT STREQUAL "csv")
|
||||||
|
list(APPEND GCOVR_ADDITIONAL_ARGS --csv "${GCOVR_OUTPUT_FILE}")
|
||||||
|
elseif (Coverage_FORMAT STREQUAL "txt")
|
||||||
|
list(APPEND GCOVR_ADDITIONAL_ARGS --txt "${GCOVR_OUTPUT_FILE}")
|
||||||
|
elseif (Coverage_FORMAT STREQUAL "html-single")
|
||||||
|
list(APPEND GCOVR_ADDITIONAL_ARGS --html "${GCOVR_OUTPUT_FILE}")
|
||||||
|
list(APPEND GCOVR_ADDITIONAL_ARGS --html-self-contained)
|
||||||
|
elseif (Coverage_FORMAT STREQUAL "html-nested")
|
||||||
|
list(APPEND GCOVR_ADDITIONAL_ARGS --html-nested "${GCOVR_OUTPUT_FILE}")
|
||||||
|
elseif (Coverage_FORMAT STREQUAL "html-details")
|
||||||
|
list(APPEND GCOVR_ADDITIONAL_ARGS --html-details "${GCOVR_OUTPUT_FILE}")
|
||||||
|
else ()
|
||||||
|
message(FATAL_ERROR "Unsupported output style ${Coverage_FORMAT}! Aborting...")
|
||||||
|
endif ()
|
||||||
|
|
||||||
|
# Collect excludes (CMake 3.4+: Also compute absolute paths)
|
||||||
|
set(GCOVR_EXCLUDES "")
|
||||||
|
foreach (EXCLUDE ${Coverage_EXCLUDE} ${COVERAGE_EXCLUDES} ${COVERAGE_GCOVR_EXCLUDES})
|
||||||
|
if (CMAKE_VERSION VERSION_GREATER 3.4)
|
||||||
|
get_filename_component(EXCLUDE ${EXCLUDE} ABSOLUTE BASE_DIR ${BASEDIR})
|
||||||
|
endif ()
|
||||||
|
list(APPEND GCOVR_EXCLUDES "${EXCLUDE}")
|
||||||
|
endforeach ()
|
||||||
|
list(REMOVE_DUPLICATES GCOVR_EXCLUDES)
|
||||||
|
|
||||||
|
# Combine excludes to several -e arguments
|
||||||
|
set(GCOVR_EXCLUDE_ARGS "")
|
||||||
|
foreach (EXCLUDE ${GCOVR_EXCLUDES})
|
||||||
|
list(APPEND GCOVR_EXCLUDE_ARGS "-e")
|
||||||
|
list(APPEND GCOVR_EXCLUDE_ARGS "${EXCLUDE}")
|
||||||
|
endforeach ()
|
||||||
|
|
||||||
|
# Set up commands which will be run to generate coverage data Run tests
|
||||||
|
set(GCOVR_EXEC_TESTS_CMD ${Coverage_EXECUTABLE} ${Coverage_EXECUTABLE_ARGS})
|
||||||
|
|
||||||
|
# Create folder
|
||||||
|
if (DEFINED GCOVR_CREATE_FOLDER)
|
||||||
|
set(GCOVR_FOLDER_CMD ${CMAKE_COMMAND} -E make_directory ${GCOVR_CREATE_FOLDER})
|
||||||
|
else ()
|
||||||
|
set(GCOVR_FOLDER_CMD echo) # dummy
|
||||||
|
endif ()
|
||||||
|
|
||||||
|
# Running gcovr
|
||||||
|
set(GCOVR_CMD
|
||||||
|
${GCOVR_PATH}
|
||||||
|
--gcov-executable
|
||||||
|
${GCOV_TOOL}
|
||||||
|
--gcov-ignore-parse-errors=negative_hits.warn_once_per_file
|
||||||
|
-r
|
||||||
|
${BASEDIR}
|
||||||
|
${GCOVR_ADDITIONAL_ARGS}
|
||||||
|
${GCOVR_EXCLUDE_ARGS}
|
||||||
|
--object-directory=${PROJECT_BINARY_DIR}
|
||||||
|
)
|
||||||
|
|
||||||
|
if (CODE_COVERAGE_VERBOSE)
|
||||||
|
message(STATUS "Executed command report")
|
||||||
|
|
||||||
|
message(STATUS "Command to run tests: ")
|
||||||
|
string(REPLACE ";" " " GCOVR_EXEC_TESTS_CMD_SPACED "${GCOVR_EXEC_TESTS_CMD}")
|
||||||
|
message(STATUS "${GCOVR_EXEC_TESTS_CMD_SPACED}")
|
||||||
|
|
||||||
|
if (NOT GCOVR_FOLDER_CMD STREQUAL "echo")
|
||||||
|
message(STATUS "Command to create a folder: ")
|
||||||
|
string(REPLACE ";" " " GCOVR_FOLDER_CMD_SPACED "${GCOVR_FOLDER_CMD}")
|
||||||
|
message(STATUS "${GCOVR_FOLDER_CMD_SPACED}")
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
# Collect excludes (CMake 3.4+: Also compute absolute paths)
|
message(STATUS "Command to generate gcovr coverage data: ")
|
||||||
set(GCOVR_EXCLUDES "")
|
string(REPLACE ";" " " GCOVR_CMD_SPACED "${GCOVR_CMD}")
|
||||||
foreach (EXCLUDE ${Coverage_EXCLUDE} ${COVERAGE_EXCLUDES} ${COVERAGE_GCOVR_EXCLUDES})
|
message(STATUS "${GCOVR_CMD_SPACED}")
|
||||||
if (CMAKE_VERSION VERSION_GREATER 3.4)
|
endif ()
|
||||||
get_filename_component(EXCLUDE ${EXCLUDE} ABSOLUTE BASE_DIR ${BASEDIR})
|
|
||||||
endif ()
|
|
||||||
list(APPEND GCOVR_EXCLUDES "${EXCLUDE}")
|
|
||||||
endforeach ()
|
|
||||||
list(REMOVE_DUPLICATES GCOVR_EXCLUDES)
|
|
||||||
|
|
||||||
# Combine excludes to several -e arguments
|
add_custom_target(
|
||||||
set(GCOVR_EXCLUDE_ARGS "")
|
${Coverage_NAME}
|
||||||
foreach (EXCLUDE ${GCOVR_EXCLUDES})
|
COMMAND ${GCOVR_EXEC_TESTS_CMD}
|
||||||
list(APPEND GCOVR_EXCLUDE_ARGS "-e")
|
COMMAND ${GCOVR_FOLDER_CMD}
|
||||||
list(APPEND GCOVR_EXCLUDE_ARGS "${EXCLUDE}")
|
COMMAND ${GCOVR_CMD}
|
||||||
endforeach ()
|
BYPRODUCTS ${GCOVR_OUTPUT_FILE}
|
||||||
|
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
|
||||||
|
DEPENDS ${Coverage_DEPENDENCIES}
|
||||||
|
VERBATIM # Protect arguments to commands
|
||||||
|
COMMENT "Running gcovr to produce code coverage report."
|
||||||
|
)
|
||||||
|
|
||||||
# Set up commands which will be run to generate coverage data Run tests
|
# Show info where to find the report
|
||||||
set(GCOVR_EXEC_TESTS_CMD ${Coverage_EXECUTABLE} ${Coverage_EXECUTABLE_ARGS})
|
add_custom_command(
|
||||||
|
TARGET ${Coverage_NAME} POST_BUILD COMMAND ;
|
||||||
# Create folder
|
COMMENT "Code coverage report saved in ${GCOVR_OUTPUT_FILE} formatted as ${Coverage_FORMAT}"
|
||||||
if (DEFINED GCOVR_CREATE_FOLDER)
|
)
|
||||||
set(GCOVR_FOLDER_CMD ${CMAKE_COMMAND} -E make_directory ${GCOVR_CREATE_FOLDER})
|
|
||||||
else ()
|
|
||||||
set(GCOVR_FOLDER_CMD echo) # dummy
|
|
||||||
endif ()
|
|
||||||
|
|
||||||
# Running gcovr
|
|
||||||
set(GCOVR_CMD
|
|
||||||
${GCOVR_PATH}
|
|
||||||
--gcov-executable
|
|
||||||
${GCOV_TOOL}
|
|
||||||
--gcov-ignore-parse-errors=negative_hits.warn_once_per_file
|
|
||||||
-r
|
|
||||||
${BASEDIR}
|
|
||||||
${GCOVR_ADDITIONAL_ARGS}
|
|
||||||
${GCOVR_EXCLUDE_ARGS}
|
|
||||||
--object-directory=${PROJECT_BINARY_DIR})
|
|
||||||
|
|
||||||
if (CODE_COVERAGE_VERBOSE)
|
|
||||||
message(STATUS "Executed command report")
|
|
||||||
|
|
||||||
message(STATUS "Command to run tests: ")
|
|
||||||
string(REPLACE ";" " " GCOVR_EXEC_TESTS_CMD_SPACED "${GCOVR_EXEC_TESTS_CMD}")
|
|
||||||
message(STATUS "${GCOVR_EXEC_TESTS_CMD_SPACED}")
|
|
||||||
|
|
||||||
if (NOT GCOVR_FOLDER_CMD STREQUAL "echo")
|
|
||||||
message(STATUS "Command to create a folder: ")
|
|
||||||
string(REPLACE ";" " " GCOVR_FOLDER_CMD_SPACED "${GCOVR_FOLDER_CMD}")
|
|
||||||
message(STATUS "${GCOVR_FOLDER_CMD_SPACED}")
|
|
||||||
endif ()
|
|
||||||
|
|
||||||
message(STATUS "Command to generate gcovr coverage data: ")
|
|
||||||
string(REPLACE ";" " " GCOVR_CMD_SPACED "${GCOVR_CMD}")
|
|
||||||
message(STATUS "${GCOVR_CMD_SPACED}")
|
|
||||||
endif ()
|
|
||||||
|
|
||||||
add_custom_target(${Coverage_NAME}
|
|
||||||
COMMAND ${GCOVR_EXEC_TESTS_CMD}
|
|
||||||
COMMAND ${GCOVR_FOLDER_CMD}
|
|
||||||
COMMAND ${GCOVR_CMD}
|
|
||||||
BYPRODUCTS ${GCOVR_OUTPUT_FILE}
|
|
||||||
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
|
|
||||||
DEPENDS ${Coverage_DEPENDENCIES}
|
|
||||||
VERBATIM # Protect arguments to commands
|
|
||||||
COMMENT "Running gcovr to produce code coverage report.")
|
|
||||||
|
|
||||||
# Show info where to find the report
|
|
||||||
add_custom_command(TARGET ${Coverage_NAME} POST_BUILD COMMAND ;
|
|
||||||
COMMENT "Code coverage report saved in ${GCOVR_OUTPUT_FILE} formatted as ${Coverage_FORMAT}")
|
|
||||||
endfunction () # setup_target_for_coverage_gcovr
|
endfunction () # setup_target_for_coverage_gcovr
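
For orientation, here is a minimal usage sketch of the coverage target defined above, as it could be called from a consuming CMakeLists.txt. The module name, the `coverage_report` target name and the `clio_tests` binary are illustrative assumptions, not taken from this diff.

```cmake
# Hypothetical usage sketch (all names are assumptions):
include(CodeCoverage) # module providing setup_target_for_coverage_gcovr
append_coverage_compiler_flags()

setup_target_for_coverage_gcovr(
    NAME coverage_report                    # creates a `coverage_report` custom target
    FORMAT html-details                     # any format handled by the branches above
    EXECUTABLE clio_tests                   # test binary run before gcovr is invoked
    EXCLUDE "${PROJECT_SOURCE_DIR}/tests/*" # forwarded to gcovr as -e arguments
    DEPENDENCIES clio_tests
)
```

Building the resulting target (for example `cmake --build . --target coverage_report`) would run the tests and then invoke gcovr with the arguments assembled above.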

function (append_coverage_compiler_flags)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
message(STATUS "Appending code coverage compiler flags: ${COVERAGE_COMPILER_FLAGS}")
endfunction () # append_coverage_compiler_flags

# Setup coverage for specific library
function (append_coverage_compiler_flags_to_target name mode)
separate_arguments(_flag_list NATIVE_COMMAND "${COVERAGE_COMPILER_FLAGS}")
target_compile_options(${name} ${mode} ${_flag_list})
if (CMAKE_C_COMPILER_ID STREQUAL "GNU" OR CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
target_link_libraries(${name} ${mode} gcov)
endif ()
endfunction ()
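
A one-line sketch of the per-target variant; the `clio_tests` target and the `PRIVATE` mode are assumptions for illustration only.

```cmake
# Hypothetical: apply coverage flags (and the gcov link on GNU) to a single target.
append_coverage_compiler_flags_to_target(clio_tests PRIVATE)
```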
@@ -11,8 +11,10 @@ set(DOXYGEN_IN ${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile)
set(DOXYGEN_OUT ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile)

configure_file(${DOXYGEN_IN} ${DOXYGEN_OUT})
add_custom_target(docs
COMMAND ${DOXYGEN_EXECUTABLE} ${DOXYGEN_OUT}
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Generating API documentation with Doxygen"
VERBATIM)
add_custom_target(
docs
COMMAND ${DOXYGEN_EXECUTABLE} ${DOXYGEN_OUT}
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Generating API documentation with Doxygen"
VERBATIM
)
@@ -1,11 +1,11 @@
if (DEFINED CMAKE_LINKER_TYPE)
message(STATUS "Custom linker is already set: ${CMAKE_LINKER_TYPE}")
return()
endif ()

find_program(MOLD_PATH mold)

if (MOLD_PATH AND CMAKE_SYSTEM_NAME STREQUAL "Linux")
message(STATUS "Using Mold linker: ${MOLD_PATH}")
set(CMAKE_LINKER_TYPE MOLD)
endif ()
@@ -31,51 +31,52 @@ set(COMPILER_FLAGS
# -Wduplicated-cond -Wlogical-op -Wuseless-cast ) endif ()

if (is_clang)
list(APPEND COMPILER_FLAGS -Wshadow # gcc is to aggressive with shadowing
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=78147
)
endif ()

if (is_appleclang)
list(APPEND COMPILER_FLAGS -Wreorder-init-list)
endif ()

if (san)
# When building with sanitizers some compilers will actually produce extra warnings/errors. We don't want this yet,
# at least not until we have fixed all runtime issues reported by the sanitizers. Once that is done we can start
# removing some of these and trying to fix it in our codebase. We can never remove all of below because most of them
# are reported from deep inside libraries like boost or libxrpl.
# When building with sanitizers some compilers will actually produce extra warnings/errors. We don't want this yet, at
# least not until we have fixed all runtime issues reported by the sanitizers. Once that is done we can start removing
# some of these and trying to fix it in our codebase. We can never remove all of below because most of them are
# reported from deep inside libraries like boost or libxrpl.
#
# TODO: Address in https://github.com/XRPLF/clio/issues/1885
list(APPEND
COMPILER_FLAGS
-Wno-error=tsan # Disables treating TSAN warnings as errors
-Wno-tsan # Disables TSAN warnings (thread-safety analysis)
-Wno-uninitialized # Disables warnings about uninitialized variables (AddressSanitizer,
# UndefinedBehaviorSanitizer, etc.)
-Wno-stringop-overflow # Disables warnings about potential string operation overflows (AddressSanitizer)
-Wno-unsafe-buffer-usage # Disables warnings about unsafe memory operations (AddressSanitizer)
-Wno-frame-larger-than # Disables warnings about stack frame size being too large (AddressSanitizer)
-Wno-unused-function # Disables warnings about unused functions (LeakSanitizer, memory-related issues)
-Wno-unused-but-set-variable # Disables warnings about unused variables (MemorySanitizer)
-Wno-thread-safety-analysis # Disables warnings related to thread safety usage (ThreadSanitizer)
-Wno-thread-safety # Disables warnings related to thread safety usage (ThreadSanitizer)
-Wno-sign-compare # Disables warnings about signed/unsigned comparison (UndefinedBehaviorSanitizer)
-Wno-nonnull # Disables warnings related to null pointer dereferencing (UndefinedBehaviorSanitizer)
-Wno-address # Disables warnings about address-related issues (UndefinedBehaviorSanitizer)
-Wno-array-bounds # Disables array bounds checks (UndefinedBehaviorSanitizer)
)
list(
APPEND
COMPILER_FLAGS
-Wno-error=tsan # Disables treating TSAN warnings as errors
-Wno-tsan # Disables TSAN warnings (thread-safety analysis)
-Wno-uninitialized # Disables warnings about uninitialized variables (AddressSanitizer, UndefinedBehaviorSanitizer,
# etc.)
-Wno-stringop-overflow # Disables warnings about potential string operation overflows (AddressSanitizer)
-Wno-unsafe-buffer-usage # Disables warnings about unsafe memory operations (AddressSanitizer)
-Wno-frame-larger-than # Disables warnings about stack frame size being too large (AddressSanitizer)
-Wno-unused-function # Disables warnings about unused functions (LeakSanitizer, memory-related issues)
-Wno-unused-but-set-variable # Disables warnings about unused variables (MemorySanitizer)
-Wno-thread-safety-analysis # Disables warnings related to thread safety usage (ThreadSanitizer)
-Wno-thread-safety # Disables warnings related to thread safety usage (ThreadSanitizer)
-Wno-sign-compare # Disables warnings about signed/unsigned comparison (UndefinedBehaviorSanitizer)
-Wno-nonnull # Disables warnings related to null pointer dereferencing (UndefinedBehaviorSanitizer)
-Wno-address # Disables warnings about address-related issues (UndefinedBehaviorSanitizer)
-Wno-array-bounds # Disables array bounds checks (UndefinedBehaviorSanitizer)
)
endif ()

# See https://github.com/cpp-best-practices/cppbestpractices/blob/master/02-Use_the_Tools_Available.md#gcc--clang for
# the flags description

if (time_trace)
if (is_clang OR is_appleclang)
list(APPEND COMPILER_FLAGS -ftime-trace)
else ()
message(FATAL_ERROR "Clang or AppleClang is required to use `-ftime-trace`")
endif ()
endif ()

target_compile_options(clio_options INTERFACE ${COMPILER_FLAGS})
@@ -2,10 +2,10 @@ include(CheckIncludeFileCXX)

check_include_file_cxx("source_location" SOURCE_LOCATION_AVAILABLE)
if (SOURCE_LOCATION_AVAILABLE)
target_compile_definitions(clio_options INTERFACE "HAS_SOURCE_LOCATION")
endif ()

check_include_file_cxx("experimental/source_location" EXPERIMENTAL_SOURCE_LOCATION_AVAILABLE)
if (EXPERIMENTAL_SOURCE_LOCATION_AVAILABLE)
target_compile_definitions(clio_options INTERFACE "HAS_EXPERIMENTAL_SOURCE_LOCATION")
endif ()
@@ -1,11 +1,11 @@
if ("${san}" STREQUAL "")
target_compile_definitions(clio_options INTERFACE BOOST_STACKTRACE_LINK)
target_compile_definitions(clio_options INTERFACE BOOST_STACKTRACE_USE_BACKTRACE)
find_package(libbacktrace REQUIRED CONFIG)
else ()
# Some sanitizers (TSAN and ASAN for sure) can't be used with libbacktrace because they have their own backtracing
# capabilities and there are conflicts. In any case, this makes sure Clio code knows that backtrace is not
# available. See relevant conan profiles for sanitizers where we disable stacktrace in Boost explicitly.
# capabilities and there are conflicts. In any case, this makes sure Clio code knows that backtrace is not available.
# See relevant conan profiles for sanitizers where we disable stacktrace in Boost explicitly.
target_compile_definitions(clio_options INTERFACE CLIO_WITHOUT_STACKTRACE)
message(STATUS "Sanitizer enabled, disabling stacktrace")
endif ()
@@ -1,5 +1,5 @@
find_package(spdlog REQUIRED)

if (NOT TARGET spdlog::spdlog)
message(FATAL_ERROR "spdlog::spdlog target not found")
endif ()
@@ -93,42 +93,3 @@ To completely disable Prometheus metrics add `"prometheus": { "enabled": false }
It is important to know that Clio responds to Prometheus request only if they are admin requests. If you are using the admin password feature, the same password should be provided in the Authorization header of Prometheus requests.

You can find an example Docker Compose file, with Prometheus and Grafana configs, in [examples/infrastructure](../docs/examples/infrastructure/).

## Ledger cache file

Since version 2.7.0, Clio supports saving the ledger cache to a local file on shutdown and loading it on startup. This feature is disabled by default but can significantly improve restart times.

### Benefits

- **Faster startup**: Loading cache from a file takes less than a minute, compared to 40-90 minutes on Mainnet when loading from the database.
- **Reduced database load**: Clio doesn't put extra load on the database when starting with a cache file.
- **Improved availability**: Faster restart times mean less downtime during maintenance or updates.

> [!NOTE]
> This feature only works when Clio is restarted. When starting Clio for the first time, the cache must be loaded from `rippled` or the database as usual.

### Configuration

To enable the ledger cache file feature, specify the [`cache.file.path`](./config-description.md#cachefilepath) option in your `config.json`:

```json
"cache": {
"file": {
"path": "/path/to/cache/file"
}
}
```

You can optionally configure additional settings such as [`cache.file.max_sequence_age`](./config-description.md#cachefilemax_sequence_age) and [`cache.file.async_save`](./config-description.md#cachefileasync_save) to fine-tune the behavior. For a complete list of available options and their default values, see the [Configuration Description](./config-description.md#cachefilepath) documentation.
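
As a rough sketch only, a fuller `cache` section might combine these options as shown below; the option names come from the text above, but the example values are made up and are not documented defaults.

```json
"cache": {
    "file": {
        "path": "/path/to/cache/file",
        "max_sequence_age": 5000,
        "async_save": true
    }
}
```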

### How it works

1. **On shutdown**: Clio saves the current ledger cache to the specified file path. The file includes a hash for integrity verification.
2. **On startup**: Clio checks if a cache file exists at the configured path. If the file exists, Clio will:
- Verify the file's integrity to ensure it is complete and not corrupted.
- Compare the latest ledger sequence in the cache file with the latest sequence in the database.
- Use the cache file only if the difference is less than [`cache.file.max_sequence_age`](./config-description.md#cachefilemax_sequence_age).
- If validation fails or the cache is too old, Clio will fall back to loading from the database.

> [!IMPORTANT]
> The cache file path should point to a location with sufficient disk space. On typical deployments, the cache file size can be several gigabytes.
@@ -2,11 +2,12 @@ add_library(clio_app)
target_sources(clio_app PRIVATE CliArgs.cpp ClioApplication.cpp Stopper.cpp WebHandlers.cpp)

target_link_libraries(
clio_app
PUBLIC clio_cluster
clio_etl
clio_feed
clio_migration
clio_rpc
clio_web
PRIVATE Boost::program_options)
PRIVATE Boost::program_options
)
@@ -29,8 +29,6 @@
#include "etl/ETLService.hpp"
#include "etl/LoadBalancer.hpp"
#include "etl/NetworkValidatedLedgers.hpp"
#include "etl/SystemState.hpp"
#include "etl/WriterState.hpp"
#include "feed/SubscriptionManager.hpp"
#include "migration/MigrationInspectorFactory.hpp"
#include "rpc/Counters.hpp"
@@ -123,11 +121,7 @@ ClioApplication::run(bool const useNgWebServer)
// Interface to the database
auto backend = data::makeBackend(config_, cache);

auto systemState = etl::SystemState::makeSystemState(config_);
cluster::ClusterCommunicationService clusterCommunicationService{backend};

cluster::ClusterCommunicationService clusterCommunicationService{
backend, std::make_unique<etl::WriterState>(systemState)
};
clusterCommunicationService.run();

auto const amendmentCenter = std::make_shared<data::AmendmentCenter const>(backend);
@@ -157,9 +151,7 @@ ClioApplication::run(bool const useNgWebServer)
);

// ETL is responsible for writing and publishing to streams. In read-only mode, ETL only publishes
auto etl = etl::ETLService::makeETLService(
config_, std::move(systemState), ctx, backend, subscriptions, balancer, ledgers
);
auto etl = etl::ETLService::makeETLService(config_, ctx, backend, subscriptions, balancer, ledgers);

auto workQueue = rpc::WorkQueue::makeWorkQueue(config_);
auto counters = rpc::Counters::makeCounters(workQueue);
@@ -205,16 +197,7 @@ ClioApplication::run(bool const useNgWebServer)
}

appStopper_.setOnStop(
Stopper::makeOnStopCallback(
httpServer.value(),
*balancer,
*etl,
*subscriptions,
*backend,
cacheSaver,
clusterCommunicationService,
ioc
)
Stopper::makeOnStopCallback(httpServer.value(), *balancer, *etl, *subscriptions, *backend, cacheSaver, ioc)
);

// Blocks until stopped.
@@ -230,9 +213,7 @@ ClioApplication::run(bool const useNgWebServer)

auto const httpServer = web::makeHttpServer(config_, ioc, dosGuard, handler, cache);
appStopper_.setOnStop(
Stopper::makeOnStopCallback(
*httpServer, *balancer, *etl, *subscriptions, *backend, cacheSaver, clusterCommunicationService, ioc
)
Stopper::makeOnStopCallback(*httpServer, *balancer, *etl, *subscriptions, *backend, cacheSaver, ioc)
);

// Blocks until stopped.
@@ -19,7 +19,6 @@

#pragma once

#include "cluster/Concepts.hpp"
#include "data/BackendInterface.hpp"
#include "data/LedgerCacheSaver.hpp"
#include "etl/ETLServiceInterface.hpp"
@@ -83,14 +82,10 @@ public:
* @param subscriptions The subscription manager to stop.
* @param backend The backend to stop.
* @param cacheSaver The ledger cache saver
* @param clusterCommunicationService The cluster communication service to stop.
* @param ioc The io_context to stop.
* @return The callback to be called on application stop.
*/
template <
web::SomeServer ServerType,
data::SomeLedgerCacheSaver LedgerCacheSaverType,
cluster::SomeClusterCommunicationService ClusterCommunicationServiceType>
template <web::SomeServer ServerType, data::SomeLedgerCacheSaver LedgerCacheSaverType>
static std::function<void(boost::asio::yield_context)>
makeOnStopCallback(
ServerType& server,
@@ -99,7 +94,6 @@ public:
feed::SubscriptionManagerInterface& subscriptions,
data::BackendInterface& backend,
LedgerCacheSaverType& cacheSaver,
ClusterCommunicationServiceType& clusterCommunicationService,
boost::asio::io_context& ioc
)
{
@@ -117,8 +111,6 @@ public:
});
coroutineGroup.asyncWait(yield);

clusterCommunicationService.stop();

etl.stop();
LOG(util::LogService::info()) << "ETL stopped";
@@ -1,134 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include "cluster/Backend.hpp"

#include "cluster/ClioNode.hpp"
#include "data/BackendInterface.hpp"
#include "etl/WriterState.hpp"

#include <boost/asio/spawn.hpp>
#include <boost/asio/thread_pool.hpp>
#include <boost/json/parse.hpp>
#include <boost/json/serialize.hpp>
#include <boost/json/value.hpp>
#include <boost/json/value_from.hpp>
#include <boost/json/value_to.hpp>
#include <boost/uuid/random_generator.hpp>
#include <boost/uuid/uuid.hpp>
#include <fmt/format.h>

#include <chrono>
#include <memory>
#include <utility>
#include <vector>

namespace cluster {

Backend::Backend(
boost::asio::thread_pool& ctx,
std::shared_ptr<data::BackendInterface> backend,
std::unique_ptr<etl::WriterStateInterface const> writerState,
std::chrono::steady_clock::duration readInterval,
std::chrono::steady_clock::duration writeInterval
)
: backend_(std::move(backend))
, writerState_(std::move(writerState))
, readerTask_(readInterval, ctx)
, writerTask_(writeInterval, ctx)
, selfUuid_(std::make_shared<boost::uuids::uuid>(boost::uuids::random_generator{}()))
{
}

void
Backend::run()
{
readerTask_.run([this](boost::asio::yield_context yield) {
auto clusterData = doRead(yield);
onNewState_(selfUuid_, std::make_shared<ClusterData>(std::move(clusterData)));
});

writerTask_.run([this]() { doWrite(); });
}

Backend::~Backend()
{
stop();
}

void
Backend::stop()
{
readerTask_.stop();
writerTask_.stop();
}

ClioNode::CUuid
Backend::selfId() const
{
return selfUuid_;
}

Backend::ClusterData
Backend::doRead(boost::asio::yield_context yield)
{
BackendInterface::ClioNodesDataFetchResult expectedResult;
try {
expectedResult = backend_->fetchClioNodesData(yield);
} catch (...) {
expectedResult = std::unexpected{"Failed to fetch Clio nodes data"};
}

if (!expectedResult.has_value()) {
return std::unexpected{std::move(expectedResult).error()};
}

std::vector<ClioNode> otherNodesData;
for (auto const& [uuid, nodeDataStr] : expectedResult.value()) {
if (uuid == *selfUuid_) {
continue;
}

boost::system::error_code errorCode;
auto const json = boost::json::parse(nodeDataStr, errorCode);
if (errorCode.failed()) {
return std::unexpected{fmt::format("Error parsing json from DB: {}", nodeDataStr)};
}

auto expectedNodeData = boost::json::try_value_to<ClioNode>(json);
if (expectedNodeData.has_error()) {
return std::unexpected{fmt::format("Error converting json to ClioNode: {}", nodeDataStr)};
}
*expectedNodeData->uuid = uuid;
otherNodesData.push_back(std::move(expectedNodeData).value());
}
otherNodesData.push_back(ClioNode::from(selfUuid_, *writerState_));
return otherNodesData;
}

void
Backend::doWrite()
{
auto const selfData = ClioNode::from(selfUuid_, *writerState_);
boost::json::value jsonValue{};
boost::json::value_from(selfData, jsonValue);
backend_->writeNodeMessage(*selfData.uuid, boost::json::serialize(jsonValue.as_object()));
}

} // namespace cluster
@@ -1,147 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include "cluster/ClioNode.hpp"
#include "cluster/impl/RepeatedTask.hpp"
#include "data/BackendInterface.hpp"
#include "etl/WriterState.hpp"
#include "util/log/Logger.hpp"

#include <boost/asio/any_io_executor.hpp>
#include <boost/asio/cancellation_signal.hpp>
#include <boost/asio/execution_context.hpp>
#include <boost/asio/executor.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/strand.hpp>
#include <boost/asio/thread_pool.hpp>
#include <boost/signals2/connection.hpp>
#include <boost/signals2/signal.hpp>
#include <boost/signals2/variadic_signal.hpp>
#include <boost/uuid/uuid.hpp>

#include <chrono>
#include <concepts>
#include <memory>
#include <string>
#include <vector>

namespace cluster {

/**
* @brief Backend communication handler for cluster state synchronization.
*
* This class manages reading and writing cluster state information to/from the backend database.
* It periodically reads the state of other nodes in the cluster and writes the current node's state,
* enabling cluster-wide coordination and awareness.
*/
class Backend {
public:
/** @brief Type representing cluster data result - either a vector of nodes or an error message */
using ClusterData = std::expected<std::vector<ClioNode>, std::string>;

private:
util::Logger log_{"ClusterCommunication"};

std::shared_ptr<data::BackendInterface> backend_;
std::unique_ptr<etl::WriterStateInterface const> writerState_;

impl::RepeatedTask<boost::asio::thread_pool> readerTask_;
impl::RepeatedTask<boost::asio::thread_pool> writerTask_;

ClioNode::Uuid selfUuid_;

boost::signals2::signal<void(ClioNode::CUuid, std::shared_ptr<ClusterData const>)> onNewState_;

public:
/**
* @brief Construct a Backend communication handler.
*
* @param ctx The execution context for asynchronous operations
* @param backend Interface to the backend database
* @param writerState State indicating whether this node is writing to the database
* @param readInterval How often to read cluster state from the backend
* @param writeInterval How often to write this node's state to the backend
*/
Backend(
boost::asio::thread_pool& ctx,
std::shared_ptr<data::BackendInterface> backend,
std::unique_ptr<etl::WriterStateInterface const> writerState,
std::chrono::steady_clock::duration readInterval,
std::chrono::steady_clock::duration writeInterval
);

~Backend();

Backend(Backend&&) = delete;
Backend&
operator=(Backend&&) = delete;
Backend(Backend const&) = delete;
Backend&
operator=(Backend const&) = delete;

/**
* @brief Start the backend read and write tasks.
*
* Begins periodic reading of cluster state from the backend and writing of this node's state.
*/
void
run();

/**
* @brief Stop the backend read and write tasks.
*
* Stops all periodic tasks and waits for them to complete.
*/
void
stop();

/**
* @brief Subscribe to new cluster state notifications.
*
* @tparam S Callable type accepting (ClioNode::cUUID, ClusterData)
* @param s Subscriber callback to be invoked when new cluster state is available
* @return A connection object that can be used to unsubscribe
*/
template <typename S>
requires std::invocable<S, ClioNode::CUuid, std::shared_ptr<ClusterData const>>
boost::signals2::connection
subscribeToNewState(S&& s)
{
return onNewState_.connect(s);
}

/**
* @brief Get the UUID of this node in the cluster.
*
* @return The UUID of this node.
*/
ClioNode::CUuid
selfId() const;

private:
ClusterData
doRead(boost::asio::yield_context yield);

void
doWrite();
};

} // namespace cluster
@@ -1,6 +1,5 @@
add_library(clio_cluster)

target_sources(clio_cluster PRIVATE Backend.cpp ClioNode.cpp ClusterCommunicationService.cpp Metrics.cpp
WriterDecider.cpp)
target_sources(clio_cluster PRIVATE ClioNode.cpp ClusterCommunicationService.cpp)

target_link_libraries(clio_cluster PRIVATE clio_util clio_data)
@@ -19,7 +19,6 @@

#include "cluster/ClioNode.hpp"

#include "etl/WriterState.hpp"
#include "util/TimeUtils.hpp"

#include <boost/json/conversion.hpp>
@@ -27,72 +26,39 @@
#include <boost/json/value.hpp>
#include <boost/uuid/uuid.hpp>

#include <chrono>
#include <cstdint>
#include <memory>
#include <stdexcept>
#include <string>
#include <string_view>
#include <utility>

namespace cluster {

namespace {

struct JsonFields {
struct Fields {
static constexpr std::string_view const kUPDATE_TIME = "update_time";
static constexpr std::string_view const kDB_ROLE = "db_role";
};

} // namespace

ClioNode
ClioNode::from(ClioNode::Uuid uuid, etl::WriterStateInterface const& writerState)
{
auto const dbRole = [&writerState]() {
if (writerState.isReadOnly()) {
return ClioNode::DbRole::ReadOnly;
}
if (writerState.isFallback()) {
return ClioNode::DbRole::Fallback;
}
if (writerState.isLoadingCache()) {
return ClioNode::DbRole::LoadingCache;
}

return writerState.isWriting() ? ClioNode::DbRole::Writer : ClioNode::DbRole::NotWriter;
}();
return ClioNode{.uuid = std::move(uuid), .updateTime = std::chrono::system_clock::now(), .dbRole = dbRole};
}

void
tag_invoke(boost::json::value_from_tag, boost::json::value& jv, ClioNode const& node)
{
jv = {
{JsonFields::kUPDATE_TIME, util::systemTpToUtcStr(node.updateTime, ClioNode::kTIME_FORMAT)},
{JsonFields::kDB_ROLE, static_cast<int64_t>(node.dbRole)}
{Fields::kUPDATE_TIME, util::systemTpToUtcStr(node.updateTime, ClioNode::kTIME_FORMAT)},
};
}

ClioNode
tag_invoke(boost::json::value_to_tag<ClioNode>, boost::json::value const& jv)
{
auto const& updateTimeStr = jv.as_object().at(JsonFields::kUPDATE_TIME).as_string();
auto const& updateTimeStr = jv.as_object().at(Fields::kUPDATE_TIME).as_string();
auto const updateTime = util::systemTpFromUtcStr(std::string(updateTimeStr), ClioNode::kTIME_FORMAT);
if (!updateTime.has_value()) {
throw std::runtime_error("Failed to parse update time");
}

auto const dbRoleValue = jv.as_object().at(JsonFields::kDB_ROLE).as_int64();
if (dbRoleValue > static_cast<int64_t>(ClioNode::DbRole::MAX))
throw std::runtime_error("Invalid db_role value");
return ClioNode{.uuid = std::make_shared<boost::uuids::uuid>(), .updateTime = updateTime.value()};

return ClioNode{
// Json data doesn't contain uuid so leaving it empty here. It will be filled outside of this parsing
.uuid = std::make_shared<boost::uuids::uuid>(),
.updateTime = updateTime.value(),
.dbRole = static_cast<ClioNode::DbRole>(dbRoleValue)
};
}

} // namespace cluster
@@ -19,8 +19,6 @@

#pragma once

#include "etl/WriterState.hpp"

#include <boost/json/conversion.hpp>
#include <boost/json/value.hpp>
#include <boost/uuid/uuid.hpp>
@@ -39,37 +37,16 @@ struct ClioNode {
*/
static constexpr char const* kTIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ";

/**
* @brief Database role of a node in the cluster.
*
* Roles are used to coordinate which node writes to the database:
* - ReadOnly: Node is configured to never write (strict read-only mode)
* - NotWriter: Node can write but is currently not the designated writer
* - Writer: Node is actively writing to the database
* - Fallback: Node is using the fallback writer decision mechanism
*
* When any node in the cluster is in Fallback mode, the entire cluster switches
* from the cluster communication mechanism to the slower but more reliable
* database-based conflict detection mechanism.
*/
enum class DbRole { ReadOnly = 0, LoadingCache = 1, NotWriter = 2, Writer = 3, Fallback = 4, MAX = 4 };
// enum class WriterRole {
// ReadOnly,
// NotWriter,
// Writer
// };

using Uuid = std::shared_ptr<boost::uuids::uuid>;
using CUuid = std::shared_ptr<boost::uuids::uuid const>;
std::shared_ptr<boost::uuids::uuid> uuid; ///< The UUID of the node.

Uuid uuid; ///< The UUID of the node.
std::chrono::system_clock::time_point updateTime; ///< The time the data about the node was last updated.
DbRole dbRole; ///< The database role of the node

/**
* @brief Create a ClioNode from writer state.
*
* @param uuid The UUID of the node
* @param writerState The writer state to determine the node's database role
* @return A ClioNode with the current time and role derived from writerState
*/
static ClioNode
from(Uuid uuid, etl::WriterStateInterface const& writerState);
// WriterRole writerRole;
};

void
@@ -19,37 +19,98 @@

#include "cluster/ClusterCommunicationService.hpp"

#include "cluster/ClioNode.hpp"
#include "data/BackendInterface.hpp"
#include "etl/WriterState.hpp"
#include "util/Assert.hpp"
#include "util/Spawn.hpp"
#include "util/log/Logger.hpp"

#include <boost/asio/bind_cancellation_slot.hpp>
#include <boost/asio/cancellation_type.hpp>
#include <boost/asio/error.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/use_future.hpp>
#include <boost/json/parse.hpp>
#include <boost/json/serialize.hpp>
#include <boost/json/value.hpp>
#include <boost/json/value_from.hpp>
#include <boost/json/value_to.hpp>
#include <boost/uuid/random_generator.hpp>
#include <boost/uuid/uuid.hpp>

#include <chrono>
#include <ctime>
#include <latch>
#include <memory>
#include <string>
#include <utility>
#include <vector>

namespace {
constexpr auto kTOTAL_WORKERS = 2uz; // 1 reading and 1 writing worker (coroutines)
} // namespace

namespace cluster {

ClusterCommunicationService::ClusterCommunicationService(
std::shared_ptr<data::BackendInterface> backend,
std::unique_ptr<etl::WriterStateInterface> writerState,
std::chrono::steady_clock::duration readInterval,
std::chrono::steady_clock::duration writeInterval
)
: backend_(ctx_, std::move(backend), writerState->clone(), readInterval, writeInterval)
, writerDecider_(ctx_, std::move(writerState))
: backend_(std::move(backend))
, readInterval_(readInterval)
, writeInterval_(writeInterval)
, finishedCountdown_(kTOTAL_WORKERS)
, selfData_{ClioNode{
.uuid = std::make_shared<boost::uuids::uuid>(boost::uuids::random_generator{}()),
.updateTime = std::chrono::system_clock::time_point{}
}}
{
nodesInClusterMetric_.set(1); // The node always sees itself
isHealthy_ = true;
}

void
ClusterCommunicationService::run()
{
backend_.subscribeToNewState([this](auto&&... args) {
metrics_.onNewState(std::forward<decltype(args)>(args)...);
ASSERT(not running_ and not stopped_, "Can only be ran once");
running_ = true;

util::spawn(strand_, [this](boost::asio::yield_context yield) {
boost::asio::steady_timer timer(yield.get_executor());
boost::system::error_code ec;

while (running_) {
timer.expires_after(readInterval_);
auto token = cancelSignal_.slot();
timer.async_wait(boost::asio::bind_cancellation_slot(token, yield[ec]));

if (ec == boost::asio::error::operation_aborted or not running_)
break;

doRead(yield);
}

finishedCountdown_.count_down(1);
});
backend_.subscribeToNewState([this](auto&&... args) {
writerDecider_.onNewState(std::forward<decltype(args)>(args)...);
util::spawn(strand_, [this](boost::asio::yield_context yield) {
boost::asio::steady_timer timer(yield.get_executor());
boost::system::error_code ec;

while (running_) {
doWrite();
timer.expires_after(writeInterval_);
auto token = cancelSignal_.slot();
timer.async_wait(boost::asio::bind_cancellation_slot(token, yield[ec]));

if (ec == boost::asio::error::operation_aborted or not running_)
break;
}

finishedCountdown_.count_down(1);
});
backend_.run();
}

ClusterCommunicationService::~ClusterCommunicationService()
@@ -60,7 +121,107 @@ ClusterCommunicationService::~ClusterCommunicationService()
void
ClusterCommunicationService::stop()
{
backend_.stop();
if (stopped_)
return;

stopped_ = true;

// for ASAN to see through concurrency correctly we need to exit all coroutines before joining the ctx
running_ = false;

// cancelSignal_ is not thread safe so we execute emit on the same strand
boost::asio::spawn(
strand_, [this](auto&&) { cancelSignal_.emit(boost::asio::cancellation_type::all); }, boost::asio::use_future
)
.wait();
finishedCountdown_.wait();

ctx_.join();
}

std::shared_ptr<boost::uuids::uuid>
ClusterCommunicationService::selfUuid() const
{
// Uuid never changes so it is safe to copy it without using strand_
return selfData_.uuid;
}

ClioNode
ClusterCommunicationService::selfData() const
{
ClioNode result{};
util::spawn(strand_, [this, &result](boost::asio::yield_context) { result = selfData_; });
return result;
}

std::expected<std::vector<ClioNode>, std::string>
ClusterCommunicationService::clusterData() const
{
if (not isHealthy_) {
return std::unexpected{"Service is not healthy"};
}
std::vector<ClioNode> result;
util::spawn(strand_, [this, &result](boost::asio::yield_context) {
result = otherNodesData_;
result.push_back(selfData_);
});
return result;
}

void
ClusterCommunicationService::doRead(boost::asio::yield_context yield)
{
otherNodesData_.clear();

BackendInterface::ClioNodesDataFetchResult expectedResult;
try {
expectedResult = backend_->fetchClioNodesData(yield);
} catch (...) {
expectedResult = std::unexpected{"Failed to fecth Clio nodes data"};
}

if (!expectedResult.has_value()) {
LOG(log_.error()) << "Failed to fetch nodes data";
isHealthy_ = false;
return;
}

// Create a new vector here to not have partially parsed data in otherNodesData_
std::vector<ClioNode> otherNodesData;
for (auto const& [uuid, nodeDataStr] : expectedResult.value()) {
if (uuid == *selfData_.uuid) {
continue;
}

boost::system::error_code errorCode;
auto const json = boost::json::parse(nodeDataStr, errorCode);
if (errorCode.failed()) {
LOG(log_.error()) << "Error parsing json from DB: " << nodeDataStr;
isHealthy_ = false;
return;
}

auto expectedNodeData = boost::json::try_value_to<ClioNode>(json);
if (expectedNodeData.has_error()) {
LOG(log_.error()) << "Error converting json to ClioNode: " << json;
isHealthy_ = false;
return;
}
*expectedNodeData->uuid = uuid;
otherNodesData.push_back(std::move(expectedNodeData).value());
}
otherNodesData_ = std::move(otherNodesData);
nodesInClusterMetric_.set(otherNodesData_.size() + 1);
isHealthy_ = true;
}

void
ClusterCommunicationService::doWrite()
{
selfData_.updateTime = std::chrono::system_clock::now();
boost::json::value jsonValue{};
boost::json::value_from(selfData_, jsonValue);
backend_->writeNodeMessage(*selfData_.uuid, boost::json::serialize(jsonValue.as_object()));
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace cluster
|
} // namespace cluster
|
||||||
|
|||||||
@@ -19,12 +19,13 @@
|
|||||||
|
|
||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include "cluster/Backend.hpp"
|
#include "cluster/ClioNode.hpp"
|
||||||
#include "cluster/Concepts.hpp"
|
#include "cluster/ClusterCommunicationServiceInterface.hpp"
|
||||||
#include "cluster/Metrics.hpp"
|
|
||||||
#include "cluster/WriterDecider.hpp"
|
|
||||||
#include "data/BackendInterface.hpp"
|
#include "data/BackendInterface.hpp"
|
||||||
#include "etl/WriterState.hpp"
|
#include "util/log/Logger.hpp"
|
||||||
|
#include "util/prometheus/Bool.hpp"
|
||||||
|
#include "util/prometheus/Gauge.hpp"
|
||||||
|
#include "util/prometheus/Prometheus.hpp"
|
||||||
|
|
||||||
#include <boost/asio/cancellation_signal.hpp>
|
#include <boost/asio/cancellation_signal.hpp>
|
||||||
#include <boost/asio/spawn.hpp>
|
#include <boost/asio/spawn.hpp>
|
||||||
@@ -32,49 +33,67 @@
|
|||||||
#include <boost/asio/thread_pool.hpp>
|
#include <boost/asio/thread_pool.hpp>
|
||||||
#include <boost/uuid/uuid.hpp>
|
#include <boost/uuid/uuid.hpp>
|
||||||
|
|
||||||
|
#include <atomic>
|
||||||
#include <chrono>
|
#include <chrono>
|
||||||
|
#include <latch>
|
||||||
#include <memory>
|
#include <memory>
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
namespace cluster {
|
namespace cluster {
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief Service to post and read messages to/from the cluster. It uses a backend to communicate with the cluster.
|
* @brief Service to post and read messages to/from the cluster. It uses a backend to communicate with the cluster.
|
||||||
*/
|
*/
|
||||||
class ClusterCommunicationService : public ClusterCommunicationServiceTag {
|
class ClusterCommunicationService : public ClusterCommunicationServiceInterface {
|
||||||
|
util::prometheus::GaugeInt& nodesInClusterMetric_ = PrometheusService::gaugeInt(
|
||||||
|
"cluster_nodes_total_number",
|
||||||
|
{},
|
||||||
|
"Total number of nodes this node can detect in the cluster."
|
||||||
|
);
|
||||||
|
util::prometheus::Bool isHealthy_ = PrometheusService::boolMetric(
|
||||||
|
"cluster_communication_is_healthy",
|
||||||
|
{},
|
||||||
|
"Whether cluster communication service is operating healthy (1 - healthy, 0 - we have a problem)"
|
||||||
|
);
|
||||||
|
|
||||||
// TODO: Use util::async::CoroExecutionContext after https://github.com/XRPLF/clio/issues/1973 is implemented
|
// TODO: Use util::async::CoroExecutionContext after https://github.com/XRPLF/clio/issues/1973 is implemented
|
||||||
boost::asio::thread_pool ctx_{1};
|
boost::asio::thread_pool ctx_{1};
|
||||||
Backend backend_;
|
boost::asio::strand<boost::asio::thread_pool::executor_type> strand_ = boost::asio::make_strand(ctx_);
|
||||||
Metrics metrics_;
|
|
||||||
WriterDecider writerDecider_;
|
util::Logger log_{"ClusterCommunication"};
|
||||||
|
|
||||||
|
std::shared_ptr<data::BackendInterface> backend_;
|
||||||
|
|
||||||
|
std::chrono::steady_clock::duration readInterval_;
|
||||||
|
std::chrono::steady_clock::duration writeInterval_;
|
||||||
|
|
||||||
|
boost::asio::cancellation_signal cancelSignal_;
|
||||||
|
std::latch finishedCountdown_;
|
||||||
|
std::atomic_bool running_ = false;
|
||||||
|
bool stopped_ = false;
|
||||||
|
|
||||||
|
ClioNode selfData_;
|
||||||
|
std::vector<ClioNode> otherNodesData_;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
static constexpr std::chrono::milliseconds kDEFAULT_READ_INTERVAL{1000};
|
static constexpr std::chrono::milliseconds kDEFAULT_READ_INTERVAL{2100};
|
||||||
static constexpr std::chrono::milliseconds kDEFAULT_WRITE_INTERVAL{1000};
|
static constexpr std::chrono::milliseconds kDEFAULT_WRITE_INTERVAL{1200};
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief Construct a new Cluster Communication Service object.
|
* @brief Construct a new Cluster Communication Service object.
|
||||||
*
|
*
|
||||||
* @param backend The backend to use for communication.
|
* @param backend The backend to use for communication.
|
||||||
* @param writerState The state showing whether clio is writing to the database.
|
|
||||||
* @param readInterval The interval to read messages from the cluster.
|
* @param readInterval The interval to read messages from the cluster.
|
||||||
* @param writeInterval The interval to write messages to the cluster.
|
* @param writeInterval The interval to write messages to the cluster.
|
||||||
*/
|
*/
|
||||||
ClusterCommunicationService(
|
ClusterCommunicationService(
|
||||||
std::shared_ptr<data::BackendInterface> backend,
|
std::shared_ptr<data::BackendInterface> backend,
|
||||||
std::unique_ptr<etl::WriterStateInterface> writerState,
|
|
||||||
std::chrono::steady_clock::duration readInterval = kDEFAULT_READ_INTERVAL,
|
std::chrono::steady_clock::duration readInterval = kDEFAULT_READ_INTERVAL,
|
||||||
std::chrono::steady_clock::duration writeInterval = kDEFAULT_WRITE_INTERVAL
|
std::chrono::steady_clock::duration writeInterval = kDEFAULT_WRITE_INTERVAL
|
||||||
);
|
);
|
||||||
|
|
||||||
~ClusterCommunicationService() override;
|
~ClusterCommunicationService() override;
|
||||||
|
|
||||||
ClusterCommunicationService(ClusterCommunicationService&&) = delete;
|
|
||||||
ClusterCommunicationService(ClusterCommunicationService const&) = delete;
|
|
||||||
ClusterCommunicationService&
|
|
||||||
operator=(ClusterCommunicationService&&) = delete;
|
|
||||||
ClusterCommunicationService&
|
|
||||||
operator=(ClusterCommunicationService const&) = delete;
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief Start the service.
|
* @brief Start the service.
|
||||||
*/
|
*/
|
||||||
@@ -86,6 +105,44 @@ public:
|
|||||||
*/
|
*/
|
||||||
void
|
void
|
||||||
stop();
|
stop();
|
||||||
|
|
||||||
|
ClusterCommunicationService(ClusterCommunicationService&&) = delete;
|
||||||
|
ClusterCommunicationService(ClusterCommunicationService const&) = delete;
|
||||||
|
ClusterCommunicationService&
|
||||||
|
operator=(ClusterCommunicationService&&) = delete;
|
||||||
|
ClusterCommunicationService&
|
||||||
|
operator=(ClusterCommunicationService const&) = delete;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Get the UUID of the current node.
|
||||||
|
*
|
||||||
|
* @return The UUID of the current node.
|
||||||
|
*/
|
||||||
|
std::shared_ptr<boost::uuids::uuid>
|
||||||
|
selfUuid() const;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Get the data of the current node.
|
||||||
|
*
|
||||||
|
* @return The data of the current node.
|
||||||
|
*/
|
||||||
|
ClioNode
|
||||||
|
selfData() const override;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Get the data of all nodes in the cluster (including self).
|
||||||
|
*
|
||||||
|
* @return The data of all nodes in the cluster or error if the service is not healthy.
|
||||||
|
*/
|
||||||
|
std::expected<std::vector<ClioNode>, std::string>
|
||||||
|
clusterData() const override;
|
||||||
|
|
||||||
|
private:
|
||||||
|
void
|
||||||
|
doRead(boost::asio::yield_context yield);
|
||||||
|
|
||||||
|
void
|
||||||
|
doWrite();
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace cluster
|
} // namespace cluster
|
||||||
|
|||||||
@@ -17,31 +17,38 @@
|
|||||||
*/
|
*/
|
||||||
//==============================================================================
|
//==============================================================================
|
||||||
|
|
||||||
#include "cluster/Metrics.hpp"
|
#pragma once
|
||||||
|
|
||||||
#include "cluster/Backend.hpp"
|
|
||||||
#include "cluster/ClioNode.hpp"
|
#include "cluster/ClioNode.hpp"
|
||||||
|
|
||||||
#include <memory>
|
#include <expected>
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
namespace cluster {
|
namespace cluster {
|
||||||
|
|
||||||
Metrics::Metrics()
|
/**
|
||||||
{
|
* @brief Interface for the cluster communication service.
|
||||||
nodesInClusterMetric_.set(1); // The node always sees itself
|
*/
|
||||||
isHealthy_ = true;
|
class ClusterCommunicationServiceInterface {
|
||||||
}
|
public:
|
||||||
|
virtual ~ClusterCommunicationServiceInterface() = default;
|
||||||
|
|
||||||
void
|
/**
|
||||||
Metrics::onNewState(ClioNode::CUuid, std::shared_ptr<Backend::ClusterData const> clusterData)
|
* @brief Get the data of the current node.
|
||||||
{
|
*
|
||||||
if (clusterData->has_value()) {
|
* @return The data of the current node.
|
||||||
isHealthy_ = true;
|
*/
|
||||||
nodesInClusterMetric_.set(clusterData->value().size());
|
[[nodiscard]] virtual ClioNode
|
||||||
} else {
|
selfData() const = 0;
|
||||||
isHealthy_ = false;
|
|
||||||
nodesInClusterMetric_.set(1);
|
/**
|
||||||
}
|
* @brief Get the data of all nodes in the cluster (including self).
|
||||||
}
|
*
|
||||||
|
* @return The data of all nodes in the cluster or error if the service is not healthy.
|
||||||
|
*/
|
||||||
|
[[nodiscard]] virtual std::expected<std::vector<ClioNode>, std::string>
|
||||||
|
clusterData() const = 0;
|
||||||
|
};
|
||||||
|
|
||||||
} // namespace cluster
|
} // namespace cluster
|
||||||
@@ -1,39 +0,0 @@
|
|||||||
//------------------------------------------------------------------------------
|
|
||||||
/*
|
|
||||||
This file is part of clio: https://github.com/XRPLF/clio
|
|
||||||
Copyright (c) 2025, the clio developers.
|
|
||||||
|
|
||||||
Permission to use, copy, modify, and distribute this software for any
|
|
||||||
purpose with or without fee is hereby granted, provided that the above
|
|
||||||
copyright notice and this permission notice appear in all copies.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
||||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
||||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
||||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
||||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
||||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
||||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
||||||
*/
|
|
||||||
//==============================================================================
|
|
||||||
|
|
||||||
#pragma once
|
|
||||||
|
|
||||||
#include <concepts>
|
|
||||||
|
|
||||||
namespace cluster {
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Tag type for cluster communication service implementations.
|
|
||||||
*
|
|
||||||
* This tag is used to identify types that implement cluster communication functionality.
|
|
||||||
* Types should inherit from this tag to be recognized as cluster communication services.
|
|
||||||
*/
|
|
||||||
struct ClusterCommunicationServiceTag {
|
|
||||||
virtual ~ClusterCommunicationServiceTag() = default;
|
|
||||||
};
|
|
||||||
|
|
||||||
template <typename T>
|
|
||||||
concept SomeClusterCommunicationService = std::derived_from<T, ClusterCommunicationServiceTag>;
|
|
||||||
|
|
||||||
} // namespace cluster
|
|
||||||
@@ -1,76 +0,0 @@
|
|||||||
//------------------------------------------------------------------------------
|
|
||||||
/*
|
|
||||||
This file is part of clio: https://github.com/XRPLF/clio
|
|
||||||
Copyright (c) 2025, the clio developers.
|
|
||||||
|
|
||||||
Permission to use, copy, modify, and distribute this software for any
|
|
||||||
purpose with or without fee is hereby granted, provided that the above
|
|
||||||
copyright notice and this permission notice appear in all copies.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
||||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
||||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
||||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
||||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
||||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
||||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
||||||
*/
|
|
||||||
//==============================================================================
|
|
||||||
|
|
||||||
#pragma once
|
|
||||||
|
|
||||||
#include "cluster/Backend.hpp"
|
|
||||||
#include "cluster/ClioNode.hpp"
|
|
||||||
#include "util/prometheus/Bool.hpp"
|
|
||||||
#include "util/prometheus/Gauge.hpp"
|
|
||||||
#include "util/prometheus/Prometheus.hpp"
|
|
||||||
|
|
||||||
#include <memory>
|
|
||||||
|
|
||||||
namespace cluster {
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Manages Prometheus metrics for cluster communication and node tracking.
|
|
||||||
*
|
|
||||||
* This class tracks cluster-related metrics including:
|
|
||||||
* - Total number of nodes detected in the cluster
|
|
||||||
* - Health status of cluster communication
|
|
||||||
*/
|
|
||||||
class Metrics {
|
|
||||||
/** @brief Gauge tracking the total number of nodes visible in the cluster */
|
|
||||||
util::prometheus::GaugeInt& nodesInClusterMetric_ = PrometheusService::gaugeInt(
|
|
||||||
"cluster_nodes_total_number",
|
|
||||||
{},
|
|
||||||
"Total number of nodes this node can detect in the cluster."
|
|
||||||
);
|
|
||||||
|
|
||||||
/** @brief Boolean metric indicating whether cluster communication is healthy */
|
|
||||||
util::prometheus::Bool isHealthy_ = PrometheusService::boolMetric(
|
|
||||||
"cluster_communication_is_healthy",
|
|
||||||
{},
|
|
||||||
"Whether cluster communication service is operating healthy (1 - healthy, 0 - we have a problem)"
|
|
||||||
);
|
|
||||||
|
|
||||||
public:
|
|
||||||
/**
|
|
||||||
* @brief Constructs a Metrics instance and initializes metrics.
|
|
||||||
*
|
|
||||||
* Sets the initial node count to 1 (self) and marks communication as healthy.
|
|
||||||
*/
|
|
||||||
Metrics();
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Updates metrics based on new cluster state.
|
|
||||||
*
|
|
||||||
* This callback is invoked when cluster state changes. It updates:
|
|
||||||
* - Health status based on whether cluster data is available
|
|
||||||
* - Node count to reflect the current cluster size
|
|
||||||
*
|
|
||||||
* @param uuid The UUID of the node (unused in current implementation)
|
|
||||||
* @param clusterData Shared pointer to the current cluster data; may be empty if communication failed
|
|
||||||
*/
|
|
||||||
void
|
|
||||||
onNewState(ClioNode::CUuid uuid, std::shared_ptr<Backend::ClusterData const> clusterData);
|
|
||||||
};
|
|
||||||
|
|
||||||
} // namespace cluster
|
|
||||||
@@ -1,98 +0,0 @@
|
|||||||
//------------------------------------------------------------------------------
|
|
||||||
/*
|
|
||||||
This file is part of clio: https://github.com/XRPLF/clio
|
|
||||||
Copyright (c) 2025, the clio developers.
|
|
||||||
|
|
||||||
Permission to use, copy, modify, and distribute this software for any
|
|
||||||
purpose with or without fee is hereby granted, provided that the above
|
|
||||||
copyright notice and this permission notice appear in all copies.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
||||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
||||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
||||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
||||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
||||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
||||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
||||||
*/
|
|
||||||
//==============================================================================
|
|
||||||
|
|
||||||
#include "cluster/WriterDecider.hpp"
|
|
||||||
|
|
||||||
#include "cluster/Backend.hpp"
|
|
||||||
#include "cluster/ClioNode.hpp"
|
|
||||||
#include "etl/WriterState.hpp"
|
|
||||||
#include "util/Assert.hpp"
|
|
||||||
#include "util/Spawn.hpp"
|
|
||||||
|
|
||||||
#include <boost/asio/thread_pool.hpp>
|
|
||||||
|
|
||||||
#include <algorithm>
|
|
||||||
#include <memory>
|
|
||||||
#include <utility>
|
|
||||||
#include <vector>
|
|
||||||
|
|
||||||
namespace cluster {
|
|
||||||
|
|
||||||
WriterDecider::WriterDecider(boost::asio::thread_pool& ctx, std::unique_ptr<etl::WriterStateInterface> writerState)
|
|
||||||
: ctx_(ctx), writerState_(std::move(writerState))
|
|
||||||
{
|
|
||||||
}
|
|
||||||
|
|
||||||
void
|
|
||||||
WriterDecider::onNewState(ClioNode::CUuid selfId, std::shared_ptr<Backend::ClusterData const> clusterData)
|
|
||||||
{
|
|
||||||
if (not clusterData->has_value())
|
|
||||||
return;
|
|
||||||
|
|
||||||
util::spawn(
|
|
||||||
ctx_,
|
|
||||||
[writerState = writerState_->clone(),
|
|
||||||
selfId = std::move(selfId),
|
|
||||||
clusterData = clusterData->value()](auto&&) mutable {
|
|
||||||
auto const selfData =
|
|
||||||
std::ranges::find_if(clusterData, [&selfId](ClioNode const& node) { return node.uuid == selfId; });
|
|
||||||
ASSERT(selfData != clusterData.end(), "Self data should always be in the cluster data");
|
|
||||||
|
|
||||||
if (selfData->dbRole == ClioNode::DbRole::Fallback) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (selfData->dbRole == ClioNode::DbRole::ReadOnly) {
|
|
||||||
writerState->giveUpWriting();
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// If any node in the cluster is in Fallback mode, the entire cluster must switch
|
|
||||||
// to the fallback writer decision mechanism for consistency
|
|
||||||
if (std::ranges::any_of(clusterData, [](ClioNode const& node) {
|
|
||||||
return node.dbRole == ClioNode::DbRole::Fallback;
|
|
||||||
})) {
|
|
||||||
writerState->setWriterDecidingFallback();
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// We are not ReadOnly and there is no Fallback in the cluster
|
|
||||||
std::ranges::sort(clusterData, [](ClioNode const& lhs, ClioNode const& rhs) {
|
|
||||||
return *lhs.uuid < *rhs.uuid;
|
|
||||||
});
|
|
||||||
|
|
||||||
auto const it = std::ranges::find_if(clusterData, [](ClioNode const& node) {
|
|
||||||
return node.dbRole == ClioNode::DbRole::NotWriter or node.dbRole == ClioNode::DbRole::Writer;
|
|
||||||
});
|
|
||||||
|
|
||||||
if (it == clusterData.end()) {
|
|
||||||
// No writer nodes in the cluster yet
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (*it->uuid == *selfId) {
|
|
||||||
writerState->startWriting();
|
|
||||||
} else {
|
|
||||||
writerState->giveUpWriting();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace cluster
|
|
||||||
@@ -1,75 +0,0 @@
|
|||||||
//------------------------------------------------------------------------------
|
|
||||||
/*
|
|
||||||
This file is part of clio: https://github.com/XRPLF/clio
|
|
||||||
Copyright (c) 2025, the clio developers.
|
|
||||||
|
|
||||||
Permission to use, copy, modify, and distribute this software for any
|
|
||||||
purpose with or without fee is hereby granted, provided that the above
|
|
||||||
copyright notice and this permission notice appear in all copies.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
||||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
||||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
||||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
||||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
||||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
||||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
||||||
*/
|
|
||||||
//==============================================================================
|
|
||||||
|
|
||||||
#pragma once
|
|
||||||
|
|
||||||
#include "cluster/Backend.hpp"
|
|
||||||
#include "cluster/ClioNode.hpp"
|
|
||||||
#include "etl/WriterState.hpp"
|
|
||||||
|
|
||||||
#include <boost/asio/thread_pool.hpp>
|
|
||||||
|
|
||||||
#include <memory>
|
|
||||||
|
|
||||||
namespace cluster {
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Decides which node in the cluster should be the writer based on cluster state.
|
|
||||||
*
|
|
||||||
* This class monitors cluster state changes and determines whether the current node
|
|
||||||
* should act as the writer to the database. The decision is made by:
|
|
||||||
* 1. Sorting all nodes by UUID for deterministic ordering
|
|
||||||
* 2. Selecting the first node that is allowed to write (not ReadOnly)
|
|
||||||
* 3. Activating writing on this node if it's the current node, otherwise deactivating
|
|
||||||
*
|
|
||||||
* This ensures only one node in the cluster actively writes to the database at a time.
|
|
||||||
*/
|
|
||||||
class WriterDecider {
|
|
||||||
/** @brief Thread pool for spawning asynchronous tasks */
|
|
||||||
boost::asio::thread_pool& ctx_;
|
|
||||||
|
|
||||||
/** @brief Interface for controlling the writer state of this node */
|
|
||||||
std::unique_ptr<etl::WriterStateInterface> writerState_;
|
|
||||||
|
|
||||||
public:
|
|
||||||
/**
|
|
||||||
* @brief Constructs a WriterDecider.
|
|
||||||
*
|
|
||||||
* @param ctx Thread pool for executing asynchronous operations
|
|
||||||
* @param writerState Writer state interface for controlling write operations
|
|
||||||
*/
|
|
||||||
WriterDecider(boost::asio::thread_pool& ctx, std::unique_ptr<etl::WriterStateInterface> writerState);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Handles cluster state changes and decides whether this node should be the writer.
|
|
||||||
*
|
|
||||||
* This method is called when cluster state changes. It asynchronously:
|
|
||||||
* - Sorts all nodes by UUID to establish a deterministic order
|
|
||||||
* - Identifies the first node allowed to write (not ReadOnly)
|
|
||||||
* - Activates writing if this node is selected, otherwise deactivates writing
|
|
||||||
* - Logs a warning if no nodes in the cluster are allowed to write
|
|
||||||
*
|
|
||||||
* @param selfId The UUID of the current node
|
|
||||||
* @param clusterData Shared pointer to current cluster data; may be empty if communication failed
|
|
||||||
*/
|
|
||||||
void
|
|
||||||
onNewState(ClioNode::CUuid selfId, std::shared_ptr<Backend::ClusterData const> clusterData);
|
|
||||||
};
|
|
||||||
|
|
||||||
} // namespace cluster
|
|
||||||
@@ -1,108 +0,0 @@
|
|||||||
//------------------------------------------------------------------------------
|
|
||||||
/*
|
|
||||||
This file is part of clio: https://github.com/XRPLF/clio
|
|
||||||
Copyright (c) 2025, the clio developers.
|
|
||||||
|
|
||||||
Permission to use, copy, modify, and distribute this software for any
|
|
||||||
purpose with or without fee is hereby granted, provided that the above
|
|
||||||
copyright notice and this permission notice appear in all copies.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
||||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
||||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
||||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
||||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
||||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
||||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
||||||
*/
|
|
||||||
//==============================================================================
|
|
||||||
|
|
||||||
#pragma once
|
|
||||||
|
|
||||||
#include "util/Assert.hpp"
|
|
||||||
#include "util/Spawn.hpp"
|
|
||||||
|
|
||||||
#include <boost/asio/bind_cancellation_slot.hpp>
|
|
||||||
#include <boost/asio/cancellation_signal.hpp>
|
|
||||||
#include <boost/asio/cancellation_type.hpp>
|
|
||||||
#include <boost/asio/error.hpp>
|
|
||||||
#include <boost/asio/executor.hpp>
|
|
||||||
#include <boost/asio/spawn.hpp>
|
|
||||||
#include <boost/asio/steady_timer.hpp>
|
|
||||||
#include <boost/asio/strand.hpp>
|
|
||||||
|
|
||||||
#include <atomic>
|
|
||||||
#include <chrono>
|
|
||||||
#include <concepts>
|
|
||||||
#include <semaphore>
|
|
||||||
|
|
||||||
namespace cluster::impl {
|
|
||||||
|
|
||||||
// TODO: Try to replace util::Repeat by this. https://github.com/XRPLF/clio/issues/2926
|
|
||||||
template <typename Context>
|
|
||||||
class RepeatedTask {
|
|
||||||
std::chrono::steady_clock::duration interval_;
|
|
||||||
boost::asio::strand<typename Context::executor_type> strand_;
|
|
||||||
|
|
||||||
enum class State { Running, Stopped };
|
|
||||||
std::atomic<State> state_ = State::Stopped;
|
|
||||||
|
|
||||||
std::binary_semaphore semaphore_{0};
|
|
||||||
boost::asio::steady_timer timer_;
|
|
||||||
|
|
||||||
public:
|
|
||||||
RepeatedTask(std::chrono::steady_clock::duration interval, Context& ctx)
|
|
||||||
: interval_(interval), strand_(boost::asio::make_strand(ctx)), timer_(strand_)
|
|
||||||
{
|
|
||||||
}
|
|
||||||
|
|
||||||
~RepeatedTask()
|
|
||||||
{
|
|
||||||
stop();
|
|
||||||
}
|
|
||||||
|
|
||||||
template <typename Fn>
|
|
||||||
requires std::invocable<Fn, boost::asio::yield_context> or std::invocable<Fn>
|
|
||||||
void
|
|
||||||
run(Fn&& f)
|
|
||||||
{
|
|
||||||
ASSERT(state_ == State::Stopped, "Can only be ran once");
|
|
||||||
state_ = State::Running;
|
|
||||||
util::spawn(strand_, [this, f = std::forward<Fn>(f)](boost::asio::yield_context yield) {
|
|
||||||
boost::system::error_code ec;
|
|
||||||
|
|
||||||
while (state_ == State::Running) {
|
|
||||||
timer_.expires_after(interval_);
|
|
||||||
timer_.async_wait(yield[ec]);
|
|
||||||
|
|
||||||
if (ec or state_ != State::Running)
|
|
||||||
break;
|
|
||||||
|
|
||||||
if constexpr (std::invocable<decltype(f), boost::asio::yield_context>) {
|
|
||||||
f(yield);
|
|
||||||
} else {
|
|
||||||
f();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
semaphore_.release();
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
void
|
|
||||||
stop()
|
|
||||||
{
|
|
||||||
if (auto expected = State::Running; not state_.compare_exchange_strong(expected, State::Stopped))
|
|
||||||
return; // Already stopped or not started
|
|
||||||
|
|
||||||
std::binary_semaphore cancelSemaphore{0};
|
|
||||||
boost::asio::post(strand_, [this, &cancelSemaphore]() {
|
|
||||||
timer_.cancel();
|
|
||||||
cancelSemaphore.release();
|
|
||||||
});
|
|
||||||
cancelSemaphore.acquire();
|
|
||||||
semaphore_.acquire();
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
} // namespace cluster::impl
|
|
||||||
@@ -1,21 +1,23 @@
|
|||||||
add_library(clio_data)
|
add_library(clio_data)
|
||||||
target_sources(clio_data
|
target_sources(
|
||||||
PRIVATE AmendmentCenter.cpp
|
clio_data
|
||||||
BackendCounters.cpp
|
PRIVATE AmendmentCenter.cpp
|
||||||
BackendInterface.cpp
|
BackendCounters.cpp
|
||||||
LedgerCache.cpp
|
BackendInterface.cpp
|
||||||
LedgerCacheSaver.cpp
|
LedgerCache.cpp
|
||||||
LedgerHeaderCache.cpp
|
LedgerCacheSaver.cpp
|
||||||
cassandra/impl/Future.cpp
|
LedgerHeaderCache.cpp
|
||||||
cassandra/impl/Cluster.cpp
|
cassandra/impl/Future.cpp
|
||||||
cassandra/impl/Batch.cpp
|
cassandra/impl/Cluster.cpp
|
||||||
cassandra/impl/Result.cpp
|
cassandra/impl/Batch.cpp
|
||||||
cassandra/impl/Tuple.cpp
|
cassandra/impl/Result.cpp
|
||||||
cassandra/impl/SslContext.cpp
|
cassandra/impl/Tuple.cpp
|
||||||
cassandra/Handle.cpp
|
cassandra/impl/SslContext.cpp
|
||||||
cassandra/SettingsProvider.cpp
|
cassandra/Handle.cpp
|
||||||
impl/InputFile.cpp
|
cassandra/SettingsProvider.cpp
|
||||||
impl/LedgerCacheFile.cpp
|
impl/InputFile.cpp
|
||||||
impl/OutputFile.cpp)
|
impl/LedgerCacheFile.cpp
|
||||||
|
impl/OutputFile.cpp
|
||||||
|
)
|
||||||
|
|
||||||
target_link_libraries(clio_data PUBLIC cassandra-cpp-driver::cassandra-cpp-driver clio_util)
|
target_link_libraries(clio_data PUBLIC cassandra-cpp-driver::cassandra-cpp-driver clio_util)
|
||||||
|
|||||||
@@ -115,11 +115,6 @@ LedgerCacheFile::write(DataView dataView)
|
|||||||
auto const hash = file.hash();
|
auto const hash = file.hash();
|
||||||
file.write(hash.data(), decltype(hash)::bytes);
|
file.write(hash.data(), decltype(hash)::bytes);
|
||||||
|
|
||||||
// flush internal buffer explicitly before renaming
|
|
||||||
if (auto const expectedSuccess = file.close(); not expectedSuccess.has_value()) {
|
|
||||||
return expectedSuccess;
|
|
||||||
}
|
|
||||||
|
|
||||||
try {
|
try {
|
||||||
std::filesystem::rename(newFilePath, path_);
|
std::filesystem::rename(newFilePath, path_);
|
||||||
} catch (std::exception const& e) {
|
} catch (std::exception const& e) {
|
||||||
|
|||||||
@@ -23,7 +23,6 @@
|
|||||||
|
|
||||||
#include <cstddef>
|
#include <cstddef>
|
||||||
#include <cstring>
|
#include <cstring>
|
||||||
#include <expected>
|
|
||||||
#include <ios>
|
#include <ios>
|
||||||
#include <string>
|
#include <string>
|
||||||
#include <utility>
|
#include <utility>
|
||||||
@@ -60,14 +59,4 @@ OutputFile::hash() const
|
|||||||
return std::move(sum).finalize();
|
return std::move(sum).finalize();
|
||||||
}
|
}
|
||||||
|
|
||||||
std::expected<void, std::string>
|
|
||||||
OutputFile::close()
|
|
||||||
{
|
|
||||||
file_.close();
|
|
||||||
if (not file_) {
|
|
||||||
return std::unexpected{"Error closing cache file"};
|
|
||||||
}
|
|
||||||
return {};
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace data::impl
|
} // namespace data::impl
|
||||||
|
|||||||
@@ -25,7 +25,6 @@
|
|||||||
|
|
||||||
#include <cstddef>
|
#include <cstddef>
|
||||||
#include <cstring>
|
#include <cstring>
|
||||||
#include <expected>
|
|
||||||
#include <fstream>
|
#include <fstream>
|
||||||
#include <string>
|
#include <string>
|
||||||
|
|
||||||
@@ -61,9 +60,6 @@ public:
|
|||||||
ripple::uint256
|
ripple::uint256
|
||||||
hash() const;
|
hash() const;
|
||||||
|
|
||||||
std::expected<void, std::string>
|
|
||||||
close();
|
|
||||||
|
|
||||||
private:
|
private:
|
||||||
void
|
void
|
||||||
writeToFile(char const* data, size_t size);
|
writeToFile(char const* data, size_t size);
|
||||||
|
|||||||
@@ -1,29 +1,30 @@
|
|||||||
add_library(clio_etl)
|
add_library(clio_etl)
|
||||||
|
|
||||||
target_sources(clio_etl
|
target_sources(
|
||||||
PRIVATE CacheLoaderSettings.cpp
|
clio_etl
|
||||||
ETLHelpers.cpp
|
PRIVATE CacheLoaderSettings.cpp
|
||||||
ETLService.cpp
|
ETLHelpers.cpp
|
||||||
ETLState.cpp
|
ETLService.cpp
|
||||||
LoadBalancer.cpp
|
ETLState.cpp
|
||||||
MPTHelpers.cpp
|
LoadBalancer.cpp
|
||||||
NetworkValidatedLedgers.cpp
|
MPTHelpers.cpp
|
||||||
NFTHelpers.cpp
|
NetworkValidatedLedgers.cpp
|
||||||
Source.cpp
|
NFTHelpers.cpp
|
||||||
WriterState.cpp
|
Source.cpp
|
||||||
impl/AmendmentBlockHandler.cpp
|
impl/AmendmentBlockHandler.cpp
|
||||||
impl/AsyncGrpcCall.cpp
|
impl/AsyncGrpcCall.cpp
|
||||||
impl/Extraction.cpp
|
impl/Extraction.cpp
|
||||||
impl/ForwardingSource.cpp
|
impl/ForwardingSource.cpp
|
||||||
impl/GrpcSource.cpp
|
impl/GrpcSource.cpp
|
||||||
impl/Loading.cpp
|
impl/Loading.cpp
|
||||||
impl/Monitor.cpp
|
impl/Monitor.cpp
|
||||||
impl/SubscriptionSource.cpp
|
impl/SubscriptionSource.cpp
|
||||||
impl/TaskManager.cpp
|
impl/TaskManager.cpp
|
||||||
impl/ext/Cache.cpp
|
impl/ext/Cache.cpp
|
||||||
impl/ext/Core.cpp
|
impl/ext/Core.cpp
|
||||||
impl/ext/MPT.cpp
|
impl/ext/MPT.cpp
|
||||||
impl/ext/NFT.cpp
|
impl/ext/NFT.cpp
|
||||||
impl/ext/Successor.cpp)
|
impl/ext/Successor.cpp
|
||||||
|
)
|
||||||
|
|
||||||
target_link_libraries(clio_etl PUBLIC clio_data)
|
target_link_libraries(clio_etl PUBLIC clio_data)
|
||||||
|
|||||||
@@ -78,7 +78,6 @@ namespace etl {
|
|||||||
std::shared_ptr<ETLServiceInterface>
|
std::shared_ptr<ETLServiceInterface>
|
||||||
ETLService::makeETLService(
|
ETLService::makeETLService(
|
||||||
util::config::ClioConfigDefinition const& config,
|
util::config::ClioConfigDefinition const& config,
|
||||||
std::shared_ptr<SystemState> state,
|
|
||||||
util::async::AnyExecutionContext ctx,
|
util::async::AnyExecutionContext ctx,
|
||||||
std::shared_ptr<BackendInterface> backend,
|
std::shared_ptr<BackendInterface> backend,
|
||||||
std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions,
|
std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions,
|
||||||
@@ -88,6 +87,9 @@ ETLService::makeETLService(
|
|||||||
{
|
{
|
||||||
std::shared_ptr<ETLServiceInterface> ret;
|
std::shared_ptr<ETLServiceInterface> ret;
|
||||||
|
|
||||||
|
auto state = std::make_shared<SystemState>();
|
||||||
|
state->isStrictReadonly = config.get<bool>("read_only");
|
||||||
|
|
||||||
auto fetcher = std::make_shared<impl::LedgerFetcher>(backend, balancer);
|
auto fetcher = std::make_shared<impl::LedgerFetcher>(backend, balancer);
|
||||||
auto extractor = std::make_shared<impl::Extractor>(fetcher);
|
auto extractor = std::make_shared<impl::Extractor>(fetcher);
|
||||||
auto publisher = std::make_shared<impl::LedgerPublisher>(ctx, backend, subscriptions, *state);
|
auto publisher = std::make_shared<impl::LedgerPublisher>(ctx, backend, subscriptions, *state);
|
||||||
@@ -171,7 +173,6 @@ ETLService::ETLService(
|
|||||||
, state_(std::move(state))
|
, state_(std::move(state))
|
||||||
, startSequence_(config.get().maybeValue<uint32_t>("start_sequence"))
|
, startSequence_(config.get().maybeValue<uint32_t>("start_sequence"))
|
||||||
, finishSequence_(config.get().maybeValue<uint32_t>("finish_sequence"))
|
, finishSequence_(config.get().maybeValue<uint32_t>("finish_sequence"))
|
||||||
, writeCommandStrand_(ctx_.makeStrand())
|
|
||||||
{
|
{
|
||||||
ASSERT(not state_->isWriting, "ETL should never start in writer mode");
|
ASSERT(not state_->isWriting, "ETL should never start in writer mode");
|
||||||
|
|
||||||
@@ -212,13 +213,14 @@ ETLService::run()
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
auto const nextSequence = syncCacheWithDb();
|
auto nextSequence = rng->maxSequence + 1;
|
||||||
|
if (backend_->cache().latestLedgerSequence() != 0) {
|
||||||
|
nextSequence = backend_->cache().latestLedgerSequence();
|
||||||
|
}
|
||||||
|
|
||||||
LOG(log_.debug()) << "Database is populated. Starting monitor loop. sequence = " << nextSequence;
|
LOG(log_.debug()) << "Database is populated. Starting monitor loop. sequence = " << nextSequence;
|
||||||
|
|
||||||
startMonitor(nextSequence);
|
startMonitor(nextSequence);
|
||||||
|
|
||||||
state_->isLoadingCache = false;
|
|
||||||
|
|
||||||
// If we are a writer as the result of loading the initial ledger - start loading
|
// If we are a writer as the result of loading the initial ledger - start loading
|
||||||
if (state_->isWriting)
|
if (state_->isWriting)
|
||||||
startLoading(nextSequence);
|
startLoading(nextSequence);
|
||||||
@@ -230,13 +232,6 @@ ETLService::stop()
|
|||||||
{
|
{
|
||||||
LOG(log_.info()) << "Stop called";
|
LOG(log_.info()) << "Stop called";
|
||||||
|
|
||||||
systemStateWriteCommandSubscription_.disconnect();
|
|
||||||
auto count = runningWriteCommandHandlers_.load();
|
|
||||||
while (count != 0) {
|
|
||||||
runningWriteCommandHandlers_.wait(count); // Blocks until value changes
|
|
||||||
count = runningWriteCommandHandlers_.load();
|
|
||||||
}
|
|
||||||
|
|
||||||
if (mainLoop_)
|
if (mainLoop_)
|
||||||
mainLoop_->wait();
|
mainLoop_->wait();
|
||||||
if (taskMan_)
|
if (taskMan_)
|
||||||
@@ -348,77 +343,35 @@ ETLService::loadInitialLedgerIfNeeded()
|
|||||||
return rng;
|
return rng;
|
||||||
}
|
}
|
||||||
|
|
||||||
uint32_t
|
|
||||||
ETLService::syncCacheWithDb()
|
|
||||||
{
|
|
||||||
auto rng = backend_->hardFetchLedgerRangeNoThrow();
|
|
||||||
|
|
||||||
while (not backend_->cache().isDisabled() and rng->maxSequence > backend_->cache().latestLedgerSequence()) {
|
|
||||||
LOG(log_.info()) << "Syncing cache with DB. DB latest seq: " << rng->maxSequence
|
|
||||||
<< ". Cache latest seq: " << backend_->cache().latestLedgerSequence();
|
|
||||||
for (auto seq = backend_->cache().latestLedgerSequence(); seq <= rng->maxSequence; ++seq) {
|
|
||||||
LOG(log_.info()) << "ETLService (via syncCacheWithDb) got new seq from db: " << seq;
|
|
||||||
updateCache(seq);
|
|
||||||
}
|
|
||||||
rng = backend_->hardFetchLedgerRangeNoThrow();
|
|
||||||
}
|
|
||||||
return rng->maxSequence + 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
void
|
|
||||||
ETLService::updateCache(uint32_t seq)
|
|
||||||
{
|
|
||||||
auto const cacheNeedsUpdate = backend_->cache().latestLedgerSequence() < seq;
|
|
||||||
auto const backendRange = backend_->fetchLedgerRange();
|
|
||||||
auto const backendNeedsUpdate = backendRange.has_value() and backendRange->maxSequence < seq;
|
|
||||||
|
|
||||||
if (cacheNeedsUpdate) {
|
|
||||||
auto const diff = data::synchronousAndRetryOnTimeout([this, seq](auto yield) {
|
|
||||||
return backend_->fetchLedgerDiff(seq, yield);
|
|
||||||
});
|
|
||||||
cacheUpdater_->update(seq, diff);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (backendNeedsUpdate)
|
|
||||||
backend_->updateRange(seq);
|
|
||||||
|
|
||||||
publisher_->publish(seq, {});
|
|
||||||
}
|
|
||||||
|
|
||||||
void
|
void
|
||||||
ETLService::startMonitor(uint32_t seq)
|
ETLService::startMonitor(uint32_t seq)
|
||||||
{
|
{
|
||||||
monitor_ = monitorProvider_->make(ctx_, backend_, ledgers_, seq);
|
monitor_ = monitorProvider_->make(ctx_, backend_, ledgers_, seq);
|
||||||
|
|
||||||
systemStateWriteCommandSubscription_ =
|
|
||||||
state_->writeCommandSignal.connect([this](SystemState::WriteCommand command) {
|
|
||||||
++runningWriteCommandHandlers_;
|
|
||||||
writeCommandStrand_.submit([this, command]() {
|
|
||||||
switch (command) {
|
|
||||||
case etl::SystemState::WriteCommand::StartWriting:
|
|
||||||
attemptTakeoverWriter();
|
|
||||||
break;
|
|
||||||
case etl::SystemState::WriteCommand::StopWriting:
|
|
||||||
giveUpWriter();
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
--runningWriteCommandHandlers_;
|
|
||||||
runningWriteCommandHandlers_.notify_one();
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
monitorNewSeqSubscription_ = monitor_->subscribeToNewSequence([this](uint32_t seq) {
|
monitorNewSeqSubscription_ = monitor_->subscribeToNewSequence([this](uint32_t seq) {
|
||||||
LOG(log_.info()) << "ETLService (via Monitor) got new seq from db: " << seq;
|
LOG(log_.info()) << "ETLService (via Monitor) got new seq from db: " << seq;
|
||||||
updateCache(seq);
|
|
||||||
|
if (state_->writeConflict) {
|
||||||
|
LOG(log_.info()) << "Got a write conflict; Giving up writer seat immediately";
|
||||||
|
giveUpWriter();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (not state_->isWriting) {
|
||||||
|
auto const diff = data::synchronousAndRetryOnTimeout([this, seq](auto yield) {
|
||||||
|
return backend_->fetchLedgerDiff(seq, yield);
|
||||||
|
});
|
||||||
|
|
||||||
|
cacheUpdater_->update(seq, diff);
|
||||||
|
backend_->updateRange(seq);
|
||||||
|
}
|
||||||
|
|
||||||
|
publisher_->publish(seq, {});
|
||||||
});
|
});
|
||||||
|
|
||||||
monitorDbStalledSubscription_ = monitor_->subscribeToDbStalled([this]() {
|
monitorDbStalledSubscription_ = monitor_->subscribeToDbStalled([this]() {
|
||||||
LOG(log_.warn()) << "ETLService received DbStalled signal from Monitor";
|
LOG(log_.warn()) << "ETLService received DbStalled signal from Monitor";
|
||||||
// Database stall detected - no writer has been active for 10 seconds
|
|
||||||
// This triggers the fallback mechanism and attempts to become the writer
|
|
||||||
if (not state_->isStrictReadonly and not state_->isWriting)
|
if (not state_->isStrictReadonly and not state_->isWriting)
|
||||||
state_->writeCommandSignal(SystemState::WriteCommand::StartWriting);
|
attemptTakeoverWriter();
|
||||||
state_->isWriterDecidingFallback = true;
|
|
||||||
});
|
});
|
||||||
|
|
||||||
monitor_->run();
|
monitor_->run();
|
||||||
@@ -441,13 +394,6 @@ ETLService::attemptTakeoverWriter()
|
|||||||
auto rng = backend_->hardFetchLedgerRangeNoThrow();
|
auto rng = backend_->hardFetchLedgerRangeNoThrow();
|
||||||
ASSERT(rng.has_value(), "Ledger range can't be null");
|
ASSERT(rng.has_value(), "Ledger range can't be null");
|
||||||
|
|
||||||
if (backend_->cache().latestLedgerSequence() != rng->maxSequence) {
|
|
||||||
LOG(log_.info()) << "Wanted to take over the ETL writer seat but LedgerCache is outdated";
|
|
||||||
// Give ETL time to update LedgerCache. This method will be called because ClusterCommunication will likely to
|
|
||||||
// continue sending StartWriting signal every 1 second
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
state_->isWriting = true; // switch to writer
|
state_->isWriting = true; // switch to writer
|
||||||
LOG(log_.info()) << "Taking over the ETL writer seat";
|
LOG(log_.info()) << "Taking over the ETL writer seat";
|
||||||
startLoading(rng->maxSequence + 1);
|
startLoading(rng->maxSequence + 1);
|
||||||
@@ -458,7 +404,7 @@ ETLService::giveUpWriter()
|
|||||||
{
|
{
|
||||||
ASSERT(not state_->isStrictReadonly, "This should only happen on writer nodes");
|
ASSERT(not state_->isStrictReadonly, "This should only happen on writer nodes");
|
||||||
state_->isWriting = false;
|
state_->isWriting = false;
|
||||||
LOG(log_.info()) << "Giving up writer seat";
|
state_->writeConflict = false;
|
||||||
taskMan_ = nullptr;
|
taskMan_ = nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -52,7 +52,6 @@
|
|||||||
#include "feed/SubscriptionManagerInterface.hpp"
|
#include "feed/SubscriptionManagerInterface.hpp"
|
||||||
#include "util/async/AnyExecutionContext.hpp"
|
#include "util/async/AnyExecutionContext.hpp"
|
||||||
#include "util/async/AnyOperation.hpp"
|
#include "util/async/AnyOperation.hpp"
|
||||||
#include "util/async/AnyStrand.hpp"
|
|
||||||
#include "util/config/ConfigDefinition.hpp"
|
#include "util/config/ConfigDefinition.hpp"
|
||||||
#include "util/log/Logger.hpp"
|
#include "util/log/Logger.hpp"
|
||||||
|
|
||||||
@@ -70,12 +69,12 @@
|
|||||||
#include <xrpl/protocol/TxFormats.h>
|
#include <xrpl/protocol/TxFormats.h>
|
||||||
#include <xrpl/protocol/TxMeta.h>
|
#include <xrpl/protocol/TxMeta.h>
|
||||||
|
|
||||||
#include <atomic>
|
|
||||||
#include <cstddef>
|
#include <cstddef>
|
||||||
#include <cstdint>
|
#include <cstdint>
|
||||||
#include <functional>
|
#include <functional>
|
||||||
#include <memory>
|
#include <memory>
|
||||||
#include <optional>
|
#include <optional>
|
||||||
|
#include <string>
|
||||||
|
|
||||||
namespace etl {
|
namespace etl {
|
||||||
|
|
||||||
@@ -118,9 +117,6 @@ class ETLService : public ETLServiceInterface {
|
|||||||
|
|
||||||
boost::signals2::scoped_connection monitorNewSeqSubscription_;
|
boost::signals2::scoped_connection monitorNewSeqSubscription_;
|
||||||
boost::signals2::scoped_connection monitorDbStalledSubscription_;
|
boost::signals2::scoped_connection monitorDbStalledSubscription_;
|
||||||
boost::signals2::scoped_connection systemStateWriteCommandSubscription_;
|
|
||||||
util::async::AnyStrand writeCommandStrand_;
|
|
||||||
std::atomic<size_t> runningWriteCommandHandlers_{0};
|
|
||||||
|
|
||||||
std::optional<util::async::AnyOperation<void>> mainLoop_;
|
std::optional<util::async::AnyOperation<void>> mainLoop_;
|
||||||
|
|
||||||
@@ -131,7 +127,6 @@ public:
|
|||||||
* Creates and runs the ETL service.
|
* Creates and runs the ETL service.
|
||||||
*
|
*
|
||||||
* @param config The configuration to use
|
* @param config The configuration to use
|
||||||
* @param state The system state tracking object
|
|
||||||
* @param ctx Execution context for asynchronous operations
|
* @param ctx Execution context for asynchronous operations
|
||||||
* @param backend BackendInterface implementation
|
* @param backend BackendInterface implementation
|
||||||
* @param subscriptions Subscription manager
|
* @param subscriptions Subscription manager
|
||||||
@@ -142,7 +137,6 @@ public:
|
|||||||
static std::shared_ptr<ETLServiceInterface>
|
static std::shared_ptr<ETLServiceInterface>
|
||||||
makeETLService(
|
makeETLService(
|
||||||
util::config::ClioConfigDefinition const& config,
|
util::config::ClioConfigDefinition const& config,
|
||||||
std::shared_ptr<SystemState> state,
|
|
||||||
util::async::AnyExecutionContext ctx,
|
util::async::AnyExecutionContext ctx,
|
||||||
std::shared_ptr<BackendInterface> backend,
|
std::shared_ptr<BackendInterface> backend,
|
||||||
std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions,
|
std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions,
|
||||||
@@ -166,7 +160,7 @@ public:
|
|||||||
* @param initialLoadObserver The observer for initial data loading
|
* @param initialLoadObserver The observer for initial data loading
|
||||||
* @param taskManagerProvider The provider of the task manager instance
|
* @param taskManagerProvider The provider of the task manager instance
|
||||||
* @param monitorProvider The provider of the monitor instance
|
* @param monitorProvider The provider of the monitor instance
|
||||||
* @param state The system state tracking object
|
* @param state System state tracking object
|
||||||
*/
|
*/
|
||||||
ETLService(
|
ETLService(
|
||||||
util::async::AnyExecutionContext ctx,
|
util::async::AnyExecutionContext ctx,
|
||||||
@@ -212,12 +206,6 @@ private:
|
|||||||
std::optional<data::LedgerRange>
|
std::optional<data::LedgerRange>
|
||||||
loadInitialLedgerIfNeeded();
|
loadInitialLedgerIfNeeded();
|
||||||
|
|
||||||
[[nodiscard]] uint32_t
|
|
||||||
syncCacheWithDb();
|
|
||||||
|
|
||||||
void
|
|
||||||
updateCache(uint32_t seq);
|
|
||||||
|
|
||||||
void
|
void
|
||||||
startMonitor(uint32_t seq);
|
startMonitor(uint32_t seq);
|
||||||
|
|
||||||
|
|||||||
@@ -19,16 +19,11 @@
|
|||||||
|
|
||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include "util/config/ConfigDefinition.hpp"
|
|
||||||
#include "util/log/Logger.hpp"
|
|
||||||
#include "util/prometheus/Bool.hpp"
|
#include "util/prometheus/Bool.hpp"
|
||||||
#include "util/prometheus/Label.hpp"
|
#include "util/prometheus/Label.hpp"
|
||||||
#include "util/prometheus/Prometheus.hpp"
|
#include "util/prometheus/Prometheus.hpp"
|
||||||
|
|
||||||
#include <boost/signals2/signal.hpp>
|
#include <atomic>
|
||||||
#include <boost/signals2/variadic_signal.hpp>
|
|
||||||
|
|
||||||
#include <memory>
|
|
||||||
|
|
||||||
namespace etl {
|
namespace etl {
|
||||||
|
|
||||||
@@ -36,25 +31,6 @@ namespace etl {
|
|||||||
* @brief Represents the state of the ETL subsystem.
|
* @brief Represents the state of the ETL subsystem.
|
||||||
*/
|
*/
|
||||||
struct SystemState {
|
struct SystemState {
|
||||||
SystemState()
|
|
||||||
{
|
|
||||||
isLoadingCache = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Factory method to create a SystemState instance.
|
|
||||||
*
|
|
||||||
* @param config The configuration to use for initializing the system state
|
|
||||||
* @return A shared pointer to the newly created SystemState
|
|
||||||
*/
|
|
||||||
static std::shared_ptr<SystemState>
|
|
||||||
makeSystemState(util::config::ClioConfigDefinition const& config)
|
|
||||||
{
|
|
||||||
auto state = std::make_shared<SystemState>();
|
|
||||||
state->isStrictReadonly = config.get<bool>("read_only");
|
|
||||||
return state;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief Whether the process is in strict read-only mode.
|
* @brief Whether the process is in strict read-only mode.
|
||||||
*
|
*
|
||||||
@@ -74,31 +50,8 @@ struct SystemState {
|
|||||||
"Whether the process is writing to the database"
|
"Whether the process is writing to the database"
|
||||||
);
|
);
|
||||||
|
|
||||||
/** @brief Whether the process is still loading cache after startup. */
|
std::atomic_bool isStopping = false; /**< @brief Whether the software is stopping. */
|
||||||
util::prometheus::Bool isLoadingCache = PrometheusService::boolMetric(
|
std::atomic_bool writeConflict = false; /**< @brief Whether a write conflict was detected. */
|
||||||
"etl_loading_cache",
|
|
||||||
util::prometheus::Labels{},
|
|
||||||
"Whether etl is loading cache after clio startup"
|
|
||||||
);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Commands for controlling the ETL writer state.
|
|
||||||
*
|
|
||||||
* These commands are emitted via writeCommandSignal to coordinate writer state transitions across components.
|
|
||||||
*/
|
|
||||||
enum class WriteCommand {
|
|
||||||
StartWriting, /**< Request to attempt taking over as the ETL writer */
|
|
||||||
StopWriting /**< Request to give up the ETL writer role (e.g., due to write conflict) */
|
|
||||||
};
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Signal for coordinating ETL writer state transitions.
|
|
||||||
*
|
|
||||||
* This signal allows components to request changes to the writer state without direct coupling.
|
|
||||||
* - Emitted with StartWriting when database stalls and node should attempt to become writer
|
|
||||||
* - Emitted with StopWriting when write conflicts are detected
|
|
||||||
*/
|
|
||||||
boost::signals2::signal<void(WriteCommand)> writeCommandSignal;
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief Whether clio detected an amendment block.
|
* @brief Whether clio detected an amendment block.
|
||||||
@@ -124,24 +77,6 @@ struct SystemState {
|
|||||||
util::prometheus::Labels{},
|
util::prometheus::Labels{},
|
||||||
"Whether clio detected a corruption that needs manual attention"
|
"Whether clio detected a corruption that needs manual attention"
|
||||||
);
|
);
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Whether the cluster is using the fallback writer decision mechanism.
|
|
||||||
*
|
|
||||||
* The fallback mechanism is triggered when:
|
|
||||||
* - The database stalls for 10 seconds (detected by Monitor), indicating no active writer
|
|
||||||
* - A write conflict is detected, indicating multiple nodes attempting to write simultaneously
|
|
||||||
*
|
|
||||||
* When fallback mode is active, the cluster stops using the cluster communication mechanism
|
|
||||||
* (TTL-based role announcements) and relies on the slower but more reliable database-based
|
|
||||||
* conflict detection. This flag propagates across the cluster - if any node enters fallback
|
|
||||||
* mode, all nodes in the cluster will switch to fallback mode.
|
|
||||||
*/
|
|
||||||
util::prometheus::Bool isWriterDecidingFallback = PrometheusService::boolMetric(
|
|
||||||
"etl_writing_deciding_fallback",
|
|
||||||
util::prometheus::Labels{},
|
|
||||||
"Whether the cluster is using the fallback writer decision mechanism"
|
|
||||||
);
|
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace etl
|
} // namespace etl
|
||||||
|
|||||||
@@ -1,88 +0,0 @@
|
|||||||
//------------------------------------------------------------------------------
|
|
||||||
/*
|
|
||||||
This file is part of clio: https://github.com/XRPLF/clio
|
|
||||||
Copyright (c) 2025, the clio developers.
|
|
||||||
|
|
||||||
Permission to use, copy, modify, and distribute this software for any
|
|
||||||
purpose with or without fee is hereby granted, provided that the above
|
|
||||||
copyright notice and this permission notice appear in all copies.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
||||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
||||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
||||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
||||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
||||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
||||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
||||||
*/
|
|
||||||
//==============================================================================
|
|
||||||
|
|
||||||
#include "etl/WriterState.hpp"
|
|
||||||
|
|
||||||
#include "etl/SystemState.hpp"
|
|
||||||
|
|
||||||
#include <memory>
|
|
||||||
#include <utility>
|
|
||||||
|
|
||||||
namespace etl {
|
|
||||||
|
|
||||||
WriterState::WriterState(std::shared_ptr<SystemState> state) : systemState_(std::move(state))
|
|
||||||
{
|
|
||||||
}
|
|
||||||
|
|
||||||
bool
|
|
||||||
WriterState::isReadOnly() const
|
|
||||||
{
|
|
||||||
return systemState_->isStrictReadonly;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool
|
|
||||||
WriterState::isWriting() const
|
|
||||||
{
|
|
||||||
return systemState_->isWriting;
|
|
||||||
}
|
|
||||||
|
|
||||||
void
|
|
||||||
WriterState::startWriting()
|
|
||||||
{
|
|
||||||
if (isWriting())
|
|
||||||
return;
|
|
||||||
|
|
||||||
systemState_->writeCommandSignal(SystemState::WriteCommand::StartWriting);
|
|
||||||
}
|
|
||||||
|
|
||||||
void
|
|
||||||
WriterState::giveUpWriting()
|
|
||||||
{
|
|
||||||
if (not isWriting())
|
|
||||||
return;
|
|
||||||
|
|
||||||
systemState_->writeCommandSignal(SystemState::WriteCommand::StopWriting);
|
|
||||||
}
|
|
||||||
|
|
||||||
void
|
|
||||||
WriterState::setWriterDecidingFallback()
|
|
||||||
{
|
|
||||||
systemState_->isWriterDecidingFallback = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool
|
|
||||||
WriterState::isFallback() const
|
|
||||||
{
|
|
||||||
return systemState_->isWriterDecidingFallback;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool
|
|
||||||
WriterState::isLoadingCache() const
|
|
||||||
{
|
|
||||||
return systemState_->isLoadingCache;
|
|
||||||
}
|
|
||||||
|
|
||||||
std::unique_ptr<WriterStateInterface>
|
|
||||||
WriterState::clone() const
|
|
||||||
{
|
|
||||||
auto c = WriterState(*this);
|
|
||||||
return std::make_unique<WriterState>(std::move(c));
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace etl
|
|
||||||
@@ -1,193 +0,0 @@
|
|||||||
//------------------------------------------------------------------------------
|
|
||||||
/*
|
|
||||||
This file is part of clio: https://github.com/XRPLF/clio
|
|
||||||
Copyright (c) 2025, the clio developers.
|
|
||||||
|
|
||||||
Permission to use, copy, modify, and distribute this software for any
|
|
||||||
purpose with or without fee is hereby granted, provided that the above
|
|
||||||
copyright notice and this permission notice appear in all copies.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
||||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
||||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
||||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
||||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
||||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
||||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
||||||
*/
|
|
||||||
//==============================================================================
|
|
||||||
|
|
||||||
#pragma once
|
|
||||||
|
|
||||||
#include "etl/SystemState.hpp"
|
|
||||||
|
|
||||||
#include <memory>
|
|
||||||
|
|
||||||
namespace etl {
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Interface for managing writer state in the ETL subsystem.
|
|
||||||
*
|
|
||||||
* This interface provides methods to query and control whether the ETL process
|
|
||||||
* is actively writing to the database. Implementations should coordinate with
|
|
||||||
* the ETL system state to manage write responsibilities.
|
|
||||||
*/
|
|
||||||
class WriterStateInterface {
|
|
||||||
public:
|
|
||||||
virtual ~WriterStateInterface() = default;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Check if the ETL process is in strict read-only mode.
|
|
||||||
* @return true if the process is in strict read-only mode, false otherwise
|
|
||||||
*/
|
|
||||||
[[nodiscard]] virtual bool
|
|
||||||
isReadOnly() const = 0;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Check if the ETL process is currently writing to the database.
|
|
||||||
* @return true if the process is writing, false otherwise
|
|
||||||
*/
|
|
||||||
[[nodiscard]] virtual bool
|
|
||||||
isWriting() const = 0;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Request to start writing to the database.
|
|
||||||
*
|
|
||||||
* This method signals that the process should take over writing responsibilities.
|
|
||||||
* The actual transition to writing state may not be immediate.
|
|
||||||
*/
|
|
||||||
virtual void
|
|
||||||
startWriting() = 0;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Request to stop writing to the database.
|
|
||||||
*
|
|
||||||
* This method signals that the process should give up writing responsibilities.
|
|
||||||
* The actual transition from writing state may not be immediate.
|
|
||||||
*/
|
|
||||||
virtual void
|
|
||||||
giveUpWriting() = 0;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Check if the cluster is using the fallback writer decision mechanism.
|
|
||||||
*
|
|
||||||
* @return true if the cluster has switched to fallback mode, false otherwise
|
|
||||||
*/
|
|
||||||
[[nodiscard]] virtual bool
|
|
||||||
isFallback() const = 0;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Switch the cluster to the fallback writer decision mechanism.
|
|
||||||
*
|
|
||||||
* This method is called when the cluster needs to transition from the cluster
|
|
||||||
* communication mechanism to the slower but more reliable fallback mechanism.
|
|
||||||
* Once set, this flag propagates to all nodes in the cluster through the
|
|
||||||
* ClioNode DbRole::Fallback state.
|
|
||||||
*/
|
|
||||||
virtual void
|
|
||||||
setWriterDecidingFallback() = 0;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Whether clio is still loading cache after startup.
|
|
||||||
*
|
|
||||||
* @return true if clio is still loading cache, false otherwise.
|
|
||||||
*/
|
|
||||||
[[nodiscard]] virtual bool
|
|
||||||
isLoadingCache() const = 0;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Create a clone of this writer state.
|
|
||||||
*
|
|
||||||
* Creates a new instance of the writer state with the same underlying system state.
|
|
||||||
* This is used when spawning operations that need their own writer state instance
|
|
||||||
* while sharing the same system state.
|
|
||||||
*
|
|
||||||
* @return A unique pointer to the cloned writer state.
|
|
||||||
*/
|
|
||||||
[[nodiscard]] virtual std::unique_ptr<WriterStateInterface>
|
|
||||||
clone() const = 0;
|
|
||||||
};
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Implementation of WriterStateInterface that manages ETL writer state.
|
|
||||||
*
|
|
||||||
* This class coordinates with SystemState to manage whether the ETL process
|
|
||||||
* is actively writing to the database. It provides methods to query the current
|
|
||||||
* writing state and request transitions between writing and non-writing states.
|
|
||||||
*/
|
|
||||||
class WriterState : public WriterStateInterface {
|
|
||||||
private:
|
|
||||||
std::shared_ptr<SystemState> systemState_; /**< @brief Shared system state for ETL coordination */
|
|
||||||
|
|
||||||
public:
|
|
||||||
/**
|
|
||||||
* @brief Construct a WriterState with the given system state.
|
|
||||||
* @param state Shared pointer to the system state for coordination
|
|
||||||
*/
|
|
||||||
WriterState(std::shared_ptr<SystemState> state);
|
|
||||||
|
|
||||||
bool
|
|
||||||
isReadOnly() const override;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Check if the ETL process is currently writing to the database.
|
|
||||||
* @return true if the process is writing, false otherwise
|
|
||||||
*/
|
|
||||||
bool
|
|
||||||
isWriting() const override;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Request to start writing to the database.
|
|
||||||
*
|
|
||||||
* If already writing, this method does nothing. Otherwise, it sets the
|
|
||||||
* shouldTakeoverWriting flag in the system state to signal the request.
|
|
||||||
*/
|
|
||||||
void
|
|
||||||
startWriting() override;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Request to stop writing to the database.
|
|
||||||
*
|
|
||||||
* If not currently writing, this method does nothing. Otherwise, it sets the
|
|
||||||
* shouldGiveUpWriter flag in the system state to signal the request.
|
|
||||||
*/
|
|
||||||
void
|
|
||||||
giveUpWriting() override;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Switch the cluster to the fallback writer decision mechanism.
|
|
||||||
*
|
|
||||||
* Sets the isWriterDecidingFallback flag in the system state, which will be
|
|
||||||
* propagated to other nodes in the cluster through the ClioNode DbRole::Fallback state.
|
|
||||||
*/
|
|
||||||
void
|
|
||||||
setWriterDecidingFallback() override;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Check if the cluster is using the fallback writer decision mechanism.
|
|
||||||
*
|
|
||||||
* @return true if the cluster has switched to fallback mode, false otherwise
|
|
||||||
*/
|
|
||||||
bool
|
|
||||||
isFallback() const override;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Whether clio is still loading cache after startup.
|
|
||||||
*
|
|
||||||
* @return true if clio is still loading cache, false otherwise.
|
|
||||||
*/
|
|
||||||
bool
|
|
||||||
isLoadingCache() const override;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Create a clone of this writer state.
|
|
||||||
*
|
|
||||||
* Creates a new WriterState instance sharing the same system state.
|
|
||||||
*
|
|
||||||
* @return A unique pointer to the cloned writer state.
|
|
||||||
*/
|
|
||||||
std::unique_ptr<WriterStateInterface>
|
|
||||||
clone() const override;
|
|
||||||
};
|
|
||||||
|
|
||||||
} // namespace etl
|
|
||||||
@@ -45,7 +45,6 @@
|
|||||||
#include <xrpl/protocol/Serializer.h>
|
#include <xrpl/protocol/Serializer.h>
|
||||||
|
|
||||||
#include <algorithm>
|
#include <algorithm>
|
||||||
#include <atomic>
|
|
||||||
#include <chrono>
|
#include <chrono>
|
||||||
#include <cstddef>
|
#include <cstddef>
|
||||||
#include <cstdint>
|
#include <cstdint>
|
||||||
@@ -77,8 +76,6 @@ class LedgerPublisher : public LedgerPublisherInterface {
|
|||||||
|
|
||||||
util::async::AnyStrand publishStrand_;
|
util::async::AnyStrand publishStrand_;
|
||||||
|
|
||||||
std::atomic_bool stop_{false};
|
|
||||||
|
|
||||||
std::shared_ptr<BackendInterface> backend_;
|
std::shared_ptr<BackendInterface> backend_;
|
||||||
std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions_;
|
std::shared_ptr<feed::SubscriptionManagerInterface> subscriptions_;
|
||||||
std::reference_wrapper<SystemState const> state_; // shared state for ETL
|
std::reference_wrapper<SystemState const> state_; // shared state for ETL
|
||||||
@@ -128,7 +125,7 @@ public:
|
|||||||
{
|
{
|
||||||
LOG(log_.info()) << "Attempting to publish ledger = " << ledgerSequence;
|
LOG(log_.info()) << "Attempting to publish ledger = " << ledgerSequence;
|
||||||
size_t numAttempts = 0;
|
size_t numAttempts = 0;
|
||||||
while (not stop_) {
|
while (not state_.get().isStopping) {
|
||||||
auto range = backend_->hardFetchLedgerRangeNoThrow();
|
auto range = backend_->hardFetchLedgerRangeNoThrow();
|
||||||
|
|
||||||
if (!range || range->maxSequence < ledgerSequence) {
|
if (!range || range->maxSequence < ledgerSequence) {
|
||||||
@@ -261,18 +258,6 @@ public:
|
|||||||
return *lastPublishedSequence_.lock();
|
return *lastPublishedSequence_.lock();
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Stops publishing
|
|
||||||
*
|
|
||||||
* @note This is a basic implementation to satisfy tests. This will be improved in
|
|
||||||
* https://github.com/XRPLF/clio/issues/2833
|
|
||||||
*/
|
|
||||||
void
|
|
||||||
stop()
|
|
||||||
{
|
|
||||||
stop_ = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
private:
|
private:
|
||||||
void
|
void
|
||||||
setLastClose(std::chrono::time_point<ripple::NetClock> lastCloseTime)
|
setLastClose(std::chrono::time_point<ripple::NetClock> lastCloseTime)
|
||||||
|
|||||||
@@ -75,10 +75,7 @@ Loader::load(model::LedgerData const& data)
|
|||||||
<< "; took " << duration << "ms";
|
<< "; took " << duration << "ms";
|
||||||
|
|
||||||
if (not success) {
|
if (not success) {
|
||||||
// Write conflict detected - another node wrote to the database
|
state_->writeConflict = true;
|
||||||
// This triggers the fallback mechanism and stops this node from writing
|
|
||||||
state_->writeCommandSignal(SystemState::WriteCommand::StopWriting);
|
|
||||||
state_->isWriterDecidingFallback = true;
|
|
||||||
LOG(log_.warn()) << "Another node wrote a ledger into the DB - we have a write conflict";
|
LOG(log_.warn()) << "Another node wrote a ledger into the DB - we have a write conflict";
|
||||||
return std::unexpected(LoaderError::WriteConflict);
|
return std::unexpected(LoaderError::WriteConflict);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,5 +1,7 @@
|
|||||||
add_library(clio_feed)
|
add_library(clio_feed)
|
||||||
target_sources(clio_feed PRIVATE SubscriptionManager.cpp impl/TransactionFeed.cpp impl/LedgerFeed.cpp
|
target_sources(
|
||||||
impl/ProposedTransactionFeed.cpp impl/SingleFeedBase.cpp)
|
clio_feed PRIVATE SubscriptionManager.cpp impl/TransactionFeed.cpp impl/LedgerFeed.cpp
|
||||||
|
impl/ProposedTransactionFeed.cpp impl/SingleFeedBase.cpp
|
||||||
|
)
|
||||||
|
|
||||||
target_link_libraries(clio_feed PRIVATE clio_util)
|
target_link_libraries(clio_feed PRIVATE clio_util)
|
||||||
|
|||||||
@@ -64,7 +64,7 @@ SingleFeedBase::unsub(SubscriberSharedPtr const& subscriber)
|
|||||||
void
|
void
|
||||||
SingleFeedBase::pub(std::string msg)
|
SingleFeedBase::pub(std::string msg)
|
||||||
{
|
{
|
||||||
strand_.submit([this, msg = std::move(msg)] {
|
[[maybe_unused]] auto task = strand_.execute([this, msg = std::move(msg)]() {
|
||||||
auto const msgPtr = std::make_shared<std::string>(msg);
|
auto const msgPtr = std::make_shared<std::string>(msg);
|
||||||
signal_.emit(msgPtr);
|
signal_.emit(msgPtr);
|
||||||
});
|
});
|
||||||
|
|||||||
@@ -73,15 +73,10 @@ public:
|
|||||||
|
|
||||||
// This class can't hold the trackable's shared_ptr, because disconnect should be able to be called in the
|
// This class can't hold the trackable's shared_ptr, because disconnect should be able to be called in the
|
||||||
// the trackable's destructor. However, the trackable can not be destroyed when the slot is being called
|
// the trackable's destructor. However, the trackable can not be destroyed when the slot is being called
|
||||||
// either. `track_foreign` is racey when one shared_ptr is tracked by multiple signals. Therefore we are storing
|
// either. track_foreign will hold a weak_ptr to the connection, which makes sure the connection is valid when
|
||||||
// a weak_ptr of the trackable and using weak_ptr::lock() to atomically check existence and acquire a shared_ptr
|
// the slot is called.
|
||||||
// during slot invocation. This guarantees to keep the trackable alive for the duration of the slot call and
|
|
||||||
// avoids potential race conditions.
|
|
||||||
connections->emplace(
|
connections->emplace(
|
||||||
trackable.get(), signal_.connect([slot, weakTrackable = std::weak_ptr(trackable)](Args&&... args) {
|
trackable.get(), signal_.connect(typename SignalType::slot_type(slot).track_foreign(trackable))
|
||||||
if (auto lifeExtender = weakTrackable.lock(); lifeExtender)
|
|
||||||
std::invoke(slot, std::forward<Args...>(args)...);
|
|
||||||
})
|
|
||||||
);
|
);
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,14 +4,16 @@ target_sources(clio_server PRIVATE Main.cpp)
|
|||||||
target_link_libraries(clio_server PRIVATE clio_app)
|
target_link_libraries(clio_server PRIVATE clio_app)
|
||||||
|
|
||||||
if (static)
|
if (static)
|
||||||
if (san)
|
if (san)
|
||||||
message(FATAL_ERROR "Static linkage not allowed when using sanitizers")
|
message(FATAL_ERROR "Static linkage not allowed when using sanitizers")
|
||||||
elseif (is_appleclang)
|
elseif (is_appleclang)
|
||||||
message(FATAL_ERROR "Static linkage not supported on AppleClang")
|
message(FATAL_ERROR "Static linkage not supported on AppleClang")
|
||||||
else ()
|
else ()
|
||||||
target_link_options(# Note: -static-libstdc++ can statically link both libstdc++ and libc++
|
target_link_options(
|
||||||
clio_server PRIVATE -static-libstdc++ -static-libgcc)
|
# Note: -static-libstdc++ can statically link both libstdc++ and libc++
|
||||||
endif ()
|
clio_server PRIVATE -static-libstdc++ -static-libgcc
|
||||||
|
)
|
||||||
|
endif ()
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
set_target_properties(clio_server PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})
|
set_target_properties(clio_server PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})
|
||||||
|
|||||||
@@ -1,6 +1,8 @@
|
|||||||
add_library(clio_migration)
|
add_library(clio_migration)
|
||||||
|
|
||||||
target_sources(clio_migration PRIVATE MigrationApplication.cpp impl/MigrationManagerFactory.cpp MigratorStatus.cpp
|
target_sources(
|
||||||
cassandra/impl/ObjectsAdapter.cpp cassandra/impl/TransactionsAdapter.cpp)
|
clio_migration PRIVATE MigrationApplication.cpp impl/MigrationManagerFactory.cpp MigratorStatus.cpp
|
||||||
|
cassandra/impl/ObjectsAdapter.cpp cassandra/impl/TransactionsAdapter.cpp
|
||||||
|
)
|
||||||
|
|
||||||
target_link_libraries(clio_migration PRIVATE clio_util clio_data)
|
target_link_libraries(clio_migration PRIVATE clio_util clio_data)
|
||||||
|
|||||||
@@ -6,53 +6,55 @@ target_link_libraries(clio_rpc_center PUBLIC clio_options)
|
|||||||
|
|
||||||
add_library(clio_rpc)
|
add_library(clio_rpc)
|
||||||
|
|
||||||
target_sources(clio_rpc
|
target_sources(
|
||||||
PRIVATE Errors.cpp
|
clio_rpc
|
||||||
Factories.cpp
|
PRIVATE Errors.cpp
|
||||||
AMMHelpers.cpp
|
Factories.cpp
|
||||||
RPCHelpers.cpp
|
AMMHelpers.cpp
|
||||||
CredentialHelpers.cpp
|
RPCHelpers.cpp
|
||||||
Counters.cpp
|
CredentialHelpers.cpp
|
||||||
WorkQueue.cpp
|
Counters.cpp
|
||||||
common/Specs.cpp
|
WorkQueue.cpp
|
||||||
common/Validators.cpp
|
common/Specs.cpp
|
||||||
common/MetaProcessors.cpp
|
common/Validators.cpp
|
||||||
common/impl/APIVersionParser.cpp
|
common/MetaProcessors.cpp
|
||||||
common/impl/HandlerProvider.cpp
|
common/impl/APIVersionParser.cpp
|
||||||
handlers/AccountChannels.cpp
|
common/impl/HandlerProvider.cpp
|
||||||
handlers/AccountCurrencies.cpp
|
handlers/AccountChannels.cpp
|
||||||
handlers/AccountInfo.cpp
|
handlers/AccountCurrencies.cpp
|
||||||
handlers/AccountLines.cpp
|
handlers/AccountInfo.cpp
|
||||||
handlers/AccountMPTokenIssuances.cpp
|
handlers/AccountLines.cpp
|
||||||
handlers/AccountMPTokens.cpp
|
handlers/AccountMPTokenIssuances.cpp
|
||||||
handlers/AccountNFTs.cpp
|
handlers/AccountMPTokens.cpp
|
||||||
handlers/AccountObjects.cpp
|
handlers/AccountNFTs.cpp
|
||||||
handlers/AccountOffers.cpp
|
handlers/AccountObjects.cpp
|
||||||
handlers/AccountTx.cpp
|
handlers/AccountOffers.cpp
|
||||||
handlers/AMMInfo.cpp
|
handlers/AccountTx.cpp
|
||||||
handlers/BookChanges.cpp
|
handlers/AMMInfo.cpp
|
||||||
handlers/BookOffers.cpp
|
handlers/BookChanges.cpp
|
||||||
handlers/DepositAuthorized.cpp
|
handlers/BookOffers.cpp
|
||||||
handlers/Feature.cpp
|
handlers/DepositAuthorized.cpp
|
||||||
handlers/GatewayBalances.cpp
|
handlers/Feature.cpp
|
||||||
handlers/GetAggregatePrice.cpp
|
handlers/GatewayBalances.cpp
|
||||||
handlers/Ledger.cpp
|
handlers/GetAggregatePrice.cpp
|
||||||
handlers/LedgerData.cpp
|
handlers/Ledger.cpp
|
||||||
handlers/LedgerEntry.cpp
|
handlers/LedgerData.cpp
|
||||||
handlers/LedgerIndex.cpp
|
handlers/LedgerEntry.cpp
|
||||||
handlers/LedgerRange.cpp
|
handlers/LedgerIndex.cpp
|
||||||
handlers/MPTHolders.cpp
|
handlers/LedgerRange.cpp
|
||||||
handlers/NFTsByIssuer.cpp
|
handlers/MPTHolders.cpp
|
||||||
handlers/NFTBuyOffers.cpp
|
handlers/NFTsByIssuer.cpp
|
||||||
handlers/NFTHistory.cpp
|
handlers/NFTBuyOffers.cpp
|
||||||
handlers/NFTInfo.cpp
|
handlers/NFTHistory.cpp
|
||||||
handlers/NFTOffersCommon.cpp
|
handlers/NFTInfo.cpp
|
||||||
handlers/NFTSellOffers.cpp
|
handlers/NFTOffersCommon.cpp
|
||||||
handlers/NoRippleCheck.cpp
|
handlers/NFTSellOffers.cpp
|
||||||
handlers/Random.cpp
|
handlers/NoRippleCheck.cpp
|
||||||
handlers/Subscribe.cpp
|
handlers/Random.cpp
|
||||||
handlers/TransactionEntry.cpp
|
handlers/Subscribe.cpp
|
||||||
handlers/Unsubscribe.cpp
|
handlers/TransactionEntry.cpp
|
||||||
handlers/VaultInfo.cpp)
|
handlers/Unsubscribe.cpp
|
||||||
|
handlers/VaultInfo.cpp
|
||||||
|
)
|
||||||
|
|
||||||
target_link_libraries(clio_rpc PRIVATE clio_util)
|
target_link_libraries(clio_rpc PRIVATE clio_util)
|
||||||
|
|||||||
@@ -2,64 +2,67 @@ add_subdirectory(build)
|
|||||||
|
|
||||||
add_library(clio_util)
|
add_library(clio_util)
|
||||||
|
|
||||||
target_sources(clio_util
|
target_sources(
|
||||||
PRIVATE Assert.cpp
|
clio_util
|
||||||
Coroutine.cpp
|
PRIVATE Assert.cpp
|
||||||
CoroutineGroup.cpp
|
Coroutine.cpp
|
||||||
log/Logger.cpp
|
CoroutineGroup.cpp
|
||||||
log/PrettyPath.cpp
|
log/Logger.cpp
|
||||||
prometheus/Http.cpp
|
log/PrettyPath.cpp
|
||||||
prometheus/Label.cpp
|
prometheus/Http.cpp
|
||||||
prometheus/MetricBase.cpp
|
prometheus/Label.cpp
|
||||||
prometheus/MetricBuilder.cpp
|
prometheus/MetricBase.cpp
|
||||||
prometheus/MetricsFamily.cpp
|
prometheus/MetricBuilder.cpp
|
||||||
prometheus/OStream.cpp
|
prometheus/MetricsFamily.cpp
|
||||||
prometheus/Prometheus.cpp
|
prometheus/OStream.cpp
|
||||||
Random.cpp
|
prometheus/Prometheus.cpp
|
||||||
Retry.cpp
|
Random.cpp
|
||||||
Repeat.cpp
|
Retry.cpp
|
||||||
requests/RequestBuilder.cpp
|
Repeat.cpp
|
||||||
requests/Types.cpp
|
requests/RequestBuilder.cpp
|
||||||
requests/WsConnection.cpp
|
requests/Types.cpp
|
||||||
requests/impl/SslContext.cpp
|
requests/WsConnection.cpp
|
||||||
ResponseExpirationCache.cpp
|
requests/impl/SslContext.cpp
|
||||||
Shasum.cpp
|
ResponseExpirationCache.cpp
|
||||||
SignalsHandler.cpp
|
Shasum.cpp
|
||||||
StopHelper.cpp
|
SignalsHandler.cpp
|
||||||
StringHash.cpp
|
StopHelper.cpp
|
||||||
Taggable.cpp
|
StringHash.cpp
|
||||||
TerminationHandler.cpp
|
Taggable.cpp
|
||||||
TimeUtils.cpp
|
TerminationHandler.cpp
|
||||||
TxUtils.cpp
|
TimeUtils.cpp
|
||||||
LedgerUtils.cpp
|
TxUtils.cpp
|
||||||
config/Array.cpp
|
LedgerUtils.cpp
|
||||||
config/ArrayView.cpp
|
config/Array.cpp
|
||||||
config/ConfigConstraints.cpp
|
config/ArrayView.cpp
|
||||||
config/ConfigDefinition.cpp
|
config/ConfigConstraints.cpp
|
||||||
config/ConfigFileJson.cpp
|
config/ConfigDefinition.cpp
|
||||||
config/ObjectView.cpp
|
config/ConfigFileJson.cpp
|
||||||
config/Types.cpp
|
config/ObjectView.cpp
|
||||||
config/ValueView.cpp)
|
config/Types.cpp
|
||||||
|
config/ValueView.cpp
|
||||||
|
)
|
||||||
|
|
||||||
# This must be above the target_link_libraries call otherwise backtrace doesn't work
|
# This must be above the target_link_libraries call otherwise backtrace doesn't work
|
||||||
if ("${san}" STREQUAL "")
|
if ("${san}" STREQUAL "")
|
||||||
target_link_libraries(clio_util PUBLIC Boost::stacktrace_backtrace)
|
target_link_libraries(clio_util PUBLIC Boost::stacktrace_backtrace)
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
target_link_libraries(
|
target_link_libraries(
|
||||||
clio_util
|
clio_util
|
||||||
PUBLIC Boost::headers
|
PUBLIC Boost::headers
|
||||||
Boost::iostreams
|
Boost::iostreams
|
||||||
Boost::coroutine
|
Boost::coroutine
|
||||||
Boost::context
|
Boost::context
|
||||||
fmt::fmt
|
fmt::fmt
|
||||||
openssl::openssl
|
openssl::openssl
|
||||||
xrpl::libxrpl
|
xrpl::libxrpl
|
||||||
Threads::Threads
|
Threads::Threads
|
||||||
clio_options
|
clio_options
|
||||||
clio_rpc_center
|
clio_rpc_center
|
||||||
clio_build_version
|
clio_build_version
|
||||||
PRIVATE spdlog::spdlog)
|
PRIVATE spdlog::spdlog
|
||||||
|
)
|
||||||
|
|
||||||
# FIXME: needed on gcc-12, clang-16 and AppleClang for now (known boost 1.82 issue for some compilers)
|
# FIXME: needed on gcc-12, clang-16 and AppleClang for now (known boost 1.82 issue for some compilers)
|
||||||
#
|
#
|
||||||
|
|||||||
@@ -19,8 +19,6 @@
|
|||||||
|
|
||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include "util/async/Concepts.hpp"
|
|
||||||
|
|
||||||
#include <boost/asio/any_io_executor.hpp>
|
#include <boost/asio/any_io_executor.hpp>
|
||||||
#include <boost/asio/experimental/channel.hpp>
|
#include <boost/asio/experimental/channel.hpp>
|
||||||
#include <boost/asio/experimental/concurrent_channel.hpp>
|
#include <boost/asio/experimental/concurrent_channel.hpp>
|
||||||
@@ -44,36 +42,15 @@ struct ChannelInstantiated;
|
|||||||
} // namespace detail
|
} // namespace detail
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Specifies the producer concurrency model for a Channel.
|
|
||||||
*/
|
|
||||||
enum class ProducerType {
|
|
||||||
Single, /**< Only one Sender can exist (non-copyable). Uses direct Guard ownership for zero overhead. */
|
|
||||||
Multi /**< Multiple Senders can exist (copyable). Uses shared_ptr<Guard> for shared ownership. */
|
|
||||||
};
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Specifies the consumer concurrency model for a Channel.
|
|
||||||
*/
|
|
||||||
enum class ConsumerType {
|
|
||||||
Single, /**< Only one Receiver can exist (non-copyable). Uses direct Guard ownership for zero overhead. */
|
|
||||||
Multi /**< Multiple Receivers can exist (copyable). Uses shared_ptr<Guard> for shared ownership. */
|
|
||||||
};
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief Represents a go-like channel, a multi-producer (Sender) multi-consumer (Receiver) thread-safe data pipe.
|
* @brief Represents a go-like channel, a multi-producer (Sender) multi-consumer (Receiver) thread-safe data pipe.
|
||||||
* @note Use INSTANTIATE_CHANNEL_FOR_CLANG macro when using this class. See docs at the bottom of the file for more
|
* @note Use INSTANTIATE_CHANNEL_FOR_CLANG macro when using this class. See docs at the bottom of the file for more
|
||||||
* details.
|
* details.
|
||||||
*
|
*
|
||||||
* @tparam T The type of data the channel transfers
|
* @tparam T The type of data the channel transfers
|
||||||
* @tparam P ProducerType::Multi (default) for multi-producer or ProducerType::Single for single-producer
|
|
||||||
* @tparam C ConsumerType::Multi (default) for multi-consumer or ConsumerType::Single for single-consumer
|
|
||||||
*/
|
*/
|
||||||
template <typename T, ProducerType P = ProducerType::Multi, ConsumerType C = ConsumerType::Multi>
|
template <typename T>
|
||||||
class Channel {
|
class Channel {
|
||||||
static constexpr bool kIS_MULTI_PRODUCER = (P == ProducerType::Multi);
|
|
||||||
static constexpr bool kIS_MULTI_CONSUMER = (C == ConsumerType::Multi);
|
|
||||||
|
|
||||||
private:
|
private:
|
||||||
class ControlBlock {
|
class ControlBlock {
|
||||||
using InternalChannelType = boost::asio::experimental::concurrent_channel<void(boost::system::error_code, T)>;
|
using InternalChannelType = boost::asio::experimental::concurrent_channel<void(boost::system::error_code, T)>;
|
||||||
@@ -81,16 +58,7 @@ private:
|
|||||||
InternalChannelType ch_;
|
InternalChannelType ch_;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
template <typename ContextType>
|
ControlBlock(auto&& context, std::size_t capacity) : executor_(context.get_executor()), ch_(context, capacity)
|
||||||
requires(not async::SomeExecutionContext<ContextType>)
|
|
||||||
ControlBlock(ContextType&& context, std::size_t capacity)
|
|
||||||
: executor_(context.get_executor()), ch_(context, capacity)
|
|
||||||
{
|
|
||||||
}
|
|
||||||
|
|
||||||
template <async::SomeExecutionContext ContextType>
|
|
||||||
ControlBlock(ContextType&& context, std::size_t capacity)
|
|
||||||
: executor_(context.getExecutor().get_executor()), ch_(context.getExecutor(), capacity)
|
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -133,54 +101,30 @@ private:
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
public:
|
|
||||||
/**
|
/**
|
||||||
* @brief The sending end of a channel.
|
* @brief The sending end of a channel.
|
||||||
*
|
*
|
||||||
* Sender is movable. For multi-producer channels, Sender is also copyable.
|
* Sender is copyable and movable. The channel remains open as long as at least one Sender exists.
|
||||||
* The channel remains open as long as at least one Sender exists.
|
|
||||||
* When all Sender instances are destroyed, the channel is closed and receivers will receive std::nullopt.
|
* When all Sender instances are destroyed, the channel is closed and receivers will receive std::nullopt.
|
||||||
*/
|
*/
|
||||||
class Sender {
|
class Sender {
|
||||||
std::shared_ptr<ControlBlock> shared_;
|
std::shared_ptr<ControlBlock> shared_;
|
||||||
std::conditional_t<kIS_MULTI_PRODUCER, std::shared_ptr<Guard>, Guard> guard_;
|
std::shared_ptr<Guard> guard_;
|
||||||
|
|
||||||
friend class Channel<T, P, C>;
|
|
||||||
|
|
||||||
|
public:
|
||||||
/**
|
/**
|
||||||
* @brief Constructs a Sender from a shared control block.
|
* @brief Constructs a Sender from a shared control block.
|
||||||
* @param shared The shared control block managing the channel state
|
* @param shared The shared control block managing the channel state
|
||||||
*/
|
*/
|
||||||
Sender(std::shared_ptr<ControlBlock> shared)
|
Sender(std::shared_ptr<ControlBlock> shared)
|
||||||
: shared_(shared), guard_([shared = std::move(shared)]() {
|
: shared_(std::move(shared)), guard_(std::make_shared<Guard>(shared_)) {};
|
||||||
if constexpr (kIS_MULTI_PRODUCER) {
|
|
||||||
return std::make_shared<Guard>(std::move(shared));
|
|
||||||
} else {
|
|
||||||
return Guard{std::move(shared)};
|
|
||||||
}
|
|
||||||
}())
|
|
||||||
{
|
|
||||||
}
|
|
||||||
|
|
||||||
public:
|
|
||||||
Sender(Sender&&) = default;
|
Sender(Sender&&) = default;
|
||||||
Sender(Sender const&)
|
Sender(Sender const&) = default;
|
||||||
requires kIS_MULTI_PRODUCER
|
|
||||||
= default;
|
|
||||||
Sender(Sender const&)
|
|
||||||
requires(!kIS_MULTI_PRODUCER)
|
|
||||||
= delete;
|
|
||||||
|
|
||||||
Sender&
|
Sender&
|
||||||
operator=(Sender&&) = default;
|
operator=(Sender&&) = default;
|
||||||
Sender&
|
Sender&
|
||||||
operator=(Sender const&)
|
operator=(Sender const&) = default;
|
||||||
requires kIS_MULTI_PRODUCER
|
|
||||||
= default;
|
|
||||||
Sender&
|
|
||||||
operator=(Sender const&)
|
|
||||||
requires(!kIS_MULTI_PRODUCER)
|
|
||||||
= delete;
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief Asynchronously sends data through the channel using a coroutine.
|
* @brief Asynchronously sends data through the channel using a coroutine.
|
||||||
@@ -258,50 +202,27 @@ public:
|
|||||||
/**
|
/**
|
||||||
* @brief The receiving end of a channel.
|
* @brief The receiving end of a channel.
|
||||||
*
|
*
|
||||||
* Receiver is movable. For multi-consumer channels, Receiver is also copyable.
|
* Receiver is copyable and movable. Multiple receivers can consume from the same channel concurrently.
|
||||||
* Multiple receivers can consume from the same multi-consumer channel concurrently.
|
|
||||||
* When all Receiver instances are destroyed, the channel is closed and senders will fail to send.
|
* When all Receiver instances are destroyed, the channel is closed and senders will fail to send.
|
||||||
*/
|
*/
|
||||||
class Receiver {
|
class Receiver {
|
||||||
std::shared_ptr<ControlBlock> shared_;
|
std::shared_ptr<ControlBlock> shared_;
|
||||||
std::conditional_t<kIS_MULTI_CONSUMER, std::shared_ptr<Guard>, Guard> guard_;
|
std::shared_ptr<Guard> guard_;
|
||||||
|
|
||||||
friend class Channel<T, P, C>;
|
|
||||||
|
|
||||||
|
public:
|
||||||
/**
|
/**
|
||||||
* @brief Constructs a Receiver from a shared control block.
|
* @brief Constructs a Receiver from a shared control block.
|
||||||
* @param shared The shared control block managing the channel state
|
* @param shared The shared control block managing the channel state
|
||||||
*/
|
*/
|
||||||
Receiver(std::shared_ptr<ControlBlock> shared)
|
Receiver(std::shared_ptr<ControlBlock> shared)
|
||||||
: shared_(shared), guard_([shared = std::move(shared)]() {
|
: shared_(std::move(shared)), guard_(std::make_shared<Guard>(shared_)) {};
|
||||||
if constexpr (kIS_MULTI_CONSUMER) {
|
|
||||||
return std::make_shared<Guard>(std::move(shared));
|
|
||||||
} else {
|
|
||||||
return Guard{std::move(shared)};
|
|
||||||
}
|
|
||||||
}())
|
|
||||||
{
|
|
||||||
}
|
|
||||||
|
|
||||||
public:
|
|
||||||
Receiver(Receiver&&) = default;
|
Receiver(Receiver&&) = default;
|
||||||
Receiver(Receiver const&)
|
Receiver(Receiver const&) = default;
|
||||||
requires kIS_MULTI_CONSUMER
|
|
||||||
= default;
|
|
||||||
Receiver(Receiver const&)
|
|
||||||
requires(!kIS_MULTI_CONSUMER)
|
|
||||||
= delete;
|
|
||||||
|
|
||||||
Receiver&
|
Receiver&
|
||||||
operator=(Receiver&&) = default;
|
operator=(Receiver&&) = default;
|
||||||
Receiver&
|
Receiver&
|
||||||
operator=(Receiver const&)
|
operator=(Receiver const&) = default;
|
||||||
requires kIS_MULTI_CONSUMER
|
|
||||||
= default;
|
|
||||||
Receiver&
|
|
||||||
operator=(Receiver const&)
|
|
||||||
requires(!kIS_MULTI_CONSUMER)
|
|
||||||
= delete;
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief Attempts to receive data from the channel without blocking.
|
* @brief Attempts to receive data from the channel without blocking.
|
||||||
@@ -376,6 +297,7 @@ public:
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
public:
|
||||||
/**
|
/**
|
||||||
* @brief Factory function to create channel components.
|
* @brief Factory function to create channel components.
|
||||||
* @param context A supported context type (either io_context or thread_pool)
|
* @param context A supported context type (either io_context or thread_pool)
|
||||||
|
|||||||
@@ -22,7 +22,6 @@
|
|||||||
#include <boost/asio/spawn.hpp>
|
#include <boost/asio/spawn.hpp>
|
||||||
#include <boost/asio/strand.hpp>
|
#include <boost/asio/strand.hpp>
|
||||||
|
|
||||||
#include <concepts>
|
|
||||||
#include <exception>
|
#include <exception>
|
||||||
#include <type_traits>
|
#include <type_traits>
|
||||||
|
|
||||||
|
|||||||
@@ -29,27 +29,6 @@
|
|||||||
|
|
||||||
namespace util::async {
|
namespace util::async {
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Tag type for identifying execution context types.
|
|
||||||
*
|
|
||||||
* Types that inherit from this tag can be detected using the SomeExecutionContext concept.
|
|
||||||
* This allows generic code to differentiate between raw Boost.Asio contexts and wrapped execution contexts.
|
|
||||||
*/
|
|
||||||
struct ExecutionContextTag {
|
|
||||||
virtual ~ExecutionContextTag() = default;
|
|
||||||
};
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Concept that identifies types derived from ExecutionContextTag.
|
|
||||||
*
|
|
||||||
* This concept is used to detect custom execution context wrappers (like BasicExecutionContext)
|
|
||||||
* and distinguish them from raw Boost.Asio contexts (io_context, thread_pool, etc.).
|
|
||||||
*
|
|
||||||
* @tparam T The type to check
|
|
||||||
*/
|
|
||||||
template <typename T>
|
|
||||||
concept SomeExecutionContext = std::derived_from<std::remove_cvref_t<T>, ExecutionContextTag>;
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief Specifies the interface for an entity that can be stopped
|
* @brief Specifies the interface for an entity that can be stopped
|
||||||
*/
|
*/
|
||||||
|
|||||||
@@ -129,7 +129,7 @@ template <
|
|||||||
typename DispatcherType,
|
typename DispatcherType,
|
||||||
typename TimerContextProvider = impl::SelfContextProvider,
|
typename TimerContextProvider = impl::SelfContextProvider,
|
||||||
typename ErrorHandlerType = impl::DefaultErrorHandler>
|
typename ErrorHandlerType = impl::DefaultErrorHandler>
|
||||||
class BasicExecutionContext : public ExecutionContextTag {
|
class BasicExecutionContext {
|
||||||
ContextType context_;
|
ContextType context_;
|
||||||
|
|
||||||
/** @cond */
|
/** @cond */
|
||||||
@@ -182,7 +182,7 @@ public:
|
|||||||
/**
|
/**
|
||||||
* @brief Stops the underlying thread pool.
|
* @brief Stops the underlying thread pool.
|
||||||
*/
|
*/
|
||||||
~BasicExecutionContext() override
|
~BasicExecutionContext()
|
||||||
{
|
{
|
||||||
stop();
|
stop();
|
||||||
}
|
}
|
||||||
@@ -402,20 +402,6 @@ public:
|
|||||||
{
|
{
|
||||||
context_.join();
|
context_.join();
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Get the underlying executor.
|
|
||||||
*
|
|
||||||
* Provides access to the wrapped executor for cases where the execution context
|
|
||||||
* needs to interact with components that require explicit executor access (like Channel).
|
|
||||||
*
|
|
||||||
* @return Reference to the underlying executor
|
|
||||||
*/
|
|
||||||
typename ContextType::Executor&
|
|
||||||
getExecutor()
|
|
||||||
{
|
|
||||||
return context_.getExecutor();
|
|
||||||
}
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|||||||
@@ -36,26 +36,17 @@ struct SpawnDispatchStrategy {
|
|||||||
{
|
{
|
||||||
auto op = outcome.getOperation();
|
auto op = outcome.getOperation();
|
||||||
|
|
||||||
if constexpr (SomeStoppableOutcome<OutcomeType>) {
|
util::spawn(
|
||||||
util::spawn(
|
ctx.getExecutor(),
|
||||||
ctx.getExecutor(),
|
[outcome = std::forward<OutcomeType>(outcome), fn = std::forward<FnType>(fn)](auto yield) mutable {
|
||||||
[outcome = std::forward<OutcomeType>(outcome), fn = std::forward<FnType>(fn)](auto yield) mutable {
|
if constexpr (SomeStoppableOutcome<OutcomeType>) {
|
||||||
if constexpr (SomeStoppableOutcome<OutcomeType>) {
|
auto& stopSource = outcome.getStopSource();
|
||||||
auto& stopSource = outcome.getStopSource();
|
std::invoke(std::forward<decltype(fn)>(fn), outcome, stopSource, stopSource[yield]);
|
||||||
std::invoke(std::forward<decltype(fn)>(fn), outcome, stopSource, stopSource[yield]);
|
} else {
|
||||||
} else {
|
|
||||||
std::invoke(std::forward<decltype(fn)>(fn), outcome);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
);
|
|
||||||
} else {
|
|
||||||
boost::asio::post(
|
|
||||||
ctx.getExecutor(),
|
|
||||||
[outcome = std::forward<OutcomeType>(outcome), fn = std::forward<FnType>(fn)]() mutable {
|
|
||||||
std::invoke(std::forward<decltype(fn)>(fn), outcome);
|
std::invoke(std::forward<decltype(fn)>(fn), outcome);
|
||||||
}
|
}
|
||||||
);
|
}
|
||||||
}
|
);
|
||||||
|
|
||||||
return op;
|
return op;
|
||||||
}
|
}
|
||||||
@@ -64,7 +55,7 @@ struct SpawnDispatchStrategy {
|
|||||||
static void
|
static void
|
||||||
post(ContextType& ctx, FnType&& fn)
|
post(ContextType& ctx, FnType&& fn)
|
||||||
{
|
{
|
||||||
boost::asio::post(ctx.getExecutor(), [fn = std::forward<FnType>(fn)]() mutable {
|
util::spawn(ctx.getExecutor(), [fn = std::forward<FnType>(fn)](auto) mutable {
|
||||||
std::invoke(std::forward<decltype(fn)>(fn));
|
std::invoke(std::forward<decltype(fn)>(fn));
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,5 +4,6 @@ add_library(clio_build_version)
|
|||||||
target_sources(clio_build_version PRIVATE Build.cpp)
|
target_sources(clio_build_version PRIVATE Build.cpp)
|
||||||
target_link_libraries(clio_build_version PUBLIC clio_options)
|
target_link_libraries(clio_build_version PUBLIC clio_options)
|
||||||
target_compile_definitions(
|
target_compile_definitions(
|
||||||
clio_build_version PRIVATE CLIO_VERSION="${CLIO_VERSION}" GIT_COMMIT_HASH="${GIT_COMMIT_HASH}"
|
clio_build_version PRIVATE CLIO_VERSION="${CLIO_VERSION}" GIT_COMMIT_HASH="${GIT_COMMIT_HASH}"
|
||||||
GIT_BUILD_BRANCH="${GIT_BUILD_BRANCH}" BUILD_DATE="${BUILD_DATE}")
|
GIT_BUILD_BRANCH="${GIT_BUILD_BRANCH}" BUILD_DATE="${BUILD_DATE}"
|
||||||
|
)
|
||||||
|
|||||||
@@ -1,21 +1,23 @@
|
|||||||
add_library(clio_web)
|
add_library(clio_web)
|
||||||
|
|
||||||
target_sources(clio_web
|
target_sources(
|
||||||
PRIVATE AdminVerificationStrategy.cpp
|
clio_web
|
||||||
dosguard/DOSGuard.cpp
|
PRIVATE AdminVerificationStrategy.cpp
|
||||||
dosguard/IntervalSweepHandler.cpp
|
dosguard/DOSGuard.cpp
|
||||||
dosguard/Weights.cpp
|
dosguard/IntervalSweepHandler.cpp
|
||||||
dosguard/WhitelistHandler.cpp
|
dosguard/Weights.cpp
|
||||||
ng/Connection.cpp
|
dosguard/WhitelistHandler.cpp
|
||||||
ng/impl/ErrorHandling.cpp
|
ng/Connection.cpp
|
||||||
ng/impl/ConnectionHandler.cpp
|
ng/impl/ErrorHandling.cpp
|
||||||
ng/impl/ServerSslContext.cpp
|
ng/impl/ConnectionHandler.cpp
|
||||||
ng/Request.cpp
|
ng/impl/ServerSslContext.cpp
|
||||||
ng/Response.cpp
|
ng/Request.cpp
|
||||||
ng/Server.cpp
|
ng/Response.cpp
|
||||||
ng/SubscriptionContext.cpp
|
ng/Server.cpp
|
||||||
ProxyIpResolver.cpp
|
ng/SubscriptionContext.cpp
|
||||||
Resolver.cpp
|
ProxyIpResolver.cpp
|
||||||
SubscriptionContext.cpp)
|
Resolver.cpp
|
||||||
|
SubscriptionContext.cpp
|
||||||
|
)
|
||||||
|
|
||||||
target_link_libraries(clio_web PUBLIC clio_util)
|
target_link_libraries(clio_web PUBLIC clio_util)
|
||||||
|
|||||||
@@ -1,20 +1,20 @@
|
|||||||
# Set coverage build options
|
# Set coverage build options
|
||||||
if (coverage)
|
if (coverage)
|
||||||
if (NOT tests)
|
if (NOT tests)
|
||||||
message(FATAL_ERROR "Coverage requires tests to be enabled")
|
message(FATAL_ERROR "Coverage requires tests to be enabled")
|
||||||
endif ()
|
endif ()
|
||||||
include(CodeCoverage)
|
include(CodeCoverage)
|
||||||
append_coverage_compiler_flags_to_target(clio_options INTERFACE)
|
append_coverage_compiler_flags_to_target(clio_options INTERFACE)
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
if (tests OR integration_tests)
|
if (tests OR integration_tests)
|
||||||
add_subdirectory(common)
|
add_subdirectory(common)
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
if (tests)
|
if (tests)
|
||||||
add_subdirectory(unit)
|
add_subdirectory(unit)
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
if (integration_tests)
|
if (integration_tests)
|
||||||
add_subdirectory(integration)
|
add_subdirectory(integration)
|
||||||
endif ()
|
endif ()
|
||||||
|
|||||||
@@ -1,17 +1,19 @@
|
|||||||
add_library(clio_testing_common)
|
add_library(clio_testing_common)
|
||||||
|
|
||||||
target_sources(clio_testing_common
|
target_sources(
|
||||||
PRIVATE util/AssignRandomPort.cpp
|
clio_testing_common
|
||||||
util/BinaryTestObject.cpp
|
PRIVATE util/AssignRandomPort.cpp
|
||||||
util/CallWithTimeout.cpp
|
util/BinaryTestObject.cpp
|
||||||
util/LoggerFixtures.cpp
|
util/CallWithTimeout.cpp
|
||||||
util/MockAssert.cpp
|
util/LoggerFixtures.cpp
|
||||||
util/StringUtils.cpp
|
util/MockAssert.cpp
|
||||||
util/TestHttpClient.cpp
|
util/StringUtils.cpp
|
||||||
util/TestHttpServer.cpp
|
util/TestHttpClient.cpp
|
||||||
util/TestObject.cpp
|
util/TestHttpServer.cpp
|
||||||
util/TestWebSocketClient.cpp
|
util/TestObject.cpp
|
||||||
util/TestWsServer.cpp)
|
util/TestWebSocketClient.cpp
|
||||||
|
util/TestWsServer.cpp
|
||||||
|
)
|
||||||
|
|
||||||
include(deps/gtest)
|
include(deps/gtest)
|
||||||
|
|
||||||
|
|||||||
@@ -25,7 +25,6 @@
|
|||||||
#include "util/config/ConfigDefinition.hpp"
|
#include "util/config/ConfigDefinition.hpp"
|
||||||
|
|
||||||
#include <gmock/gmock.h>
|
#include <gmock/gmock.h>
|
||||||
#include <gtest/gtest.h>
|
|
||||||
|
|
||||||
#include <memory>
|
#include <memory>
|
||||||
|
|
||||||
|
|||||||
@@ -1,40 +0,0 @@
|
|||||||
//------------------------------------------------------------------------------
|
|
||||||
/*
|
|
||||||
This file is part of clio: https://github.com/XRPLF/clio
|
|
||||||
Copyright (c) 2025, the clio developers.
|
|
||||||
|
|
||||||
Permission to use, copy, modify, and distribute this software for any
|
|
||||||
purpose with or without fee is hereby granted, provided that the above
|
|
||||||
copyright notice and this permission notice appear in all copies.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
||||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
||||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
||||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
||||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
||||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
||||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
||||||
*/
|
|
||||||
//==============================================================================
|
|
||||||
|
|
||||||
#pragma once
|
|
||||||
|
|
||||||
#include "etl/WriterState.hpp"
|
|
||||||
|
|
||||||
#include <gmock/gmock.h>
|
|
||||||
|
|
||||||
#include <memory>
|
|
||||||
|
|
||||||
struct MockWriterStateBase : public etl::WriterStateInterface {
|
|
||||||
MOCK_METHOD(bool, isReadOnly, (), (const, override));
|
|
||||||
MOCK_METHOD(bool, isWriting, (), (const, override));
|
|
||||||
MOCK_METHOD(void, startWriting, (), (override));
|
|
||||||
MOCK_METHOD(void, giveUpWriting, (), (override));
|
|
||||||
MOCK_METHOD(void, setWriterDecidingFallback, (), (override));
|
|
||||||
MOCK_METHOD(bool, isFallback, (), (const, override));
|
|
||||||
MOCK_METHOD(bool, isLoadingCache, (), (const, override));
|
|
||||||
MOCK_METHOD(std::unique_ptr<etl::WriterStateInterface>, clone, (), (const, override));
|
|
||||||
};
|
|
||||||
|
|
||||||
using MockWriterState = testing::StrictMock<MockWriterStateBase>;
|
|
||||||
using NiceMockWriterState = testing::NiceMock<MockWriterStateBase>;
|
|
||||||
@@ -1,19 +1,21 @@
|
|||||||
add_executable(clio_integration_tests)
|
add_executable(clio_integration_tests)
|
||||||
|
|
||||||
target_sources(clio_integration_tests
|
target_sources(
|
||||||
PRIVATE data/BackendFactoryTests.cpp
|
clio_integration_tests
|
||||||
data/cassandra/BackendTests.cpp
|
PRIVATE data/BackendFactoryTests.cpp
|
||||||
data/cassandra/BaseTests.cpp
|
data/cassandra/BackendTests.cpp
|
||||||
migration/cassandra/DBRawData.cpp
|
data/cassandra/BaseTests.cpp
|
||||||
migration/cassandra/CassandraMigrationManagerTests.cpp
|
migration/cassandra/DBRawData.cpp
|
||||||
migration/cassandra/ExampleTransactionsMigrator.cpp
|
migration/cassandra/CassandraMigrationManagerTests.cpp
|
||||||
migration/cassandra/ExampleObjectsMigrator.cpp
|
migration/cassandra/ExampleTransactionsMigrator.cpp
|
||||||
migration/cassandra/ExampleLedgerMigrator.cpp
|
migration/cassandra/ExampleObjectsMigrator.cpp
|
||||||
migration/cassandra/ExampleDropTableMigrator.cpp
|
migration/cassandra/ExampleLedgerMigrator.cpp
|
||||||
util/CassandraDBHelper.cpp
|
migration/cassandra/ExampleDropTableMigrator.cpp
|
||||||
# Test runner
|
util/CassandraDBHelper.cpp
|
||||||
TestGlobals.cpp
|
# Test runner
|
||||||
Main.cpp)
|
TestGlobals.cpp
|
||||||
|
Main.cpp
|
||||||
|
)
|
||||||
|
|
||||||
# Fix for dwarf5 bug on ci. IS STILL NEEDED???
|
# Fix for dwarf5 bug on ci. IS STILL NEEDED???
|
||||||
target_compile_options(clio_options INTERFACE -gdwarf-4)
|
target_compile_options(clio_options INTERFACE -gdwarf-4)
|
||||||
|
|||||||
@@ -1,223 +1,219 @@
|
|||||||
add_executable(clio_tests)
|
add_executable(clio_tests)
|
||||||
|
|
||||||
target_sources(clio_tests
|
target_sources(
|
||||||
PRIVATE # Common
|
clio_tests
|
||||||
app/CliArgsTests.cpp
|
PRIVATE # Common
|
||||||
app/StopperTests.cpp
|
app/CliArgsTests.cpp
|
||||||
app/VerifyConfigTests.cpp
|
app/StopperTests.cpp
|
||||||
app/WebHandlersTests.cpp
|
app/VerifyConfigTests.cpp
|
||||||
data/AmendmentCenterTests.cpp
|
app/WebHandlersTests.cpp
|
||||||
data/BackendCountersTests.cpp
|
data/AmendmentCenterTests.cpp
|
||||||
data/BackendInterfaceTests.cpp
|
data/BackendCountersTests.cpp
|
||||||
data/LedgerCacheTests.cpp
|
data/BackendInterfaceTests.cpp
|
||||||
data/LedgerCacheSaverTests.cpp
|
data/LedgerCacheTests.cpp
|
||||||
data/cassandra/AsyncExecutorTests.cpp
|
data/LedgerCacheSaverTests.cpp
|
||||||
data/cassandra/ExecutionStrategyTests.cpp
|
data/cassandra/AsyncExecutorTests.cpp
|
||||||
data/cassandra/LedgerHeaderCacheTests.cpp
|
data/cassandra/ExecutionStrategyTests.cpp
|
||||||
data/cassandra/RetryPolicyTests.cpp
|
data/cassandra/LedgerHeaderCacheTests.cpp
|
||||||
data/cassandra/SettingsProviderTests.cpp
|
data/cassandra/RetryPolicyTests.cpp
|
||||||
data/impl/InputFileTests.cpp
|
data/cassandra/SettingsProviderTests.cpp
|
||||||
data/impl/LedgerCacheFileTests.cpp
|
data/impl/InputFileTests.cpp
|
||||||
data/impl/OutputFileTests.cpp
|
data/impl/LedgerCacheFileTests.cpp
|
||||||
# Cluster
|
data/impl/OutputFileTests.cpp
|
||||||
cluster/BackendTests.cpp
|
# Cluster
|
||||||
cluster/ClioNodeTests.cpp
|
cluster/ClioNodeTests.cpp
|
||||||
cluster/ClusterCommunicationServiceTests.cpp
|
cluster/ClusterCommunicationServiceTests.cpp
|
||||||
cluster/MetricsTests.cpp
|
# ETL
|
||||||
cluster/RepeatedTaskTests.cpp
|
etl/AmendmentBlockHandlerTests.cpp
|
||||||
cluster/WriterDeciderTests.cpp
|
etl/CacheLoaderSettingsTests.cpp
|
||||||
# ETL
|
etl/CacheLoaderTests.cpp
|
||||||
etl/AmendmentBlockHandlerTests.cpp
|
etl/CursorFromAccountProviderTests.cpp
|
||||||
etl/CacheLoaderSettingsTests.cpp
|
etl/CursorFromDiffProviderTests.cpp
|
||||||
etl/CacheLoaderTests.cpp
|
etl/CursorFromFixDiffNumProviderTests.cpp
|
||||||
etl/CursorFromAccountProviderTests.cpp
|
etl/CorruptionDetectorTests.cpp
|
||||||
etl/CursorFromDiffProviderTests.cpp
|
etl/ETLStateTests.cpp
|
||||||
etl/CursorFromFixDiffNumProviderTests.cpp
|
etl/ETLServiceTests.cpp
|
||||||
etl/CorruptionDetectorTests.cpp
|
etl/ExtractionTests.cpp
|
||||||
etl/ETLStateTests.cpp
|
etl/ForwardingSourceTests.cpp
|
||||||
etl/ETLServiceTests.cpp
|
etl/GrpcSourceTests.cpp
|
||||||
etl/ExtractionTests.cpp
|
etl/LedgerPublisherTests.cpp
|
||||||
etl/ForwardingSourceTests.cpp
|
etl/LoadBalancerTests.cpp
|
||||||
etl/GrpcSourceTests.cpp
|
etl/LoadingTests.cpp
|
||||||
etl/LedgerPublisherTests.cpp
|
etl/MonitorTests.cpp
|
||||||
etl/LoadBalancerTests.cpp
|
etl/NetworkValidatedLedgersTests.cpp
|
||||||
etl/LoadingTests.cpp
|
etl/NFTHelpersTests.cpp
|
||||||
etl/MonitorTests.cpp
|
etl/RegistryTests.cpp
|
||||||
etl/NetworkValidatedLedgersTests.cpp
|
etl/SchedulingTests.cpp
|
||||||
etl/NFTHelpersTests.cpp
|
etl/SourceImplTests.cpp
|
||||||
etl/RegistryTests.cpp
|
etl/SubscriptionSourceTests.cpp
|
||||||
etl/SchedulingTests.cpp
|
etl/TaskManagerTests.cpp
|
||||||
etl/SourceImplTests.cpp
|
etl/ext/CoreTests.cpp
|
||||||
etl/SubscriptionSourceTests.cpp
|
etl/ext/CacheTests.cpp
|
||||||
etl/SystemStateTests.cpp
|
etl/ext/MPTTests.cpp
|
||||||
etl/TaskManagerTests.cpp
|
etl/ext/NFTTests.cpp
|
||||||
etl/WriterStateTests.cpp
|
etl/ext/SuccessorTests.cpp
|
||||||
etl/ext/CoreTests.cpp
|
# Feed
|
||||||
etl/ext/CacheTests.cpp
|
feed/BookChangesFeedTests.cpp
|
||||||
etl/ext/MPTTests.cpp
|
feed/ForwardFeedTests.cpp
|
||||||
etl/ext/NFTTests.cpp
|
feed/LedgerFeedTests.cpp
|
||||||
etl/ext/SuccessorTests.cpp
|
feed/ProposedTransactionFeedTests.cpp
|
||||||
# Feed
|
feed/SingleFeedBaseTests.cpp
|
||||||
feed/BookChangesFeedTests.cpp
|
feed/SubscriptionManagerTests.cpp
|
||||||
feed/ForwardFeedTests.cpp
|
feed/TrackableSignalTests.cpp
|
||||||
feed/LedgerFeedTests.cpp
|
feed/TransactionFeedTests.cpp
|
||||||
feed/ProposedTransactionFeedTests.cpp
|
# Logging
|
||||||
feed/SingleFeedBaseTests.cpp
|
util/log/LogServiceInitTests.cpp
|
||||||
feed/SubscriptionManagerTests.cpp
|
util/log/LoggerTests.cpp
|
||||||
feed/TrackableSignalTests.cpp
|
util/log/PrettyPathTests.cpp
|
||||||
feed/TransactionFeedTests.cpp
|
# Other
|
||||||
# Logging
|
JsonUtilTests.cpp
|
||||||
util/log/LogServiceInitTests.cpp
|
Main.cpp
|
||||||
util/log/LoggerTests.cpp
|
Playground.cpp
|
||||||
util/log/PrettyPathTests.cpp
|
ProfilerTests.cpp
|
||||||
# Other
|
# Migration
|
||||||
JsonUtilTests.cpp
|
migration/cassandra/FullTableScannerTests.cpp
|
||||||
Main.cpp
|
migration/cassandra/SpecTests.cpp
|
||||||
Playground.cpp
|
migration/MigratorRegisterTests.cpp
|
||||||
ProfilerTests.cpp
|
migration/MigratorStatusTests.cpp
|
||||||
# Migration
|
migration/MigrationInspectorFactoryTests.cpp
|
||||||
migration/cassandra/FullTableScannerTests.cpp
|
migration/MigrationInspectorBaseTests.cpp
|
||||||
migration/cassandra/SpecTests.cpp
|
migration/MigrationManagerBaseTests.cpp
|
||||||
migration/MigratorRegisterTests.cpp
|
migration/MigrationManagerFactoryTests.cpp
|
||||||
migration/MigratorStatusTests.cpp
|
migration/SpecTests.cpp
|
||||||
migration/MigrationInspectorFactoryTests.cpp
|
# RPC
|
||||||
migration/MigrationInspectorBaseTests.cpp
|
rpc/APIVersionTests.cpp
|
||||||
migration/MigrationManagerBaseTests.cpp
|
rpc/BaseTests.cpp
|
||||||
migration/MigrationManagerFactoryTests.cpp
|
rpc/CountersTests.cpp
|
||||||
migration/SpecTests.cpp
|
rpc/ErrorTests.cpp
|
||||||
# RPC
|
rpc/ForwardingProxyTests.cpp
|
||||||
rpc/APIVersionTests.cpp
|
rpc/common/CheckersTests.cpp
|
||||||
rpc/BaseTests.cpp
|
rpc/common/SpecsTests.cpp
|
||||||
rpc/CountersTests.cpp
|
rpc/common/TypesTests.cpp
|
||||||
rpc/ErrorTests.cpp
|
rpc/common/impl/HandlerProviderTests.cpp
|
||||||
rpc/ForwardingProxyTests.cpp
|
rpc/handlers/AccountChannelsTests.cpp
|
||||||
rpc/common/CheckersTests.cpp
|
rpc/handlers/AccountCurrenciesTests.cpp
|
||||||
rpc/common/SpecsTests.cpp
|
rpc/handlers/AccountInfoTests.cpp
|
||||||
rpc/common/TypesTests.cpp
|
rpc/handlers/AccountLinesTests.cpp
|
||||||
rpc/common/impl/HandlerProviderTests.cpp
|
rpc/handlers/AccountMPTokenIssuancesTests.cpp
|
||||||
rpc/handlers/AccountChannelsTests.cpp
|
rpc/handlers/AccountMPTokensTests.cpp
|
||||||
rpc/handlers/AccountCurrenciesTests.cpp
|
rpc/handlers/AccountNFTsTests.cpp
|
||||||
rpc/handlers/AccountInfoTests.cpp
|
rpc/handlers/AccountObjectsTests.cpp
|
||||||
rpc/handlers/AccountLinesTests.cpp
|
rpc/handlers/AccountOffersTests.cpp
|
||||||
rpc/handlers/AccountMPTokenIssuancesTests.cpp
|
rpc/handlers/AccountTxTests.cpp
|
||||||
rpc/handlers/AccountMPTokensTests.cpp
|
rpc/handlers/AMMInfoTests.cpp
|
||||||
rpc/handlers/AccountNFTsTests.cpp
|
rpc/handlers/AllHandlerTests.cpp
|
||||||
rpc/handlers/AccountObjectsTests.cpp
|
rpc/handlers/BookChangesTests.cpp
|
||||||
rpc/handlers/AccountOffersTests.cpp
|
rpc/handlers/BookOffersTests.cpp
|
||||||
rpc/handlers/AccountTxTests.cpp
|
rpc/handlers/CredentialHelpersTests.cpp
|
||||||
rpc/handlers/AMMInfoTests.cpp
|
rpc/handlers/DefaultProcessorTests.cpp
|
||||||
rpc/handlers/AllHandlerTests.cpp
|
rpc/handlers/DepositAuthorizedTests.cpp
|
||||||
rpc/handlers/BookChangesTests.cpp
|
rpc/handlers/FeatureTests.cpp
|
||||||
rpc/handlers/BookOffersTests.cpp
|
rpc/handlers/GatewayBalancesTests.cpp
|
||||||
rpc/handlers/CredentialHelpersTests.cpp
|
rpc/handlers/GetAggregatePriceTests.cpp
|
||||||
rpc/handlers/DefaultProcessorTests.cpp
|
rpc/handlers/LedgerDataTests.cpp
|
||||||
rpc/handlers/DepositAuthorizedTests.cpp
|
rpc/handlers/LedgerEntryTests.cpp
|
||||||
rpc/handlers/FeatureTests.cpp
|
rpc/handlers/LedgerIndexTests.cpp
rpc/handlers/GatewayBalancesTests.cpp
rpc/handlers/GetAggregatePriceTests.cpp
rpc/handlers/LedgerDataTests.cpp
rpc/handlers/LedgerEntryTests.cpp
rpc/handlers/LedgerIndexTests.cpp
rpc/handlers/LedgerRangeTests.cpp
rpc/handlers/LedgerTests.cpp
rpc/handlers/MPTHoldersTests.cpp
rpc/handlers/NFTBuyOffersTests.cpp
rpc/handlers/NFTHistoryTests.cpp
rpc/handlers/NFTInfoTests.cpp
rpc/handlers/NFTsByIssuerTest.cpp
rpc/handlers/NFTSellOffersTests.cpp
rpc/handlers/NoRippleCheckTests.cpp
rpc/handlers/PingTests.cpp
rpc/handlers/RandomTests.cpp
rpc/handlers/ServerInfoTests.cpp
rpc/handlers/SubscribeTests.cpp
rpc/handlers/TestHandlerTests.cpp
rpc/handlers/TransactionEntryTests.cpp
rpc/handlers/TxTests.cpp
rpc/handlers/UnsubscribeTests.cpp
rpc/handlers/VersionHandlerTests.cpp
rpc/handlers/VaultInfoTests.cpp
rpc/JsonBoolTests.cpp
rpc/RPCEngineTests.cpp
rpc/RPCHelpersTests.cpp
rpc/WorkQueueTests.cpp
test_data/SslCert.cpp
# Async framework
util/async/AnyExecutionContextTests.cpp
util/async/AnyOperationTests.cpp
util/async/AnyStopTokenTests.cpp
util/async/AnyStrandTests.cpp
util/async/AsyncExecutionContextTests.cpp
util/BatchingTests.cpp
util/BlockingCacheTests.cpp
util/ConceptsTests.cpp
util/CoroutineGroupTests.cpp
util/LedgerUtilsTests.cpp
util/StringHashTests.cpp
# Prometheus support
util/prometheus/BoolTests.cpp
util/prometheus/CounterTests.cpp
util/prometheus/GaugeTests.cpp
util/prometheus/HistogramTests.cpp
util/prometheus/HttpTests.cpp
util/prometheus/LabelTests.cpp
util/prometheus/MetricBuilderTests.cpp
util/prometheus/MetricsFamilyTests.cpp
util/prometheus/OStreamTests.cpp
# Requests framework
util/requests/RequestBuilderTests.cpp
util/requests/SslContextTests.cpp
util/requests/WsConnectionTests.cpp
# Common utils
util/AccountUtilsTests.cpp
util/AssertTests.cpp
util/BytesConverterTests.cpp
util/ChannelTests.cpp
util/CoroutineTest.cpp
util/MoveTrackerTests.cpp
util/ObservableValueTest.cpp
util/ObservableValueAtomicTest.cpp
util/RandomTests.cpp
util/RepeatTests.cpp
util/ResponseExpirationCacheTests.cpp
util/RetryTests.cpp
util/ScopeGuardTests.cpp
util/SignalsHandlerTests.cpp
util/ShasumTests.cpp
util/SpawnTests.cpp
util/StopHelperTests.cpp
util/TimeUtilsTests.cpp
util/TxUtilTests.cpp
util/WithTimeout.cpp
# Webserver
web/AdminVerificationTests.cpp
web/dosguard/DOSGuardTests.cpp
web/dosguard/IntervalSweepHandlerTests.cpp
web/dosguard/WeightsTests.cpp
web/dosguard/WhitelistHandlerTests.cpp
web/impl/ErrorHandlingTests.cpp
web/ng/ResponseTests.cpp
web/ng/RequestTests.cpp
web/ng/RPCServerHandlerTests.cpp
web/ng/ServerTests.cpp
web/ng/SubscriptionContextTests.cpp
web/ng/impl/ConnectionHandlerTests.cpp
web/ng/impl/ErrorHandlingTests.cpp
web/ng/impl/HttpConnectionTests.cpp
web/ng/impl/ServerSslContextTests.cpp
web/ng/impl/WsConnectionTests.cpp
web/ProxyIpResolverTests.cpp
web/RPCServerHandlerTests.cpp
web/ServerTests.cpp
web/SubscriptionContextTests.cpp
# Config
util/config/ArrayTests.cpp
util/config/ArrayViewTests.cpp
util/config/ClioConfigDefinitionTests.cpp
util/config/ConfigValueTests.cpp
util/config/ObjectViewTests.cpp
util/config/ConfigFileJsonTests.cpp
-util/config/ValueViewTests.cpp)
+util/config/ValueViewTests.cpp
+)

# See https://github.com/google/googletest/issues/3475
gtest_discover_tests(clio_tests DISCOVERY_TIMEOUT 90)
@@ -231,36 +227,37 @@ set_target_properties(clio_tests PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BIN

# Generate `coverage_report` target if coverage is enabled
if (coverage)
    if (DEFINED CODE_COVERAGE_REPORT_FORMAT)
        set(CODE_COVERAGE_FORMAT ${CODE_COVERAGE_REPORT_FORMAT})
    else ()
        set(CODE_COVERAGE_FORMAT html-details)
    endif ()

    if (DEFINED CODE_COVERAGE_TESTS_ARGS)
        set(TESTS_ADDITIONAL_ARGS ${CODE_COVERAGE_TESTS_ARGS})
        separate_arguments(TESTS_ADDITIONAL_ARGS)
    else ()
        set(TESTS_ADDITIONAL_ARGS "")
    endif ()

    set(GCOVR_ADDITIONAL_ARGS --exclude-throw-branches -s)

    setup_target_for_coverage_gcovr(
        NAME
        coverage_report
        FORMAT
        ${CODE_COVERAGE_FORMAT}
        EXECUTABLE
        clio_tests
        EXECUTABLE_ARGS
        --gtest_brief=1
        ${TESTS_ADDITIONAL_ARGS}
        EXCLUDE
        "tests"
        "src/data/cassandra"
        "src/data/CassandraBackend.hpp"
        "src/data/BackendFactory.*"
        DEPENDENCIES
-       clio_tests)
+       clio_tests
+   )
endif ()
@@ -17,7 +17,6 @@
*/
//==============================================================================

#include "app/Stopper.hpp"
-#include "cluster/Concepts.hpp"
#include "util/AsioContextTestFixture.hpp"
#include "util/MockBackend.hpp"
#include "util/MockETLService.hpp"
@@ -88,10 +87,6 @@ struct StopperMakeCallbackTest : util::prometheus::WithPrometheus, SyncAsioConte
    MOCK_METHOD(void, waitToFinish, ());
};

-struct MockClusterCommunicationService : cluster::ClusterCommunicationServiceTag {
-    MOCK_METHOD(void, stop, (), ());
-};
-
protected:
    testing::StrictMock<ServerMock> serverMock_;
    testing::StrictMock<MockLoadBalancer> loadBalancerMock_;
@@ -99,7 +94,6 @@
    testing::StrictMock<MockSubscriptionManager> subscriptionManagerMock_;
    testing::StrictMock<MockBackend> backendMock_{util::config::ClioConfigDefinition{}};
    testing::StrictMock<MockLedgerCacheSaver> cacheSaverMock_;
-    testing::StrictMock<MockClusterCommunicationService> clusterCommunicationServiceMock_;
    boost::asio::io_context ioContextToStop_;

    bool
@@ -121,7 +115,6 @@ TEST_F(StopperMakeCallbackTest, makeCallbackTest)
        subscriptionManagerMock_,
        backendMock_,
        cacheSaverMock_,
-        clusterCommunicationServiceMock_,
        ioContextToStop_
    );

@@ -129,9 +122,6 @@
    EXPECT_CALL(cacheSaverMock_, save).InSequence(s1).WillOnce([this]() { EXPECT_FALSE(isContextStopped()); });
    EXPECT_CALL(serverMock_, stop).InSequence(s1).WillOnce([this]() { EXPECT_FALSE(isContextStopped()); });
    EXPECT_CALL(loadBalancerMock_, stop).InSequence(s2).WillOnce([this]() { EXPECT_FALSE(isContextStopped()); });
-    EXPECT_CALL(clusterCommunicationServiceMock_, stop).InSequence(s1, s2).WillOnce([this]() {
-        EXPECT_FALSE(isContextStopped());
-    });
    EXPECT_CALL(etlServiceMock_, stop).InSequence(s1, s2).WillOnce([this]() { EXPECT_FALSE(isContextStopped()); });
    EXPECT_CALL(subscriptionManagerMock_, stop).InSequence(s1, s2).WillOnce([this]() {
        EXPECT_FALSE(isContextStopped());
@@ -1,347 +0,0 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2025, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include "cluster/Backend.hpp"
#include "cluster/ClioNode.hpp"
#include "data/BackendInterface.hpp"
#include "util/MockBackendTestFixture.hpp"
#include "util/MockPrometheus.hpp"
#include "util/MockWriterState.hpp"

#include <boost/asio/thread_pool.hpp>
#include <boost/json/parse.hpp>
#include <boost/json/value_to.hpp>
#include <boost/uuid/random_generator.hpp>
#include <boost/uuid/uuid.hpp>
#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include <chrono>
#include <memory>
#include <semaphore>
#include <stdexcept>
#include <string>
#include <thread>
#include <utility>
#include <vector>

using namespace cluster;

struct ClusterBackendTest : util::prometheus::WithPrometheus, MockBackendTestStrict {
    ~ClusterBackendTest() override
    {
        ctx.stop();
        ctx.join();
    }

    boost::asio::thread_pool ctx;
    std::unique_ptr<MockWriterState> writerState = std::make_unique<MockWriterState>();
    MockWriterState& writerStateRef = *writerState;
    testing::StrictMock<testing::MockFunction<void(ClioNode::CUuid, std::shared_ptr<Backend::ClusterData const>)>>
        callbackMock;
    std::binary_semaphore semaphore{0};

    class SemaphoreReleaseGuard {
        std::binary_semaphore& semaphore_;

    public:
        SemaphoreReleaseGuard(std::binary_semaphore& s) : semaphore_(s)
        {
        }
        ~SemaphoreReleaseGuard()
        {
            semaphore_.release();
        }
    };
};

TEST_F(ClusterBackendTest, SubscribeToNewState)
{
    Backend clusterBackend{
        ctx, backend_, std::move(writerState), std::chrono::milliseconds(1), std::chrono::milliseconds(1)
    };

    clusterBackend.subscribeToNewState(callbackMock.AsStdFunction());

    EXPECT_CALL(*backend_, fetchClioNodesData)
        .Times(testing::AtLeast(1))
        .WillRepeatedly(testing::Return(BackendInterface::ClioNodesDataFetchResult{}));
    EXPECT_CALL(*backend_, writeNodeMessage).Times(testing::AtLeast(1));
    EXPECT_CALL(writerStateRef, isReadOnly).Times(testing::AtLeast(1)).WillRepeatedly(testing::Return(true));
    EXPECT_CALL(callbackMock, Call)
        .Times(testing::AtLeast(1))
        .WillRepeatedly([this](ClioNode::CUuid selfId, std::shared_ptr<Backend::ClusterData const> clusterData) {
            SemaphoreReleaseGuard const guard{semaphore};
            ASSERT_TRUE(clusterData->has_value());
            EXPECT_EQ(clusterData->value().size(), 1);
            auto const& nodeData = clusterData->value().front();
            EXPECT_EQ(nodeData.uuid, selfId);
            EXPECT_EQ(nodeData.dbRole, ClioNode::DbRole::ReadOnly);
            EXPECT_LE(nodeData.updateTime, std::chrono::system_clock::now());
        });

    clusterBackend.run();
    semaphore.acquire();
}

TEST_F(ClusterBackendTest, Stop)
{
    Backend clusterBackend{
        ctx, backend_, std::move(writerState), std::chrono::milliseconds(1), std::chrono::milliseconds(1)
    };

    EXPECT_CALL(*backend_, fetchClioNodesData)
        .Times(testing::AtLeast(1))
        .WillRepeatedly(testing::Return(BackendInterface::ClioNodesDataFetchResult{}));
    EXPECT_CALL(*backend_, writeNodeMessage).Times(testing::AtLeast(1));
    EXPECT_CALL(writerStateRef, isReadOnly).Times(testing::AtLeast(1)).WillRepeatedly(testing::Return(true));

    clusterBackend.run();
    std::this_thread::sleep_for(std::chrono::milliseconds{20});
    clusterBackend.stop();

    testing::Mock::VerifyAndClearExpectations(&(*backend_));
    // Wait to make sure there is no new calls of mockDbBackend
    std::this_thread::sleep_for(std::chrono::milliseconds{20});
}

TEST_F(ClusterBackendTest, FetchClioNodesDataThrowsException)
{
    Backend clusterBackend{
        ctx, backend_, std::move(writerState), std::chrono::milliseconds(1), std::chrono::milliseconds(1)
    };

    clusterBackend.subscribeToNewState(callbackMock.AsStdFunction());

    EXPECT_CALL(*backend_, fetchClioNodesData)
        .Times(testing::AtLeast(1))
        .WillRepeatedly(testing::Throw(std::runtime_error("Database connection failed")));
    EXPECT_CALL(*backend_, writeNodeMessage).Times(testing::AtLeast(1));
    EXPECT_CALL(writerStateRef, isReadOnly).Times(testing::AtLeast(1)).WillRepeatedly(testing::Return(true));
    EXPECT_CALL(callbackMock, Call)
        .Times(testing::AtLeast(1))
        .WillRepeatedly([this](ClioNode::CUuid, std::shared_ptr<Backend::ClusterData const> clusterData) {
            SemaphoreReleaseGuard const guard{semaphore};
            ASSERT_FALSE(clusterData->has_value());
            EXPECT_EQ(clusterData->error(), "Failed to fetch Clio nodes data");
        });

    clusterBackend.run();
    semaphore.acquire();
}

TEST_F(ClusterBackendTest, FetchClioNodesDataReturnsDataWithOtherNodes)
{
    Backend clusterBackend{
        ctx, backend_, std::move(writerState), std::chrono::milliseconds(1), std::chrono::milliseconds(1)
    };

    clusterBackend.subscribeToNewState(callbackMock.AsStdFunction());

    auto const otherUuid = boost::uuids::random_generator{}();
    auto const otherNodeJson = R"JSON({
        "db_role": 3,
        "update_time": "2025-01-15T10:30:00Z"
    })JSON";

    EXPECT_CALL(*backend_, fetchClioNodesData)
        .Times(testing::AtLeast(1))
        .WillRepeatedly(
            testing::Return(
                BackendInterface::ClioNodesDataFetchResult{
                    std::vector<std::pair<boost::uuids::uuid, std::string>>{{otherUuid, otherNodeJson}}
                }
            )
        );
    EXPECT_CALL(*backend_, writeNodeMessage).Times(testing::AtLeast(1));
    EXPECT_CALL(writerStateRef, isReadOnly).Times(testing::AtLeast(1)).WillRepeatedly(testing::Return(false));
    EXPECT_CALL(writerStateRef, isFallback).Times(testing::AtLeast(1)).WillRepeatedly(testing::Return(false));
    EXPECT_CALL(writerStateRef, isLoadingCache).Times(testing::AtLeast(1)).WillRepeatedly(testing::Return(false));
    EXPECT_CALL(writerStateRef, isWriting).Times(testing::AtLeast(1)).WillRepeatedly(testing::Return(false));
    EXPECT_CALL(callbackMock, Call)
        .Times(testing::AtLeast(1))
        .WillRepeatedly([&](ClioNode::CUuid selfId, std::shared_ptr<Backend::ClusterData const> clusterData) {
            SemaphoreReleaseGuard const guard{semaphore};
            ASSERT_TRUE(clusterData->has_value()) << clusterData->error();
            EXPECT_EQ(clusterData->value().size(), 2);
            EXPECT_EQ(selfId, clusterBackend.selfId());

            bool foundSelf = false;
            bool foundOther = false;

            for (auto const& node : clusterData->value()) {
                if (*node.uuid == *selfId) {
                    foundSelf = true;
                    EXPECT_EQ(node.dbRole, ClioNode::DbRole::NotWriter);
                } else if (*node.uuid == otherUuid) {
                    foundOther = true;
                    EXPECT_EQ(node.dbRole, ClioNode::DbRole::Writer);
                }
                EXPECT_LE(node.updateTime, std::chrono::system_clock::now());
            }

            EXPECT_TRUE(foundSelf);
            EXPECT_TRUE(foundOther);
        });

    clusterBackend.run();
    semaphore.acquire();
}

TEST_F(ClusterBackendTest, FetchClioNodesDataReturnsOnlySelfData)
{
    Backend clusterBackend{
        ctx, backend_, std::move(writerState), std::chrono::milliseconds(1), std::chrono::milliseconds(1)
    };

    clusterBackend.subscribeToNewState(callbackMock.AsStdFunction());

    auto const selfNodeJson = R"JSON({
        "db_role": 1,
        "update_time": "2025-01-16T10:30:00Z"
    })JSON";

    EXPECT_CALL(*backend_, fetchClioNodesData).Times(testing::AtLeast(1)).WillRepeatedly([&]() {
        return BackendInterface::ClioNodesDataFetchResult{
            std::vector<std::pair<boost::uuids::uuid, std::string>>{{*clusterBackend.selfId(), selfNodeJson}}
        };
    });
    EXPECT_CALL(*backend_, writeNodeMessage).Times(testing::AtLeast(1));
    EXPECT_CALL(writerStateRef, isReadOnly).Times(testing::AtLeast(1)).WillRepeatedly(testing::Return(true));
    EXPECT_CALL(callbackMock, Call)
        .Times(testing::AtLeast(1))
        .WillRepeatedly([this](ClioNode::CUuid selfId, std::shared_ptr<Backend::ClusterData const> clusterData) {
            SemaphoreReleaseGuard const guard{semaphore};
            ASSERT_TRUE(clusterData->has_value());
            EXPECT_EQ(clusterData->value().size(), 1);
            auto const& nodeData = clusterData->value().front();
            EXPECT_EQ(nodeData.uuid, selfId);
            EXPECT_EQ(nodeData.dbRole, ClioNode::DbRole::ReadOnly);
            EXPECT_LE(nodeData.updateTime, std::chrono::system_clock::now());
        });

    clusterBackend.run();
    semaphore.acquire();
}

TEST_F(ClusterBackendTest, FetchClioNodesDataReturnsInvalidJson)
{
    Backend clusterBackend{
        ctx, backend_, std::move(writerState), std::chrono::milliseconds(1), std::chrono::milliseconds(1)
    };

    clusterBackend.subscribeToNewState(callbackMock.AsStdFunction());

    auto const otherUuid = boost::uuids::random_generator{}();
    auto const invalidJson = "{ invalid json";

    EXPECT_CALL(*backend_, fetchClioNodesData)
        .Times(testing::AtLeast(1))
        .WillRepeatedly(
            testing::Return(
                BackendInterface::ClioNodesDataFetchResult{
                    std::vector<std::pair<boost::uuids::uuid, std::string>>{{otherUuid, invalidJson}}
                }
            )
        );
    EXPECT_CALL(*backend_, writeNodeMessage).Times(testing::AtLeast(1));
    EXPECT_CALL(writerStateRef, isReadOnly).Times(testing::AtLeast(1)).WillRepeatedly(testing::Return(true));
    EXPECT_CALL(callbackMock, Call)
        .Times(testing::AtLeast(1))
        .WillRepeatedly([this, invalidJson](ClioNode::CUuid, std::shared_ptr<Backend::ClusterData const> clusterData) {
            SemaphoreReleaseGuard const guard{semaphore};
            ASSERT_FALSE(clusterData->has_value());
            EXPECT_THAT(clusterData->error(), testing::HasSubstr("Error parsing json from DB"));
            EXPECT_THAT(clusterData->error(), testing::HasSubstr(invalidJson));
        });

    clusterBackend.run();
    semaphore.acquire();
}

TEST_F(ClusterBackendTest, FetchClioNodesDataReturnsValidJsonButCannotConvertToClioNode)
{
    Backend clusterBackend{
        ctx, backend_, std::move(writerState), std::chrono::milliseconds(1), std::chrono::milliseconds(1)
    };

    clusterBackend.subscribeToNewState(callbackMock.AsStdFunction());

    auto const otherUuid = boost::uuids::random_generator{}();
    // Valid JSON but missing required field 'db_role'
    auto const validJsonMissingField = R"JSON({
        "update_time": "2025-01-16T10:30:00Z"
    })JSON";

    EXPECT_CALL(*backend_, fetchClioNodesData)
        .Times(testing::AtLeast(1))
        .WillRepeatedly(
            testing::Return(
                BackendInterface::ClioNodesDataFetchResult{
                    std::vector<std::pair<boost::uuids::uuid, std::string>>{{otherUuid, validJsonMissingField}}
                }
            )
        );
    EXPECT_CALL(*backend_, writeNodeMessage).Times(testing::AtLeast(1));
    EXPECT_CALL(writerStateRef, isReadOnly).Times(testing::AtLeast(1)).WillRepeatedly(testing::Return(true));
    EXPECT_CALL(callbackMock, Call)
        .Times(testing::AtLeast(1))
        .WillRepeatedly([this](ClioNode::CUuid, std::shared_ptr<Backend::ClusterData const> clusterData) {
            SemaphoreReleaseGuard const guard{semaphore};
            ASSERT_FALSE(clusterData->has_value());
            EXPECT_THAT(clusterData->error(), testing::HasSubstr("Error converting json to ClioNode"));
        });

    clusterBackend.run();
    semaphore.acquire();
}

TEST_F(ClusterBackendTest, WriteNodeMessageWritesSelfDataWithRecentTimestampAndDbRole)
{
    Backend clusterBackend{
        ctx, backend_, std::move(writerState), std::chrono::milliseconds(1), std::chrono::milliseconds(1)
    };

    auto const beforeRun = std::chrono::floor<std::chrono::seconds>(std::chrono::system_clock::now());

    EXPECT_CALL(*backend_, fetchClioNodesData)
        .Times(testing::AtLeast(1))
        .WillRepeatedly(testing::Return(BackendInterface::ClioNodesDataFetchResult{}));
    EXPECT_CALL(writerStateRef, isReadOnly).Times(testing::AtLeast(1)).WillRepeatedly(testing::Return(false));
    EXPECT_CALL(writerStateRef, isFallback).Times(testing::AtLeast(1)).WillRepeatedly(testing::Return(false));
    EXPECT_CALL(writerStateRef, isLoadingCache).Times(testing::AtLeast(1)).WillRepeatedly(testing::Return(false));
    EXPECT_CALL(writerStateRef, isWriting).Times(testing::AtLeast(1)).WillRepeatedly(testing::Return(false));
    EXPECT_CALL(*backend_, writeNodeMessage)
        .Times(testing::AtLeast(1))
        .WillRepeatedly([&](boost::uuids::uuid const& uuid, std::string message) {
            SemaphoreReleaseGuard const guard{semaphore};
            auto const afterWrite = std::chrono::system_clock::now();

            EXPECT_EQ(uuid, *clusterBackend.selfId());
            auto const json = boost::json::parse(message);
            auto const node = boost::json::try_value_to<ClioNode>(json);
            ASSERT_TRUE(node.has_value());
            EXPECT_EQ(node->dbRole, ClioNode::DbRole::NotWriter);
            EXPECT_GE(node->updateTime, beforeRun);
            EXPECT_LE(node->updateTime, afterWrite);
        });

    clusterBackend.run();
    semaphore.acquire();
}
@@ -18,8 +18,6 @@
//==============================================================================

#include "cluster/ClioNode.hpp"
-#include "util/MockWriterState.hpp"
-#include "util/NameGenerator.hpp"
#include "util/TimeUtils.hpp"

#include <boost/json/object.hpp>
@@ -28,11 +26,9 @@
#include <boost/json/value_to.hpp>
#include <boost/uuid/random_generator.hpp>
#include <boost/uuid/uuid.hpp>
-#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include <chrono>
-#include <cstdint>
#include <ctime>
#include <memory>
#include <stdexcept>
@@ -48,44 +44,44 @@ struct ClioNodeTest : testing::Test {

TEST_F(ClioNodeTest, Serialization)
{
+    // Create a ClioNode with test data
    ClioNode const node{
-        .uuid = std::make_shared<boost::uuids::uuid>(boost::uuids::random_generator()()),
-        .updateTime = updateTime,
-        .dbRole = ClioNode::DbRole::Writer
+        .uuid = std::make_shared<boost::uuids::uuid>(boost::uuids::random_generator()()), .updateTime = updateTime
    };

+    // Serialize to JSON
    boost::json::value jsonValue;
    EXPECT_NO_THROW(boost::json::value_from(node, jsonValue));

+    // Verify JSON structure
    ASSERT_TRUE(jsonValue.is_object()) << jsonValue;
    auto const& obj = jsonValue.as_object();

+    // Check update_time exists and is a string
    EXPECT_TRUE(obj.contains("update_time"));
    EXPECT_TRUE(obj.at("update_time").is_string());
-
-    EXPECT_TRUE(obj.contains("db_role"));
-    EXPECT_TRUE(obj.at("db_role").is_number());
-    EXPECT_EQ(obj.at("db_role").as_int64(), static_cast<int64_t>(node.dbRole));
}

TEST_F(ClioNodeTest, Deserialization)
{
-    boost::json::value const jsonValue = {{"update_time", updateTimeStr}, {"db_role", 1}};
+    boost::json::value const jsonValue = {{"update_time", updateTimeStr}};

-    ClioNode node{
-        .uuid = std::make_shared<boost::uuids::uuid>(), .updateTime = {}, .dbRole = ClioNode::DbRole::ReadOnly
-    };
-    ASSERT_NO_THROW(node = boost::json::value_to<ClioNode>(jsonValue));
+    // Deserialize to ClioNode
+    ClioNode node{.uuid = std::make_shared<boost::uuids::uuid>(), .updateTime = {}};
+    EXPECT_NO_THROW(node = boost::json::value_to<ClioNode>(jsonValue));

+    // Verify deserialized data
    EXPECT_NE(node.uuid, nullptr);
    EXPECT_EQ(*node.uuid, boost::uuids::uuid{});
    EXPECT_EQ(node.updateTime, updateTime);
-    EXPECT_EQ(node.dbRole, ClioNode::DbRole::LoadingCache);
}

TEST_F(ClioNodeTest, DeserializationInvalidTime)
{
+    // Prepare an invalid time format
    boost::json::value const jsonValue{"update_time", "invalid_format"};

+    // Expect an exception during deserialization
    EXPECT_THROW(boost::json::value_to<ClioNode>(jsonValue), std::runtime_error);
}

@@ -97,145 +93,3 @@ TEST_F(ClioNodeTest, DeserializationMissingTime)
    // Expect an exception
    EXPECT_THROW(boost::json::value_to<ClioNode>(jsonValue), std::runtime_error);
}
-
-struct ClioNodeDbRoleTestBundle {
-    std::string testName;
-    ClioNode::DbRole role;
-};
-
-struct ClioNodeDbRoleTest : ClioNodeTest, testing::WithParamInterface<ClioNodeDbRoleTestBundle> {};
-
-INSTANTIATE_TEST_SUITE_P(
-    AllDbRoles,
-    ClioNodeDbRoleTest,
-    testing::Values(
-        ClioNodeDbRoleTestBundle{.testName = "ReadOnly", .role = ClioNode::DbRole::ReadOnly},
-        ClioNodeDbRoleTestBundle{.testName = "LoadingCache", .role = ClioNode::DbRole::LoadingCache},
-        ClioNodeDbRoleTestBundle{.testName = "NotWriter", .role = ClioNode::DbRole::NotWriter},
-        ClioNodeDbRoleTestBundle{.testName = "Writer", .role = ClioNode::DbRole::Writer},
-        ClioNodeDbRoleTestBundle{.testName = "Fallback", .role = ClioNode::DbRole::Fallback}
-    ),
-    tests::util::kNAME_GENERATOR
-);
-
-TEST_P(ClioNodeDbRoleTest, Serialization)
-{
-    auto const param = GetParam();
-    ClioNode const node{
-        .uuid = std::make_shared<boost::uuids::uuid>(boost::uuids::random_generator()()),
-        .updateTime = updateTime,
-        .dbRole = param.role
-    };
-    auto const jsonValue = boost::json::value_from(node);
-    EXPECT_EQ(jsonValue.as_object().at("db_role").as_int64(), static_cast<int64_t>(param.role));
-}
-
-TEST_P(ClioNodeDbRoleTest, Deserialization)
-{
-    auto const param = GetParam();
-    boost::json::value const jsonValue = {
-        {"update_time", updateTimeStr}, {"db_role", static_cast<int64_t>(param.role)}
-    };
-    auto const node = boost::json::value_to<ClioNode>(jsonValue);
-    EXPECT_EQ(node.dbRole, param.role);
-}
-
-TEST_F(ClioNodeDbRoleTest, DeserializationInvalidDbRole)
-{
-    boost::json::value const jsonValue = {{"update_time", updateTimeStr}, {"db_role", 10}};
-    EXPECT_THROW(boost::json::value_to<ClioNode>(jsonValue), std::runtime_error);
-}
-
-TEST_F(ClioNodeDbRoleTest, DeserializationMissingDbRole)
-{
-    boost::json::value const jsonValue = {{"update_time", updateTimeStr}};
-    EXPECT_THROW(boost::json::value_to<ClioNode>(jsonValue), std::runtime_error);
-}
-
-struct ClioNodeFromTestBundle {
-    std::string testName;
-    bool readOnly;
-    bool fallback;
-    bool loadingCache;
-    bool writing;
-    ClioNode::DbRole expectedRole;
-};
-
-struct ClioNodeFromTest : ClioNodeTest, testing::WithParamInterface<ClioNodeFromTestBundle> {
-    std::shared_ptr<boost::uuids::uuid> uuid = std::make_shared<boost::uuids::uuid>(boost::uuids::random_generator()());
-
-    MockWriterState writerState;
-};
-
-INSTANTIATE_TEST_SUITE_P(
-    AllWriterStates,
-    ClioNodeFromTest,
-    testing::Values(
-        ClioNodeFromTestBundle{
-            .testName = "ReadOnly",
-            .readOnly = true,
-            .fallback = false,
-            .loadingCache = false,
-            .writing = false,
-            .expectedRole = ClioNode::DbRole::ReadOnly
-        },
-        ClioNodeFromTestBundle{
-            .testName = "Fallback",
-            .readOnly = false,
-            .fallback = true,
-            .loadingCache = false,
-            .writing = false,
-            .expectedRole = ClioNode::DbRole::Fallback
-        },
-        ClioNodeFromTestBundle{
-            .testName = "LoadingCache",
-            .readOnly = false,
-            .fallback = false,
-            .loadingCache = true,
-            .writing = false,
-            .expectedRole = ClioNode::DbRole::LoadingCache
-        },
-        ClioNodeFromTestBundle{
-            .testName = "NotWriterNotReadOnly",
-            .readOnly = false,
-            .fallback = false,
-            .loadingCache = false,
-            .writing = false,
-            .expectedRole = ClioNode::DbRole::NotWriter
-        },
-        ClioNodeFromTestBundle{
-            .testName = "Writer",
-            .readOnly = false,
-            .fallback = false,
-            .loadingCache = false,
-            .writing = true,
-            .expectedRole = ClioNode::DbRole::Writer
-        }
-    ),
-    tests::util::kNAME_GENERATOR
-);
-
-TEST_P(ClioNodeFromTest, FromWriterState)
-{
-    auto const& param = GetParam();
-
-    EXPECT_CALL(writerState, isReadOnly()).WillOnce(testing::Return(param.readOnly));
-    if (not param.readOnly) {
-        EXPECT_CALL(writerState, isFallback()).WillOnce(testing::Return(param.fallback));
-        if (not param.fallback) {
-            EXPECT_CALL(writerState, isLoadingCache()).WillOnce(testing::Return(param.loadingCache));
-            if (not param.loadingCache) {
-                EXPECT_CALL(writerState, isWriting()).WillOnce(testing::Return(param.writing));
-            }
-        }
-    }
-
-    auto const beforeTime = std::chrono::system_clock::now();
-    auto const node = ClioNode::from(uuid, writerState);
-    auto const afterTime = std::chrono::system_clock::now();
-
-    EXPECT_EQ(node.uuid, uuid);
-    EXPECT_EQ(node.dbRole, param.expectedRole);
-    EXPECT_GE(node.updateTime, beforeTime);
-    EXPECT_LE(node.updateTime, afterTime);
-}
@@ -22,197 +22,207 @@
#include "data/BackendInterface.hpp"
#include "util/MockBackendTestFixture.hpp"
#include "util/MockPrometheus.hpp"
-#include "util/MockWriterState.hpp"
+#include "util/TimeUtils.hpp"
+#include "util/prometheus/Bool.hpp"
+#include "util/prometheus/Gauge.hpp"
#include "util/prometheus/Prometheus.hpp"

-#include <boost/json/object.hpp>
+#include <boost/json/parse.hpp>
#include <boost/json/serialize.hpp>
+#include <boost/json/string.hpp>
+#include <boost/json/value.hpp>
#include <boost/json/value_from.hpp>
+#include <boost/uuid/random_generator.hpp>
#include <boost/uuid/uuid.hpp>
+#include <boost/uuid/uuid_io.hpp>
#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include <algorithm>
-#include <atomic>
#include <chrono>
-#include <cstdint>
+#include <condition_variable>
#include <memory>
-#include <semaphore>
+#include <mutex>
#include <string>
-#include <thread>
#include <utility>
#include <vector>

using namespace cluster;

-struct ClusterCommunicationServiceTest : util::prometheus::WithPrometheus, MockBackendTest {
-    std::unique_ptr<NiceMockWriterState> writerState = std::make_unique<NiceMockWriterState>();
-    NiceMockWriterState& writerStateRef = *writerState;
-
-    static constexpr std::chrono::milliseconds kSHORT_INTERVAL{1};
-
-    static boost::uuids::uuid
-    makeUuid(uint8_t value)
+namespace {
+std::vector<ClioNode> const kOTHER_NODES_DATA = {
+    ClioNode{
+        .uuid = std::make_shared<boost::uuids::uuid>(boost::uuids::random_generator()()),
+        .updateTime = util::systemTpFromUtcStr("2015-05-15T12:00:00Z", ClioNode::kTIME_FORMAT).value()
+    },
+    ClioNode{
+        .uuid = std::make_shared<boost::uuids::uuid>(boost::uuids::random_generator()()),
+        .updateTime = util::systemTpFromUtcStr("2015-05-15T12:00:01Z", ClioNode::kTIME_FORMAT).value()
+    },
+};
+}  // namespace
+
+struct ClusterCommunicationServiceTest : util::prometheus::WithPrometheus, MockBackendTestStrict {
+    ClusterCommunicationService clusterCommunicationService{
+        backend_,
+        std::chrono::milliseconds{5},
+        std::chrono::milliseconds{9}
+    };
+
+    util::prometheus::GaugeInt& nodesInClusterMetric = PrometheusService::gaugeInt("cluster_nodes_total_number", {});
+    util::prometheus::Bool isHealthyMetric = PrometheusService::boolMetric("cluster_communication_is_healthy", {});
+
+    std::mutex mtx;
+    std::condition_variable cv;
+
+    void
+    notify()
    {
-        boost::uuids::uuid uuid{};
-        std::ranges::fill(uuid, value);
-        return uuid;
+        std::unique_lock const lock{mtx};
+        cv.notify_one();
    }

-    static ClioNode
-    makeNode(boost::uuids::uuid const& uuid, ClioNode::DbRole role)
+    void
+    wait()
    {
-        return ClioNode{
-            .uuid = std::make_shared<boost::uuids::uuid>(uuid),
-            .updateTime = std::chrono::system_clock::now(),
-            .dbRole = role
-        };
-    }
-
-    static std::string
-    nodeToJson(ClioNode const& node)
-    {
-        boost::json::value const v = boost::json::value_from(node);
-        return boost::json::serialize(v);
-    }
-
-    ClusterCommunicationServiceTest()
-    {
-        ON_CALL(writerStateRef, clone()).WillByDefault(testing::Invoke([]() {
-            auto state = std::make_unique<NiceMockWriterState>();
-            ON_CALL(*state, isReadOnly()).WillByDefault(testing::Return(false));
-            ON_CALL(*state, isWriting()).WillByDefault(testing::Return(true));
-            return state;
-        }));
-        ON_CALL(writerStateRef, isReadOnly()).WillByDefault(testing::Return(false));
-        ON_CALL(writerStateRef, isWriting()).WillByDefault(testing::Return(true));
-    }
-
-    static bool
-    waitForSignal(std::binary_semaphore& sem, std::chrono::milliseconds timeout = std::chrono::milliseconds{1000})
-    {
-        return sem.try_acquire_for(timeout);
+        std::unique_lock lock{mtx};
+        cv.wait_until(lock, std::chrono::steady_clock::now() + std::chrono::milliseconds{100});
    }
};

-TEST_F(ClusterCommunicationServiceTest, BackendReadsAndWritesData)
+TEST_F(ClusterCommunicationServiceTest, Write)
{
-    auto const otherUuid = makeUuid(0x02);
-    std::binary_semaphore fetchSemaphore{0};
-    std::binary_semaphore writeSemaphore{0};
-
-    BackendInterface::ClioNodesDataFetchResult fetchResult{std::vector<std::pair<boost::uuids::uuid, std::string>>{
-        {otherUuid, nodeToJson(makeNode(otherUuid, ClioNode::DbRole::Writer))}
-    }};
-
-    ON_CALL(*backend_, fetchClioNodesData).WillByDefault(testing::Invoke([&](auto) {
-        fetchSemaphore.release();
-        return fetchResult;
-    }));
-
-    ON_CALL(*backend_, writeNodeMessage).WillByDefault(testing::Invoke([&](auto, auto) { writeSemaphore.release(); }));
-
-    ClusterCommunicationService service{backend_, std::move(writerState), kSHORT_INTERVAL, kSHORT_INTERVAL};
-
-    service.run();
-
-    EXPECT_TRUE(waitForSignal(fetchSemaphore));
-    EXPECT_TRUE(waitForSignal(writeSemaphore));
-
-    service.stop();
+    auto const selfUuid = *clusterCommunicationService.selfUuid();
+
+    auto const nowStr = util::systemTpToUtcStr(std::chrono::system_clock::now(), ClioNode::kTIME_FORMAT);
+    auto const nowStrPrefix = nowStr.substr(0, nowStr.size() - 3);
+
+    EXPECT_CALL(*backend_, writeNodeMessage(selfUuid, testing::_)).WillOnce([&](auto&&, std::string const& jsonStr) {
+        auto const jv = boost::json::parse(jsonStr);
+        ASSERT_TRUE(jv.is_object());
+        auto const& obj = jv.as_object();
+        ASSERT_TRUE(obj.contains("update_time"));
+        ASSERT_TRUE(obj.at("update_time").is_string());
+        EXPECT_THAT(std::string{obj.at("update_time").as_string()}, testing::StartsWith(nowStrPrefix));
+
+        notify();
+    });
+
+    clusterCommunicationService.run();
+    wait();
+    // destructor of clusterCommunicationService calls .stop()
}

-TEST_F(ClusterCommunicationServiceTest, MetricsGetsNewStateFromBackend)
+TEST_F(ClusterCommunicationServiceTest, Read_FetchFailed)
{
-    auto const otherUuid = makeUuid(0x02);
-    std::binary_semaphore writerActionSemaphore{0};
-
-    BackendInterface::ClioNodesDataFetchResult fetchResult{std::vector<std::pair<boost::uuids::uuid, std::string>>{
-        {otherUuid, nodeToJson(makeNode(otherUuid, ClioNode::DbRole::Writer))}
-    }};
-
-    ON_CALL(*backend_, fetchClioNodesData).WillByDefault(testing::Invoke([&](auto) { return fetchResult; }));
-
-    ON_CALL(writerStateRef, clone()).WillByDefault(testing::Invoke([&]() mutable {
-        auto state = std::make_unique<NiceMockWriterState>();
-        ON_CALL(*state, startWriting()).WillByDefault(testing::Invoke([&]() { writerActionSemaphore.release(); }));
-        ON_CALL(*state, giveUpWriting()).WillByDefault(testing::Invoke([&]() { writerActionSemaphore.release(); }));
-        return state;
-    }));
-
-    auto& nodesInClusterMetric = PrometheusService::gaugeInt("cluster_nodes_total_number", {});
-    auto isHealthyMetric = PrometheusService::boolMetric("cluster_communication_is_healthy", {});
-
-    ClusterCommunicationService service{backend_, std::move(writerState), kSHORT_INTERVAL, kSHORT_INTERVAL};
-
-    service.run();
-
-    // WriterDecider is called after metrics are updated so we could use it as a signal to stop
-    EXPECT_TRUE(waitForSignal(writerActionSemaphore));
-
-    service.stop();
-
-    EXPECT_EQ(nodesInClusterMetric.value(), 2);
-    EXPECT_TRUE(static_cast<bool>(isHealthyMetric));
+    EXPECT_TRUE(isHealthyMetric);
+    EXPECT_CALL(*backend_, writeNodeMessage).Times(2).WillOnce([](auto&&, auto&&) {}).WillOnce([this](auto&&, auto&&) {
+        notify();
+    });
+    EXPECT_CALL(*backend_, fetchClioNodesData).WillRepeatedly([](auto&&) { return std::unexpected{"Failed"}; });
+
+    clusterCommunicationService.run();
+    wait();
+    // call .stop() manually so that workers exit before expectations are called more times than we want
+    clusterCommunicationService.stop();
+
+    EXPECT_FALSE(isHealthyMetric);
}

-TEST_F(ClusterCommunicationServiceTest, WriterDeciderCallsWriterStateMethodsAccordingly)
+TEST_F(ClusterCommunicationServiceTest, Read_FetchThrew)
{
-    auto const smallerUuid = makeUuid(0x00);
-    std::binary_semaphore fetchSemaphore{0};
-    std::binary_semaphore writerActionSemaphore{0};
-
-    BackendInterface::ClioNodesDataFetchResult fetchResult{std::vector<std::pair<boost::uuids::uuid, std::string>>{
-        {smallerUuid, nodeToJson(makeNode(smallerUuid, ClioNode::DbRole::Writer))}
-    }};
-
-    ON_CALL(*backend_, fetchClioNodesData).WillByDefault(testing::Invoke([&](auto) {
-        fetchSemaphore.release();
-        return fetchResult;
-    }));
-
-    ON_CALL(*backend_, writeNodeMessage).WillByDefault(testing::Return());
-
-    ON_CALL(writerStateRef, clone()).WillByDefault(testing::Invoke([&]() mutable {
-        auto state = std::make_unique<NiceMockWriterState>();
-        ON_CALL(*state, startWriting()).WillByDefault(testing::Invoke([&]() { writerActionSemaphore.release(); }));
-        ON_CALL(*state, giveUpWriting()).WillByDefault(testing::Invoke([&]() { writerActionSemaphore.release(); }));
-        return state;
-    }));
-
-    ClusterCommunicationService service{backend_, std::move(writerState), kSHORT_INTERVAL, kSHORT_INTERVAL};
-
-    service.run();
-
-    EXPECT_TRUE(waitForSignal(fetchSemaphore));
-    EXPECT_TRUE(waitForSignal(writerActionSemaphore));
-
-    service.stop();
+    EXPECT_TRUE(isHealthyMetric);
+    EXPECT_CALL(*backend_, writeNodeMessage).Times(2).WillOnce([](auto&&, auto&&) {}).WillOnce([this](auto&&, auto&&) {
+        notify();
+    });
+    EXPECT_CALL(*backend_, fetchClioNodesData).WillRepeatedly(testing::Throw(data::DatabaseTimeout{}));
+
+    clusterCommunicationService.run();
+    wait();
+    clusterCommunicationService.stop();
+
+    EXPECT_FALSE(isHealthyMetric);
+    EXPECT_FALSE(clusterCommunicationService.clusterData().has_value());
}

-TEST_F(ClusterCommunicationServiceTest, StopHaltsBackendOperations)
+TEST_F(ClusterCommunicationServiceTest, Read_GotInvalidJson)
{
-    std::atomic<int> backendOperationsCount{0};
-    std::binary_semaphore fetchSemaphore{0};
-
-    BackendInterface::ClioNodesDataFetchResult fetchResult{std::vector<std::pair<boost::uuids::uuid, std::string>>{}};
-
-    ON_CALL(*backend_, fetchClioNodesData).WillByDefault(testing::Invoke([&](auto) {
-        backendOperationsCount++;
-        fetchSemaphore.release();
-        return fetchResult;
-    }));
-    ON_CALL(*backend_, writeNodeMessage).WillByDefault(testing::Invoke([&](auto&&, auto&&) {
-        backendOperationsCount++;
-    }));
-
-    ClusterCommunicationService service{backend_, std::move(writerState), kSHORT_INTERVAL, kSHORT_INTERVAL};
-
-    service.run();
-    EXPECT_TRUE(waitForSignal(fetchSemaphore));
-    service.stop();
-
-    auto const countAfterStop = backendOperationsCount.load();
-    std::this_thread::sleep_for(std::chrono::milliseconds{50});
-    EXPECT_EQ(backendOperationsCount.load(), countAfterStop);
+    EXPECT_TRUE(isHealthyMetric);
+    EXPECT_CALL(*backend_, writeNodeMessage).Times(2).WillOnce([](auto&&, auto&&) {}).WillOnce([this](auto&&, auto&&) {
+        notify();
+    });
+    EXPECT_CALL(*backend_, fetchClioNodesData).WillRepeatedly([](auto&&) {
+        return std::vector<std::pair<boost::uuids::uuid, std::string>>{
+            {boost::uuids::random_generator()(), "invalid json"}
+        };
+    });
+
+    clusterCommunicationService.run();
+    wait();
+    clusterCommunicationService.stop();
+
+    EXPECT_FALSE(isHealthyMetric);
+    EXPECT_FALSE(clusterCommunicationService.clusterData().has_value());
+}
+
+TEST_F(ClusterCommunicationServiceTest, Read_GotInvalidNodeData)
+{
+    EXPECT_TRUE(isHealthyMetric);
+    EXPECT_CALL(*backend_, writeNodeMessage).Times(2).WillOnce([](auto&&, auto&&) {}).WillOnce([this](auto&&, auto&&) {
+        notify();
+    });
+    EXPECT_CALL(*backend_, fetchClioNodesData).WillRepeatedly([](auto&&) {
+        return std::vector<std::pair<boost::uuids::uuid, std::string>>{{boost::uuids::random_generator()(), "{}"}};
+    });
+
+    clusterCommunicationService.run();
+    wait();
+    clusterCommunicationService.stop();
+
+    EXPECT_FALSE(isHealthyMetric);
+    EXPECT_FALSE(clusterCommunicationService.clusterData().has_value());
+}
+
+TEST_F(ClusterCommunicationServiceTest, Read_Success)
+{
+    EXPECT_TRUE(isHealthyMetric);
+    EXPECT_EQ(nodesInClusterMetric.value(), 1);
+
+    EXPECT_CALL(*backend_, writeNodeMessage).Times(2).WillOnce([](auto&&, auto&&) {}).WillOnce([this](auto&&, auto&&) {
+        auto const clusterData = clusterCommunicationService.clusterData();
+        ASSERT_TRUE(clusterData.has_value());
+        ASSERT_EQ(clusterData->size(), kOTHER_NODES_DATA.size() + 1);
+        for (auto const& node : kOTHER_NODES_DATA) {
+            auto const it =
+                std::ranges::find_if(*clusterData, [&](ClioNode const& n) { return *(n.uuid) == *(node.uuid); });
+            EXPECT_NE(it, clusterData->cend()) << boost::uuids::to_string(*node.uuid);
+        }
+        auto const selfUuid = clusterCommunicationService.selfUuid();
+        auto const it =
+            std::ranges::find_if(*clusterData, [&selfUuid](ClioNode const& node) { return node.uuid == selfUuid; });
+        EXPECT_NE(it, clusterData->end());
+
+        notify();
+    });
+
+    EXPECT_CALL(*backend_, fetchClioNodesData).WillRepeatedly([this](auto&&) {
+        auto const selfUuid = clusterCommunicationService.selfUuid();
+        std::vector<std::pair<boost::uuids::uuid, std::string>> result = {
+            {*selfUuid, R"JSON({"update_time": "2015-05-15:12:00:00"})JSON"},
+        };
+
+        for (auto const& node : kOTHER_NODES_DATA) {
+            boost::json::value jsonValue;
+            boost::json::value_from(node, jsonValue);
+            result.emplace_back(*node.uuid, boost::json::serialize(jsonValue));
+        }
+        return result;
+    });
+
+    clusterCommunicationService.run();
+    wait();
+    clusterCommunicationService.stop();
+
+    EXPECT_TRUE(isHealthyMetric);
+    EXPECT_EQ(nodesInClusterMetric.value(), 3);
}
@@ -1,189 +0,0 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2025, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include "cluster/Backend.hpp"
#include "cluster/ClioNode.hpp"
#include "cluster/Metrics.hpp"
#include "util/MockPrometheus.hpp"
#include "util/prometheus/Gauge.hpp"

#include <boost/uuid/random_generator.hpp>
#include <boost/uuid/uuid.hpp>
#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include <chrono>
#include <expected>
#include <memory>
#include <string>
#include <vector>

using namespace cluster;
using namespace util::prometheus;
using namespace testing;

struct MetricsTest : WithMockPrometheus {
    std::shared_ptr<boost::uuids::uuid> uuid1 =
        std::make_shared<boost::uuids::uuid>(boost::uuids::random_generator()());
    std::shared_ptr<boost::uuids::uuid> uuid2 =
        std::make_shared<boost::uuids::uuid>(boost::uuids::random_generator()());
    std::shared_ptr<boost::uuids::uuid> uuid3 =
        std::make_shared<boost::uuids::uuid>(boost::uuids::random_generator()());
};

TEST_F(MetricsTest, InitializesMetricsOnConstruction)
{
    auto& nodesInClusterMock = makeMock<GaugeInt>("cluster_nodes_total_number", "");
    auto& isHealthyMock = makeMock<GaugeInt>("cluster_communication_is_healthy", "");

    EXPECT_CALL(nodesInClusterMock, set(1));
    EXPECT_CALL(isHealthyMock, set(1));

    Metrics const metrics;
}

TEST_F(MetricsTest, OnNewStateWithValidClusterData)
{
    auto& nodesInClusterMock = makeMock<GaugeInt>("cluster_nodes_total_number", "");
    auto& isHealthyMock = makeMock<GaugeInt>("cluster_communication_is_healthy", "");

    EXPECT_CALL(nodesInClusterMock, set(1));
    EXPECT_CALL(isHealthyMock, set(1));

    Metrics metrics;

    ClioNode const node1{
        .uuid = uuid1, .updateTime = std::chrono::system_clock::now(), .dbRole = ClioNode::DbRole::Writer
    };
    ClioNode const node2{
        .uuid = uuid2, .updateTime = std::chrono::system_clock::now(), .dbRole = ClioNode::DbRole::ReadOnly
    };
    ClioNode const node3{
        .uuid = uuid3, .updateTime = std::chrono::system_clock::now(), .dbRole = ClioNode::DbRole::NotWriter
    };

    std::vector<ClioNode> const nodes = {node1, node2, node3};
    Backend::ClusterData const clusterData = std::expected<std::vector<ClioNode>, std::string>(nodes);
    auto sharedClusterData = std::make_shared<Backend::ClusterData>(clusterData);

    EXPECT_CALL(isHealthyMock, set(1));
    EXPECT_CALL(nodesInClusterMock, set(3));

    metrics.onNewState(uuid1, sharedClusterData);
}

TEST_F(MetricsTest, OnNewStateWithEmptyClusterData)
{
    auto& nodesInClusterMock = makeMock<GaugeInt>("cluster_nodes_total_number", "");
    auto& isHealthyMock = makeMock<GaugeInt>("cluster_communication_is_healthy", "");

    EXPECT_CALL(nodesInClusterMock, set(1));
    EXPECT_CALL(isHealthyMock, set(1));

    Metrics metrics;

    std::vector<ClioNode> const nodes = {};
    Backend::ClusterData const clusterData = std::expected<std::vector<ClioNode>, std::string>(nodes);
    auto sharedClusterData = std::make_shared<Backend::ClusterData>(clusterData);

    EXPECT_CALL(isHealthyMock, set(1));
    EXPECT_CALL(nodesInClusterMock, set(0));

    metrics.onNewState(uuid1, sharedClusterData);
}

TEST_F(MetricsTest, OnNewStateWithFailedClusterData)
{
    auto& nodesInClusterMock = makeMock<GaugeInt>("cluster_nodes_total_number", "");
    auto& isHealthyMock = makeMock<GaugeInt>("cluster_communication_is_healthy", "");

    EXPECT_CALL(nodesInClusterMock, set(1));
    EXPECT_CALL(isHealthyMock, set(1));

    Metrics metrics;

    Backend::ClusterData const clusterData =
        std::expected<std::vector<ClioNode>, std::string>(std::unexpected("Connection failed"));
    auto sharedClusterData = std::make_shared<Backend::ClusterData>(clusterData);

    EXPECT_CALL(isHealthyMock, set(0));
    EXPECT_CALL(nodesInClusterMock, set(1));

    metrics.onNewState(uuid1, sharedClusterData);
}

TEST_F(MetricsTest, OnNewStateWithSingleNode)
{
    auto& nodesInClusterMock = makeMock<GaugeInt>("cluster_nodes_total_number", "");
    auto& isHealthyMock = makeMock<GaugeInt>("cluster_communication_is_healthy", "");

    EXPECT_CALL(nodesInClusterMock, set(1));
    EXPECT_CALL(isHealthyMock, set(1));

    Metrics metrics;

    ClioNode const node1{
        .uuid = uuid1, .updateTime = std::chrono::system_clock::now(), .dbRole = ClioNode::DbRole::Writer
    };

    std::vector<ClioNode> const nodes = {node1};
    Backend::ClusterData const clusterData = std::expected<std::vector<ClioNode>, std::string>(nodes);
    auto sharedClusterData = std::make_shared<Backend::ClusterData>(clusterData);

    EXPECT_CALL(isHealthyMock, set(1));
    EXPECT_CALL(nodesInClusterMock, set(1));

    metrics.onNewState(uuid1, sharedClusterData);
}

TEST_F(MetricsTest, OnNewStateRecoveryFromFailure)
{
    auto& nodesInClusterMock = makeMock<GaugeInt>("cluster_nodes_total_number", "");
    auto& isHealthyMock = makeMock<GaugeInt>("cluster_communication_is_healthy", "");

    EXPECT_CALL(nodesInClusterMock, set(1));
    EXPECT_CALL(isHealthyMock, set(1));

    Metrics metrics;

    Backend::ClusterData const clusterData1 =
        std::expected<std::vector<ClioNode>, std::string>(std::unexpected("Connection timeout"));
    auto sharedClusterData1 = std::make_shared<Backend::ClusterData>(clusterData1);

    EXPECT_CALL(isHealthyMock, set(0));
    EXPECT_CALL(nodesInClusterMock, set(1));

    metrics.onNewState(uuid1, sharedClusterData1);

    ClioNode const node1{
        .uuid = uuid1, .updateTime = std::chrono::system_clock::now(), .dbRole = ClioNode::DbRole::Writer
    };
    ClioNode const node2{
        .uuid = uuid2, .updateTime = std::chrono::system_clock::now(), .dbRole = ClioNode::DbRole::ReadOnly
|
|
||||||
};
|
|
||||||
|
|
||||||
std::vector<ClioNode> const nodes = {node1, node2};
|
|
||||||
Backend::ClusterData const clusterData2 = std::expected<std::vector<ClioNode>, std::string>(nodes);
|
|
||||||
auto sharedClusterData2 = std::make_shared<Backend::ClusterData>(clusterData2);
|
|
||||||
|
|
||||||
EXPECT_CALL(isHealthyMock, set(1));
|
|
||||||
EXPECT_CALL(nodesInClusterMock, set(2));
|
|
||||||
|
|
||||||
metrics.onNewState(uuid2, sharedClusterData2);
|
|
||||||
}
|
|
||||||
@@ -1,223 +0,0 @@
|
|||||||
//------------------------------------------------------------------------------
|
|
||||||
/*
|
|
||||||
This file is part of clio: https://github.com/XRPLF/clio
|
|
||||||
Copyright (c) 2025, the clio developers.
|
|
||||||
|
|
||||||
Permission to use, copy, modify, and distribute this software for any
|
|
||||||
purpose with or without fee is hereby granted, provided that the above
|
|
||||||
copyright notice and this permission notice appear in all copies.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
||||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
||||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
||||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
||||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
||||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
||||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
||||||
*/
|
|
||||||
//==============================================================================
|
|
||||||
|
|
||||||
#include "cluster/impl/RepeatedTask.hpp"
|
|
||||||
#include "util/AsioContextTestFixture.hpp"
|
|
||||||
|
|
||||||
#include <boost/asio/io_context.hpp>
|
|
||||||
#include <boost/asio/spawn.hpp>
|
|
||||||
#include <boost/asio/steady_timer.hpp>
|
|
||||||
#include <gmock/gmock.h>
|
|
||||||
#include <gtest/gtest.h>
|
|
||||||
|
|
||||||
#include <atomic>
|
|
||||||
#include <chrono>
|
|
||||||
#include <semaphore>
|
|
||||||
#include <thread>
|
|
||||||
|
|
||||||
using namespace cluster::impl;
|
|
||||||
using namespace testing;
|
|
||||||
|
|
||||||
struct RepeatedTaskTest : AsyncAsioContextTest {
|
|
||||||
static constexpr auto kTIMEOUT = std::chrono::seconds{5};
|
|
||||||
};
|
|
||||||
|
|
||||||
template <typename MockFunctionType>
|
|
||||||
struct RepeatedTaskTypedTest : RepeatedTaskTest {
|
|
||||||
std::atomic_int32_t callCount{0};
|
|
||||||
std::binary_semaphore semaphore{0};
|
|
||||||
testing::StrictMock<MockFunctionType> mockFn;
|
|
||||||
|
|
||||||
void
|
|
||||||
expectCalls(int const expectedCalls)
|
|
||||||
{
|
|
||||||
callCount = 0;
|
|
||||||
|
|
||||||
EXPECT_CALL(mockFn, Call).Times(AtLeast(expectedCalls)).WillRepeatedly([this, expectedCalls](auto&&...) {
|
|
||||||
++callCount;
|
|
||||||
if (callCount >= expectedCalls) {
|
|
||||||
semaphore.release();
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
namespace {
|
|
||||||
|
|
||||||
using TypesToTest = Types<MockFunction<void()>, MockFunction<void(boost::asio::yield_context)>>;
|
|
||||||
|
|
||||||
} // namespace
|
|
||||||
|
|
||||||
TYPED_TEST_SUITE(RepeatedTaskTypedTest, TypesToTest);
|
|
||||||
|
|
||||||
TYPED_TEST(RepeatedTaskTypedTest, CallsFunctionRepeatedly)
|
|
||||||
{
|
|
||||||
RepeatedTask<boost::asio::io_context> task(std::chrono::milliseconds(1), this->ctx_);
|
|
||||||
|
|
||||||
this->expectCalls(3);
|
|
||||||
|
|
||||||
task.run(this->mockFn.AsStdFunction());
|
|
||||||
|
|
||||||
EXPECT_TRUE(this->semaphore.try_acquire_for(TestFixture::kTIMEOUT));
|
|
||||||
|
|
||||||
task.stop();
|
|
||||||
}
|
|
||||||
|
|
||||||
TYPED_TEST(RepeatedTaskTypedTest, StopsImmediately)
|
|
||||||
{
|
|
||||||
auto const interval = std::chrono::seconds(5);
|
|
||||||
RepeatedTask<boost::asio::io_context> task(interval, this->ctx_);
|
|
||||||
|
|
||||||
task.run(this->mockFn.AsStdFunction());
|
|
||||||
|
|
||||||
std::this_thread::sleep_for(std::chrono::milliseconds(5));
|
|
||||||
|
|
||||||
auto start = std::chrono::steady_clock::now();
|
|
||||||
task.stop();
|
|
||||||
EXPECT_LT(std::chrono::steady_clock::now() - start, interval);
|
|
||||||
}
|
|
||||||
|
|
||||||
TYPED_TEST(RepeatedTaskTypedTest, MultipleStops)
|
|
||||||
{
|
|
||||||
RepeatedTask<boost::asio::io_context> task(std::chrono::milliseconds(1), this->ctx_);
|
|
||||||
|
|
||||||
this->expectCalls(3);
|
|
||||||
|
|
||||||
task.run(this->mockFn.AsStdFunction());
|
|
||||||
|
|
||||||
EXPECT_TRUE(this->semaphore.try_acquire_for(TestFixture::kTIMEOUT));
|
|
||||||
|
|
||||||
task.stop();
|
|
||||||
task.stop();
|
|
||||||
task.stop();
|
|
||||||
}
|
|
||||||
|
|
||||||
TYPED_TEST(RepeatedTaskTypedTest, DestructorStopsTask)
|
|
||||||
{
|
|
||||||
this->expectCalls(3);
|
|
||||||
|
|
||||||
{
|
|
||||||
RepeatedTask<boost::asio::io_context> task(std::chrono::milliseconds(1), this->ctx_);
|
|
||||||
|
|
||||||
task.run(this->mockFn.AsStdFunction());
|
|
||||||
|
|
||||||
EXPECT_TRUE(this->semaphore.try_acquire_for(TestFixture::kTIMEOUT));
|
|
||||||
|
|
||||||
// Destructor will call stop()
|
|
||||||
}
|
|
||||||
|
|
||||||
auto const countAfterDestruction = this->callCount.load();
|
|
||||||
|
|
||||||
// Wait a bit - no more calls should happen
|
|
||||||
std::this_thread::sleep_for(std::chrono::milliseconds(10));
|
|
||||||
|
|
||||||
EXPECT_EQ(this->callCount, countAfterDestruction);
|
|
||||||
}
|
|
||||||
|
|
||||||
TYPED_TEST(RepeatedTaskTypedTest, StopWithoutRunIsNoOp)
|
|
||||||
{
|
|
||||||
RepeatedTask<boost::asio::io_context> task(std::chrono::milliseconds(1), this->ctx_);
|
|
||||||
|
|
||||||
// Should not crash or hang
|
|
||||||
task.stop();
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST_F(RepeatedTaskTest, MultipleTasksRunConcurrently)
|
|
||||||
{
|
|
||||||
StrictMock<MockFunction<void()>> mockFn1;
|
|
||||||
StrictMock<MockFunction<void()>> mockFn2;
|
|
||||||
|
|
||||||
RepeatedTask<boost::asio::io_context> task1(std::chrono::milliseconds(1), ctx_);
|
|
||||||
RepeatedTask<boost::asio::io_context> task2(std::chrono::milliseconds(2), ctx_);
|
|
||||||
|
|
||||||
std::atomic_int32_t callCount1{0};
|
|
||||||
std::atomic_int32_t callCount2{0};
|
|
||||||
std::binary_semaphore semaphore1{0};
|
|
||||||
std::binary_semaphore semaphore2{0};
|
|
||||||
|
|
||||||
EXPECT_CALL(mockFn1, Call).Times(AtLeast(10)).WillRepeatedly([&]() {
|
|
||||||
if (++callCount1 >= 10) {
|
|
||||||
semaphore1.release();
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
EXPECT_CALL(mockFn2, Call).Times(AtLeast(5)).WillRepeatedly([&]() {
|
|
||||||
if (++callCount2 >= 5) {
|
|
||||||
semaphore2.release();
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
task1.run(mockFn1.AsStdFunction());
|
|
||||||
task2.run(mockFn2.AsStdFunction());
|
|
||||||
|
|
||||||
EXPECT_TRUE(semaphore1.try_acquire_for(kTIMEOUT));
|
|
||||||
EXPECT_TRUE(semaphore2.try_acquire_for(kTIMEOUT));
|
|
||||||
|
|
||||||
task1.stop();
|
|
||||||
task2.stop();
|
|
||||||
}
|
|
||||||
|
|
||||||
TYPED_TEST(RepeatedTaskTypedTest, TaskStateTransitionsCorrectly)
|
|
||||||
{
|
|
||||||
RepeatedTask<boost::asio::io_context> task(std::chrono::milliseconds(1), this->ctx_);
|
|
||||||
|
|
||||||
task.stop(); // Should be no-op
|
|
||||||
|
|
||||||
this->expectCalls(3);
|
|
||||||
|
|
||||||
task.run(this->mockFn.AsStdFunction());
|
|
||||||
|
|
||||||
EXPECT_TRUE(this->semaphore.try_acquire_for(TestFixture::kTIMEOUT));
|
|
||||||
|
|
||||||
task.stop();
|
|
||||||
|
|
||||||
// Stop again should be no-op
|
|
||||||
task.stop();
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST_F(RepeatedTaskTest, FunctionCanAccessYieldContext)
|
|
||||||
{
|
|
||||||
StrictMock<MockFunction<void(boost::asio::yield_context)>> mockFn;
|
|
||||||
std::atomic_bool yieldContextUsed = false;
|
|
||||||
std::binary_semaphore semaphore{0};
|
|
||||||
|
|
||||||
RepeatedTask<boost::asio::io_context> task(std::chrono::milliseconds(1), ctx_);
|
|
||||||
|
|
||||||
EXPECT_CALL(mockFn, Call).Times(AtLeast(1)).WillRepeatedly([&](boost::asio::yield_context yield) {
|
|
||||||
if (yieldContextUsed)
|
|
||||||
return;
|
|
||||||
|
|
||||||
// Use the yield context to verify it's valid
|
|
||||||
boost::asio::steady_timer timer(yield.get_executor());
|
|
||||||
timer.expires_after(std::chrono::milliseconds(1));
|
|
||||||
boost::system::error_code ec;
|
|
||||||
timer.async_wait(yield[ec]);
|
|
||||||
EXPECT_FALSE(ec) << ec.message();
|
|
||||||
yieldContextUsed = true;
|
|
||||||
semaphore.release();
|
|
||||||
});
|
|
||||||
|
|
||||||
task.run(mockFn.AsStdFunction());
|
|
||||||
|
|
||||||
EXPECT_TRUE(semaphore.try_acquire_for(kTIMEOUT));
|
|
||||||
|
|
||||||
task.stop();
|
|
||||||
|
|
||||||
EXPECT_TRUE(yieldContextUsed);
|
|
||||||
}
|
|
||||||
@@ -1,314 +0,0 @@
|
|||||||
//------------------------------------------------------------------------------
|
|
||||||
/*
|
|
||||||
This file is part of clio: https://github.com/XRPLF/clio
|
|
||||||
Copyright (c) 2025, the clio developers.
|
|
||||||
|
|
||||||
Permission to use, copy, modify, and distribute this software for any
|
|
||||||
purpose with or without fee is hereby granted, provided that the above
|
|
||||||
copyright notice and this permission notice appear in all copies.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
||||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
||||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
||||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
||||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
||||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
||||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
||||||
*/
|
|
||||||
//==============================================================================
|
|
||||||
|
|
||||||
#include "cluster/Backend.hpp"
|
|
||||||
#include "cluster/ClioNode.hpp"
|
|
||||||
#include "cluster/WriterDecider.hpp"
|
|
||||||
#include "util/MockWriterState.hpp"
|
|
||||||
|
|
||||||
#include <boost/asio/thread_pool.hpp>
|
|
||||||
#include <boost/uuid/uuid.hpp>
|
|
||||||
#include <gmock/gmock.h>
|
|
||||||
#include <gtest/gtest.h>
|
|
||||||
|
|
||||||
#include <algorithm>
|
|
||||||
#include <chrono>
|
|
||||||
#include <cstdint>
|
|
||||||
#include <memory>
|
|
||||||
#include <string>
|
|
||||||
#include <utility>
|
|
||||||
#include <vector>
|
|
||||||
|
|
||||||
using namespace cluster;
|
|
||||||
|
|
||||||
enum class ExpectedAction { StartWriting, GiveUpWriting, NoAction, SetFallback };
|
|
||||||
|
|
||||||
struct WriterDeciderTestParams {
|
|
||||||
std::string testName;
|
|
||||||
uint8_t selfUuidValue;
|
|
||||||
std::vector<std::pair<uint8_t, ClioNode::DbRole>> nodes;
|
|
||||||
ExpectedAction expectedAction;
|
|
||||||
bool useEmptyClusterData = false;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct WriterDeciderTest : testing::TestWithParam<WriterDeciderTestParams> {
|
|
||||||
~WriterDeciderTest() override
|
|
||||||
{
|
|
||||||
ctx.stop();
|
|
||||||
ctx.join();
|
|
||||||
}
|
|
||||||
|
|
||||||
boost::asio::thread_pool ctx{1};
|
|
||||||
std::unique_ptr<MockWriterState> writerState = std::make_unique<MockWriterState>();
|
|
||||||
MockWriterState& writerStateRef = *writerState;
|
|
||||||
|
|
||||||
static ClioNode
|
|
||||||
makeNode(boost::uuids::uuid const& uuid, ClioNode::DbRole role)
|
|
||||||
{
|
|
||||||
return ClioNode{
|
|
||||||
.uuid = std::make_shared<boost::uuids::uuid>(uuid),
|
|
||||||
.updateTime = std::chrono::system_clock::now(),
|
|
||||||
.dbRole = role
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
static boost::uuids::uuid
|
|
||||||
makeUuid(uint8_t value)
|
|
||||||
{
|
|
||||||
boost::uuids::uuid uuid{};
|
|
||||||
std::ranges::fill(uuid, value);
|
|
||||||
return uuid;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
TEST_P(WriterDeciderTest, WriterSelection)
|
|
||||||
{
|
|
||||||
auto const& params = GetParam();
|
|
||||||
|
|
||||||
auto const selfUuid = makeUuid(params.selfUuidValue);
|
|
||||||
|
|
||||||
WriterDecider decider{ctx, std::move(writerState)};
|
|
||||||
|
|
||||||
auto clonedState = std::make_unique<MockWriterState>();
|
|
||||||
|
|
||||||
// Set up expectations based on expected action
|
|
||||||
switch (params.expectedAction) {
|
|
||||||
case ExpectedAction::StartWriting:
|
|
||||||
EXPECT_CALL(*clonedState, startWriting());
|
|
||||||
EXPECT_CALL(writerStateRef, clone()).WillOnce(testing::Return(testing::ByMove(std::move(clonedState))));
|
|
||||||
break;
|
|
||||||
case ExpectedAction::GiveUpWriting:
|
|
||||||
EXPECT_CALL(*clonedState, giveUpWriting());
|
|
||||||
EXPECT_CALL(writerStateRef, clone()).WillOnce(testing::Return(testing::ByMove(std::move(clonedState))));
|
|
||||||
break;
|
|
||||||
case ExpectedAction::SetFallback:
|
|
||||||
EXPECT_CALL(*clonedState, setWriterDecidingFallback());
|
|
||||||
EXPECT_CALL(writerStateRef, clone()).WillOnce(testing::Return(testing::ByMove(std::move(clonedState))));
|
|
||||||
break;
|
|
||||||
case ExpectedAction::NoAction:
|
|
||||||
if (not params.useEmptyClusterData) {
|
|
||||||
// For all-ReadOnly case, we still clone but don't call any action
|
|
||||||
EXPECT_CALL(writerStateRef, clone()).WillOnce(testing::Return(testing::ByMove(std::move(clonedState))));
|
|
||||||
}
|
|
||||||
// For empty cluster data, clone is never called
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
std::shared_ptr<Backend::ClusterData> clusterData;
|
|
||||||
ClioNode::CUuid selfIdPtr;
|
|
||||||
|
|
||||||
if (params.useEmptyClusterData) {
|
|
||||||
clusterData = std::make_shared<Backend::ClusterData>(std::unexpected(std::string("Communication failed")));
|
|
||||||
selfIdPtr = std::make_shared<boost::uuids::uuid>(selfUuid);
|
|
||||||
} else {
|
|
||||||
std::vector<ClioNode> nodes;
|
|
||||||
nodes.reserve(params.nodes.size());
|
|
||||||
for (auto const& [uuidValue, role] : params.nodes) {
|
|
||||||
auto node = makeNode(makeUuid(uuidValue), role);
|
|
||||||
if (uuidValue == params.selfUuidValue) {
|
|
||||||
selfIdPtr = node.uuid; // Use the same shared_ptr as in the node
|
|
||||||
}
|
|
||||||
nodes.push_back(std::move(node));
|
|
||||||
}
|
|
||||||
clusterData = std::make_shared<Backend::ClusterData>(std::move(nodes));
|
|
||||||
}
|
|
||||||
|
|
||||||
decider.onNewState(selfIdPtr, clusterData);
|
|
||||||
|
|
||||||
ctx.join();
|
|
||||||
}
|
|
||||||
|
|
||||||
INSTANTIATE_TEST_SUITE_P(
|
|
||||||
WriterDeciderTests,
|
|
||||||
WriterDeciderTest,
|
|
||||||
testing::Values(
|
|
||||||
WriterDeciderTestParams{
|
|
||||||
.testName = "SelfNodeIsSelectedAsWriter",
|
|
||||||
.selfUuidValue = 0x01,
|
|
||||||
.nodes = {{0x01, ClioNode::DbRole::Writer}, {0x02, ClioNode::DbRole::Writer}},
|
|
||||||
.expectedAction = ExpectedAction::StartWriting
|
|
||||||
},
|
|
||||||
WriterDeciderTestParams{
|
|
||||||
.testName = "OtherNodeIsSelectedAsWriter",
|
|
||||||
.selfUuidValue = 0x02,
|
|
||||||
.nodes = {{0x01, ClioNode::DbRole::Writer}, {0x02, ClioNode::DbRole::Writer}},
|
|
||||||
.expectedAction = ExpectedAction::GiveUpWriting
|
|
||||||
},
|
|
||||||
WriterDeciderTestParams{
|
|
||||||
.testName = "NodesAreSortedByUUID",
|
|
||||||
.selfUuidValue = 0x02,
|
|
||||||
.nodes =
|
|
||||||
{{0x03, ClioNode::DbRole::Writer}, {0x02, ClioNode::DbRole::Writer}, {0x01, ClioNode::DbRole::Writer}},
|
|
||||||
.expectedAction = ExpectedAction::GiveUpWriting
|
|
||||||
},
|
|
||||||
WriterDeciderTestParams{
|
|
||||||
.testName = "FirstNodeAfterReadOnlyIsNotSelf",
|
|
||||||
.selfUuidValue = 0x03,
|
|
||||||
.nodes =
|
|
||||||
{{0x01, ClioNode::DbRole::ReadOnly},
|
|
||||||
{0x02, ClioNode::DbRole::Writer},
|
|
||||||
{0x03, ClioNode::DbRole::NotWriter}},
|
|
||||||
.expectedAction = ExpectedAction::GiveUpWriting
|
|
||||||
},
|
|
||||||
WriterDeciderTestParams{
|
|
||||||
.testName = "FirstNodeAfterReadOnlyIsSelf",
|
|
||||||
.selfUuidValue = 0x02,
|
|
||||||
.nodes =
|
|
||||||
{{0x01, ClioNode::DbRole::ReadOnly},
|
|
||||||
{0x02, ClioNode::DbRole::Writer},
|
|
||||||
{0x03, ClioNode::DbRole::NotWriter}},
|
|
||||||
.expectedAction = ExpectedAction::StartWriting
|
|
||||||
},
|
|
||||||
WriterDeciderTestParams{
|
|
||||||
.testName = "AllNodesReadOnlyGiveUpWriting",
|
|
||||||
.selfUuidValue = 0x01,
|
|
||||||
.nodes = {{0x01, ClioNode::DbRole::ReadOnly}, {0x02, ClioNode::DbRole::ReadOnly}},
|
|
||||||
.expectedAction = ExpectedAction::GiveUpWriting
|
|
||||||
},
|
|
||||||
WriterDeciderTestParams{
|
|
||||||
.testName = "EmptyClusterDataNoActionTaken",
|
|
||||||
.selfUuidValue = 0x01,
|
|
||||||
.nodes = {},
|
|
||||||
.expectedAction = ExpectedAction::NoAction,
|
|
||||||
.useEmptyClusterData = true
|
|
||||||
},
|
|
||||||
WriterDeciderTestParams{
|
|
||||||
.testName = "SingleNodeClusterSelfIsWriter",
|
|
||||||
.selfUuidValue = 0x01,
|
|
||||||
.nodes = {{0x01, ClioNode::DbRole::Writer}},
|
|
||||||
.expectedAction = ExpectedAction::StartWriting
|
|
||||||
},
|
|
||||||
WriterDeciderTestParams{
|
|
||||||
.testName = "NotWriterRoleIsSelectedWhenNoWriterRole",
|
|
||||||
.selfUuidValue = 0x01,
|
|
||||||
.nodes = {{0x01, ClioNode::DbRole::NotWriter}, {0x02, ClioNode::DbRole::NotWriter}},
|
|
||||||
.expectedAction = ExpectedAction::StartWriting
|
|
||||||
},
|
|
||||||
WriterDeciderTestParams{
|
|
||||||
.testName = "MixedRolesFirstNonReadOnlyIsSelected",
|
|
||||||
.selfUuidValue = 0x03,
|
|
||||||
.nodes =
|
|
||||||
{{0x01, ClioNode::DbRole::ReadOnly},
|
|
||||||
{0x02, ClioNode::DbRole::Writer},
|
|
||||||
{0x03, ClioNode::DbRole::NotWriter},
|
|
||||||
{0x04, ClioNode::DbRole::Writer}},
|
|
||||||
.expectedAction = ExpectedAction::GiveUpWriting
|
|
||||||
},
|
|
||||||
WriterDeciderTestParams{
|
|
||||||
.testName = "ShuffledNodesAreSortedCorrectly",
|
|
||||||
.selfUuidValue = 0x04,
|
|
||||||
.nodes =
|
|
||||||
{{0x04, ClioNode::DbRole::Writer},
|
|
||||||
{0x01, ClioNode::DbRole::Writer},
|
|
||||||
{0x03, ClioNode::DbRole::Writer},
|
|
||||||
{0x02, ClioNode::DbRole::Writer}},
|
|
||||||
.expectedAction = ExpectedAction::GiveUpWriting
|
|
||||||
},
|
|
||||||
WriterDeciderTestParams{
|
|
||||||
.testName = "ShuffledNodesWithReadOnlySelfIsSelected",
|
|
||||||
.selfUuidValue = 0x03,
|
|
||||||
.nodes =
|
|
||||||
{{0x05, ClioNode::DbRole::Writer},
|
|
||||||
{0x01, ClioNode::DbRole::ReadOnly},
|
|
||||||
{0x04, ClioNode::DbRole::Writer},
|
|
||||||
{0x03, ClioNode::DbRole::Writer},
|
|
||||||
{0x02, ClioNode::DbRole::ReadOnly}},
|
|
||||||
.expectedAction = ExpectedAction::StartWriting
|
|
||||||
},
|
|
||||||
WriterDeciderTestParams{
|
|
||||||
.testName = "SelfIsFallbackNoActionTaken",
|
|
||||||
.selfUuidValue = 0x01,
|
|
||||||
.nodes = {{0x01, ClioNode::DbRole::Fallback}, {0x02, ClioNode::DbRole::Writer}},
|
|
||||||
.expectedAction = ExpectedAction::NoAction
|
|
||||||
},
|
|
||||||
WriterDeciderTestParams{
|
|
||||||
.testName = "OtherNodeIsFallbackSetsFallbackMode",
|
|
||||||
.selfUuidValue = 0x01,
|
|
||||||
.nodes = {{0x01, ClioNode::DbRole::Writer}, {0x02, ClioNode::DbRole::Fallback}},
|
|
||||||
.expectedAction = ExpectedAction::SetFallback
|
|
||||||
},
|
|
||||||
WriterDeciderTestParams{
|
|
||||||
.testName = "SelfIsReadOnlyOthersAreFallbackGiveUpWriting",
|
|
||||||
.selfUuidValue = 0x01,
|
|
||||||
.nodes = {{0x01, ClioNode::DbRole::ReadOnly}, {0x02, ClioNode::DbRole::Fallback}},
|
|
||||||
.expectedAction = ExpectedAction::GiveUpWriting
|
|
||||||
},
|
|
||||||
WriterDeciderTestParams{
|
|
||||||
.testName = "MultipleFallbackNodesSelfNotFallbackSetsFallback",
|
|
||||||
.selfUuidValue = 0x03,
|
|
||||||
.nodes =
|
|
||||||
{{0x01, ClioNode::DbRole::Fallback},
|
|
||||||
{0x02, ClioNode::DbRole::Fallback},
|
|
||||||
{0x03, ClioNode::DbRole::Writer}},
|
|
||||||
.expectedAction = ExpectedAction::SetFallback
|
|
||||||
},
|
|
||||||
WriterDeciderTestParams{
|
|
||||||
.testName = "MixedRolesWithOneFallbackSetsFallback",
|
|
||||||
.selfUuidValue = 0x02,
|
|
||||||
.nodes =
|
|
||||||
{{0x01, ClioNode::DbRole::Writer},
|
|
||||||
{0x02, ClioNode::DbRole::NotWriter},
|
|
||||||
{0x03, ClioNode::DbRole::Fallback},
|
|
||||||
{0x04, ClioNode::DbRole::Writer}},
|
|
||||||
.expectedAction = ExpectedAction::SetFallback
|
|
||||||
},
|
|
||||||
WriterDeciderTestParams{
|
|
||||||
.testName = "SelfIsLoadingCacheOtherIsWriter",
|
|
||||||
.selfUuidValue = 0x01,
|
|
||||||
.nodes = {{0x01, ClioNode::DbRole::LoadingCache}, {0x02, ClioNode::DbRole::Writer}},
|
|
||||||
.expectedAction = ExpectedAction::GiveUpWriting
|
|
||||||
},
|
|
||||||
WriterDeciderTestParams{
|
|
||||||
.testName = "OtherNodeIsLoadingCacheSkipToNextWriter",
|
|
||||||
.selfUuidValue = 0x02,
|
|
||||||
.nodes =
|
|
||||||
{{0x01, ClioNode::DbRole::LoadingCache},
|
|
||||||
{0x02, ClioNode::DbRole::Writer},
|
|
||||||
{0x03, ClioNode::DbRole::NotWriter}},
|
|
||||||
.expectedAction = ExpectedAction::StartWriting
|
|
||||||
},
|
|
||||||
WriterDeciderTestParams{
|
|
||||||
.testName = "AllNodesLoadingCacheNoActionTaken",
|
|
||||||
.selfUuidValue = 0x01,
|
|
||||||
.nodes = {{0x01, ClioNode::DbRole::LoadingCache}, {0x02, ClioNode::DbRole::LoadingCache}},
|
|
||||||
.expectedAction = ExpectedAction::NoAction
|
|
||||||
},
|
|
||||||
WriterDeciderTestParams{
|
|
||||||
.testName = "MixedWithLoadingCacheReadOnlyFirstNonReadOnlyNonLoadingCacheSelected",
|
|
||||||
.selfUuidValue = 0x03,
|
|
||||||
.nodes =
|
|
||||||
{{0x01, ClioNode::DbRole::ReadOnly},
|
|
||||||
{0x02, ClioNode::DbRole::LoadingCache},
|
|
||||||
{0x03, ClioNode::DbRole::Writer},
|
|
||||||
{0x04, ClioNode::DbRole::NotWriter}},
|
|
||||||
.expectedAction = ExpectedAction::StartWriting
|
|
||||||
},
|
|
||||||
WriterDeciderTestParams{
|
|
||||||
.testName = "LoadingCacheBeforeWriterSkipsLoadingCache",
|
|
||||||
.selfUuidValue = 0x04,
|
|
||||||
.nodes =
|
|
||||||
{{0x01, ClioNode::DbRole::LoadingCache},
|
|
||||||
{0x02, ClioNode::DbRole::LoadingCache},
|
|
||||||
{0x03, ClioNode::DbRole::Writer},
|
|
||||||
{0x04, ClioNode::DbRole::NotWriter}},
|
|
||||||
.expectedAction = ExpectedAction::GiveUpWriting
|
|
||||||
}
|
|
||||||
),
|
|
||||||
[](testing::TestParamInfo<WriterDeciderTestParams> const& info) { return info.param.testName; }
|
|
||||||
);
|
|
||||||
@@ -32,8 +32,8 @@
|
|||||||
#include <xrpl/protocol/Indexes.h>
|
#include <xrpl/protocol/Indexes.h>
|
||||||
|
|
||||||
#include <algorithm>
|
#include <algorithm>
|
||||||
#include <functional>
|
|
||||||
#include <optional>
|
#include <optional>
|
||||||
|
#include <stdexcept>
|
||||||
#include <string>
|
#include <string>
|
||||||
#include <vector>
|
#include <vector>
|
||||||
|
|
||||||
|
|||||||
@@ -216,10 +216,6 @@ protected:
|
|||||||
std::shared_ptr<testing::NiceMock<MockMonitorProvider>> monitorProvider_ =
|
std::shared_ptr<testing::NiceMock<MockMonitorProvider>> monitorProvider_ =
|
||||||
std::make_shared<testing::NiceMock<MockMonitorProvider>>();
|
std::make_shared<testing::NiceMock<MockMonitorProvider>>();
|
||||||
std::shared_ptr<etl::SystemState> systemState_ = std::make_shared<etl::SystemState>();
|
std::shared_ptr<etl::SystemState> systemState_ = std::make_shared<etl::SystemState>();
|
||||||
testing::StrictMock<testing::MockFunction<void(etl::SystemState::WriteCommand)>> mockWriteSignalCommandCallback_;
|
|
||||||
boost::signals2::scoped_connection writeCommandConnection_{
|
|
||||||
systemState_->writeCommandSignal.connect(mockWriteSignalCommandCallback_.AsStdFunction())
|
|
||||||
};
|
|
||||||
|
|
||||||
etl::ETLService service_{
|
etl::ETLService service_{
|
||||||
ctx_,
|
ctx_,
|
||||||
@@ -304,7 +300,6 @@ TEST_F(ETLServiceTests, RunWithEmptyDatabase)
|
|||||||
auto mockTaskManager = std::make_unique<testing::NiceMock<MockTaskManager>>();
|
auto mockTaskManager = std::make_unique<testing::NiceMock<MockTaskManager>>();
|
||||||
auto& mockTaskManagerRef = *mockTaskManager;
|
auto& mockTaskManagerRef = *mockTaskManager;
|
||||||
auto ledgerData = createTestData(kSEQ);
|
auto ledgerData = createTestData(kSEQ);
|
||||||
EXPECT_TRUE(systemState_->isLoadingCache);
|
|
||||||
|
|
||||||
testing::Sequence const s;
|
testing::Sequence const s;
|
||||||
EXPECT_CALL(*backend_, hardFetchLedgerRange).InSequence(s).WillOnce(testing::Return(std::nullopt));
|
EXPECT_CALL(*backend_, hardFetchLedgerRange).InSequence(s).WillOnce(testing::Return(std::nullopt));
|
||||||
@@ -313,61 +308,25 @@ TEST_F(ETLServiceTests, RunWithEmptyDatabase)
|
|||||||
EXPECT_CALL(*balancer_, loadInitialLedger(kSEQ, testing::_, testing::_))
|
EXPECT_CALL(*balancer_, loadInitialLedger(kSEQ, testing::_, testing::_))
|
||||||
.WillOnce(testing::Return(std::vector<std::string>{}));
|
.WillOnce(testing::Return(std::vector<std::string>{}));
|
||||||
EXPECT_CALL(*loader_, loadInitialLedger).WillOnce(testing::Return(ripple::LedgerHeader{}));
|
EXPECT_CALL(*loader_, loadInitialLedger).WillOnce(testing::Return(ripple::LedgerHeader{}));
|
||||||
// In syncCacheWithDb()
|
EXPECT_CALL(*backend_, hardFetchLedgerRange)
|
||||||
EXPECT_CALL(*backend_, hardFetchLedgerRange).Times(2).InSequence(s).WillRepeatedly([this]() {
|
.InSequence(s)
|
||||||
backend_->cache().update({}, kSEQ, false);
|
.WillOnce(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
|
||||||
return data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ};
|
|
||||||
});
|
|
||||||
EXPECT_CALL(mockTaskManagerRef, run);
|
EXPECT_CALL(mockTaskManagerRef, run);
|
||||||
EXPECT_CALL(*taskManagerProvider_, make(testing::_, testing::_, kSEQ + 1, testing::_)).WillOnce([&](auto&&...) {
|
EXPECT_CALL(*taskManagerProvider_, make(testing::_, testing::_, kSEQ + 1, testing::_))
|
||||||
EXPECT_FALSE(systemState_->isLoadingCache);
|
.WillOnce(testing::Return(std::unique_ptr<etl::TaskManagerInterface>(mockTaskManager.release())));
|
||||||
return std::unique_ptr<etl::TaskManagerInterface>(mockTaskManager.release());
|
EXPECT_CALL(*monitorProvider_, make(testing::_, testing::_, testing::_, testing::_, testing::_))
|
||||||
});
|
.WillOnce([](auto, auto, auto, auto, auto) { return std::make_unique<testing::NiceMock<MockMonitor>>(); });
|
||||||
EXPECT_CALL(*monitorProvider_, make(testing::_, testing::_, testing::_, kSEQ + 1, testing::_))
|
|
||||||
.WillOnce([this](auto, auto, auto, auto, auto) {
|
|
||||||
EXPECT_TRUE(systemState_->isLoadingCache);
|
|
||||||
return std::make_unique<testing::NiceMock<MockMonitor>>();
|
|
||||||
});
|
|
||||||
|
|
||||||
service_.run();
|
service_.run();
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(ETLServiceTests, RunWithPopulatedDatabase)
|
TEST_F(ETLServiceTests, RunWithPopulatedDatabase)
|
||||||
{
|
{
|
||||||
EXPECT_TRUE(systemState_->isLoadingCache);
|
|
||||||
backend_->cache().update({}, kSEQ, false);
|
|
||||||
EXPECT_CALL(*backend_, hardFetchLedgerRange)
|
EXPECT_CALL(*backend_, hardFetchLedgerRange)
|
||||||
.WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
|
.WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
|
||||||
EXPECT_CALL(*monitorProvider_, make(testing::_, testing::_, testing::_, kSEQ + 1, testing::_))
|
EXPECT_CALL(*monitorProvider_, make).WillOnce([](auto, auto, auto, auto, auto) {
|
||||||
.WillOnce([this](auto, auto, auto, auto, auto) {
|
return std::make_unique<testing::NiceMock<MockMonitor>>();
|
||||||
EXPECT_TRUE(systemState_->isLoadingCache);
|
});
|
||||||
return std::make_unique<testing::NiceMock<MockMonitor>>();
|
|
||||||
});
|
|
||||||
EXPECT_CALL(*ledgers_, getMostRecent()).WillRepeatedly(testing::Return(kSEQ));
|
|
||||||
EXPECT_CALL(*cacheLoader_, load(kSEQ));
|
|
||||||
|
|
||||||
service_.run();
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST_F(ETLServiceTests, SyncCacheWithDbBeforeStartingMonitor)
|
|
||||||
{
|
|
||||||
EXPECT_TRUE(systemState_->isLoadingCache);
|
|
||||||
backend_->cache().update({}, kSEQ - 2, false);
|
|
||||||
EXPECT_CALL(*backend_, hardFetchLedgerRange)
|
|
||||||
.WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
|
|
||||||
|
|
||||||
EXPECT_CALL(*backend_, fetchLedgerDiff(kSEQ - 1, testing::_));
|
|
||||||
EXPECT_CALL(*cacheUpdater_, update(kSEQ - 1, std::vector<data::LedgerObject>()))
|
|
||||||
.WillOnce([this](auto const seq, auto&&...) { backend_->cache().update({}, seq, false); });
|
|
||||||
EXPECT_CALL(*backend_, fetchLedgerDiff(kSEQ, testing::_));
|
|
||||||
EXPECT_CALL(*cacheUpdater_, update(kSEQ, std::vector<data::LedgerObject>()))
|
|
||||||
.WillOnce([this](auto const seq, auto&&...) { backend_->cache().update({}, seq, false); });
|
|
||||||
|
|
||||||
EXPECT_CALL(*monitorProvider_, make(testing::_, testing::_, testing::_, kSEQ + 1, testing::_))
|
|
||||||
.WillOnce([this](auto, auto, auto, auto, auto) {
|
|
||||||
EXPECT_TRUE(systemState_->isLoadingCache);
|
|
||||||
return std::make_unique<testing::NiceMock<MockMonitor>>();
|
|
||||||
});
|
|
||||||
EXPECT_CALL(*ledgers_, getMostRecent()).WillRepeatedly(testing::Return(kSEQ));
|
EXPECT_CALL(*ledgers_, getMostRecent()).WillRepeatedly(testing::Return(kSEQ));
|
||||||
EXPECT_CALL(*cacheLoader_, load(kSEQ));
|
EXPECT_CALL(*cacheLoader_, load(kSEQ));
|
||||||
|
|
||||||
@@ -405,22 +364,19 @@ TEST_F(ETLServiceTests, HandlesWriteConflictInMonitorSubscription)
|
|||||||
EXPECT_CALL(mockMonitorRef, subscribeToDbStalled);
|
EXPECT_CALL(mockMonitorRef, subscribeToDbStalled);
|
||||||
EXPECT_CALL(mockMonitorRef, run);
|
EXPECT_CALL(mockMonitorRef, run);
|
||||||
|
|
||||||
// Set cache to be in sync with DB to avoid syncCacheWithDb loop
|
|
||||||
backend_->cache().update({}, kSEQ, false);
|
|
||||||
EXPECT_CALL(*backend_, hardFetchLedgerRange)
|
EXPECT_CALL(*backend_, hardFetchLedgerRange)
|
||||||
.Times(2)
|
.WillOnce(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
|
||||||
.WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
|
|
||||||
EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
|
EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
|
||||||
EXPECT_CALL(*cacheLoader_, load(kSEQ));
|
EXPECT_CALL(*cacheLoader_, load(kSEQ));
|
||||||
|
|
||||||
service_.run();
|
service_.run();
|
||||||
writeCommandConnection_.disconnect();
|
systemState_->writeConflict = true;
|
||||||
systemState_->writeCommandSignal(etl::SystemState::WriteCommand::StopWriting);
|
|
||||||
|
|
||||||
EXPECT_CALL(*publisher_, publish(kSEQ + 1, testing::_, testing::_));
|
EXPECT_CALL(*publisher_, publish(kSEQ + 1, testing::_, testing::_));
|
||||||
ASSERT_TRUE(capturedCallback);
|
ASSERT_TRUE(capturedCallback);
|
||||||
capturedCallback(kSEQ + 1);
|
capturedCallback(kSEQ + 1);
|
||||||
|
|
||||||
|
EXPECT_FALSE(systemState_->writeConflict);
|
||||||
EXPECT_FALSE(systemState_->isWriting);
|
EXPECT_FALSE(systemState_->isWriting);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -441,11 +397,8 @@ TEST_F(ETLServiceTests, NormalFlowInMonitorSubscription)
|
|||||||
EXPECT_CALL(mockMonitorRef, subscribeToDbStalled);
|
EXPECT_CALL(mockMonitorRef, subscribeToDbStalled);
|
||||||
EXPECT_CALL(mockMonitorRef, run);
|
EXPECT_CALL(mockMonitorRef, run);
|
||||||
|
|
||||||
// Set cache to be in sync with DB to avoid syncCacheWithDb loop
|
|
||||||
backend_->cache().update({}, kSEQ, false);
|
|
||||||
EXPECT_CALL(*backend_, hardFetchLedgerRange)
|
EXPECT_CALL(*backend_, hardFetchLedgerRange)
|
||||||
.Times(2)
|
.WillOnce(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
|
||||||
.WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
|
|
||||||
EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
|
EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
|
||||||
EXPECT_CALL(*cacheLoader_, load(kSEQ));
|
EXPECT_CALL(*cacheLoader_, load(kSEQ));
|
||||||
|
|
||||||
@@ -471,19 +424,13 @@ TEST_F(ETLServiceTests, AttemptTakeoverWriter)
|
|||||||
return std::move(mockMonitor);
|
return std::move(mockMonitor);
|
||||||
});
|
});
|
||||||
|
|
||||||
std::function<void(uint32_t)> onNewSeqCallback;
|
EXPECT_CALL(mockMonitorRef, subscribeToNewSequence);
|
||||||
EXPECT_CALL(mockMonitorRef, subscribeToNewSequence).WillOnce([&onNewSeqCallback](auto cb) {
|
|
||||||
onNewSeqCallback = std::move(cb);
|
|
||||||
return boost::signals2::scoped_connection{};
|
|
||||||
});
|
|
||||||
EXPECT_CALL(mockMonitorRef, subscribeToDbStalled).WillOnce([&capturedDbStalledCallback](auto callback) {
|
EXPECT_CALL(mockMonitorRef, subscribeToDbStalled).WillOnce([&capturedDbStalledCallback](auto callback) {
|
||||||
capturedDbStalledCallback = callback;
|
capturedDbStalledCallback = callback;
|
||||||
return boost::signals2::scoped_connection{};
|
return boost::signals2::scoped_connection{};
|
||||||
});
|
});
|
||||||
EXPECT_CALL(mockMonitorRef, run);
|
EXPECT_CALL(mockMonitorRef, run);
|
||||||
|
|
||||||
// Set cache to be in sync with DB to avoid syncCacheWithDb loop
|
|
||||||
backend_->cache().update({}, kSEQ, false);
|
|
||||||
EXPECT_CALL(*backend_, hardFetchLedgerRange)
|
EXPECT_CALL(*backend_, hardFetchLedgerRange)
|
||||||
.WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
|
.WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
|
||||||
EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
|
EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
|
||||||
@@ -500,14 +447,10 @@ TEST_F(ETLServiceTests, AttemptTakeoverWriter)
|
|||||||
EXPECT_CALL(*taskManagerProvider_, make(testing::_, testing::_, kSEQ + 1, testing::_))
|
EXPECT_CALL(*taskManagerProvider_, make(testing::_, testing::_, kSEQ + 1, testing::_))
|
||||||
.WillOnce(testing::Return(std::move(mockTaskManager)));
|
.WillOnce(testing::Return(std::move(mockTaskManager)));
|
||||||
|
|
||||||
EXPECT_CALL(mockWriteSignalCommandCallback_, Call(etl::SystemState::WriteCommand::StartWriting));
|
|
||||||
|
|
||||||
ASSERT_TRUE(capturedDbStalledCallback);
|
ASSERT_TRUE(capturedDbStalledCallback);
|
||||||
EXPECT_FALSE(systemState_->isWriting); // will attempt to become writer after new sequence appears but not yet
|
|
||||||
EXPECT_FALSE(systemState_->isWriterDecidingFallback);
|
|
||||||
capturedDbStalledCallback();
|
capturedDbStalledCallback();
|
||||||
EXPECT_TRUE(systemState_->isWriting); // should attempt to become writer
|
|
||||||
EXPECT_TRUE(systemState_->isWriterDecidingFallback); // fallback mode activated
|
EXPECT_TRUE(systemState_->isWriting); // should attempt to become writer
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(ETLServiceTests, GiveUpWriterAfterWriteConflict)
|
TEST_F(ETLServiceTests, GiveUpWriterAfterWriteConflict)
|
||||||
@@ -527,25 +470,22 @@ TEST_F(ETLServiceTests, GiveUpWriterAfterWriteConflict)
|
|||||||
EXPECT_CALL(mockMonitorRef, subscribeToDbStalled);
|
EXPECT_CALL(mockMonitorRef, subscribeToDbStalled);
|
||||||
EXPECT_CALL(mockMonitorRef, run);
|
EXPECT_CALL(mockMonitorRef, run);
|
||||||
|
|
||||||
// Set cache to be in sync with DB to avoid syncCacheWithDb loop
|
|
||||||
backend_->cache().update({}, kSEQ, false);
|
|
||||||
EXPECT_CALL(*backend_, hardFetchLedgerRange)
|
EXPECT_CALL(*backend_, hardFetchLedgerRange)
|
||||||
.Times(2)
|
.WillOnce(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
|
||||||
.WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
|
|
||||||
EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
|
EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
|
||||||
EXPECT_CALL(*cacheLoader_, load(kSEQ));
|
EXPECT_CALL(*cacheLoader_, load(kSEQ));
|
||||||
|
|
||||||
service_.run();
|
service_.run();
|
||||||
systemState_->isWriting = true;
|
systemState_->isWriting = true;
|
||||||
writeCommandConnection_.disconnect();
|
systemState_->writeConflict = true; // got a write conflict along the way
|
||||||
systemState_->writeCommandSignal(etl::SystemState::WriteCommand::StopWriting);
|
|
||||||
|
|
||||||
EXPECT_CALL(*publisher_, publish(kSEQ + 1, testing::_, testing::_));
|
EXPECT_CALL(*publisher_, publish(kSEQ + 1, testing::_, testing::_));
|
||||||
|
|
||||||
ASSERT_TRUE(capturedCallback);
|
ASSERT_TRUE(capturedCallback);
|
||||||
capturedCallback(kSEQ + 1);
|
capturedCallback(kSEQ + 1);
|
||||||
|
|
||||||
EXPECT_FALSE(systemState_->isWriting); // gives up writing
|
EXPECT_FALSE(systemState_->isWriting); // gives up writing
|
||||||
|
EXPECT_FALSE(systemState_->writeConflict); // and removes write conflict flag
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(ETLServiceTests, CancelledLoadInitialLedger)
|
TEST_F(ETLServiceTests, CancelledLoadInitialLedger)
|
||||||
@@ -599,327 +539,3 @@ TEST_F(ETLServiceTests, RunStopsIfInitialLoadIsCancelledByBalancer)
|
|||||||
EXPECT_FALSE(service_.isAmendmentBlocked());
|
EXPECT_FALSE(service_.isAmendmentBlocked());
|
||||||
EXPECT_FALSE(service_.isCorruptionDetected());
|
EXPECT_FALSE(service_.isCorruptionDetected());
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(ETLServiceTests, DbStalledDoesNotTriggerSignalWhenStrictReadonly)
|
|
||||||
{
|
|
||||||
auto mockMonitor = std::make_unique<testing::NiceMock<MockMonitor>>();
|
|
||||||
auto& mockMonitorRef = *mockMonitor;
|
|
||||||
std::function<void()> capturedDbStalledCallback;
|
|
||||||
|
|
||||||
EXPECT_CALL(*monitorProvider_, make).WillOnce([&mockMonitor](auto, auto, auto, auto, auto) {
|
|
||||||
return std::move(mockMonitor);
|
|
||||||
});
|
|
||||||
EXPECT_CALL(mockMonitorRef, subscribeToNewSequence);
|
|
||||||
EXPECT_CALL(mockMonitorRef, subscribeToDbStalled).WillOnce([&capturedDbStalledCallback](auto callback) {
|
|
||||||
capturedDbStalledCallback = callback;
|
|
||||||
return boost::signals2::scoped_connection{};
|
|
||||||
});
|
|
||||||
EXPECT_CALL(mockMonitorRef, run);
|
|
||||||
|
|
||||||
// Set cache to be in sync with DB to avoid syncCacheWithDb loop
|
|
||||||
backend_->cache().update({}, kSEQ, false);
|
|
||||||
EXPECT_CALL(*backend_, hardFetchLedgerRange)
|
|
||||||
.WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
|
|
||||||
EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
|
|
||||||
EXPECT_CALL(*cacheLoader_, load(kSEQ));
|
|
||||||
|
|
||||||
service_.run();
|
|
||||||
systemState_->isStrictReadonly = true; // strict readonly mode
|
|
||||||
systemState_->isWriting = false;
|
|
||||||
|
|
||||||
// No signal should be emitted because node is in strict readonly mode
|
|
||||||
// But fallback flag should still be set
|
|
||||||
|
|
||||||
ASSERT_TRUE(capturedDbStalledCallback);
|
|
||||||
EXPECT_FALSE(systemState_->isWriterDecidingFallback);
|
|
||||||
capturedDbStalledCallback();
|
|
||||||
EXPECT_TRUE(systemState_->isWriterDecidingFallback); // fallback mode activated even in readonly
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST_F(ETLServiceTests, DbStalledDoesNotTriggerSignalWhenAlreadyWriting)
|
|
||||||
{
|
|
||||||
auto mockMonitor = std::make_unique<testing::NiceMock<MockMonitor>>();
|
|
||||||
auto& mockMonitorRef = *mockMonitor;
|
|
||||||
std::function<void()> capturedDbStalledCallback;
|
|
||||||
|
|
||||||
EXPECT_CALL(*monitorProvider_, make).WillOnce([&mockMonitor](auto, auto, auto, auto, auto) {
|
|
||||||
return std::move(mockMonitor);
|
|
||||||
});
|
|
||||||
EXPECT_CALL(mockMonitorRef, subscribeToNewSequence);
|
|
||||||
EXPECT_CALL(mockMonitorRef, subscribeToDbStalled).WillOnce([&capturedDbStalledCallback](auto callback) {
|
|
||||||
capturedDbStalledCallback = callback;
|
|
||||||
return boost::signals2::scoped_connection{};
|
|
||||||
});
|
|
||||||
EXPECT_CALL(mockMonitorRef, run);
|
|
||||||
|
|
||||||
// Set cache to be in sync with DB to avoid syncCacheWithDb loop
|
|
||||||
backend_->cache().update({}, kSEQ, false);
|
|
||||||
EXPECT_CALL(*backend_, hardFetchLedgerRange)
|
|
||||||
.WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
|
|
||||||
EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
|
|
||||||
EXPECT_CALL(*cacheLoader_, load(kSEQ));
|
|
||||||
|
|
||||||
service_.run();
|
|
||||||
systemState_->isStrictReadonly = false;
|
|
||||||
systemState_->isWriting = true; // already writing
|
|
||||||
|
|
||||||
// No signal should be emitted because node is already writing
|
|
||||||
// But fallback flag should still be set
|
|
||||||
|
|
||||||
ASSERT_TRUE(capturedDbStalledCallback);
|
|
||||||
EXPECT_FALSE(systemState_->isWriterDecidingFallback);
|
|
||||||
capturedDbStalledCallback();
|
|
||||||
EXPECT_TRUE(systemState_->isWriterDecidingFallback); // fallback mode activated
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST_F(ETLServiceTests, CacheUpdatesDependOnActualCacheState_WriterMode)
|
|
||||||
{
|
|
||||||
auto mockMonitor = std::make_unique<testing::NiceMock<MockMonitor>>();
|
|
||||||
auto& mockMonitorRef = *mockMonitor;
|
|
||||||
std::function<void(uint32_t)> capturedCallback;
|
|
||||||
|
|
||||||
EXPECT_CALL(*monitorProvider_, make).WillOnce([&mockMonitor](auto, auto, auto, auto, auto) {
|
|
||||||
return std::move(mockMonitor);
|
|
||||||
});
|
|
||||||
EXPECT_CALL(mockMonitorRef, subscribeToNewSequence).WillOnce([&capturedCallback](auto callback) {
|
|
||||||
capturedCallback = callback;
|
|
||||||
return boost::signals2::scoped_connection{};
|
|
||||||
});
|
|
||||||
EXPECT_CALL(mockMonitorRef, subscribeToDbStalled);
|
|
||||||
EXPECT_CALL(mockMonitorRef, run);
|
|
||||||
|
|
||||||
// Set cache to be in sync with DB initially to avoid syncCacheWithDb loop
|
|
||||||
backend_->cache().update({}, kSEQ, false);
|
|
||||||
EXPECT_CALL(*backend_, hardFetchLedgerRange)
|
|
||||||
.WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
|
|
||||||
EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
|
|
||||||
EXPECT_CALL(*cacheLoader_, load(kSEQ));
|
|
||||||
|
|
||||||
service_.run();
|
|
||||||
systemState_->isWriting = true; // In writer mode
|
|
||||||
|
|
||||||
// Simulate cache is behind (e.g., update failed previously)
|
|
||||||
// Cache latestLedgerSequence returns kSEQ (behind the new seq kSEQ + 1)
|
|
||||||
std::vector<data::LedgerObject> const emptyObjs = {};
|
|
||||||
backend_->cache().update(emptyObjs, kSEQ); // Set cache to kSEQ
|
|
||||||
|
|
||||||
std::vector<data::LedgerObject> const dummyDiff = {};
|
|
||||||
EXPECT_CALL(*backend_, fetchLedgerDiff(kSEQ + 1, testing::_)).WillOnce(testing::Return(dummyDiff));
|
|
||||||
|
|
||||||
// Cache should be updated even though we're in writer mode
|
|
||||||
EXPECT_CALL(*cacheUpdater_, update(kSEQ + 1, testing::A<std::vector<data::LedgerObject> const&>()));
|
|
||||||
|
|
||||||
EXPECT_CALL(*publisher_, publish(kSEQ + 1, testing::_, testing::_));
|
|
||||||
|
|
||||||
ASSERT_TRUE(capturedCallback);
|
|
||||||
capturedCallback(kSEQ + 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST_F(ETLServiceTests, OnlyCacheUpdatesWhenBackendIsCurrent)
|
|
||||||
{
|
|
||||||
auto mockMonitor = std::make_unique<testing::NiceMock<MockMonitor>>();
|
|
||||||
auto& mockMonitorRef = *mockMonitor;
|
|
||||||
std::function<void(uint32_t)> capturedCallback;
|
|
||||||
// Set cache to be in sync with DB initially to avoid syncCacheWithDb loop
|
|
||||||
backend_->cache().update({}, kSEQ, false);
|
|
||||||
|
|
||||||
EXPECT_CALL(*monitorProvider_, make).WillOnce([&mockMonitor](auto, auto, auto, auto, auto) {
|
|
||||||
return std::move(mockMonitor);
|
|
||||||
});
|
|
||||||
EXPECT_CALL(mockMonitorRef, subscribeToNewSequence).WillOnce([&capturedCallback](auto callback) {
|
|
||||||
capturedCallback = callback;
|
|
||||||
return boost::signals2::scoped_connection{};
|
|
||||||
});
|
|
||||||
EXPECT_CALL(mockMonitorRef, subscribeToDbStalled);
|
|
||||||
EXPECT_CALL(mockMonitorRef, run);
|
|
||||||
|
|
||||||
// Set backend range to be at kSEQ + 1 (already current)
|
|
||||||
EXPECT_CALL(*backend_, hardFetchLedgerRange)
|
|
||||||
.WillOnce(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}))
|
|
||||||
.WillOnce(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}))
|
|
||||||
.WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ + 1}));
|
|
||||||
EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
|
|
||||||
EXPECT_CALL(*cacheLoader_, load(kSEQ));
|
|
||||||
|
|
||||||
service_.run();
|
|
||||||
systemState_->isWriting = false;
|
|
||||||
|
|
||||||
// Cache is behind (at kSEQ)
|
|
||||||
std::vector<data::LedgerObject> const emptyObjs = {};
|
|
||||||
backend_->cache().update(emptyObjs, kSEQ);
|
|
||||||
|
|
||||||
std::vector<data::LedgerObject> const dummyDiff = {};
|
|
||||||
EXPECT_CALL(*backend_, fetchLedgerDiff(kSEQ + 1, testing::_)).WillOnce(testing::Return(dummyDiff));
|
|
||||||
EXPECT_CALL(*cacheUpdater_, update(kSEQ + 1, testing::A<std::vector<data::LedgerObject> const&>()));
|
|
||||||
|
|
||||||
EXPECT_CALL(*publisher_, publish(kSEQ + 1, testing::_, testing::_));
|
|
||||||
|
|
||||||
ASSERT_TRUE(capturedCallback);
|
|
||||||
capturedCallback(kSEQ + 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST_F(ETLServiceTests, NoUpdatesWhenBothCacheAndBackendAreCurrent)
|
|
||||||
{
|
|
||||||
auto mockMonitor = std::make_unique<testing::NiceMock<MockMonitor>>();
|
|
||||||
auto& mockMonitorRef = *mockMonitor;
|
|
||||||
std::function<void(uint32_t)> capturedCallback;
|
|
||||||
// Set cache to be in sync with DB initially to avoid syncCacheWithDb loop
|
|
||||||
backend_->cache().update({}, kSEQ, false);
|
|
||||||
|
|
||||||
EXPECT_CALL(*monitorProvider_, make).WillOnce([&mockMonitor](auto, auto, auto, auto, auto) {
|
|
||||||
return std::move(mockMonitor);
|
|
||||||
});
|
|
||||||
EXPECT_CALL(mockMonitorRef, subscribeToNewSequence).WillOnce([&capturedCallback](auto callback) {
|
|
||||||
capturedCallback = callback;
|
|
||||||
return boost::signals2::scoped_connection{};
|
|
||||||
});
|
|
||||||
EXPECT_CALL(mockMonitorRef, subscribeToDbStalled);
|
|
||||||
EXPECT_CALL(mockMonitorRef, run);
|
|
||||||
|
|
||||||
// Set backend range to be at kSEQ + 1 (already current)
|
|
||||||
EXPECT_CALL(*backend_, hardFetchLedgerRange)
|
|
||||||
.WillOnce(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}))
|
|
||||||
.WillOnce(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}))
|
|
||||||
.WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ + 1}));
|
|
||||||
EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
|
|
||||||
EXPECT_CALL(*cacheLoader_, load(kSEQ));
|
|
||||||
|
|
||||||
service_.run();
|
|
||||||
|
|
||||||
// Cache is current (at kSEQ + 1)
|
|
||||||
std::vector<data::LedgerObject> const emptyObjs = {};
|
|
||||||
backend_->cache().update(emptyObjs, kSEQ + 1);
|
|
||||||
|
|
||||||
// Neither should be updated
|
|
||||||
EXPECT_CALL(*backend_, fetchLedgerDiff).Times(0);
|
|
||||||
EXPECT_CALL(*cacheUpdater_, update(testing::_, testing::A<std::vector<data::LedgerObject> const&>())).Times(0);
|
|
||||||
|
|
||||||
EXPECT_CALL(*publisher_, publish(kSEQ + 1, testing::_, testing::_));
|
|
||||||
|
|
||||||
ASSERT_TRUE(capturedCallback);
|
|
||||||
capturedCallback(kSEQ + 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST_F(ETLServiceTests, StopWaitsForWriteCommandHandlersToComplete)
|
|
||||||
{
|
|
||||||
auto mockMonitor = std::make_unique<testing::NiceMock<MockMonitor>>();
|
|
||||||
// Set cache to be in sync with DB to avoid syncCacheWithDb loop
|
|
||||||
backend_->cache().update({}, kSEQ, false);
|
|
||||||
|
|
||||||
EXPECT_CALL(*monitorProvider_, make).WillOnce([&mockMonitor](auto, auto, auto, auto, auto) {
|
|
||||||
return std::move(mockMonitor);
|
|
||||||
});
|
|
||||||
|
|
||||||
EXPECT_CALL(*backend_, hardFetchLedgerRange)
|
|
||||||
.WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
|
|
||||||
EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
|
|
||||||
EXPECT_CALL(*cacheLoader_, load(kSEQ));
|
|
||||||
|
|
||||||
service_.run();
|
|
||||||
systemState_->isStrictReadonly = false;
|
|
||||||
|
|
||||||
auto mockTaskManager = std::make_unique<testing::NiceMock<MockTaskManager>>();
|
|
||||||
|
|
||||||
EXPECT_CALL(mockWriteSignalCommandCallback_, Call(etl::SystemState::WriteCommand::StartWriting));
|
|
||||||
EXPECT_CALL(*taskManagerProvider_, make(testing::_, testing::_, kSEQ + 1, testing::_))
|
|
||||||
.WillOnce(testing::Return(std::move(mockTaskManager)));
|
|
||||||
|
|
||||||
// Emit a command
|
|
||||||
systemState_->writeCommandSignal(etl::SystemState::WriteCommand::StartWriting);
|
|
||||||
|
|
||||||
// The test context processes operations synchronously, so the handler should have run
|
|
||||||
// Stop should wait for the handler to complete and disconnect the subscription
|
|
||||||
service_.stop();
|
|
||||||
|
|
||||||
// Verify stop() returned, meaning all handlers completed
|
|
||||||
SUCCEED();
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST_F(ETLServiceTests, WriteConflictIsHandledImmediately_NotDelayed)
|
|
||||||
{
|
|
||||||
// This test verifies that write conflicts are handled immediately via signal,
|
|
||||||
// not delayed until the next sequence notification (the old behavior)
|
|
||||||
|
|
||||||
auto mockMonitor = std::make_unique<testing::NiceMock<MockMonitor>>();
|
|
||||||
auto& mockMonitorRef = *mockMonitor;
|
|
||||||
std::function<void(uint32_t)> capturedNewSeqCallback;
|
|
||||||
|
|
||||||
    EXPECT_CALL(*monitorProvider_, make).WillOnce([&mockMonitor](auto, auto, auto, auto, auto) {
        return std::move(mockMonitor);
    });
    EXPECT_CALL(mockMonitorRef, subscribeToNewSequence).WillOnce([&capturedNewSeqCallback](auto callback) {
        capturedNewSeqCallback = callback;
        return boost::signals2::scoped_connection{};
    });
    EXPECT_CALL(mockMonitorRef, subscribeToDbStalled);
    EXPECT_CALL(mockMonitorRef, run);

    // Set cache to be in sync with DB to avoid syncCacheWithDb loop
    backend_->cache().update({}, kSEQ, false);
    EXPECT_CALL(*backend_, hardFetchLedgerRange)
        .WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
    EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
    EXPECT_CALL(*cacheLoader_, load(kSEQ));

    service_.run();
    systemState_->isWriting = true;

    // Emit StopWriting signal (simulating write conflict from Loader)
    EXPECT_CALL(mockWriteSignalCommandCallback_, Call(etl::SystemState::WriteCommand::StopWriting));
    systemState_->writeCommandSignal(etl::SystemState::WriteCommand::StopWriting);

    // The test context processes operations synchronously, so the handler should have run immediately
    // Verify that isWriting is immediately set to false
    EXPECT_FALSE(systemState_->isWriting);
}

TEST_F(ETLServiceTests, WriteCommandsAreSerializedOnStrand)
{
    auto mockMonitor = std::make_unique<testing::NiceMock<MockMonitor>>();

    EXPECT_CALL(*monitorProvider_, make).WillOnce([&mockMonitor](auto, auto, auto, auto, auto) {
        return std::move(mockMonitor);
    });

    // Set cache to be in sync with DB to avoid syncCacheWithDb loop
    backend_->cache().update({}, kSEQ, false);
    EXPECT_CALL(*backend_, hardFetchLedgerRange)
        .WillRepeatedly(testing::Return(data::LedgerRange{.minSequence = 1, .maxSequence = kSEQ}));
    EXPECT_CALL(*ledgers_, getMostRecent()).WillOnce(testing::Return(kSEQ));
    EXPECT_CALL(*cacheLoader_, load(kSEQ));

    service_.run();
    systemState_->isStrictReadonly = false;
    systemState_->isWriting = false;

    auto mockTaskManager1 = std::make_unique<testing::NiceMock<MockTaskManager>>();
    auto mockTaskManager2 = std::make_unique<testing::NiceMock<MockTaskManager>>();

    // Set up expectations for the sequence of write commands
    // The signals should be processed in order: StartWriting, StopWriting, StartWriting
    {
        testing::InSequence const seq;

        // First StartWriting
        EXPECT_CALL(mockWriteSignalCommandCallback_, Call(etl::SystemState::WriteCommand::StartWriting));
        EXPECT_CALL(*taskManagerProvider_, make(testing::_, testing::_, kSEQ + 1, testing::_))
            .WillOnce(testing::Return(std::move(mockTaskManager1)));

        // Then StopWriting
        EXPECT_CALL(mockWriteSignalCommandCallback_, Call(etl::SystemState::WriteCommand::StopWriting));

        // Finally second StartWriting
        EXPECT_CALL(mockWriteSignalCommandCallback_, Call(etl::SystemState::WriteCommand::StartWriting));
        EXPECT_CALL(*taskManagerProvider_, make(testing::_, testing::_, kSEQ + 1, testing::_))
            .WillOnce(testing::Return(std::move(mockTaskManager2)));
    }

    // Emit multiple signals rapidly - they should be serialized on the strand
    systemState_->writeCommandSignal(etl::SystemState::WriteCommand::StartWriting);
    systemState_->writeCommandSignal(etl::SystemState::WriteCommand::StopWriting);
    systemState_->writeCommandSignal(etl::SystemState::WriteCommand::StartWriting);

    // The test context processes operations synchronously, so all signals should have been processed
    // Final state should be writing (last signal was StartWriting)
    EXPECT_TRUE(systemState_->isWriting);
}

@@ -216,14 +216,15 @@ TEST_F(ETLLedgerPublisherTest, PublishLedgerHeaderCloseTimeGreaterThanNow)
 TEST_F(ETLLedgerPublisherTest, PublishLedgerSeqStopIsTrue)
 {
     auto dummyState = etl::SystemState{};
+    dummyState.isStopping = true;
     auto publisher = impl::LedgerPublisher(ctx, backend_, mockSubscriptionManagerPtr, dummyState);
-    publisher.stop();
     EXPECT_FALSE(publisher.publish(kSEQ, {}));
 }
 
 TEST_F(ETLLedgerPublisherTest, PublishLedgerSeqMaxAttempt)
 {
     auto dummyState = etl::SystemState{};
+    dummyState.isStopping = false;
     auto publisher = impl::LedgerPublisher(ctx, backend_, mockSubscriptionManagerPtr, dummyState);
 
     static constexpr auto kMAX_ATTEMPT = 2;
@@ -237,6 +238,7 @@ TEST_F(ETLLedgerPublisherTest, PublishLedgerSeqMaxAttempt)
 TEST_F(ETLLedgerPublisherTest, PublishLedgerSeqStopIsFalse)
 {
     auto dummyState = etl::SystemState{};
+    dummyState.isStopping = false;
     auto publisher = impl::LedgerPublisher(ctx, backend_, mockSubscriptionManagerPtr, dummyState);
 
     LedgerRange const range{.minSequence = kSEQ, .maxSequence = kSEQ};
@@ -19,7 +19,6 @@
 
 #include "data/Types.hpp"
 #include "etl/InitialLoadObserverInterface.hpp"
-#include "etl/LoaderInterface.hpp"
 #include "etl/Models.hpp"
 #include "etl/RegistryInterface.hpp"
 #include "etl/SystemState.hpp"
@@ -189,59 +188,3 @@ TEST_F(LoadingAssertTest, LoadInitialLedgerHasDataInDB)
 
     EXPECT_CLIO_ASSERT_FAIL({ [[maybe_unused]] auto unused = loader_.loadInitialLedger(data); });
 }
-
-TEST_F(LoadingTests, LoadWriteConflictEmitsStopWritingSignal)
-{
-    state_->isWriting = true; // writer is active
-    auto const data = createTestData();
-    testing::StrictMock<testing::MockFunction<void(etl::SystemState::WriteCommand)>> mockSignalCallback;
-
-    auto connection = state_->writeCommandSignal.connect(mockSignalCallback.AsStdFunction());
-
-    EXPECT_CALL(*mockRegistryPtr_, dispatch(data));
-    EXPECT_CALL(*backend_, doFinishWrites()).WillOnce(testing::Return(false)); // simulate write conflict
-    EXPECT_CALL(mockSignalCallback, Call(etl::SystemState::WriteCommand::StopWriting));
-
-    EXPECT_FALSE(state_->isWriterDecidingFallback);
-
-    auto result = loader_.load(data);
-    EXPECT_FALSE(result.has_value());
-    EXPECT_EQ(result.error(), etl::LoaderError::WriteConflict);
-    EXPECT_TRUE(state_->isWriterDecidingFallback);
-}
-
-TEST_F(LoadingTests, LoadSuccessDoesNotEmitSignal)
-{
-    state_->isWriting = true; // writer is active
-    auto const data = createTestData();
-    testing::StrictMock<testing::MockFunction<void(etl::SystemState::WriteCommand)>> mockSignalCallback;
-
-    auto connection = state_->writeCommandSignal.connect(mockSignalCallback.AsStdFunction());
-
-    EXPECT_CALL(*mockRegistryPtr_, dispatch(data));
-    EXPECT_CALL(*backend_, doFinishWrites()).WillOnce(testing::Return(true)); // success
-    // No signal should be emitted on success
-
-    EXPECT_FALSE(state_->isWriterDecidingFallback);
-
-    auto result = loader_.load(data);
-    EXPECT_TRUE(result.has_value());
-    EXPECT_FALSE(state_->isWriterDecidingFallback);
-}
-
-TEST_F(LoadingTests, LoadWhenNotWritingDoesNotCheckConflict)
-{
-    state_->isWriting = false; // not a writer
-    auto const data = createTestData();
-    testing::StrictMock<testing::MockFunction<void(etl::SystemState::WriteCommand)>> mockSignalCallback;
-
-    auto connection = state_->writeCommandSignal.connect(mockSignalCallback.AsStdFunction());
-
-    EXPECT_CALL(*mockRegistryPtr_, dispatch(data));
-    // doFinishWrites should not be called when not writing
-    EXPECT_CALL(*backend_, doFinishWrites()).Times(0);
-    // No signal should be emitted
-
-    auto result = loader_.load(data);
-    EXPECT_TRUE(result.has_value());
-}
@@ -260,7 +260,7 @@ struct RegistryTest : util::prometheus::WithPrometheus {
     }
 
 protected:
-    etl::SystemState state_;
+    etl::SystemState state_{};
 };
 
 } // namespace
@@ -1,73 +0,0 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2026, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include "etl/SystemState.hpp"
#include "util/MockPrometheus.hpp"
#include "util/config/ConfigDefinition.hpp"
#include "util/config/ConfigFileJson.hpp"
#include "util/config/ConfigValue.hpp"
#include "util/config/Types.hpp"

#include <boost/json/object.hpp>
#include <boost/json/parse.hpp>
#include <fmt/format.h>
#include <gtest/gtest.h>

#include <memory>

using namespace etl;
using namespace util::config;

struct SystemStateTest : util::prometheus::WithPrometheus {};

TEST_F(SystemStateTest, InitialValuesAreCorrect)
{
    auto state = SystemState{};

    EXPECT_FALSE(state.isStrictReadonly);
    EXPECT_FALSE(state.isWriting);
    EXPECT_TRUE(state.isLoadingCache);
    EXPECT_FALSE(state.isAmendmentBlocked);
    EXPECT_FALSE(state.isCorruptionDetected);
    EXPECT_FALSE(state.isWriterDecidingFallback);
}

struct SystemStateReadOnlyTest : util::prometheus::WithPrometheus, testing::WithParamInterface<bool> {};

TEST_P(SystemStateReadOnlyTest, MakeSystemStateWithReadOnly)
{
    auto const readOnlyValue = GetParam();
    auto const configJson = boost::json::parse(fmt::format(R"JSON({{"read_only": {}}})JSON", readOnlyValue));

    auto config = ClioConfigDefinition{{{"read_only", ConfigValue{ConfigType::Boolean}}}};
    auto const configFile = ConfigFileJson{configJson.as_object()};
    auto const errors = config.parse(configFile);
    ASSERT_FALSE(errors.has_value());

    auto state = SystemState::makeSystemState(config);

    EXPECT_EQ(state->isStrictReadonly, readOnlyValue);
    EXPECT_FALSE(state->isWriting);
    EXPECT_TRUE(state->isLoadingCache);
    EXPECT_FALSE(state->isAmendmentBlocked);
    EXPECT_FALSE(state->isCorruptionDetected);
    EXPECT_FALSE(state->isWriterDecidingFallback);
}

INSTANTIATE_TEST_SUITE_P(SystemStateTest, SystemStateReadOnlyTest, testing::Values(true, false));
@@ -1,162 +0,0 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2025, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include "etl/SystemState.hpp"
#include "etl/WriterState.hpp"
#include "util/MockPrometheus.hpp"

#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include <memory>

using namespace etl;
using namespace testing;

struct WriterStateTest : util::prometheus::WithPrometheus {
    std::shared_ptr<SystemState> systemState = std::make_shared<SystemState>();
    StrictMock<MockFunction<void(SystemState::WriteCommand)>> mockWriteCommand;
    WriterState writerState{systemState};

    WriterStateTest()
    {
        systemState->writeCommandSignal.connect(mockWriteCommand.AsStdFunction());
    }
};

TEST_F(WriterStateTest, IsWritingReturnsSystemStateValue)
{
    systemState->isWriting = false;
    EXPECT_FALSE(writerState.isWriting());

    systemState->isWriting = true;
    EXPECT_TRUE(writerState.isWriting());
}

TEST_F(WriterStateTest, StartWritingEmitsStartWritingCommand)
{
    systemState->isWriting = false;

    EXPECT_CALL(mockWriteCommand, Call(SystemState::WriteCommand::StartWriting));

    writerState.startWriting();
}

TEST_F(WriterStateTest, StartWritingDoesNothingWhenAlreadyWriting)
{
    systemState->isWriting = true;

    // No EXPECT_CALL - StrictMock will fail if any command is emitted

    writerState.startWriting();
}

TEST_F(WriterStateTest, GiveUpWritingEmitsStopWritingCommand)
{
    systemState->isWriting = true;

    EXPECT_CALL(mockWriteCommand, Call(SystemState::WriteCommand::StopWriting));

    writerState.giveUpWriting();
}

TEST_F(WriterStateTest, GiveUpWritingDoesNothingWhenNotWriting)
{
    systemState->isWriting = false;

    // No EXPECT_CALL - StrictMock will fail if any command is emitted

    writerState.giveUpWriting();
}

TEST_F(WriterStateTest, IsFallbackReturnsFalseByDefault)
{
    EXPECT_FALSE(writerState.isFallback());
}

TEST_F(WriterStateTest, SetWriterDecidingFallbackSetsFlag)
{
    EXPECT_FALSE(systemState->isWriterDecidingFallback);

    writerState.setWriterDecidingFallback();

    EXPECT_TRUE(systemState->isWriterDecidingFallback);
}

TEST_F(WriterStateTest, IsFallbackReturnsSystemStateValue)
{
    systemState->isWriterDecidingFallback = false;
    EXPECT_FALSE(writerState.isFallback());

    systemState->isWriterDecidingFallback = true;
    EXPECT_TRUE(writerState.isFallback());
}

TEST_F(WriterStateTest, IsReadOnlyReturnsSystemStateValue)
{
    systemState->isStrictReadonly = false;
    EXPECT_FALSE(writerState.isReadOnly());

    systemState->isStrictReadonly = true;
    EXPECT_TRUE(writerState.isReadOnly());
}

TEST_F(WriterStateTest, IsLoadingCacheReturnsSystemStateValue)
{
    systemState->isLoadingCache = false;
    EXPECT_FALSE(writerState.isLoadingCache());

    systemState->isLoadingCache = true;
    EXPECT_TRUE(writerState.isLoadingCache());
}

TEST_F(WriterStateTest, CloneCreatesNewInstanceWithSameSystemState)
{
    systemState->isWriting = true;
    systemState->isStrictReadonly = true;
    systemState->isLoadingCache = false;

    auto cloned = writerState.clone();

    ASSERT_NE(cloned.get(), &writerState);
    EXPECT_TRUE(cloned->isWriting());
    EXPECT_TRUE(cloned->isReadOnly());
    EXPECT_FALSE(cloned->isLoadingCache());
}

TEST_F(WriterStateTest, ClonedInstanceSharesSystemState)
{
    auto cloned = writerState.clone();

    systemState->isWriting = true;

    EXPECT_TRUE(writerState.isWriting());
    EXPECT_TRUE(cloned->isWriting());

    systemState->isWriting = false;

    EXPECT_FALSE(writerState.isWriting());
    EXPECT_FALSE(cloned->isWriting());

    EXPECT_FALSE(writerState.isFallback());
    EXPECT_FALSE(cloned->isFallback());
    cloned->setWriterDecidingFallback();
    EXPECT_TRUE(writerState.isFallback());
    EXPECT_TRUE(cloned->isFallback());
}

tests/unit/etlng/LedgerPublisherTests.cpp (new file, 356 lines)
@@ -0,0 +1,356 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2025, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include "data/DBHelpers.hpp"
#include "data/Types.hpp"
#include "etl/SystemState.hpp"
#include "util/AsioContextTestFixture.hpp"
#include "util/MockBackendTestFixture.hpp"
#include "util/MockPrometheus.hpp"
#include "util/MockSubscriptionManager.hpp"
#include "util/TestObject.hpp"
#include "util/config/ConfigDefinition.hpp"

#include <etlng/impl/LedgerPublisher.hpp>
#include <fmt/format.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <xrpl/basics/chrono.h>
#include <xrpl/protocol/Indexes.h>
#include <xrpl/protocol/LedgerHeader.h>

#include <chrono>
#include <optional>
#include <vector>

using namespace testing;
using namespace etlng;
using namespace data;
using namespace std::chrono;

namespace {

constexpr auto kACCOUNT = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn";
constexpr auto kACCOUNT2 = "rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun";
constexpr auto kLEDGER_HASH = "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652";
constexpr auto kSEQ = 30;
constexpr auto kAGE = 800;
constexpr auto kAMOUNT = 100;
constexpr auto kFEE = 3;
constexpr auto kFINAL_BALANCE = 110;
constexpr auto kFINAL_BALANCE2 = 30;

MATCHER_P(ledgerHeaderMatcher, expectedHeader, "Headers match")
{
    return arg.seq == expectedHeader.seq && arg.hash == expectedHeader.hash &&
        arg.closeTime == expectedHeader.closeTime;
}

} // namespace

struct ETLLedgerPublisherNgTest : util::prometheus::WithPrometheus, MockBackendTestStrict, SyncAsioContextTest {
    util::config::ClioConfigDefinition cfg{{}};
    StrictMockSubscriptionManagerSharedPtr mockSubscriptionManagerPtr;
};

TEST_F(ETLLedgerPublisherNgTest, PublishLedgerHeaderSkipDueToAge)
{
    // Use kAGE (800) which is > MAX_LEDGER_AGE_SECONDS (600) to test skipping
    auto const dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, kAGE);
    auto dummyState = etl::SystemState{};
    auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);

    backend_->setRange(kSEQ - 1, kSEQ);
    publisher.publish(dummyLedgerHeader);

    // Verify last published sequence is set immediately
    EXPECT_TRUE(publisher.getLastPublishedSequence());
    EXPECT_EQ(publisher.getLastPublishedSequence().value(), kSEQ);

    // Since age > MAX_LEDGER_AGE_SECONDS, these should not be called
    EXPECT_CALL(*backend_, doFetchLedgerObject).Times(0);
    EXPECT_CALL(*backend_, fetchAllTransactionsInLedger).Times(0);
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubLedger).Times(0);
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubBookChanges).Times(0);
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubTransaction).Times(0);

    ctx_.run();
}

TEST_F(ETLLedgerPublisherNgTest, PublishLedgerHeaderWithinAgeLimit)
{
    // Use age 0 which is < MAX_LEDGER_AGE_SECONDS to ensure publishing happens
    auto const dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, 0);
    auto dummyState = etl::SystemState{};
    auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);

    backend_->setRange(kSEQ - 1, kSEQ);
    publisher.publish(dummyLedgerHeader);

    // Verify last published sequence is set immediately
    EXPECT_TRUE(publisher.getLastPublishedSequence());
    EXPECT_EQ(publisher.getLastPublishedSequence().value(), kSEQ);

    EXPECT_CALL(*backend_, doFetchLedgerObject(ripple::keylet::fees().key, kSEQ, _))
        .WillOnce(Return(createLegacyFeeSettingBlob(1, 2, 3, 4, 0)));
    EXPECT_CALL(*backend_, fetchAllTransactionsInLedger(kSEQ, _))
        .WillOnce(Return(std::vector<TransactionAndMetadata>{}));

    EXPECT_CALL(*mockSubscriptionManagerPtr, pubLedger(_, _, fmt::format("{}-{}", kSEQ - 1, kSEQ), 0));
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubBookChanges);

    ctx_.run();
    EXPECT_TRUE(publisher.lastPublishAgeSeconds() <= 1);
}

TEST_F(ETLLedgerPublisherNgTest, PublishLedgerHeaderIsWritingTrue)
{
    auto dummyState = etl::SystemState{};
    dummyState.isWriting = true;
    auto const dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, kAGE);
    auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
    publisher.publish(dummyLedgerHeader);

    EXPECT_TRUE(publisher.getLastPublishedSequence());
    EXPECT_EQ(publisher.getLastPublishedSequence().value(), kSEQ);

    ctx_.run();
    EXPECT_FALSE(backend_->fetchLedgerRange());
}

TEST_F(ETLLedgerPublisherNgTest, PublishLedgerHeaderInRange)
{
    auto dummyState = etl::SystemState{};
    dummyState.isWriting = true;

    auto const dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, 0); // age is 0
    auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
    backend_->setRange(kSEQ - 1, kSEQ);

    publisher.publish(dummyLedgerHeader);

    EXPECT_CALL(*backend_, doFetchLedgerObject(ripple::keylet::fees().key, kSEQ, _))
        .WillOnce(Return(createLegacyFeeSettingBlob(1, 2, 3, 4, 0)));

    TransactionAndMetadata t1;
    t1.transaction =
        createPaymentTransactionObject(kACCOUNT, kACCOUNT2, kAMOUNT, kFEE, kSEQ).getSerializer().peekData();
    t1.metadata = createPaymentTransactionMetaObject(kACCOUNT, kACCOUNT2, kFINAL_BALANCE, kFINAL_BALANCE2)
                      .getSerializer()
                      .peekData();
    t1.ledgerSequence = kSEQ;

    EXPECT_CALL(*backend_, fetchAllTransactionsInLedger).WillOnce(Return(std::vector<TransactionAndMetadata>{t1}));

    EXPECT_TRUE(publisher.getLastPublishedSequence());
    EXPECT_EQ(publisher.getLastPublishedSequence().value(), kSEQ);

    EXPECT_CALL(*mockSubscriptionManagerPtr, pubLedger(_, _, fmt::format("{}-{}", kSEQ - 1, kSEQ), 1));
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubBookChanges);
    // mock 1 transaction
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubTransaction);

    ctx_.run();
    EXPECT_TRUE(publisher.lastPublishAgeSeconds() <= 1);
}

TEST_F(ETLLedgerPublisherNgTest, PublishLedgerHeaderCloseTimeGreaterThanNow)
{
    auto dummyState = etl::SystemState{};
    dummyState.isWriting = true;

    auto dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, 0);
    auto const nowPlus10 = system_clock::now() + seconds(10);
    auto const closeTime = duration_cast<seconds>(nowPlus10.time_since_epoch()).count() - kRIPPLE_EPOCH_START;
    dummyLedgerHeader.closeTime = ripple::NetClock::time_point{seconds{closeTime}};

    backend_->setRange(kSEQ - 1, kSEQ);

    auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
    publisher.publish(dummyLedgerHeader);

    EXPECT_CALL(*backend_, doFetchLedgerObject(ripple::keylet::fees().key, kSEQ, _))
        .WillOnce(Return(createLegacyFeeSettingBlob(1, 2, 3, 4, 0)));

    TransactionAndMetadata t1;
    t1.transaction =
        createPaymentTransactionObject(kACCOUNT, kACCOUNT2, kAMOUNT, kFEE, kSEQ).getSerializer().peekData();
    t1.metadata = createPaymentTransactionMetaObject(kACCOUNT, kACCOUNT2, kFINAL_BALANCE, kFINAL_BALANCE2)
                      .getSerializer()
                      .peekData();
    t1.ledgerSequence = kSEQ;

    EXPECT_CALL(*backend_, fetchAllTransactionsInLedger(kSEQ, _))
        .WillOnce(Return(std::vector<TransactionAndMetadata>{t1}));

    EXPECT_TRUE(publisher.getLastPublishedSequence());
    EXPECT_EQ(publisher.getLastPublishedSequence().value(), kSEQ);

    EXPECT_CALL(*mockSubscriptionManagerPtr, pubLedger(_, _, fmt::format("{}-{}", kSEQ - 1, kSEQ), 1));
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubBookChanges);
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubTransaction);

    ctx_.run();
    EXPECT_TRUE(publisher.lastPublishAgeSeconds() <= 1);
}

TEST_F(ETLLedgerPublisherNgTest, PublishLedgerSeqStopIsTrue)
{
    auto dummyState = etl::SystemState{};
    dummyState.isStopping = true;
    auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
    EXPECT_FALSE(publisher.publish(kSEQ, {}));
}

TEST_F(ETLLedgerPublisherNgTest, PublishLedgerSeqMaxAttempt)
{
    auto dummyState = etl::SystemState{};
    dummyState.isStopping = false;
    auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);

    static constexpr auto kMAX_ATTEMPT = 2;

    LedgerRange const range{.minSequence = kSEQ - 1, .maxSequence = kSEQ - 1};
    EXPECT_CALL(*backend_, hardFetchLedgerRange).Times(kMAX_ATTEMPT).WillRepeatedly(Return(range));

    EXPECT_FALSE(publisher.publish(kSEQ, kMAX_ATTEMPT, std::chrono::milliseconds{1}));
}

TEST_F(ETLLedgerPublisherNgTest, PublishLedgerSeqStopIsFalse)
{
    auto dummyState = etl::SystemState{};
    dummyState.isStopping = false;
    auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);

    LedgerRange const range{.minSequence = kSEQ, .maxSequence = kSEQ};
    EXPECT_CALL(*backend_, hardFetchLedgerRange).WillOnce(Return(range));

    auto const dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, kAGE);
    EXPECT_CALL(*backend_, fetchLedgerBySequence(kSEQ, _)).WillOnce(Return(dummyLedgerHeader));

    EXPECT_TRUE(publisher.publish(kSEQ, {}));
    ctx_.run();
}

TEST_F(ETLLedgerPublisherNgTest, PublishMultipleTxInOrder)
{
    auto dummyState = etl::SystemState{};
    dummyState.isWriting = true;

    auto const dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, 0); // age is 0
    auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
    backend_->setRange(kSEQ - 1, kSEQ);

    publisher.publish(dummyLedgerHeader);

    EXPECT_CALL(*backend_, doFetchLedgerObject(ripple::keylet::fees().key, kSEQ, _))
        .WillOnce(Return(createLegacyFeeSettingBlob(1, 2, 3, 4, 0)));

    // t1 index > t2 index
    TransactionAndMetadata t1;
    t1.transaction =
        createPaymentTransactionObject(kACCOUNT, kACCOUNT2, kAMOUNT, kFEE, kSEQ).getSerializer().peekData();
    t1.metadata = createPaymentTransactionMetaObject(kACCOUNT, kACCOUNT2, kFINAL_BALANCE, kFINAL_BALANCE2, 2)
                      .getSerializer()
                      .peekData();
    t1.ledgerSequence = kSEQ;
    t1.date = 1;
    TransactionAndMetadata t2;
    t2.transaction =
        createPaymentTransactionObject(kACCOUNT, kACCOUNT2, kAMOUNT, kFEE, kSEQ).getSerializer().peekData();
    t2.metadata = createPaymentTransactionMetaObject(kACCOUNT, kACCOUNT2, kFINAL_BALANCE, kFINAL_BALANCE2, 1)
                      .getSerializer()
                      .peekData();
    t2.ledgerSequence = kSEQ;
    t2.date = 2;

    EXPECT_CALL(*backend_, fetchAllTransactionsInLedger(kSEQ, _))
        .WillOnce(Return(std::vector<TransactionAndMetadata>{t1, t2}));

    EXPECT_TRUE(publisher.getLastPublishedSequence());
    EXPECT_EQ(publisher.getLastPublishedSequence().value(), kSEQ);

    EXPECT_CALL(*mockSubscriptionManagerPtr, pubLedger(_, _, fmt::format("{}-{}", kSEQ - 1, kSEQ), 2));
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubBookChanges);

    Sequence const s;
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubTransaction(t2, _)).InSequence(s);
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubTransaction(t1, _)).InSequence(s);

    ctx_.run();
    EXPECT_TRUE(publisher.lastPublishAgeSeconds() <= 1);
}

TEST_F(ETLLedgerPublisherNgTest, PublishVeryOldLedgerShouldSkip)
{
    auto dummyState = etl::SystemState{};
    dummyState.isWriting = true;

    // Create a ledger header with age (800) greater than MAX_LEDGER_AGE_SECONDS (600)
    auto const dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, 800);
    auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
    backend_->setRange(kSEQ - 1, kSEQ);

    publisher.publish(dummyLedgerHeader);

    EXPECT_CALL(*mockSubscriptionManagerPtr, pubLedger).Times(0);
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubBookChanges).Times(0);
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubTransaction).Times(0);

    EXPECT_TRUE(publisher.getLastPublishedSequence());
    EXPECT_EQ(publisher.getLastPublishedSequence().value(), kSEQ);

    ctx_.run();
}

TEST_F(ETLLedgerPublisherNgTest, PublishMultipleLedgersInQuickSuccession)
{
    auto dummyState = etl::SystemState{};
    dummyState.isWriting = true;

    auto const dummyLedgerHeader1 = createLedgerHeader(kLEDGER_HASH, kSEQ, 0);
    auto const dummyLedgerHeader2 = createLedgerHeader(kLEDGER_HASH, kSEQ + 1, 0);
    auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
    backend_->setRange(kSEQ - 1, kSEQ + 1);

    // Publish two ledgers in quick succession
    publisher.publish(dummyLedgerHeader1);
    publisher.publish(dummyLedgerHeader2);

    EXPECT_CALL(*backend_, doFetchLedgerObject(ripple::keylet::fees().key, kSEQ, _))
        .WillOnce(Return(createLegacyFeeSettingBlob(1, 2, 3, 4, 0)));
    EXPECT_CALL(*backend_, doFetchLedgerObject(ripple::keylet::fees().key, kSEQ + 1, _))
        .WillOnce(Return(createLegacyFeeSettingBlob(1, 2, 3, 4, 0)));

    EXPECT_CALL(*backend_, fetchAllTransactionsInLedger(kSEQ, _))
        .WillOnce(Return(std::vector<TransactionAndMetadata>{}));
    EXPECT_CALL(*backend_, fetchAllTransactionsInLedger(kSEQ + 1, _))
        .WillOnce(Return(std::vector<TransactionAndMetadata>{}));

    Sequence const s;
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubLedger(ledgerHeaderMatcher(dummyLedgerHeader1), _, _, _)).InSequence(s);
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubBookChanges(ledgerHeaderMatcher(dummyLedgerHeader1), _)).InSequence(s);
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubLedger(ledgerHeaderMatcher(dummyLedgerHeader2), _, _, _)).InSequence(s);
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubBookChanges(ledgerHeaderMatcher(dummyLedgerHeader2), _)).InSequence(s);

    EXPECT_TRUE(publisher.getLastPublishedSequence());
    EXPECT_EQ(publisher.getLastPublishedSequence().value(), kSEQ + 1);

    ctx_.run();
}
@@ -72,11 +72,11 @@ protected:
     }
 
     StrictMockAmendmentCenterSharedPtr mockAmendmentCenterPtr_;
+    std::shared_ptr<SubscriptionManager> subscriptionManagerPtr_ =
+        std::make_shared<SubscriptionManager>(Execution(2), backend_, mockAmendmentCenterPtr_);
     web::SubscriptionContextPtr session_ = std::make_shared<MockSession>();
     MockSession* sessionPtr_ = dynamic_cast<MockSession*>(session_.get());
     uint32_t const networkID_ = 123;
-    std::shared_ptr<SubscriptionManager> subscriptionManagerPtr_ =
-        std::make_shared<SubscriptionManager>(Execution(2), backend_, mockAmendmentCenterPtr_);
 };
 
 using SubscriptionManagerTest = SubscriptionManagerBaseTest<util::async::SyncExecutionContext>;
@@ -187,7 +187,7 @@ TEST_P(ChannelSpawnTest, MultipleSendersMultipleReceivers)
     context_.withExecutor([this](auto& executor) {
         auto [sender, receiver] = util::Channel<int>::create(executor, 10);
         util::Mutex<std::vector<int>> receivedValues;
-        std::vector receivers(kNUM_RECEIVERS, receiver);
+        std::vector<decltype(receiver)> receivers(kNUM_RECEIVERS, receiver);
 
         for (auto receiverId = 0uz; receiverId < kNUM_RECEIVERS; ++receiverId) {
             util::spawn(
@@ -402,7 +402,7 @@ TEST_P(ChannelCallbackTest, MultipleSendersMultipleReceivers)
     context_.withExecutor([this](auto& executor) {
         auto [sender, receiver] = util::Channel<int>::create(executor, 10);
         util::Mutex<std::vector<int>> receivedValues;
-        std::vector receivers(kNUM_RECEIVERS, receiver);
+        std::vector<decltype(receiver)> receivers(kNUM_RECEIVERS, receiver);
 
         for (auto receiverId = 0uz; receiverId < kNUM_RECEIVERS; ++receiverId) {
             auto& receiverRef = receivers[receiverId];
@@ -528,8 +528,8 @@ TEST_P(ChannelCallbackTest, TryMethodsWithClosedChannel)
     context_.withExecutor([this](auto& executor) {
         std::atomic_bool testCompleted{false};
         auto [sender, receiver] = util::Channel<int>::create(executor, 3);
-        auto receiverPtr = std::make_shared<util::Channel<int>::Receiver>(std::move(receiver));
-        auto senderPtr = std::make_shared<std::optional<util::Channel<int>::Sender>>(std::move(sender));
+        auto receiverPtr = std::make_shared<decltype(receiver)>(std::move(receiver));
+        auto senderPtr = std::make_shared<std::optional<decltype(sender)>>(std::move(sender));
 
         boost::asio::post(executor, [receiverPtr, senderPtr, &testCompleted]() {
             EXPECT_TRUE(senderPtr->value().trySend(100));

Some files were not shown because too many files have changed in this diff.